|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:52:20.230722Z" |
|
}, |
|
"title": "Offensive Language Detection Explained", |
|
"authors": [ |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Risch", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Potsdam", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Ruff", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Passau", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ralf", |
|
"middle": [], |
|
"last": "Krestel", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Potsdam", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Many online discussion platforms use a content moderation process, where human moderators check user comments for offensive language and other rule violations. It is the moderator's decision which comments to remove from the platform because of violations and which ones to keep. Research so far focused on automating this decision process in the form of supervised machine learning for a classification task. However, even with machine-learned models achieving better classification accuracy than human experts in some scenarios, there is still a reason why human moderators are preferred. In contrast to black-box models, such as neural networks, humans can give explanations for their decision to remove a comment. For example, they can point out which phrase in the comment is offensive or what subtype of offensiveness applies. In this paper, we analyze and compare four attribution-based explanation methods for different offensive language classifiers: an interpretable machine learning model (naive Bayes), a model-agnostic explanation method (LIME), a model-based explanation method (LRP), and a self-explanatory model (LSTM with an attention mechanism). We evaluate these approaches with regard to their explanatory power and their ability to point out which words are most relevant for a classifier's decision. We find that the more complex models achieve better classification accuracy while also providing better explanations than the simpler models.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Many online discussion platforms use a content moderation process, where human moderators check user comments for offensive language and other rule violations. It is the moderator's decision which comments to remove from the platform because of violations and which ones to keep. Research so far focused on automating this decision process in the form of supervised machine learning for a classification task. However, even with machine-learned models achieving better classification accuracy than human experts in some scenarios, there is still a reason why human moderators are preferred. In contrast to black-box models, such as neural networks, humans can give explanations for their decision to remove a comment. For example, they can point out which phrase in the comment is offensive or what subtype of offensiveness applies. In this paper, we analyze and compare four attribution-based explanation methods for different offensive language classifiers: an interpretable machine learning model (naive Bayes), a model-agnostic explanation method (LIME), a model-based explanation method (LRP), and a self-explanatory model (LSTM with an attention mechanism). We evaluate these approaches with regard to their explanatory power and their ability to point out which words are most relevant for a classifier's decision. We find that the more complex models achieve better classification accuracy while also providing better explanations than the simpler models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Online news platforms (e.g., New York Times), question answering platforms (e.g., Stack Overflow), collaborative projects (e.g., Wikipedia), and social networks (e.g., Facebook): all these social media platforms have one thing in common. They provide a discussion space for users, where content moderators are employed to keep a respectful tone, and foster fruitful discussions. Moderators ensure that the platform's discussion rules are adhered to, including the ban of offensive language. They enforce these rules by partially or entirely removing a user comment. Typically, a platform's rules are listed in the form of guidelines, and they overlap considerably with the \"netiquette\", the basic rules about communication over the Internet. However, that does not mean all users have these rules in mind when they post comments. Moderators on online discussion platforms, therefore, explain why they intervene. For example, they replace a removed comment with the following text: \"Removed. Please refrain from insults.\" or \"Removed. Please refrain from insinuations and personal attacks.\". In case they ultimately close a comment section, they post a final comment, for example, stating: \"This comment section has been closed due to (racist) generalizations, baseless assumptions up to conspiracy theories and extreme polemics.\". On the one hand, the idea behind these explanations is transparency. On the other hand, they aim to educate users to adhere to the discussion rules. Research on comment classification focuses on supervised machine learning approaches and often uses black-box models. For example, there is research on detecting hate speech (Gao and Huang, 2017) , racism/sexism (Waseem and Hovy, 2016) or offensive/aggressive/abusive language (Stru\u00df et al., 2019; Kumar et al., 2018) . However, to support moderators, semi-automated comment moderation in the form of a pre-classification of comments (Risch and Krestel, 2018) is not enough. Black-box models lack the ability to give explanations for their automated decisions. Therefore, they cannot be properly applied to comment moderation. Users and moderators are skeptical about an incomprehensible automation. Explanations help to build trust and increase the acceptance of machine-learned classifiers. Only then can a fair and transparent moderation process be ensured. There are two more reasons for explanations in general. First, there are legal reasons to utilize machine-learned classifiers only if they can give explanations for their decisions. For example, under certain circumstances, the General Data Protection Regulation (GDPR) in the EU grants users the right to \"obtain an explanation of the decision reached\" if they are significantly affected by automated decision-making, e.g., if a credit application is refused. 1 A second reason is that explanations help to reveal the strengths and weaknesses of a model. They could also benefit the task of identifying a potential bias in a model's decisions. Researchers can then work on improving the models based on these insights.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1654, |
|
"end": 1675, |
|
"text": "(Gao and Huang, 2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1692, |
|
"end": 1715, |
|
"text": "(Waseem and Hovy, 2016)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1757, |
|
"end": 1777, |
|
"text": "(Stru\u00df et al., 2019;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1778, |
|
"end": 1797, |
|
"text": "Kumar et al., 2018)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1914, |
|
"end": 1939, |
|
"text": "(Risch and Krestel, 2018)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Need for Explanations", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Contributions The main contribution of this paper is the evaluation and comparison of attribution-based explanation methods for offensive language detection. To this end, we use a word deletion task to compare an interpretable machine learning model (naive Bayes), a model-agnostic explanation method (LIME), a model-based explanation method (LRP), and a self-explanatory model (LSTM with an attention mechanism). In a second experiment, we use the explanatory power index (EPI) as a metric to evaluate the approaches. Further, we take into account the classifi-cation accuracy of each approach and discuss strengths and weaknesses in the application context of automated content moderation. Based on this discussion, we give directions for future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Need for Explanations", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Outline In the following, we summarize related work on explanation methods in Section 2 and describe which of these methods and what classifiers we implement for offensive language detection in Section 3. Section 4 evaluates the methods with the help of a word deletion task and the explanatory power index (EPI), while Section 5 discusses the results. We conclude with a summary of the contributions and an outlook on future work in Section 6.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Need for Explanations", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "There is plenty of research on offensive language detection, and the classification accuracy for this task drastically increased in recent years -not least due to deep learning approaches for natural language processing. However, one aspect of this classification task has gone mostly unnoticed: the need for explaining classification results. More precisely, research on explanation methods distinguishes explainability from interpretability. The former refers to locally comprehending individual decisions, while the latter refers to globally comprehending the decision function (Do\u0161ilovi\u0107 et al., 2018; Monroe, 2018; Montavon et al., 2017) . Unfortunately, there is no universal definition of these two terms. The definition used in this paper is:", |
|
"cite_spans": [ |
|
{ |
|
"start": 581, |
|
"end": 605, |
|
"text": "(Do\u0161ilovi\u0107 et al., 2018;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 606, |
|
"end": 619, |
|
"text": "Monroe, 2018;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 620, |
|
"end": 642, |
|
"text": "Montavon et al., 2017)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "\u2022 A decision function f is called explainable if the decision f (x) for each single input x \u2208 X (in domain X) can be explained in understandable terms to humans.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "\u2022 A decision function f is called interpretable if the whole function f (for the whole domain X) can be explained in understandable terms to humans.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "In the field of image classification, CNN-based explanation methods are prominent. For example, DeConvNet (Zeiler and Fergus, 2014) inverts the convolutional operations to gain explanations and an approach by Simonyan et al. (2014) applies sensitivity analysis to achieve similar results. There have been several follow-up papers that compare these two approaches and propose combinations (Kindermans et al., 2018; Springenberg et al., 2015) . Explanation methods for text classification are rarely studied. For example, Nguyen (2018) compares human evaluation and automatic evaluation for explanation methods. The comparison uses the twenty newsgroups dataset and a dataset of movie reviews. To the best of our knowledge, the only publication on explanation methods in the field of offensive language detection is by Carton et al. (2018) . The authors use an attention mechanism to generate explanations for the detection of personal attacks. An empirical study by Chakrabarty et al. (2019) shows the importance of contextual or self-attention for abusive language detection. Whether attention weights can also be used as explanations is under discussion (Wiegreffe and Pinter, 2019; Jain and Wallace, 2019) . In this paper, we consider a long short-term memory (LSTM) neural network (Hochreiter and Schmidhuber, 1997; Gers et al., 1999) with an attention mechanism (Yang et al., 2016) as an example of a self-explanatory model. The inherent attention weights provide attribution-based explanations. Further, we consider a naive Bayes classifier, which is an example of an interpretable model. A classification result (and the entire model) can be understood with the help of the discrete conditional probabilities in the classifier. The relevance of a word w is the probability that the class c is predicted given w:", |
|
"cite_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 131, |
|
"text": "(Zeiler and Fergus, 2014)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 209, |
|
"end": 231, |
|
"text": "Simonyan et al. (2014)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 389, |
|
"end": 414, |
|
"text": "(Kindermans et al., 2018;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 415, |
|
"end": 441, |
|
"text": "Springenberg et al., 2015)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 818, |
|
"end": 838, |
|
"text": "Carton et al. (2018)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 966, |
|
"end": 991, |
|
"text": "Chakrabarty et al. (2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1156, |
|
"end": 1184, |
|
"text": "(Wiegreffe and Pinter, 2019;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 1185, |
|
"end": 1208, |
|
"text": "Jain and Wallace, 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1285, |
|
"end": 1319, |
|
"text": "(Hochreiter and Schmidhuber, 1997;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 1320, |
|
"end": 1338, |
|
"text": "Gers et al., 1999)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "P (c|w) = P (c) \u2022 P (w|c) P (w)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
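{

"text": "A minimal illustrative sketch of this word relevance, assuming scikit-learn (the toy data and variable names below are placeholders, not the authors' published code):\n\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\n\ntrain_texts = ['you are an ignorant fool', 'thank you for the helpful edit']  # toy data, not the Jigsaw corpus\ntrain_labels = [1, 0]  # 1 = toxic, 0 = non-toxic\n\nvectorizer = CountVectorizer()\nX = vectorizer.fit_transform(train_texts)\nnb = MultinomialNB().fit(X, train_labels)\n\ndef word_relevance(word, c=1):\n    # P(c|w) = P(c) * P(w|c) / P(w), read off the model's log priors and log likelihoods\n    idx = vectorizer.vocabulary_[word]\n    p_w_given_c = np.exp(nb.feature_log_prob_[c, idx])\n    p_c = np.exp(nb.class_log_prior_[c])\n    p_w = sum(np.exp(nb.class_log_prior_[k] + nb.feature_log_prob_[k, idx]) for k in range(len(nb.classes_)))\n    return p_c * p_w_given_c / p_w\n\nprint(word_relevance('fool'))  # relevance of the word fool for the toxic class",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Related Work",

"sec_num": "2."

},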
|
{ |
|
"text": "The attention-based LSTM and the naive Bayes classifier are two a priori explainable models. We also consider two post-hoc explanation methods in our paper: layerwise relevance propagation (LRP) and local interpretable model-agnostic explanations (LIME). We describe these two methods in the following. The idea behind LRP (Bach et al., 2015) is to backpropagate the relevance scores from the output layer to the input layer of a neural network. To this end, the relevance of each input value (feature) is derived from the neuron activations in the output layer. This procedure makes LRP a model-based explanation method. The idea behind LIME (Ribeiro et al., 2016) is to use a local approximation of the classifier f at a point x and its neighborhood. This local approximation needs to be an interpretable classifier and a good approximation of f in the local neighborhood of point x. The authors evaluate their model-agnostic explanation method with text and image classification tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 323, |
|
"end": 342, |
|
"text": "(Bach et al., 2015)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 643, |
|
"end": 665, |
|
"text": "(Ribeiro et al., 2016)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
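{

"text": "A minimal sketch of how LIME produces such word relevance scores for a text classifier, assuming the lime and scikit-learn packages (the toy pipeline is a placeholder, not one of the classifiers from this paper):\n\nfrom lime.lime_text import LimeTextExplainer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.pipeline import make_pipeline\n\ntrain_texts = ['you ignorant fool', 'thanks for the helpful edit', 'what a stupid idiot', 'great article, well written']\ntrain_labels = [1, 0, 1, 0]  # 1 = toxic, 0 = non-toxic\nclf = make_pipeline(TfidfVectorizer(), MultinomialNB()).fit(train_texts, train_labels)\n\nexplainer = LimeTextExplainer(class_names=['non-toxic', 'toxic'])\n# LIME perturbs the comment by removing words, queries clf.predict_proba on the perturbed\n# samples, and fits a local linear model whose weights serve as word relevance scores.\nexp = explainer.explain_instance('you ignorant fool', clf.predict_proba, num_features=3, labels=(1,))\nprint(exp.as_list(label=1))  # list of (word, relevance) pairs for the toxic class",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Related Work",

"sec_num": "2."

},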
|
{ |
|
"text": "For our comparative study, we implement a variety of classifiers for offensive language detection and suitable explanation methods. To train the classifiers, we use a dataset of toxic comments published by Google Jigsaw in the context of a Kaggle challenge. 2 The Python code for all classifiers, a web application to visualize the explanations, and the training and evaluation procedures are published online. 3", |
|
"cite_spans": [ |
|
{ |
|
"start": 258, |
|
"end": 259, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explanation Methods", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "There are four different classifiers that we implement and pair with different attribution-based explanation methods. First, there is a multinomial naive Bayes classifier, which serves as a baseline. It is interpretable by default and provides explanations in the form of conditional probabilities. Further, we implement a support vector machine (SVM) and a long short-term memory (LSTM) neural network. The input to the SVM is a TF-IDF vector representation of the unigrams in the comment text. GloVe word embeddings (Pennington et al., 2014) serve as the input to the neural network. Both the SVM and the LSTM network are paired with the two explanation methods LRP and LIME. To this end, we adapt the LRP implementation by Arras et al. 4 ", |
|
"cite_spans": [ |
|
{ |
|
"start": 518, |
|
"end": 543, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 739, |
|
"end": 740, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Classifiers", |
|
"sec_num": "3.1." |
|
}, |
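{

"text": "A minimal sketch of the SVM setup, TF-IDF vectors over unigrams feeding one binary linear SVM per label in the one-against-all scheme described in Section 3.3., assuming scikit-learn (the toy comments and label matrix are placeholders):\n\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.svm import LinearSVC\n\ntrain_texts = ['you ignorant fool', 'thanks for the helpful edit']\ntrain_labels = np.array([[1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]])  # toy indicator matrix for the six labels\n\n# Unigram TF-IDF features feed a linear-kernel SVM; one binary classifier per label.\nsvm = make_pipeline(TfidfVectorizer(ngram_range=(1, 1)), OneVsRestClassifier(LinearSVC(C=0.6)))\nsvm.fit(train_texts, train_labels)\nprint(svm.predict(['you ignorant fool']))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Classifiers",

"sec_num": "3.1."

},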
|
{ |
|
"text": "The toxic comments dataset contains about 220,000 comments, each labeled with regard to six non-exclusive classes: toxic, severe toxic, insult, threat, obscene, and identity hate. Table 1 shows the class distribution in the training set and test set. Note that a comment is always labeled as toxic if one of the other labels applies. Even if none of the other labels apply, it can still be labeled as toxic.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 180, |
|
"end": 187, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "3.2." |
|
}, |
|
{ |
|
"text": "The GloVe word embeddings are trained from scratch on the training and test set. We restrict the input length of the basic LSTM network and the LSTM network with an attention mechanism to a maximum of 250 words. Further, we use 50 LSTM units, which means the output of this layer is 50-dimensional. The training of the networks runs for five, respectively, three epochs with the Adam optimizer until the validation loss increases. The task on our dataset is a multi-label classification task. Our network architecture addresses this multi-label task by sharing the same LSTM layer across all class labels. However, for each label, an independent fully-connected layer follows after the output of the last LSTM unit. The attention mechanism is also trained for each label individually and fits in between the LSTM output and the following fullyconnected layer. SVM and naive Bayes use stemming to reduce the vocabulary size. They are trained according to a one-against-all scheme to conform to the multi-label classification task. The trained models therefore can be seen as six independent 5 https://github.com/marcotcr/lime binary naive Bayes classifiers, respectively, six independent binary SVMs. The SVM uses a linear kernel. There is only one hyperparameter to choose, which is the regularization term c. We set C = 0.6 and thereby relax the penalty for misclassifications.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Procedure", |
|
"sec_num": "3.3." |
|
}, |
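{

"text": "A minimal sketch of the described network architecture, a shared LSTM layer followed by one independent fully-connected output per label, assuming TensorFlow/Keras (vocabulary size and embedding dimension are placeholders):\n\nfrom tensorflow.keras import Input, Model\nfrom tensorflow.keras.layers import Dense, Embedding, LSTM\n\nNUM_LABELS = 6      # toxic, severe toxic, obscene, threat, insult, identity hate\nMAX_LEN = 250       # input restricted to a maximum of 250 words\nVOCAB_SIZE = 50000  # placeholder\nEMB_DIM = 100       # assumed GloVe embedding dimension\n\ninputs = Input(shape=(MAX_LEN,))\nembedded = Embedding(VOCAB_SIZE, EMB_DIM)(inputs)  # initialized with the GloVe embeddings in the paper\nhidden = LSTM(50)(embedded)                        # 50 LSTM units, i.e., a 50-dimensional output\noutputs = [Dense(1, activation='sigmoid', name=f'label_{i}')(hidden) for i in range(NUM_LABELS)]\n\nmodel = Model(inputs, outputs)\nmodel.compile(optimizer='adam', loss='binary_crossentropy')\nmodel.summary()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training Procedure",

"sec_num": "3.3."

},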
|
{ |
|
"text": "To give an example of the explanations, Figure 1 and Figure 2 visualize the word relevance scores generated by the different explanation methods for two toxic comments. The conditional probabilities of the naive Bayes approach and the attention weights of the attention-based LSTM define positive word relevance scores between 0 and 1. In contrast to that, LIME and LRP define unbound relevance scores, which can also be negative. A negative word relevance score means that the respective word indicates the absence of a particular class rather than its presence. Because the attention weights are class-independent, these weights can only explain the predicted class. All other methods can also be used to explain a class that was not predicted by the classifier. This property can be used to analyze which words speak in favor of a not predicted class.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 40, |
|
"end": 48, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 53, |
|
"end": 59, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Heatmap Visualization", |
|
"sec_num": "3.4." |
|
}, |
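{

"text": "A minimal sketch of such a relevance visualization, printing each word with a bar proportional to its relevance score as a plain-text stand-in for the color heatmaps (the words and scores below are made up for illustration):\n\ndef print_heatmap(words, scores):\n    max_abs = max(abs(s) for s in scores) or 1.0\n    for word, score in zip(words, scores):\n        bar = '#' * round(20 * abs(score) / max_abs)\n        sign = '+' if score >= 0 else '-'  # negative scores speak against the class\n        print(f'{word:>12} {sign} {bar}')\n\nwords = ['You', 'ignorant', 'fool']\nscores = [0.05, 0.60, 0.90]\nprint_heatmap(words, scores)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Heatmap Visualization",

"sec_num": "3.4."

},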
|
{ |
|
"text": "In Figure 1 , the naive Bayes classifier marks the words killed and fool as most relevant for the decision to classify this comment as toxic. Similarly, the SVM classifier with LRP and LIME mark these two words. In contrast to that, the word killed is less relevant for the LSTM classifiers (with and without attention). Only the naive Bayes and the SVM classifiers use stemming but not the LSTM classifiers. The stemming collapses killed to kill. Therefore, our naive Bayes and SVM classifiers cannot distinguish the active form of the verb from other words with the same stem. In this particular context, the non-stemmed word is not toxic. The stemming misleads the classifiers to wrongly explain the toxicity of the comment with this word. The attention mechanism highlights the words ignorant and fool. The word killed is marked as slightly relevant and all other words as irrelevant. This explanation aligns with an explanation a human would give. In general, we find that the attention mechanism gives meaningful explanations for toxic comments. For non-toxic comments, however, its explanations can be misleading. The attention mechanism distributes a relevance score of one among the wordseven if there is nothing toxic in the comment. To our surprise, the attention mechanism often marks punctuation as relevant in non-toxic comments. The basic LSTM approach marks only a few words as relevant, and most words have relevance close to zero. These sparse explanations are suitable for our dataset, as there is typically a small set of toxic words, which explains the toxicity of the entire comment. In Figure 1c to 1f, LIME and LRP assign negative relevance scores to the word Please. This negative relevance score means that this word speaks against the toxicity of the comment. The heatmaps in Figure 2 visualize the word relevance scores of another comment. Only the basic LSTM classifies this short comment correctly. It contains no swear words, but it is still offensive. The negatively connoted association of a person with an animal falls into the category of dehumanizing language. Without the full context, none of the single words explains the toxicity of the comment. Therefore, it is difficult to provide an attribution-based explanation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 1609, |
|
"end": 1618, |
|
"text": "Figure 1c", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 1803, |
|
"end": 1811, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Heatmap Visualization", |
|
"sec_num": "3.4." |
|
}, |
|
{ |
|
"text": "The following evaluation is three-fold. First, we compare the different classification approaches (naive Bayes, SVM, LSTM, and LSTM with attention mechanism) with regard to their classification performance on the toxic comments dataset. Second, we pair the approaches with attributionbased explanation methods and evaluate the generated explanations based on a word deletion task. The third part of the evaluation uses the explanatory power index (EPI) by Arras et al. (2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 456, |
|
"end": 475, |
|
"text": "Arras et al. (2017)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "To evaluate the classification performance of the different classifiers, we use a multi-label classification task on the toxic comments dataset. Due to the imbalanced class distribution of this dataset, we refrain from using accuracy as the evaluation metric and instead use precision, recall, and F1-score. Table 2 lists the results on the test set and shows that the naive Bayes baseline is weakest, followed by the SVM approach. The basic LSTM network and the LSTM network with attention mechanism overall achieve similar F1-score with larger differences in the less populated classes severe toxic, threat, and identity hate. For the following evaluation of explanation methods, we consider a binary classification task based on the toxic class label only. All classifiers achieve their best performance for this most frequent label.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 308, |
|
"end": 315, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Classification Performance", |
|
"sec_num": "4.1." |
|
}, |
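{

"text": "A minimal sketch of the per-label evaluation, assuming scikit-learn (the toy ground-truth and prediction matrices are placeholders):\n\nimport numpy as np\nfrom sklearn.metrics import precision_recall_fscore_support\n\nlabels = ['toxic', 'severe toxic', 'obscene', 'threat', 'insult', 'identity hate']\ny_true = np.array([[1, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 1, 0]])  # toy ground truth\ny_pred = np.array([[1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 1, 0]])  # toy predictions\n\nprec, rec, f1, _ = precision_recall_fscore_support(y_true, y_pred, average=None, zero_division=0)\nfor name, p, r, f in zip(labels, prec, rec, f1):\n    print(f'{name:>14}  precision={p:.2f}  recall={r:.2f}  f1={f:.2f}')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Classification Performance",

"sec_num": "4.1."

},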
|
{ |
|
"text": "We consider a word deletion task to evaluate whether explanation methods correctly identify which input words are most relevant for the classifier's output. It is based on an idea by Arras et al. (2017) . The task evaluates whether the words that the explanation points out to be relevant for the classification indeed have a strong influence on it. Each explanation method, therefore, needs to calculate a relevance score for each input word. The word with the highest relevance is deleted, and it is checked whether the model's classification result changes with the perturbed input. Given the set of true positives (toxic comments that are correctly identified as toxic), we use each explanation method to calculate word relevance scores for each comment. For each method, we then delete the most relevant words from each comment. If the word is indeed relevant for the classifier's decision, the classification most likely changes for the perturbed comment.", |
|
"cite_spans": [ |
|
{ |
|
"start": 183, |
|
"end": 202, |
|
"text": "Arras et al. (2017)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Deletion Task", |
|
"sec_num": "4.2." |
|
}, |
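{

"text": "A minimal sketch of this word deletion procedure (predict_toxic and relevance_scores are placeholders for a trained classifier and an explanation method; one relevance score per whitespace-separated word is assumed):\n\ndef deletion_curve(comments, predict_toxic, relevance_scores, max_deletions=4):\n    curve = []\n    for k in range(max_deletions + 1):\n        still_toxic = 0\n        for comment in comments:\n            words = comment.split()\n            scores = relevance_scores(comment)\n            # indices of the k most relevant words, which are removed from the comment\n            top_k = sorted(range(len(words)), key=lambda i: scores[i], reverse=True)[:k]\n            perturbed = ' '.join(w for i, w in enumerate(words) if i not in top_k)\n            if predict_toxic(perturbed):\n                still_toxic += 1\n        curve.append(still_toxic / len(comments))  # fraction of true positives still classified as toxic\n    return curve",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word Deletion Task",

"sec_num": "4.2."

},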
|
{ |
|
"text": "Step-by-step, we delete more and more words with decreasing relevance scores. An explanation method is considered to provide good relevance scores if the classification changes for a large number of comments after deleting only a few words. Figure 3 shows how the accuracy quickly drops as more and more words are deleted. By deleting four words, more than 80% of the comments that were previously correctly classified as toxic (true positives) are classified as non-toxic. This result confirms that the classifiers detect those words that often constitute the toxicity of a comment (e.g., swear words). Further, Figure 3 suggests that SVMs provide better explanations than LSTMs. This suggestion is misleading and reveals one limitation of the experiment. Each method starts with its own set of true positives. Therefore each line in the plot corresponds not only to a different explanation method but also to a slightly different dataset. While the overlap of the sets is relatively large, the LSTM network's set of true positives is slightly larger (almost a superset). It also contains some of the more difficult samples of toxic comments, which are correctly classified by the LSTM but misclassified by the naive Bayes approach. One idea to get rid of this problem is to use the intersection of all sets of true positives. The resulting comments are unanimously correctly classified. However, when we further explored this idea, we found that this set is rather small and, more importantly, it contains only the most simple comments -the comments that all classifiers detect correctly as toxic. Still, for those comments that it classifies correctly, the SVM classifier definitely provides the best explanations according to the word deletion experiment. However, the true positives of the LSTM approach also contain comments whose toxicity can only be detected with context. A comment that contains a single swear word is easier to perturb to be classified as non-toxic than a comment that is toxic in its entirety.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 241, |
|
"end": 249, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 613, |
|
"end": 621, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Word Deletion Task", |
|
"sec_num": "4.2." |
|
}, |
|
{ |
|
"text": "Arras et al. 2017propose a three-step approach to quantify the explanatory power of a text classifier with their explanatory power index (EPI). We follow this approach and first calculate one document summary vector per comment in the test set based on each combination of a classifier and an explanation method. The document summary vector is either calculated as a weighted average of the comment's GloVe word embeddings or as the comment's weighted TF-IDF vector representation. We compare a variety of approaches for weighting the words based on word relevance scores.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explanatory Power Index", |
|
"sec_num": "4.3." |
|
}, |
|
{ |
|
"text": "In the second step, we perform a k-nearest neighbor (kNN) classification on these document summary vectors based on each classifier's predictions. This step is repeated ten times on different random splits of the data and with different values of k. The classification accuracy of the KNN classifier is averaged for each k over the ten runs. The EPI is defined as the maximum achieved classification accuracy. We limit the dataset to all toxic comments and a random sample of non-toxic comments of the same size. This downsampling reduces the data to a balanced set of 4, 300 comments and allows to properly use accuracy as the evaluation metric. Intuitively speaking, the EPI mirrors how good the document summary vectors capture the semantic similarity of documents of the same class by clustering them closer to each other in the high-dimensional vector space. Table 3 lists the EPI for the different classifiers paired with the respective explanation methods. The results show that weighting a document's bag-of-words vector representation with conditional probabilities from the naive Bayes baseline has the weakest explanatory power. Its performance is followed by the other two baselines: the SVM approach with TF-IDF weights and the basic LSTM approach with averaged GloVe vectors to obtain document summary vectors. The explanatory power of the basic LSTM classifier combined either with LIME or LRP is superior to all other methods. Although the LSTM with attention mechanism achieves slightly better classification results (F1-score of 76.4% vs. 74.4%), the attention weights are not as suited for explanations as word relevance scores generated with LIME or LRP for the basic LSTM network.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 864, |
|
"end": 871, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Explanatory Power Index", |
|
"sec_num": "4.3." |
|
}, |
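{

"text": "A minimal sketch of the two EPI steps, assuming NumPy and scikit-learn (the embeddings, relevance scores, and predicted labels are placeholders, and the weighting scheme shown is only one of the compared variants):\n\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\n\ndef summary_vector(words, relevance, embeddings):\n    # weighted average of the comment's word embeddings, with weights from the explanation method\n    vecs = np.array([embeddings[w] for w in words])\n    weights = np.array(relevance)\n    return (weights[:, None] * vecs).sum(axis=0) / (np.abs(weights).sum() + 1e-9)\n\ndef explanatory_power_index(X, y, ks=(1, 3, 5, 7), runs=10):\n    # X: document summary vectors, y: the classifier's predicted labels;\n    # EPI = maximum over k of the mean kNN accuracy across random splits\n    best = 0.0\n    for k in ks:\n        accs = []\n        for seed in range(runs):\n            X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.5, random_state=seed)\n            knn = KNeighborsClassifier(n_neighbors=k).fit(X_tr, y_tr)\n            accs.append(knn.score(X_te, y_te))\n        best = max(best, float(np.mean(accs)))\n    return best",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Explanatory Power Index",

"sec_num": "4.3."

},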
|
{ |
|
"text": "LIME and LRP achieve similar results in our experiments. However, they strongly differ in their computational costs. The runtime to generate explanations with LIME is about 40 times higher than with LRP. This difference is because LRP needs only one backpropagation run to propagate the relevance scores from the output layer to the input (word) layer. In contrast to that, LIME requires perturbing a large set of samples. These samples need to come from the local neighborhood of the comment to be explained. Fore example, they need to have many words in common. The more samples are used, the more stable are the explanations. In the word deletion experiment, LIME has an unfair advantage over the other explainability methods due to the way it is trained. The perturbation in its training process is similar to the perturbation in the word deletion task. Therefore, LIME is tailored to this task. A downside of the attention mechanism is that it cannot provide class-specific word relevance scores. Strictly speaking, the attention weights -and thus also the derived relevance scores -do not refer to the word level. The weights instead refer to the hidden states in the sequence of LSTM units. The attention mechanism explains which states are most relevant for the network's final output. The activation of a hidden state is the result of processing a subsequence of the input word sequence -regardless of the actual classification output (toxic/non-toxic). The heatmap visualizations in Figure 1b and Figure 2b show that the attention mechanism distributes the relevance only among a few words, more precisely, hidden states. One reason for that is that a single hidden state actually captures information gained from a sequence of input words. A limitation of attribution-based explanations for offensive language detection seems to be a focus on words that are toxic regardless of the context. This limitation might render them inappropriate for the detection of implicit offensive language. The latter defines offensiveness that is not directly expressed but only arises from the context, uses irony or sarcasm, or can be inferred from metaphors, comparisons, or ascribed properties (Stru\u00df et al., 2019) . In the application scenario of content moderation on an online platform, a classifier that achieves slightly worse accuracy might be preferable if it provides explanations. The reason for this trade-off is not only the importance of transparency of the moderation process and acceptance by the user community. Explanations also facilitate the maintenance of a trained classification model. As the topics of online news articles and the corresponding user discussions change daily, adaptation is necessary -also adaptation of machine-learned models. For example, on one day, an offensive comment might be removed from the platform. However, on the next day, the same comment might be the legitimate center of the discussion because it is a quotation by a well-known politician. In industry applications in general, explanations can support software developers and maintainers to understand machine-learned models and the associated software better.", |
|
"cite_spans": [ |
|
{ |
|
"start": 2192, |
|
"end": 2212, |
|
"text": "(Stru\u00df et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1493, |
|
"end": 1502, |
|
"text": "Figure 1b", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 1507, |
|
"end": 1516, |
|
"text": "Figure 2b", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5." |
|
}, |
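{

"text": "A minimal sketch of why LRP needs only a single backward pass, using the generic epsilon rule for one dense layer (an illustration of the rule, not the authors' LSTM-specific implementation):\n\nimport numpy as np\n\ndef lrp_epsilon_dense(a, W, b, R_out, eps=0.01):\n    # a: input activations (n_in,), W: weights (n_in, n_out), R_out: relevance of the outputs (n_out,)\n    z = a @ W + b                                         # forward pre-activations\n    s = R_out / (z + eps * np.where(z >= 0, 1.0, -1.0))   # stabilized ratios\n    return a * (W @ s)                                    # relevance redistributed onto the inputs\n\na = np.array([0.5, 1.0, 0.0])\nW = np.random.randn(3, 2)\nb = np.zeros(2)\nR_in = lrp_epsilon_dense(a, W, b, R_out=np.array([1.0, 0.0]))\nprint(R_in, R_in.sum())  # relevance is approximately conserved across the layer",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Discussion",

"sec_num": "5."

},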
|
{ |
|
"text": "Besides the need for automated offensive language detection, there is also a need for understanding these automated decisions. To this end, we studied explanation methods and compared four different approaches to make offensive language detection explainable: an interpretable machine learning algorithm (naive Bayes), a model-agnostic expla-nation method (LIME), a model-based explanation method (LRP), and a self-explanatory model (LSTM network with an attention mechanism). In future work, we plan to generate explanations for users on online discussion platforms. The goal there is to make content moderation more comprehensible by using a finegrained classifier (insult, threat, profanity, etc.) together with highlighting the most relevant input words as explanations. We also envision either selecting pre-defined text blocks or generating text as explanations and plan to compare these approaches to the explanations that a human moderator would provide. Last but not least, we are working on a journal article as an extended version of this paper (Risch et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 1056, |
|
"end": 1076, |
|
"text": "(Risch et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "https://eur-lex.europa.eu/eli/reg/2016/679/oj", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.kaggle.com/c/ jigsawtoxic-comment-classification-challenge 3 https://hpi.de/naumann/projects/ repeatability/text-mining.html 4 https://github.com/ArrasL/LRP_for_LSTM/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "What is relevant in a text document?: An interpretable machine learning approach", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Arras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Horn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Montavon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K.-R", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Samek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "PLOS ONE", |
|
"volume": "12", |
|
"issue": "8", |
|
"pages": "1--23", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arras, L., Horn, F., Montavon, G., M\u00fcller, K.-R., and Samek, W. (2017). What is relevant in a text doc- ument?: An interpretable machine learning approach. PLOS ONE, 12(8):1-23.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "On pixel-wise explanations for non-linear classifier decisions by layer-wise relevance propagation", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Bach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Binder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Montavon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Klauschen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K.-R", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Samek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "PLOS ONE", |
|
"volume": "10", |
|
"issue": "7", |
|
"pages": "1--46", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bach, S., Binder, A., Montavon, G., Klauschen, F., M\u00fcller, K.-R., and Samek, W. (2015). On pixel-wise explana- tions for non-linear classifier decisions by layer-wise rel- evance propagation. PLOS ONE, 10(7):1-46.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Extractive adversarial networks: High-recall explanations for identifying personal attacks in social media posts", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Carton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Mei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Resnick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3497--3507", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Carton, S., Mei, Q., and Resnick, P. (2018). Extractive ad- versarial networks: High-recall explanations for identi- fying personal attacks in social media posts. In Proceed- ings of the Conference on Empirical Methods in Natu- ral Language Processing (EMNLP), pages 3497-3507. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Pay \"attention\" to your context when classifying abusive language", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Chakrabarty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Muresan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Workshop on Abusive Language Online (ALW@ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "70--79", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chakrabarty, T., Gupta, K., and Muresan, S. (2019). Pay \"attention\" to your context when classifying abusive lan- guage. In Proceedings of the Workshop on Abusive Lan- guage Online (ALW@ACL), pages 70-79. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Explainable artificial intelligence: A survey", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Do\u0161ilovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Br\u010di\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Hlupi\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Convention on Information and Communication Technology, Electronics and Microelectronics (MIPRO)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "210--0215", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Do\u0161ilovi\u0107, F. K., Br\u010di\u0107, M., and Hlupi\u0107, N. (2018). Ex- plainable artificial intelligence: A survey. In Interna- tional Convention on Information and Communication Technology, Electronics and Microelectronics (MIPRO), pages 0210-0215. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Detecting online hate speech using context aware models", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the International Conference on Recent Advances in Natural Language Processing (RANLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "260--266", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gao, L. and Huang, R. (2017). Detecting online hate speech using context aware models. In Proceedings of the International Conference on Recent Advances in Nat- ural Language Processing (RANLP), pages 260-266. IN- COMA Ltd.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Learning to forget: Continual prediction with LSTM", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Gers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Cummins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Neural Computation", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2451--2471", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gers, F. A., Schmidhuber, J., and Cummins, F. (1999). Learning to forget: Continual prediction with LSTM. Neural Computation, 12:2451-2471.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Long shortterm memory", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural Computation", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hochreiter, S. and Schmidhuber, J. (1997). Long short- term memory. Neural Computation, 9:1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Attention is not Explanation", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Wallace", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3543--3556", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jain, S. and Wallace, B. C. (2019). Attention is not Expla- nation. In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics (NAACL), pages 3543-3556. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Learning how to explain neural networks: PatternNet and PatternAttribution", |
|
"authors": [ |
|
{ |
|
"first": "P.-J", |
|
"middle": [], |
|
"last": "Kindermans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Sch\u00fctt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Alber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K.-R", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Erhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "D\u00e4hne", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the International Conference on Learning Representations (ICLR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kindermans, P.-J., Sch\u00fctt, K. T., Alber, M., M\u00fcller, K.-R., Erhan, D., Kim, B., and D\u00e4hne, S. (2018). Learning how to explain neural networks: PatternNet and PatternAttri- bution. In Proceedings of the International Conference on Learning Representations (ICLR), pages 1-16.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Aggression-annotated Corpus of Hindi", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Reganti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Bhatia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Maheshwari", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kumar, R., Reganti, A. N., Bhatia, A., and Mahesh- wari, T. (2018). Aggression-annotated Corpus of Hindi-", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "English Code-mixed Data", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Proceedings of the International Conference on Language Resources and Evaluation (LREC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "English Code-mixed Data. In Proceedings of the Inter- national Conference on Language Resources and Evalu- ation (LREC). ELRA.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "AI, explain yourself", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Monroe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Communications of the ACM", |
|
"volume": "61", |
|
"issue": "11", |
|
"pages": "11--13", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Monroe, D. (2018). AI, explain yourself. Communications of the ACM, 61(11):11-13.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Explaining nonlinear classification decisions with deep taylor decomposition", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Montavon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Lapuschkin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Binder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Samek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K.-R", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Pattern Recognition", |
|
"volume": "65", |
|
"issue": "", |
|
"pages": "211--222", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Montavon, G., Lapuschkin, S., Binder, A., Samek, W., and M\u00fcller, K.-R. (2017). Explaining nonlinear classifica- tion decisions with deep taylor decomposition. Pattern Recognition, 65:211-222.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Comparing automatic and human evaluation of local explanations for text classification", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1069--1078", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nguyen, D. (2018). Comparing automatic and human eval- uation of local explanations for text classification. In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguis- tics (NAACL), pages 1069-1078. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pennington, J., Socher, R., and Manning, C. D. (2014). Glove: Global vectors for word representation. In Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "why should I trust you?\": Explaining the predictions of any classifier", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Guestrin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the International Conference on Knowledge Discovery and Data Mining (KDD)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1135--1144", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ribeiro, M. T., Singh, S., and Guestrin, C. (2016). \"why should I trust you?\": Explaining the predictions of any classifier. In Proceedings of the International Confer- ence on Knowledge Discovery and Data Mining (KDD), pages 1135-1144.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Delete or not delete? semi-automatic comment moderation for the newsroom", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Risch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Krestel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Workshop on Trolling, Aggression and Cyberbullying (TRAC@COLING)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "166--176", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Risch, J. and Krestel, R. (2018). Delete or not delete? semi-automatic comment moderation for the newsroom. In Proceedings of the Workshop on Trolling, Aggression and Cyberbullying (TRAC@COLING), pages 166-176.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Explaining offensive language detection", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Risch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Ruff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Krestel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Journal for Language Technology and Computational Linguistics (JLCL)", |
|
"volume": "34", |
|
"issue": "1", |
|
"pages": "1--19", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Risch, J., Ruff, R., and Krestel, R. (2020). Explaining of- fensive language detection. Journal for Language Tech- nology and Computational Linguistics (JLCL), 34(1):1- 19.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Simonyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Vedaldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Zisserman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Workshop Proceedings of the International Conference on Learning Representations (ICLR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simonyan, K., Vedaldi, A., and Zisserman, A. (2014). Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps. In Workshop Proceedings of the International Conference on Learn- ing Representations (ICLR), pages 1-8.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Striving for simplicity: The all convolutional net", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Springenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Dosovitskiy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Brox", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Riedmiller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Workshop Proceedings of the International Conference on Learning Representations (ICLR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--14", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Springenberg, J. T., Dosovitskiy, A., Brox, T., and Ried- miller, M. A. (2015). Striving for simplicity: The all convolutional net. In Workshop Proceedings of the International Conference on Learning Representations (ICLR), pages 1-14.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Overview of germeval task 2, 2019 shared task on the identification of offensive language", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Stru\u00df", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Siegel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Ruppenhofer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Wiegand", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Klenner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stru\u00df, J. M., Siegel, M., Ruppenhofer, J., Wiegand, M., and Klenner, M. (2019). Overview of germeval task 2, 2019 shared task on the identification of offensive language. In Proceedings of the Conference on Natural Language Processing (KONVENS).", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Hateful symbols or hateful people? predictive features for hate speech detection on twitter", |
|
"authors": [ |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Waseem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the NAACL Student Research Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "88--93", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Waseem, Z. and Hovy, D. (2016). Hateful symbols or hate- ful people? predictive features for hate speech detection on twitter. In Proceedings of the NAACL Student Re- search Workshop, pages 88-93.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Attention is not not explanation", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Wiegreffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Pinter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing and the International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wiegreffe, S. and Pinter, Y. (2019). Attention is not not ex- planation. In Proceedings of the Conference on Empiri- cal Methods in Natural Language Processing and the In- ternational Joint Conference on Natural Language Pro- cessing (EMNLP-IJCNLP), pages 11-20. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Hierarchical attention networks for document classification", |
|
"authors": [ |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Smola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1480--1489", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang, Z., Yang, D., Dyer, C., He, X., Smola, A., and Hovy, E. (2016). Hierarchical attention networks for document classification. In Proceedings of the Conference of the North American Chapter of the Association for Compu- tational Linguistics (NAACL), pages 1480-1489.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Visualizing and understanding convolutional networks", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Zeiler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Fergus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "European Conference on Computer Vision (ECCV)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "818--833", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zeiler, M. D. and Fergus, R. (2014). Visualizing and un- derstanding convolutional networks. In European Con- ference on Computer Vision (ECCV), pages 818-833. Springer.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"num": null, |
|
"text": "This heatmap visualizes positive (red) and negative (blue) word relevance scores generated by combinations of different classifiers and explanation methods.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"num": null, |
|
"text": "This heatmap visualizes positive (red) and negative (blue) word relevance scores generated by combinations of different classifiers and explanation methods.", |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"num": null, |
|
"text": "Correct classifications into the toxic class change to non-toxic if the most relevant input words are deleted. This result shows that the word relevance scores successfully mirror a word's influence on the classification result.", |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Absolute and relative frequency of the six class labels in the training dataset and test dataset. The class distribution is highly imbalanced.Ribeiro et al. 5 . To generate explanations for SVM and LSTM with the model-agnostic method LIME, we first sample perturbations of the input text by randomly deleting words. For each sample, we calculate the class probabilities with the SVM and the LSTM by applying a softmax function as the final calculation step. The default ridge regression algorithm is used to train an interpretable linear model. This model learns the word relevance scores bases on the classified samples. Last but not least, we implement an LSTM network with an attention mechanism, which is an example of a selfexplanatory model. It uses attention weights on the word level (not on the sentence level) and implements the architecture byYang et al. (2016).", |
|
"num": null, |
|
"content": "<table><tr><td>Class</td><td>Training Set</td><td>Test Set</td></tr><tr><td>Toxic</td><td colspan=\"2\">19,235 9.56% 2,149 9.61%</td></tr><tr><td>Severe Toxic</td><td>1,757 0.87%</td><td>205 0.92%</td></tr><tr><td>Obscene</td><td colspan=\"2\">10,922 5.43% 1,218 5.45%</td></tr><tr><td>Threat</td><td>617 0.31%</td><td>72 0.32%</td></tr><tr><td>Insult</td><td colspan=\"2\">10,178 5.06% 1,126 5.04%</td></tr><tr><td>Identity Hate</td><td>1,906 0.95%</td><td>211 0.94%</td></tr><tr><td colspan=\"2\">LIME implementation by</td><td/></tr></table>" |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Precision (P), Recall (R) and F1-score of the classifiers on the toxic comments dataset (in percent). Bold font indicates best F1-score per class.", |
|
"num": null, |
|
"content": "<table><tr><td>Class</td><td>Metric</td><td>NB</td><td>SVM LSTM ATT</td></tr><tr><td/><td>P</td><td colspan=\"2\">69.87 83.22 81.66 84.54</td></tr><tr><td>Toxic</td><td>R</td><td colspan=\"2\">63.89 65.98 68.36 69.74</td></tr><tr><td/><td>F1</td><td colspan=\"2\">66.75 73.60 74.42 76.43</td></tr><tr><td>Severe Toxic</td><td>P R F1</td><td colspan=\"2\">14.45 52.11 56.96 58.33 92.20 18.05 21.95 07.69 24.98 26.81 31.69 13.59</td></tr><tr><td/><td>P</td><td colspan=\"2\">51.89 85.64 81.09 86.15</td></tr><tr><td>Obscene</td><td>R</td><td colspan=\"2\">75.70 67.57 71.84 67.13</td></tr><tr><td/><td>F1</td><td colspan=\"2\">61.57 75.54 76.19 75.46</td></tr><tr><td/><td>P</td><td colspan=\"2\">03.95 72.41 31.43 89.29</td></tr><tr><td>Threat</td><td>R</td><td colspan=\"2\">59.72 29.17 15.28 35.21</td></tr><tr><td/><td>F1</td><td colspan=\"2\">07.41 41.58 20.56 50.51</td></tr><tr><td/><td>P</td><td colspan=\"2\">48.41 78.43 72.67 77.64</td></tr><tr><td>Insult</td><td>R</td><td colspan=\"2\">75.75 57.82 69.18 59.56</td></tr><tr><td/><td>F1</td><td colspan=\"2\">59.07 66.56 70.88 67.40</td></tr><tr><td>Identity Hate</td><td>P R F1</td><td colspan=\"2\">11.72 64.47 55.36 65.77 73.46 23.22 29.38 49.75 20.21 34.15 38.39 56.64</td></tr></table>" |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Explanatory Power Index (EPI) for classifiers and explanation methods. Hyperparameter k denotes the number of nearest neighbors that maximizes the EPI.", |
|
"num": null, |
|
"content": "<table><tr><td>Classifier</td><td>Explanation Method</td><td>EPI</td><td>k</td></tr><tr><td colspan=\"3\">Naive Bayes Conditional Probability 82.29</td><td>3</td></tr><tr><td/><td>TF-IDF</td><td colspan=\"2\">87.59 25</td></tr><tr><td>SVM</td><td>LRP</td><td colspan=\"2\">93.38 19</td></tr><tr><td/><td>LIME</td><td colspan=\"2\">93.14 19</td></tr><tr><td/><td>GloVe</td><td colspan=\"2\">84.74 15</td></tr><tr><td>LSTM</td><td>LRP</td><td>99.67</td><td>3</td></tr><tr><td/><td>LIME</td><td>99.48</td><td>9</td></tr><tr><td colspan=\"2\">ATT LSTM Attention Mechanism</td><td colspan=\"2\">92.04 11</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |