|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:48:17.229406Z" |
|
}, |
|
"title": "BEEP! Korean Corpus of Online News Comments for Toxic Speech Detection", |
|
"authors": [ |
|
{ |
|
"first": "Jihyung", |
|
"middle": [], |
|
"last": "Moon", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Seoul National University", |
|
"location": { |
|
"addrLine": "INMC 2", |
|
"settlement": "Seoul" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Won", |
|
"middle": [ |
|
"Ik" |
|
], |
|
"last": "Cho", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Seoul National University", |
|
"location": { |
|
"addrLine": "INMC 2", |
|
"settlement": "Seoul" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Junbum", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Seoul National University", |
|
"location": { |
|
"addrLine": "INMC 2", |
|
"settlement": "Seoul" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Toxic comments in online platforms are an unavoidable social issue under the cloak of anonymity. Hate speech detection has been actively done for languages such as English, German, or Italian, where manually labeled corpus has been released. In this work, we first present 9.4K manually labeled entertainment news comments for identifying Korean toxic speech, collected from a widely used online news platform in Korea. The comments are annotated regarding social bias and hate speech since both aspects are correlated. The inter-annotator agreement Krippendorff's alpha score is 0.492 and 0.496, respectively. We provide benchmarks using CharCNN, BiL-STM, and BERT, where BERT achieves the highest score on all tasks. The models generally display better performance on bias identification, since the hate speech detection is a more subjective issue. Additionally, when BERT is trained with bias label for hate speech detection, the prediction score increases, implying that bias and hate are intertwined. We make our dataset publicly available and open competitions with the corpus and benchmarks.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Toxic comments in online platforms are an unavoidable social issue under the cloak of anonymity. Hate speech detection has been actively done for languages such as English, German, or Italian, where manually labeled corpus has been released. In this work, we first present 9.4K manually labeled entertainment news comments for identifying Korean toxic speech, collected from a widely used online news platform in Korea. The comments are annotated regarding social bias and hate speech since both aspects are correlated. The inter-annotator agreement Krippendorff's alpha score is 0.492 and 0.496, respectively. We provide benchmarks using CharCNN, BiL-STM, and BERT, where BERT achieves the highest score on all tasks. The models generally display better performance on bias identification, since the hate speech detection is a more subjective issue. Additionally, when BERT is trained with bias label for hate speech detection, the prediction score increases, implying that bias and hate are intertwined. We make our dataset publicly available and open competitions with the corpus and benchmarks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Online anonymity provides freedom of speech to many people and lets them speak their opinions in public. However, anonymous speech also has a negative impact on society and individuals (Banks, 2010) . With anonymity safeguards, individuals easily express hatred against others based on their superficial characteristics such as gender, sexual orientation, and age (ElSherief et al., 2018) . Sometimes the hostility leaks to the well-known people who are considered to be the representatives of targeted attributes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 198, |
|
"text": "(Banks, 2010)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 364, |
|
"end": 388, |
|
"text": "(ElSherief et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Recently, Korea had suffered a series of tragic incidents of two young celebrities that are presumed to be caused by toxic comments (Fortin, 2019; Mc-Curry, 2019a,b) . Since the incidents, two major web portals in Korea decided to close the comment system in their entertainment news aggregating service (Yeo, 2019; Yim, 2020) . Even though the toxic comments are now avoidable in those platforms, the fundamental problem has not been solved yet.", |
|
"cite_spans": [ |
|
{ |
|
"start": 132, |
|
"end": 146, |
|
"text": "(Fortin, 2019;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 147, |
|
"end": 165, |
|
"text": "Mc-Curry, 2019a,b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 304, |
|
"end": 315, |
|
"text": "(Yeo, 2019;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 316, |
|
"end": 326, |
|
"text": "Yim, 2020)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To cope with the social issue, we propose the first Korean corpus annotated for toxic speech detection. Specifically, our dataset consists of 9.4K comments from Korean online entertainment news articles. Each comment is annotated on two aspects, the existence of social bias and hate speech, given that hate speech is closely related to bias (Boeckmann and Turpin-Petrosino, 2002; Waseem and Hovy, 2016; Davidson et al., 2017) . Considering the context of Korean entertainment news where public figures encounter stereotypes mostly intertwined with gender, we weigh more on the prevalent bias. For hate speech, our label categorization refers that of Davidson et al. (2017) , namely hate, offensive, and none.", |
|
"cite_spans": [ |
|
{ |
|
"start": 342, |
|
"end": 380, |
|
"text": "(Boeckmann and Turpin-Petrosino, 2002;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 381, |
|
"end": 403, |
|
"text": "Waseem and Hovy, 2016;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 404, |
|
"end": 426, |
|
"text": "Davidson et al., 2017)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 651, |
|
"end": 673, |
|
"text": "Davidson et al. (2017)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The main contributions of this work are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We release the first Korean corpus manually annotated on two major toxic attributes, namely bias and hate 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We hold Kaggle competitions 234 and provide benchmarks to boost further research development.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We observe that in our study, hate speech detection benefits the additional bias context.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The construction of hate speech corpus has been explored for a limited number of languages, such as English (Waseem and Hovy, 2016; Davidson et al., 2017; Zampieri et al., 2019; Basile et al., 2019) , Spanish (Basile et al., 2019) , Polish (Ptaszynski et al., 2019) , Portuguese (Fortuna et al., 2019) , and Italian (Sanguinetti et al., 2018) . For Korean, works on abusive language have mainly focused on the qualitative discussion of the terminology (Hong, 2016), whereas reliable and manual annotation of the corpus has not yet been undertaken. Though profanity termbases are currently available 56 , term matching approach frequently makes false predictions (e.g., neologism, polysemy, use-mention distinction), and more importantly, not all hate speech are detectable using such terms (Zhang et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 131, |
|
"text": "(Waseem and Hovy, 2016;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 132, |
|
"end": 154, |
|
"text": "Davidson et al., 2017;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 155, |
|
"end": 177, |
|
"text": "Zampieri et al., 2019;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 178, |
|
"end": 198, |
|
"text": "Basile et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 209, |
|
"end": 230, |
|
"text": "(Basile et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 240, |
|
"end": 265, |
|
"text": "(Ptaszynski et al., 2019)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 279, |
|
"end": 301, |
|
"text": "(Fortuna et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 316, |
|
"end": 342, |
|
"text": "(Sanguinetti et al., 2018)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 790, |
|
"end": 810, |
|
"text": "(Zhang et al., 2018)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In addition, hate speech is situated within the context of social bias (Boeckmann and Turpin-Petrosino, 2002) . Waseem and Hovy (2016) and Davidson et al. (2017) attended to bias in terms of hate speech, however, their interest was mainly in texts that explicitly exhibit sexist or racist terms. In this paper, we consider both explicit and implicit stereotypes, and scrutinize how these are related to hate speech.", |
|
"cite_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 109, |
|
"text": "(Boeckmann and Turpin-Petrosino, 2002)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 112, |
|
"end": 134, |
|
"text": "Waseem and Hovy (2016)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 139, |
|
"end": 161, |
|
"text": "Davidson et al. (2017)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We constructed the Korean hate speech corpus using the comments from a popular domestic entertainment news aggregation platform. Users had been able to leave comments on each article before the recent overhaul (Yim, 2020) , and we had scrapped the comments from the most-viewed articles.", |
|
"cite_spans": [ |
|
{ |
|
"start": 210, |
|
"end": 221, |
|
"text": "(Yim, 2020)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Collection", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In total, we retrieved 10,403,368 comments from 23,700 articles published from January 1, 2018 to February 29, 2020. We draw 1,580 articles using stratified sampling and extract the top 20 comments ranked in the order of Wilson score (Wilson, 1927) on the downvote for each article. Then, we remove duplicate comments, single token comments (to eliminate ambiguous ones), and comments composed with more than 100 characters (that could convey various opinions). Finally, 10K comments are randomly selected among the rest for annotation. We prepared other 2M comments by gathering the top 100 sorted with the same score for all articles and removed with any overlaps regarding the above 10K comments. This additional corpus is distributed without labels, expected to be useful for pre-training language models on Korean online text.", |
|
"cite_spans": [ |
|
{ |
|
"start": 234, |
|
"end": 248, |
|
"text": "(Wilson, 1927)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Collection", |
|
"sec_num": "3" |
|
}, |
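
{

"text": "As a minimal illustrative sketch (not code from the paper), the lower-bound Wilson score used above for ranking can be computed as follows; we assume it is evaluated on the downvote count out of the total votes for each comment, and the function and variable names are ours:\n\nimport math\n\ndef wilson_lower_bound(downvotes, total_votes, z=1.96):\n    # Lower bound of the Wilson score interval for the downvote proportion;\n    # ranking by this value favors comments that are reliably downvoted.\n    if total_votes == 0:\n        return 0.0\n    p = downvotes / total_votes\n    denom = 1 + z * z / total_votes\n    center = p + z * z / (2 * total_votes)\n    margin = z * math.sqrt((p * (1 - p) + z * z / (4 * total_votes)) / total_votes)\n    return (center - margin) / denom\n\n# wilson_lower_bound(80, 100) > wilson_lower_bound(4, 5): with the same downvote ratio,\n# more votes yield a tighter interval and thus a higher lower bound.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Collection",

"sec_num": "3"

},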
|
{ |
|
"text": "The annotation was performed by 32 annotators consisting of 29 workers from a crowdsourcing platform DeepNatural AI 7 and three natural language processing (NLP) researchers. Every comment was provided to three random annotators to assign the majority decision. Annotators are asked to answer two three-choice questions for each comment:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "1. What kind of bias does the comment contain?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Gender bias, Other biases, or None 2. Which is the adequate category for the comment in terms of hate speech?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Hate, Offensive, or None", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "They are allowed to skip comments which are too ambiguous to decide. Detailed instructions are described in Appendix A. Note that this is the first guideline of social bias and hate speech on Korean online comments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Since hate speech is situated within the context of social bias (Boeckmann and Turpin-Petrosino, 2002), we first identify the bias implicated in the comment. Social bias is defined as a preconceived evaluation or prejudice towards a person/group with certain social characteristics: gender, political affiliation, religion, beauty, age, disability, race, or others. Although our main interest is on gender bias, other issues are not to be underestimated.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Social Bias", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Thus, we separate bias labels into three: whether the given text contains gender-related bias, other biases, or none of them. Additionally, we introduce a binary version of the corpus, which counts only the gender bias, that is prevalent among the entertainment news comments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Social Bias", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The inter-annotator agreement (IAA) of the label is calculated based on Krippendorff's alpha (Krippendorff, 2011) that takes into account an arbitrary number of annotators labeling any number of instances. IAA for the ternary classes is 0.492, which means that the agreement is moderate. For the binary case, we obtained 0.767, which implies that the identification of gender and sexuality-related bias reaches quite a substantial agreement.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Social Bias", |
|
"sec_num": "4.1" |
|
}, |
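
{

"text": "As an illustration only (the exact aggregation pipeline is not released with the paper), Krippendorff's alpha over such annotation triples can be computed with NLTK's agreement module; the record layout below is an assumption about how the three annotators' labels would be arranged:\n\nfrom nltk.metrics.agreement import AnnotationTask\n\n# Each record is (annotator_id, item_id, label); labels are nominal,\n# e.g. 'gender', 'others', or 'none' for the ternary bias task.\nrecords = [\n    ('a1', 'c1', 'gender'), ('a2', 'c1', 'gender'), ('a3', 'c1', 'none'),\n    ('a1', 'c2', 'none'),   ('a2', 'c2', 'none'),   ('a3', 'c2', 'none'),\n]\n\ntask = AnnotationTask(data=records)\nprint(task.alpha())  # Krippendorff's alpha with the default nominal (binary) distance",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Social Bias",

"sec_num": "4.1"

},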
|
{ |
|
"text": "Hate speech is difficult to be identified, especially for the comments which are context-sensitive. Since annotators are not given additional information, labeling would be diversified due to the difference in pragmatic intuition and background knowledge thereof. To collect reliable hate speech annotation, we attempt to establish a precise and clear guideline.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hate Speech", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We consider three categories for hate speech: hate, offensive but not hate, and none. As socially agreed definition lacks for Korean 8 , we refer to the hate speech policies of Youtube; Facebook; Twitter. Drawing upon those, we define hate speech in our study as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hate Speech", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 If a comment explicitly expresses hatred against individual/group based on any of the following attributes: sex, gender, sexual orientation, gender identity, age, appearance, social status, religious affiliation, military service, disease or disability, ethnicity, and national origin \u2022 If a comment severely insults or attacks individual/group; this includes sexual harassment, humiliation, and derogation However, note that not all the rude or aggressive comments necessarily belong to the above definition, as argued in Davidson et al. (2017) . We often see comments that are offensive to certain individuals/groups in a qualitatively different manner. We identify these as offensive and set the boundary as follows: \u2022 If a comment conveys sarcasm via rhetorical expression or irony \u2022 If a comment states an opinion in an unethical, rude, coarse, or uncivilized manner \u2022 If a comment implicitly attacks individual/group while leaving rooms to be considered as freedom of speech", |
|
"cite_spans": [ |
|
{ |
|
"start": 525, |
|
"end": 547, |
|
"text": "Davidson et al. (2017)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hate Speech", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The instances that do not meet the boundaries above were categorized as none. The IAA on the hate categories is \u03b1 = 0.496, which implies a moderate agreement.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hate Speech", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Release From the 10k manually annotated corpus, we discard 659 instances that are either skipped or failed to reach an agreement. We split the final dataset into the train (7,896), validation (471), and test set (974) and released it on the Kaggle platform to leverage the leaderboard system. For a fair competition, labels on the test set are not disclosed. Titles of source articles for each comment are also provided, to help participants exploit context information. Table 1 depicts how the classes are composed of. The bias category distribution in our corpus is skewed towards none, while that of hate category is quite balanced. We also confirm that the existence of hate speech is correlated with the existence of social bias. In other words, when a comment incorporates a social bias, it is likely to contain hate or offensive speech.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 471, |
|
"end": 478, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Corpus", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "6 Benchmark Experiment", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Class distribution", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We implemented three baseline classifiers: character-level convolutional neural network (CharCNN) (Zhang et al., 2015) , bidirectional long short-term memory (BiLSTM) (Schuster and Paliwal, 1997) , and bidirectional encoder representations from Transformer (BERT) (Devlin et al., 2018) based model. For BERT, we adopt KoBERT 9 , a pre-trained module for the Korean language, and apply its tokenizer to BiLSTM as well. The detailed configurations are provided in Appendix B, and we additionally report the term matching approach using the aforementioned profanity terms to compare with the benchmarks. Table 2 depicts F1 score of the three baselines and the term matching model. The results demonstrate that the models trained on our corpus have an advantage over the term matching method. Compared with the benchmarks, BERT achieves the best performance for all the three tasks: binary and ternary bias identification tasks, and hate speech detection. Each model not only shows different performances but also presents different characteristics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 98, |
|
"end": 118, |
|
"text": "(Zhang et al., 2015)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 167, |
|
"end": 195, |
|
"text": "(Schuster and Paliwal, 1997)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 264, |
|
"end": 285, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 601, |
|
"end": 608, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "6.1" |
|
}, |
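
{

"text": "The following is a minimal sketch of how such a BERT-based classifier can be fine-tuned with the transformers library; the checkpoint name is a placeholder rather than the exact KoBERT release used in the paper, and the hyperparameters are illustrative:\n\nimport torch\nfrom transformers import AutoTokenizer, AutoModelForSequenceClassification\n\nmodel_name = 'bert-base-multilingual-cased'  # placeholder; the paper uses KoBERT\ntokenizer = AutoTokenizer.from_pretrained(model_name)\nmodel = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=3)  # hate / offensive / none\n\nbatch = tokenizer(['example comment text'], padding=True, truncation=True, max_length=256, return_tensors='pt')\nlabels = torch.tensor([2])  # index of the gold class for this comment\noutputs = model(**batch, labels=labels)\noutputs.loss.backward()  # an optimizer step (e.g., AdamW) would follow in a training loop",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Models",

"sec_num": "6.1"

},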
|
{ |
|
"text": "Bias detection When it comes to the gender-bias detection, the task benefits more on CharCNN than BiLSTM since the bias label is highly correlated with frequent gender terms (e.g., he, she, man, woman, ...) in the dataset. It is known that Char- CNN well captures the lexical components that are present in the document.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "However, owing to that nature, CharCNN sometimes yields results that are overly influenced by the specific terms which cause false predictions. For example, the model fails to detect bias in \"What a long life for a GAY\" but guesses \"I think she is the prettiest among all the celebs\" to contain bias. CharCNN overlooks GAY while giving a wrong clue due to the existence of female pronouns, namely she in the latter.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Similar to the binary prediction task, CharCNN outperforms BiLSTM on ternary classification. Table 3 demonstrates that BiLSTM hardly identifies gender and other biases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "BERT detects both biases better than the other models. From the highest score obtained by BERT, we found that rich linguistic knowledge and semantic information is helpful for bias recognition.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We also observed that all the three models barely perform well on others (Table 3) . To make up a system that covers the broad definition of other bias, it would be better to predict the label as the non-gender bias. For instance, it can be performed as a two-step prediction: the first step to distinguish whether the comment is biased or not and the second step to determine whether the biased comment is gender-related or not.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 73, |
|
"end": 82, |
|
"text": "(Table 3)", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Hate speech detection For hate speech detection, all models faced performance degradation compared to the bias classification task, since the task is more challenging. Nonetheless, BERT is still the most successful, and we conjecture that hate speech detection also utilizes high-level semantic features. The significant performance gap between term matching and BERT explains how much our approach compensates for the false predictions mentioned in Section 2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Provided bias label prepend to each comment as a special token, BERT exhibits better performance. As illustrated in Figure 2 , additional bias context helps the model to distinguish offensive and none clearly. This implies our observation on the correlation between bias and hate is empirically supported.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 116, |
|
"end": 124, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6.2" |
|
}, |
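
{

"text": "A rough sketch of the prepending mechanism (our reading of the setup; the special-token names and checkpoint below are illustrative, not taken from the paper) could look as follows:\n\nfrom transformers import AutoTokenizer\n\ntokenizer = AutoTokenizer.from_pretrained('bert-base-multilingual-cased')  # placeholder checkpoint\nbias_tokens = ['[GENDER]', '[OTHERS]', '[NO_BIAS]']  # hypothetical special tokens, one per bias class\ntokenizer.add_special_tokens({'additional_special_tokens': bias_tokens})\n# After adding tokens, the model embeddings must be resized:\n# model.resize_token_embeddings(len(tokenizer))\n\ncomment = 'example comment text'\nbias_label = '[GENDER]'  # gold (or predicted) bias label for this comment\nencoded = tokenizer(bias_label + ' ' + comment, truncation=True, max_length=256, return_tensors='pt')\n# encoded is then fed to the hate speech classifier in place of the plain comment.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Results",

"sec_num": "6.2"

},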
|
{ |
|
"text": "In this data paper, we provide an annotated corpus that can be practically used for analysis and modeling on Korean toxic language, including hate speech and social bias. In specific, we construct a corpus of a total of 9.4K comments from online entertainment news service.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Our dataset has been made publicly accessible with baseline models. We launch Kaggle competitions using the corpus, which may facilitate the studies on toxic speech and ameliorate the cyberbullying issues. We hope our initial efforts can be supportive not only to NLP for social good, but also as a useful resource for discerning implicit bias and hate in online languages. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Note that each model's configuration is the same for all tasks except for the last layer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Model Configuration", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For character-level CNN, no specific tokenization was utilized. The sequence of Hangul characters was fed into the model at a maximum length of 150. The total number of characters was 1,685, including '[UNK]' and '[PAD]' token, and the embedding size was set to 300. 10 kernels were used, each with the size of [3, 4, 5] . At the final pooling layer, we used a fully connected network (FCN) of size 1,140, with a 0.5 dropout rate (Srivastava et al., 2014) . The training was done for 6 epochs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 311, |
|
"end": 314, |
|
"text": "[3,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 315, |
|
"end": 317, |
|
"text": "4,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 318, |
|
"end": 320, |
|
"text": "5]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 430, |
|
"end": 455, |
|
"text": "(Srivastava et al., 2014)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.1 CharCNN", |
|
"sec_num": null |
|
}, |
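
{

"text": "A minimal PyTorch sketch of a CharCNN consistent with the configuration above; note that the number of filters per kernel size and the reading of the 1,140-unit FCN as the concatenated feature size (380 x 3) are our assumptions, not details stated by the authors:\n\nimport torch\nimport torch.nn as nn\n\nclass CharCNN(nn.Module):\n    def __init__(self, vocab_size=1685, emb_dim=300, num_classes=3, n_filters=380, kernel_sizes=(3, 4, 5)):\n        super().__init__()\n        self.embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=0)\n        self.convs = nn.ModuleList(nn.Conv1d(emb_dim, n_filters, k) for k in kernel_sizes)\n        self.dropout = nn.Dropout(0.5)\n        self.fc = nn.Linear(n_filters * len(kernel_sizes), num_classes)  # 380 * 3 = 1,140 inputs\n\n    def forward(self, char_ids):  # char_ids: (batch, seq_len), seq_len capped at 150\n        x = self.embedding(char_ids).transpose(1, 2)  # (batch, emb_dim, seq_len)\n        pooled = [torch.relu(conv(x)).max(dim=2).values for conv in self.convs]  # max-over-time pooling\n        return self.fc(self.dropout(torch.cat(pooled, dim=1)))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "B.1 CharCNN",

"sec_num": null

},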
|
{ |
|
"text": "For bidirectional LSTM, we had a vocab size of 4,322, with a maximum length of 256. We used BERT SentencePiece tokenizer (Kudo and Richardson, 2018) . The width of the hidden layers was 512 (=256 \u00d7 2), with four stacked layers. The dropout rate was set to 0.3. An FCN of size 1,024 was appended to the BiLSTM output to yield the final softmax layer. We trained the model for 15 epochs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 148, |
|
"text": "(Kudo and Richardson, 2018)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.2 BiLSTM", |
|
"sec_num": null |
|
}, |
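
{

"text": "A sketch of the BiLSTM baseline under the configuration above (four stacked bidirectional layers, hidden width 512 = 256 x 2, dropout 0.3, and a 1,024-unit FCN); the embedding dimension and the use of the last time step for classification are our assumptions:\n\nimport torch.nn as nn\n\nclass BiLSTMClassifier(nn.Module):\n    def __init__(self, vocab_size=4322, emb_dim=256, hidden=256, num_classes=3):\n        super().__init__()\n        self.embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=0)\n        self.lstm = nn.LSTM(emb_dim, hidden, num_layers=4, bidirectional=True,\n                            batch_first=True, dropout=0.3)\n        self.fc = nn.Sequential(nn.Linear(hidden * 2, 1024), nn.ReLU(),\n                                nn.Dropout(0.3), nn.Linear(1024, num_classes))\n\n    def forward(self, token_ids):  # token_ids: (batch, seq_len), seq_len capped at 256\n        out, _ = self.lstm(self.embedding(token_ids))  # (batch, seq_len, 2 * hidden)\n        return self.fc(out[:, -1, :])  # the last time step feeds the FCN",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "B.2 BiLSTM",

"sec_num": null

},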
|
{ |
|
"text": "For BERT, a built-in SentencePiece tokenizer of KoBERT was adopted, which was also used for BiLSTM. We set a maximum length at 256 and ran the model for 10 epochs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B.3 BERT", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/kocohub/korean-hate-speech 2 www.kaggle.com/c/korean-gender-bias-detection 3 www.kaggle.com/c/korean-bias-detection 4 www.kaggle.com/c/korean-hate-speech-detection", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/doublems/korean-bad-words 6 https://github.com/LDNOOBW/List-of-Dirty-Naughty-Obscene-and-Otherwise-Bad-Words", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://app.deepnatural.ai/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Though a government report is available for the Korean language (Hong, 2016), we could not reach a fine extension to the quantitative study on online spaces.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Frequently observable in Korea, where the military service is mandatory for males.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We greatly thank Hyunjoong Kim for providing financial support and Sangwoong Yoon for giving helpful comments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The first property is to note which social bias is implicated in the comment. Here, social bias means hasty guess or prejudice that 'a person/group with a certain social identity will display a certain characteristic or act in a biased way'. The three labels of the question are as follows.1. Is there a gender-related bias, either explicit or implicit, in the text?\u2022 If the text includes bias for gender role, sexual orientation, sexual identity, and any thoughts on gender-related acts (e.g., \"Wife must be obedient to her husband's words\", or \"Homosexual person will be prone to disease.\") 2. Are there any other kinds of bias in the text?\u2022 Other kinds of factors that are considered not gender-related but social bias, including race, background, nationality, ethnic group, political stance, skin color, religion, handicaps, age, appearance, richness, occupations, the absence of military service experience 10 , etc. 3. A comment that does not incorporate the bias", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.1 Existence of social bias", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The second property is how aggressive the comment is. Since the level of \"aggressiveness\" depends on the linguistic intuition of annotators, we set the following categorization to draw a borderline as precise as possible.1. Is strong hate or insulting towards the article's target or related figures, writers of the article or comments, etc. displayed in a comment?\u2022 In the case of insulting, it encompasses an expression that can severely harm the social status of the recipient. \u2022 In the case of hate, it is defined as an expression that displays aggressive stances towards individuals/groups with certain characteristics (gender role, sexual orientation, sexual identity, any thoughts on gender-related acts, race, background, nationality, ethnic group, political stance, skin color, religion, handicaps, age, appearance, richness, occupations, the absence of military service experience, etc.). \u2022 Additionally, it can include sexual harassment, notification of offensive rumors or facts, and coined terms for bad purposes or in bad use, etc. \u2022 Just an existence of bad words in the document does not always fall into this category.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A.2 Amount of hate, insulting, or offense", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Regulating hate speech online", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Banks", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "ternational Review of Law", |
|
"volume": "24", |
|
"issue": "", |
|
"pages": "233--239", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Banks. 2010. Regulating hate speech online. In- ternational Review of Law, Computers & Technol- ogy, 24(3):233-239.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Semeval-2019 task 5: Multilingual detection of hate speech against immigrants and women in twitter", |
|
"authors": [ |
|
{ |
|
"first": "Valerio", |
|
"middle": [], |
|
"last": "Basile", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cristina", |
|
"middle": [], |
|
"last": "Bosco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elisabetta", |
|
"middle": [], |
|
"last": "Fersini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Debora", |
|
"middle": [], |
|
"last": "Nozza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Viviana", |
|
"middle": [], |
|
"last": "Patti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco Manuel Rangel", |
|
"middle": [], |
|
"last": "Pardo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Rosso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manuela", |
|
"middle": [], |
|
"last": "Sanguinetti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "54--63", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Valerio Basile, Cristina Bosco, Elisabetta Fersini, Deb- ora Nozza, Viviana Patti, Francisco Manuel Rangel Pardo, Paolo Rosso, and Manuela Sanguinetti. 2019. Semeval-2019 task 5: Multilingual detection of hate speech against immigrants and women in twitter. In Proceedings of the 13th International Workshop on Semantic Evaluation, pages 54-63.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Understanding the harm of hate crime", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Robert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carolyn", |
|
"middle": [], |
|
"last": "Boeckmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Turpin-Petrosino", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Journal of social issues", |
|
"volume": "58", |
|
"issue": "2", |
|
"pages": "207--225", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert J Boeckmann and Carolyn Turpin-Petrosino. 2002. Understanding the harm of hate crime. Jour- nal of social issues, 58(2):207-225.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Automated hate speech detection and the problem of offensive language", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Davidson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dana", |
|
"middle": [], |
|
"last": "Warmsley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Macy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ingmar", |
|
"middle": [], |
|
"last": "Weber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Eleventh international aaai conference on web and social media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Davidson, Dana Warmsley, Michael Macy, and Ingmar Weber. 2017. Automated hate speech de- tection and the problem of offensive language. In Eleventh international aaai conference on web and social media.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Hate lingo: A target-based linguistic analysis of hate speech in social media", |
|
"authors": [ |
|
{ |
|
"first": "Mai", |
|
"middle": [], |
|
"last": "Elsherief", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vivek", |
|
"middle": [], |
|
"last": "Kulkarni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dana", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"Yang" |
|
], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Belding", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Twelfth International AAAI Conference on Web and Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mai ElSherief, Vivek Kulkarni, Dana Nguyen, William Yang Wang, and Elizabeth Belding. 2018. Hate lingo: A target-based linguistic analysis of hate speech in social media. In Twelfth International AAAI Conference on Web and Social Media.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Facebook's policy on hate speech", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Facebook", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2020--2024", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Facebook. Facebook's policy on hate speech. https: //www.facebook.com/communitystandards/ hate_speech. Accessed: 2020-04-19.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Sulli, south korean k-pop star and actress, is found dead", |
|
"authors": [ |
|
{ |
|
"first": "Jacey", |
|
"middle": [], |
|
"last": "Fortin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacey Fortin. 2019. Sulli, south korean k-pop star and actress, is found dead. New York Times.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A hierarchically-labeled portuguese hate speech dataset", |
|
"authors": [ |
|
{ |
|
"first": "Paula", |
|
"middle": [], |
|
"last": "Fortuna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jo\u00e3o", |
|
"middle": [], |
|
"last": "Rocha Da", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leo", |
|
"middle": [], |
|
"last": "Silva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S\u00e9rgio", |
|
"middle": [], |
|
"last": "Wanner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nunes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Third Workshop on Abusive Language Online", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "94--104", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paula Fortuna, Jo\u00e3o Rocha da Silva, Leo Wanner, S\u00e9rgio Nunes, et al. 2019. A hierarchically-labeled portuguese hate speech dataset. In Proceedings of the Third Workshop on Abusive Language Online, pages 94-104.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Hate speech: Survey and Regulations. National Human Rights Commission of the Republic of Korea", |
|
"authors": [ |
|
{ |
|
"first": "Seong", |
|
"middle": [], |
|
"last": "Soo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seong Soo Hong. 2016. Hate speech: Survey and Regu- lations. National Human Rights Commission of the Republic of Korea.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Computing krippendorff's alpha-reliability", |
|
"authors": [ |
|
{ |
|
"first": "Klaus", |
|
"middle": [], |
|
"last": "Krippendorff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Klaus Krippendorff. 2011. Computing krippendorff's alpha-reliability.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Sentencepiece: A simple and language independent subword tokenizer and detokenizer for neural text processing", |
|
"authors": [ |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Richardson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1808.06226" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taku Kudo and John Richardson. 2018. Sentencepiece: A simple and language independent subword tok- enizer and detokenizer for neural text processing. arXiv preprint arXiv:1808.06226.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "K-pop singer goo hara found dead aged 28. The Guardian", |
|
"authors": [ |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Mccurry", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Justin McCurry. 2019a. K-pop singer goo hara found dead aged 28. The Guardian.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "K-pop under scrutiny over 'toxic fandom' after death of sulli. The Guardian", |
|
"authors": [ |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Mccurry", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Justin McCurry. 2019b. K-pop under scrutiny over 'toxic fandom' after death of sulli. The Guardian.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Results of the poleval 2019 shared task 6: First dataset and open shared task for automatic cyberbullying detection in polish twitter", |
|
"authors": [ |
|
{ |
|
"first": "Michal", |
|
"middle": [], |
|
"last": "Ptaszynski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Agata", |
|
"middle": [], |
|
"last": "Pieciukiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pawe\u0142", |
|
"middle": [], |
|
"last": "Dyba\u0142a", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michal Ptaszynski, Agata Pieciukiewicz, and Pawe\u0142 Dyba\u0142a. 2019. Results of the poleval 2019 shared task 6: First dataset and open shared task for auto- matic cyberbullying detection in polish twitter. Pro- ceedings ofthePolEval2019Workshop, page 89.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "An italian twitter corpus of hate speech against immigrants", |
|
"authors": [ |
|
{ |
|
"first": "Manuela", |
|
"middle": [], |
|
"last": "Sanguinetti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabio", |
|
"middle": [], |
|
"last": "Poletto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cristina", |
|
"middle": [], |
|
"last": "Bosco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Viviana", |
|
"middle": [], |
|
"last": "Patti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Stranisci", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manuela Sanguinetti, Fabio Poletto, Cristina Bosco, Vi- viana Patti, and Marco Stranisci. 2018. An italian twitter corpus of hate speech against immigrants. In Proceedings of the Eleventh International Confer- ence on Language Resources and Evaluation (LREC 2018).", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Bidirectional recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Kuldip", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Paliwal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "IEEE Transactions on Signal Processing", |
|
"volume": "45", |
|
"issue": "11", |
|
"pages": "2673--2681", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Schuster and Kuldip K Paliwal. 1997. Bidirec- tional recurrent neural networks. IEEE Transactions on Signal Processing, 45(11):2673-2681.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research", |
|
"authors": [ |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Krizhevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "15", |
|
"issue": "", |
|
"pages": "1929--1958", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. 2014. Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research, 15(1):1929-1958.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Twitter's policy on hate speech", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Twitter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2020--2024", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Twitter. Twitter's policy on hate speech. https://help.twitter. com/en/rules-and-policies/ hateful-conduct-policy. Accessed: 2020-04-", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Hateful symbols or hateful people? predictive features for hate speech detection on twitter", |
|
"authors": [ |
|
{ |
|
"first": "Zeerak", |
|
"middle": [], |
|
"last": "Waseem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the NAACL student research workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "88--93", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zeerak Waseem and Dirk Hovy. 2016. Hateful symbols or hateful people? predictive features for hate speech detection on twitter. In Proceedings of the NAACL student research workshop, pages 88-93.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Probable inference, the law of succession, and statistical inference", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Edwin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Wilson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1927, |
|
"venue": "Journal of the American Statistical Association", |
|
"volume": "22", |
|
"issue": "158", |
|
"pages": "209--212", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edwin B Wilson. 1927. Probable inference, the law of succession, and statistical inference. Journal of the American Statistical Association, 22(158):209-212.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Kakao suspends online comments for entertainment articles after sulli's death. The Korea Herald", |
|
"authors": [ |
|
{ |
|
"first": "Junsuk", |
|
"middle": [], |
|
"last": "Yeo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junsuk Yeo. 2019. Kakao suspends online comments for entertainment articles after sulli's death. The Ko- rea Herald.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Why naver is finally shutting down comments on celebrity news", |
|
"authors": [ |
|
{ |
|
"first": "Hyunsu", |
|
"middle": [], |
|
"last": "Yim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hyunsu Yim. 2020. Why naver is finally shutting down comments on celebrity news. The Korea Herald.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Youtube's policy on hate speech", |
|
"authors": [ |
|
{ |
|
"first": "Youtube", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Youtube. Youtube's policy on hate speech.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Semeval-2019 task 6: Identifying and categorizing offensive language in social media (offenseval)", |
|
"authors": [ |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shervin", |
|
"middle": [], |
|
"last": "Malmasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Rosenthal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noura", |
|
"middle": [], |
|
"last": "Farra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ritesh", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "75--86", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcos Zampieri, Shervin Malmasi, Preslav Nakov, Sara Rosenthal, Noura Farra, and Ritesh Kumar. 2019. Semeval-2019 task 6: Identifying and catego- rizing offensive language in social media (offense- val). In Proceedings of the 13th International Work- shop on Semantic Evaluation, pages 75-86.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Character-level convolutional networks for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junbo", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "649--657", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiang Zhang, Junbo Zhao, and Yann LeCun. 2015. Character-level convolutional networks for text clas- sification. In Advances in neural information pro- cessing systems, pages 649-657.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Detecting hate speech on twitter using a convolution-gru based deep neural network", |
|
"authors": [ |
|
{ |
|
"first": "Ziqi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Robinson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Tepper", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "European semantic web conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "745--760", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ziqi Zhang, David Robinson, and Jonathan Tepper. 2018. Detecting hate speech on twitter using a convolution-gru based deep neural network. In Eu- ropean semantic web conference, pages 745-760. Springer.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "A sample comment from the online news platform. It is composed of six parts: written date and time, masked user id, content, the number of replies, and the number of up/down votes (from top left to bottom right)." |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Confusion matrix on the model inference of hate categories." |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "" |
|
}, |
|
"TABREF1": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Distribution of the annotated corpus." |
|
}, |
|
"TABREF3": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "F1 score of benchmarks on the test set. Note that the term matching model checks the presence of hate or offensiveness. Therefore, in this case, we combine hate and offensive into a single category, turning the original ternary task into binary." |
|
}, |
|
"TABREF5": { |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "" |
|
}, |
|
"TABREF6": { |
|
"content": "<table><tr><td>\u2022 It can emit sarcasm through rhetorical</td></tr><tr><td>questions or irony.</td></tr><tr><td>\u2022 It may encompass an unethical expres-</td></tr><tr><td>sion (e.g., jokes or irrelevant questions</td></tr><tr><td>regarding the figures who passed away).</td></tr><tr><td>\u2022 A comment conveying unidentified ru-</td></tr><tr><td>mors can belong to this category.</td></tr><tr><td>3. A comment that does not incorporate any ha-</td></tr><tr><td>tred or insulting</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "2. Although a comment is not as much hateful or insulting as the above, does it make the target or the reader feel offended?\u2022 It may contain rude or aggressive contents, such as bad words, though not to the extent of hate or insult." |
|
} |
|
} |
|
} |
|
} |