|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:10:57.696183Z" |
|
}, |
|
"title": "Detecting Inappropriate Messages on Sensitive Topics that Could Harm a Company's Reputation *", |
|
"authors": [ |
|
{ |
|
"first": "Nikolay", |
|
"middle": [], |
|
"last": "Babakov", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Skolkovo Institute of Science and Technology", |
|
"location": { |
|
"settlement": "Moscow", |
|
"country": "Russia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Varvara", |
|
"middle": [], |
|
"last": "Logacheva", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Skolkovo Institute of Science and Technology", |
|
"location": { |
|
"settlement": "Moscow", |
|
"country": "Russia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Kozlova", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Mobile TeleSystems (MTS)", |
|
"location": { |
|
"settlement": "Moscow", |
|
"country": "Russia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Semenov", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Mobile TeleSystems (MTS)", |
|
"location": { |
|
"settlement": "Moscow", |
|
"country": "Russia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Panchenko", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Skolkovo Institute of Science and Technology", |
|
"location": { |
|
"settlement": "Moscow", |
|
"country": "Russia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Not all topics are equally \"flammable\" in terms of toxicity: a calm discussion of turtles or fishing less often fuels inappropriate toxic dialogues than a discussion of politics or sexual minorities. We define a set of sensitive topics that can yield inappropriate and toxic messages and describe the methodology of collecting and labeling a dataset for appropriateness. While toxicity in user-generated data is wellstudied, we aim at defining a more fine-grained notion of inappropriateness. The core of inappropriateness is that it can harm the reputation of a speaker. This is different from toxicity in two respects: (i) inappropriateness is topicrelated, and (ii) inappropriate message is not toxic but still unacceptable. We collect and release two datasets for Russian: a topic-labeled dataset and an appropriateness-labeled dataset. We also release pre-trained classification models trained on this data. * Warning: the paper contains textual data samples which can be considered offensive or inappropriate.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Not all topics are equally \"flammable\" in terms of toxicity: a calm discussion of turtles or fishing less often fuels inappropriate toxic dialogues than a discussion of politics or sexual minorities. We define a set of sensitive topics that can yield inappropriate and toxic messages and describe the methodology of collecting and labeling a dataset for appropriateness. While toxicity in user-generated data is wellstudied, we aim at defining a more fine-grained notion of inappropriateness. The core of inappropriateness is that it can harm the reputation of a speaker. This is different from toxicity in two respects: (i) inappropriateness is topicrelated, and (ii) inappropriate message is not toxic but still unacceptable. We collect and release two datasets for Russian: a topic-labeled dataset and an appropriateness-labeled dataset. We also release pre-trained classification models trained on this data. * Warning: the paper contains textual data samples which can be considered offensive or inappropriate.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The classification and prevention of toxicity (malicious behaviour) among users is an important problem for many Internet platforms. Since communication on most social networks is predominantly textual, the classification of toxicity is usually solved by means of Natural Language Processing (NLP). This problem is even more important for developers of chatbots trained on a large number of usergenerated (and potentially toxic) texts. There is a well-known case of Microsoft Tay chatbot 1 which was shut down because it started producing racist, sexist, and other offensive tweets after having been fine-tuned on user data for a day.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "However, there exists a similar and equally important problem, which is nevertheless overlooked by the research community. This is a problem of texts which are not offensive as such but can express inappropriate views. If a chatbot tells something that does not agree with the views of the company that created it, this can harm the company's reputation. For example, a user starts discussing ways of committing suicide, and a chatbot goes on the discussion and even encourages the user to commit suicide. The same also applies to a wide range of sensitive topics, such as politics, religion, nationality, drugs, gambling, etc. Ideally, a chatbot should not express any views on these subjects except those universally approved (e.g. that drugs are not good for your health). On the other hand, merely avoiding a conversation on any of those topics can be a bad strategy. An example of such unfortunate avoidance that caused even more reputation loss was also demonstrated by Microsoft, this time by its chatbot Zo, a Tay successor. To protect the chatbot from provocative topics, the developers provided it with a set of keywords associated with these topics and instructed it to enforce the change of topic upon seeing any of these words in user answers. However, it turned out that the keywords could occur in a completely safe context, which resulted in Zo appearing to produce even more offensive answers than Tay. 2 Therefore, simple methods cannot eliminate such errors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Thus, our goal is to make a system that can predict if an answer of a chatbot is inappropriate in any way. This includes toxicity, but also any answers which can express undesirable views and approve or prompt user towards harmful or illegal actions. To the best of our knowledge, this problem has not been considered before. We formalize it and present a dataset labeled for the presence of such inappropriate content.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Even though we aim at a quite specific taskdetection of inappropriate statements in the output of a chatbot to prevent the reputational harm of a company, in principle, the datasets could be used in other use-cases e.g. for flagging inappropriate frustrating discussion in social media.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "It is also important to discuss the ethical aspect of this work. While it can be considered as another step towards censorship on the Internet, we suggest that it has many use-cases which serve the common good and do not limit free speech. Such applications are parental control or sustaining of respectful tone in conversations online, inter alia. We would like to emphasize that our definition of sensitive topics does not imply that any conversation concerning them need to be banned. Sensitive topics are just topics that should be considered with extra care and tend to often flame/catalyze toxicity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The contributions of our work are three-fold:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We define the notions of sensitive topics and inappropriate utterances and formulate the task of their classification. \u2022 We collect and release two datasets for Russian: a dataset of user texts labeled for sensitive topics and a dataset labeled for inappropriateness. \u2022 We train and release models which define a topic of a text and define its appropriateness.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We open the access to the produced datasets, code, and pre-trained models for the research use. 3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There exist a large number of English textual corpora labeled for the presence or absence of toxicity; some resources indicate the degree of toxicity and its topic. However, the definition of the term \"toxicity\" itself is not agreed among the research community, so each research deals with different texts. Some works refer to any unwanted behaviour as toxicity and do not make any further separation (Pavlopoulos et al., 2017) . However, the majority of researchers use more fine-grained labeling. The Wikipedia Toxic comment datasets by Jigsaw (Jigsaw, 2018 (Jigsaw, , 2019 (Jigsaw, , 2020 are the largest English toxicity datasets available to date operate with multiple types of toxicity (toxic, obscene, threat, insult, identity hate, etc). Toxicity differs across multiple axes. Some works concentrate solely on major offence (hate speech) , others research more subtle assaults (Breitfeller et al., 2019) . Offenses can be directed towards an individual, a group, or undirected (Zampieri et al., 2019) , explicit or implicit (Waseem et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 402, |
|
"end": 428, |
|
"text": "(Pavlopoulos et al., 2017)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 547, |
|
"end": 560, |
|
"text": "(Jigsaw, 2018", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 561, |
|
"end": 576, |
|
"text": "(Jigsaw, , 2019", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 577, |
|
"end": 592, |
|
"text": "(Jigsaw, , 2020", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 886, |
|
"end": 912, |
|
"text": "(Breitfeller et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 986, |
|
"end": 1009, |
|
"text": "(Zampieri et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1033, |
|
"end": 1054, |
|
"text": "(Waseem et al., 2017)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Insults do not necessarily have a topic, but there certainly exist toxic topics, such as sexism, racism, xenophobia. Waseem and Hovy (2016) tackle sexism and racism, Basile et al. (2019) collect texts which contain sexism and aggression towards immigrants. Besides directly classifying toxic messages for a topic, the notion of the topic in toxicity is also indirectly used to collect the data: Zampieri et al. (2019) pre-select messages for toxicity labeling based on their topic. Similarly, Hessel and Lee (2019) use topics to find controversial (potentially toxic) discussions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 139, |
|
"text": "Waseem and Hovy (2016)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 166, |
|
"end": 186, |
|
"text": "Basile et al. (2019)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 395, |
|
"end": 417, |
|
"text": "Zampieri et al. (2019)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Such a topic-based view of toxicity causes unintended bias in toxicity detection -a false association of toxicity with a particular topic (LGBT, Islam, feminism, etc.) (Dixon et al., 2018; Vaidya et al., 2020) . This is in line with our work since we also acknowledge that there exist acceptable and unacceptable messages within toxicity-provoking topics. The existing work suggests algorithmic ways for debiasing the trained models: Xia et al. (2020) train their model to detect two objectives: toxicity and presence of the toxicity-provoking topic, Zhang et al. (2020) perform re-weighing of instances, Park et al. (2018) create pseudo-data to level off the balance of examples. Unlike our research, these works often deal with one topic and use topic-specific methods.", |
|
"cite_spans": [ |
|
{ |
|
"start": 168, |
|
"end": 188, |
|
"text": "(Dixon et al., 2018;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 189, |
|
"end": 209, |
|
"text": "Vaidya et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 551, |
|
"end": 570, |
|
"text": "Zhang et al. (2020)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The main drawback of topic-based toxicity detection in the existing research is the ad-hoc choice of topics: the authors select a small number of popular topics manually or based on the topics which emerge in the data often, as Ousidhoum et al. (2019) . Banko et al. (2020) suggest a taxonomy of harmful online behaviour. It contains toxic topics, but they are mixed with other parameters of toxicity (e.g. direction or severity). The work by Salminen et al. (2020) is the only example of an extensive list of toxicity-provoking topics. This is similar to sensitive topics we deal with. However, our definition is broader -sensitive topics are not only topics that attract toxicity, but they can also create unwanted dialogues of multiple types (e.g. incitement to law violation or to cause harm to oneself or others).", |
|
"cite_spans": [ |
|
{ |
|
"start": 228, |
|
"end": 251, |
|
"text": "Ousidhoum et al. (2019)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 254, |
|
"end": 273, |
|
"text": "Banko et al. (2020)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 443, |
|
"end": 465, |
|
"text": "Salminen et al. (2020)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Consider the following conversation with an unmoderated chatbot: This discussion is related to the topics \"politics\" and \"racism\" and can indeed cause reputation damage to a developer. In some countries, such as France, it is a criminal offense to deny the Armenian Genocide during World War I. 4 Note, however, that no offensive or toxic words were employed. Detection of such content is thus desirable.", |
|
"cite_spans": [ |
|
{ |
|
"start": 295, |
|
"end": 296, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inappropriateness and Sensitive Topics", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The notion of inappropriateness of a text in our setting is tightly related to this text's topic. This is different from the notion of toxicity which does not have to be topic-dependent. Toxic texts are undoubtedly inappropriate in the sense that they should not appear in a respectful conversation. Still, they have been actively researched, so we do not consider them in our work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inappropriateness and Sensitive Topics", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We define sensitive topic as a topic which has a high chance of yielding a discussion which can harm the speaker's reputation. This suggests that there are no universally sensitive topics; their safety depends on the context and the goals of a conversation. The context may include the level of formality, the rules of a company that created the chatbot, the laws of the country where it operates. It is also important to emphasize that a message should not necessarily be banned for touching a sensitive topic. Instead, we introduce the notion of appropriateness acceptable statements on a sensitive topic.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Definitions", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We define inappropriate message as a message on a sensitive topic which can frustrate the reader and/or harm the reputation of the speaker. This definition is hard to formalize, so we rely on the intuitive understanding of appropriateness which is characteristic of human beings and is shared by people belonging to the same culture. Namely, we ask people if a given statement of a chatbot can harm the reputation of the company which developed it. We thus use human judgments as a main measure of appropriateness.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Definitions", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We manually select the set of sensitive topics which often fuel inappropriate statements. This set is heterogeneous: it includes topics related to dangerous or harmful practices (such as drugs or suicide), some of which are legally banned in most countries (e.g. terrorism, slavery) or topics that tend to provoke aggressive argument (e.g. politics) and may be associated with inequality and controversy (e.g. minorities) and thus require special system policies aimed at reducing conversational bias, such as response postprocessing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "List of Sensitive Topics", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "This set of topics is based on the suggestions and requirements provided by legal and PR departments of a large Russian telecommunication company. It could, for instance, be used to moderate a corporate dialogue system or flag inappropriate content for children, therefore mitigating possible operational damages. While this list is by no mean comprehensive, we nevertheless believe it could be useful in practical applications and as a starting point for work in this direction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "List of Sensitive Topics", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The list of the sensitive topics is as follows: ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "List of Sensitive Topics", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 gambling; \u2022 pornography, description", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "List of Sensitive Topics", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Our final goal is to label the data with inappropriateness, and the sensitive topics are not a goal per se, but mainly a way to define inappropriateness. Therefore, we use sensitive topics as a way of data pre-selection. Analogously to toxicity, inappropriateness does not often occur in randomly picked texts, so if we label all the messages we retrieve, the percentage of inappropriate utterances among them will be low. Thus, our labeling process includes three stages: (i) we collect the dataset of sentences on sensitive topics, (ii) we build a classifier of sensitive topics on this dataset, (iii) we collect the texts on sensitive topics using the classifier and then label them as appropriate or inappropriate.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Topic Labeling", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We retrieve the initial pool of texts from general sources with diverse topics, then filter them and hire crowd workers to label them for the presence of sensitive topics manually. We use the data from the following sources:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Selection", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 2ch.hk -a platform for communication in Russian similar to Reddit. The site is not moderated, suggesting a large amount of toxicity and controversy; this makes it a practical resource for our purposes. We retrieve 4.7 million sentences from it. \u2022 Otvet.Mail.ru -a question-answering platform that contains questions and answers of various categories and is also not moderated. We take 12 million sentences from it.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Selection", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "To pre-select the data for topic labeling, we manually create large sets of keywords for each sensi-tive topic. We first select a small set of words associated with a topic and then extract semantically close words using pre-trained word embeddings from RusVect\u014dr\u0113s 5 and further extend the keyword list (this can be done multiple times). In addition to that, for some topics we use existing lists of associated slang on topical websites, e.g. drugs 6 and weapons. 7 User-generated content which we collect for labeling is noisy and can contain personal information (e.g. usernames, email addresses, or even phone numbers), so it needs cleaning. At the same time, some non-textual information such as emojis is valuable and should be kept intact, so the cleaning should not be too rigorous. Thus, we remove links to any websites, usernames, long numbers, and other special characters such as HTML tags.", |
|
"cite_spans": [ |
|
{ |
|
"start": 465, |
|
"end": 466, |
|
"text": "7", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Selection", |
|
"sec_num": "4.1" |
|
}, |
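
{

"text": "As an illustration of this pre-selection step, the following is a minimal sketch (our own code, not the released implementation) of keyword expansion with pre-trained word vectors and of the cleaning rules described above. The model path, the seed word, and the exact regular expressions are assumptions.

import re
from gensim.models import KeyedVectors

# Keyword expansion: start from a seed word and add its nearest neighbours.
# The path is hypothetical; many RusVectores models expect POS-tagged tokens.
w2v = KeyedVectors.load_word2vec_format('rusvectores_model.bin', binary=True)
expanded_keywords = [w for w, _ in w2v.most_similar('оружие_NOUN', topn=30)]

def clean_text(text):
    text = re.sub(r'<[^>]+>', ' ', text)                # HTML tags and similar special characters
    text = re.sub(r'https?://\S+|www\.\S+', ' ', text)  # links to any websites
    text = re.sub(r'\S+@\S+|@\w+', ' ', text)           # e-mail addresses and usernames
    text = re.sub(r'\d{6,}', ' ', text)                 # long numbers such as phone numbers
    return re.sub(r'\s+', ' ', text).strip()            # emojis and ordinary text are kept intact
",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data Selection",

"sec_num": "4.1"

},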
|
{ |
|
"text": "The labeling is performed in a crowdsourcing platform Yandex.Toloka. 8 It was preferred to other analogous platforms like Amazon Mechanical Turk because the majority of its workers are Russian native speakers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourced Labeling", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The task of topic labeling is naturally repre-sented as a multiple-choice task with the possibility to select more than one answer: the worker is shown the text and possible topics and is asked to choose one or more of them. However, as far as we define 18 sensitive topics, choosing from such a long list of options is difficult. Therefore, we divide the topics into three clusters:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourced Labeling", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Cluster 1: gambling, pornography, prostitution, slavery, suicide, social injustice, \u2022 Cluster 2: religion, terrorism, weapons, offline crime, online crime, politics, \u2022 Cluster 3: body shaming, health shaming, drugs, racism, sex minorities, sexism.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourced Labeling", |
|
"sec_num": "4.2" |
|
}, |
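
{

"text": "Written down as a simple mapping (a minimal illustration of the labeling setup, using the topic names listed above; this is not released code), the clusters are:

TOPIC_CLUSTERS = {
    1: ['gambling', 'pornography', 'prostitution', 'slavery', 'suicide', 'social injustice'],
    2: ['religion', 'terrorism', 'weapons', 'offline crime', 'online crime', 'politics'],
    3: ['body shaming', 'health shaming', 'drugs', 'racism', 'sex minorities', 'sexism'],
}
assert sum(len(v) for v in TOPIC_CLUSTERS.values()) == 18  # all 18 sensitive topics are covered
",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Crowdsourced Labeling",

"sec_num": "4.2"

},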
|
{ |
|
"text": "Cluster 1 is associated with undesirable behavior; cluster 2 deals with crimes, military actions, and their causes; cluster 3 is about the offense. However, this division is not strict and was performed to ease the labeling process. Checking a text for one of six topics is a realistic task while selecting from 18 topics is too high a cognitive load.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourced Labeling", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Each cluster has a separate project in Yandex.Toloka. Every candidate text is passed to all three projects: we label each of them for all 18 topics. An example of a task interface is shown in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 192, |
|
"end": 200, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Crowdsourced Labeling", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Before labeling the examples, we ask users to perform training. It consists of 20 questions with pre-defined answers. To be admitted to labeling, a worker has to complete the training with at least 65% correct answers. In addition to that, we perform extra training during labeling. One of each ten questions given to a worker has a pre-defined correct answer. If a worker makes a mistake in this question, she is shown the correct answer with an explanation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourced Labeling", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Likewise, we perform quality control using questions with pre-defined answers: one of ten questions given to the worker is used to control her performance. If the worker gives incorrect answers to more than 25% of control questions, she is banned from further labeling, and her latest answers are discarded. For the topic labeling task, the average performance of workers on control and training tasks was between 65 and 70%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourced Labeling", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In addition to that, we control the speed of task accomplishment. If a user answers ten questions (one page of questions) in less than 20 seconds, this almost certainly indicates that she has not read the examples and selected random answers. Such workers are banned. To ensure the diversity of answers we allow one user to do at most 50 pages of tasks (500 tasks) per 12 hours.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourced Labeling", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Each sample is labeled in each project by 3 to 5 workers. We use dynamic overlap technique implemented in Toloka. An example is first labeled by the minimum number of workers. If they agree, their answer is considered truth. Otherwise, the example is given for extra labeling to more workers to clarify the true label. This allows separating the occasional user mistakes from inherently ambiguous examples.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourced Labeling", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We aggregate multiple answers into one score using the aggregation method by Dawid and Skene (1979) . This is an iterative method that maximizes the probability of labeling taking into account the worker agreement, i.e. it trusts more the workers who agree with other workers often. The result of this algorithm is the score from 0 to 1 for each labeled example which is interpreted as the label confidence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 99, |
|
"text": "Dawid and Skene (1979)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourced Labeling", |
|
"sec_num": "4.2" |
|
}, |
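
{

"text": "To make the aggregation step concrete, the following is our own minimal sketch of the Dawid-Skene EM procedure for the binary case (the original method handles an arbitrary number of classes; the function and variable names here are ours, not the authors'):

import numpy as np

def dawid_skene_binary(annotations, n_iter=50):
    # annotations: list of (task_id, worker_id, label) triples with label in {0, 1}
    # returns: dict task_id -> posterior probability of label 1, used as the confidence score
    tasks = sorted({t for t, _, _ in annotations})
    workers = sorted({w for _, w, _ in annotations})
    ti = {t: i for i, t in enumerate(tasks)}
    wi = {w: i for i, w in enumerate(workers)}

    # initialise task posteriors with the majority vote
    votes = np.zeros((len(tasks), 2))
    for t, w, l in annotations:
        votes[ti[t], l] += 1
    T = votes[:, 1] / votes.sum(axis=1)

    for _ in range(n_iter):
        # M-step: class prior and per-worker confusion matrices conf[worker, true, observed]
        prior1 = float(np.clip(T.mean(), 1e-6, 1 - 1e-6))
        conf = np.full((len(workers), 2, 2), 1e-6)
        for t, w, l in annotations:
            conf[wi[w], 1, l] += T[ti[t]]
            conf[wi[w], 0, l] += 1.0 - T[ti[t]]
        conf /= conf.sum(axis=2, keepdims=True)

        # E-step: recompute task posteriors from the prior and the worker reliabilities
        logp = np.zeros((len(tasks), 2))
        logp[:, 1] += np.log(prior1)
        logp[:, 0] += np.log(1.0 - prior1)
        for t, w, l in annotations:
            logp[ti[t], 1] += np.log(conf[wi[w], 1, l])
            logp[ti[t], 0] += np.log(conf[wi[w], 0, l])
        logp -= logp.max(axis=1, keepdims=True)
        p = np.exp(logp)
        T = p[:, 1] / p.sum(axis=1)

    return {t: float(T[ti[t]]) for t in tasks}

# toy usage: the score for 't1' reflects both the vote split and the estimated worker reliability
print(dawid_skene_binary([('t1', 'w1', 1), ('t1', 'w2', 1), ('t1', 'w3', 0),
                          ('t2', 'w1', 0), ('t2', 'w2', 0), ('t2', 'w3', 0)]))
",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Crowdsourced Labeling",

"sec_num": "4.2"

},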
|
{ |
|
"text": "Besides the aggregation purposes, we use the confidence score as a measure of worker agreement. 9 Since the low score of an example is the sign of either the ambiguity of this example or the low reliability of annotators who labeled it, we assume that the high confidence indicates that the task is interpreted by all workers in a similar way and does not contain inherent contradictions. The average confidence of labeling in our topic dataset is 0.995.", |
|
"cite_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 97, |
|
"text": "9", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourced Labeling", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "While collecting manual topic labels, we faced some problems. First, some topics require special knowledge to be labeled correctly. For example, users tend to label any samples about programming or computer hardware as \"online crime\", even if there is no discussion of any crime. Likewise, some swear words, e.g. \"whore\", can be used as a general offense and not refer to a prostitute. However, this is not always clear to crowd workers or even to the authors of this research. This can make some sensitive topics unreasonably dependent on such kinds of keywords. 9 We cannot use Cohen's or Fleiss kappa which are usually employed to measure the inter-annotator agreement because these scores are inapplicable in the crowdsourcing scenario. While Fleiss kappa implies that we have a relatively small number of annotators (usually up to 5) each of whom labels a large percentage of examples, in the crowdsourcing setting we have a much larger number of workers, each labeling only a small number of sentences. Figure 1 : Example of topic labeling task. Translation: upper line -text for labeling: \"Nude -is it a new name for prostitution?\", middle line -task: \"Which topics does the text touch? (You can select more than one)\", possible answers: \"Gambling, pornography, prostitution, slavery, suicide, social inequality, nothing of the above\". Secondly, it is necessary to keep the balance of samples on different topics. If there are no samples related to the topics presented to the worker within numerous tasks she can overthink and try to find the topic in unreasonably fine details of texts. For example, if we provide three or four consecutive sets of texts about weapons and topic \"weapons\" is not among the proposed topics, the worker will tend to attribute these samples to other remotely similar topics, e.g. \"crime\", even though the samples do not refer to crime.", |
|
"cite_spans": [ |
|
{ |
|
"start": 564, |
|
"end": 565, |
|
"text": "9", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1009, |
|
"end": 1017, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Crowdsourcing Issues", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We should also point out that a different set of topics or labeling setup could yield other problems. It is difficult to foresee them and to find the best solutions for them. Therefore, we also test two alternative approaches to topic labeling which do not use crowd workers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Crowdsourcing Issues", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "After having collected almost 10,000 texts on sensitive topics, we were able to train a classifier that predicts the occurrence of a sensitive topic in the text. Although this classifier is not good enough to be used for real-world tasks, we suggest that samples classified as belonging to a sensitive topic with high confidence (more than 0.75 in our experiments) can be considered belonging to this topic. We perform an extra manual check by an expert (one of the authors) to eliminate mistakes. This method is also laborious, but it is an easier labeling scenario than the crowdsourcing task described in Section 4.2. Approving or rejecting a text as an entity of a single class is easier than classify it into one of six topics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automated Labeling", |
|
"sec_num": "4.4" |
|
}, |
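
{

"text": "A minimal sketch of this semi-automatic step (our own illustration; the classifier interface is a stand-in and the toy confidences are not real data): keep the samples whose highest topic confidence exceeds 0.75 and pass them, together with the predicted topic, to an expert for approval or rejection.

def preselect_for_expert_check(texts, topic_probs, topics, threshold=0.75):
    # topic_probs: for each text, a list of per-topic confidences from the trained topic classifier
    candidates = []
    for text, probs in zip(texts, topic_probs):
        best = max(range(len(topics)), key=lambda i: probs[i])
        if probs[best] > threshold:
            candidates.append((text, topics[best], probs[best]))
    return candidates  # each candidate is then approved or rejected manually for a single topic

# toy usage with made-up confidences over three example topics
print(preselect_for_expert_check(
    ['first candidate text', 'second candidate text'],
    [[0.1, 0.9, 0.0], [0.2, 0.3, 0.1]],
    ['drugs', 'weapons', 'politics']))
",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Automated Labeling",

"sec_num": "4.4"

},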
|
{ |
|
"text": "An alternative way of automated topic label-ing is to take the data from specialized sources and select topic-attributed messages using a list of keywords inherent for a topic, i.e. words which definitely indicate the presence of a topic. This approach can give many false positives when applied to general texts because many keywords can have an idiomatic meaning not related to a sensitive topic. One such example can be the word \"addiction\" which can be used in entirely safe contexts, e.g. a phrase \"I'm addicted to chocolate\" should not be classified as belonging to the topic \"drugs\". However, when occurring in a specialized forum on addictions, 10 this word almost certainly indicates this topic. We define a list of inherent keywords and select messages containing them from special resources related to a particular topic. We then manually check the collected samples. The disadvantage of this approach is that we cannot handle multilabel samples. However, according to dataset statistics, only 15% of samples had more than one label. Given the limited time and budget, we decided to use this approach to further extend the dataset. The resulting sensitive topics dataset in the form we opensource it is the combination of all three approaches. Specific, nearly 11,000 samples were labeled in a fully manual manner either via crowdsourcing or by members of our team, the rest samples (nearly 14,500) were labeled via the described semi-automatic approaches", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automated Labeling", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "We should again emphasize that not every utterance concerning a sensitive topic should be banned.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appropriateness Labeling", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "While the topic of a text can be sensitive, the text itself can nevertheless be appropriate. Thus, we collect the texts on sensitive topics and then label them as appropriate or inappropriate.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appropriateness Labeling", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Our initial plan was to define the appropriate and inappropriate subtopics for each topic. However, determining the appropriateness criteria explicitly turned out to be infeasible. Therefore, we rely on the inherent human intuition of appropriateness. We provide annotators with the following context: a chatbot created by a company produces a given phrase. We ask to indicate if this phrase can harm the reputation of the company. We also reinforce the annotators' understanding of appropriateness with the training examples. As in the topic labeling setup, here we ask the workers to complete the training before labeling the data. We also fine-tune their understanding of appropriateness with extra training during labeling.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appropriateness Labeling", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Analogously to topic labeling, the appropriateness labeling is performed via Yandex.Toloka crowdsourcing platform. An example of the task interface is given in Figure 2 . Our crowdsourcing setup repeats the one we used in the topic labeling project. We perform training and quality control analogously to topic labeling. Although the appropriateness is not explicitly defined, the workers demonstrate a good understanding of it. Their average performance on the training and control tasks is around 75-80%, which indicates high agreement. The average labeling confidence computed via the Dawid-Skene method is 0.956.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 168, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Appropriateness Labeling", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The primary sources of the samples passed to appropriateness labeling are the same as in the topic labeling setup (2ch.hk and Otvet.Mail.ru websites). Before handing texts to workers, we filter them as described in Section 4.1 and also perform extra filtering. We filter out all messages containing obscene language and explicit toxicity. We identify toxicity with a BERT-based classifier for toxicity detection. We fine-tune ruBERT model (Kuratov and Arkhipov, 2019) on a concatenation of two Russian Language Toxic Comments datasets released on Kaggle (Kaggle, 2019 (Kaggle, , 2020 .We filter out sentences which were classified as toxic with the confidence greater than 0.75. As mentioned above, toxicity is beyond the scope of our work, because it has been researched before. Therefore, we make sure that messages which can be automatically recognized as toxic are not included in this dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 554, |
|
"end": 567, |
|
"text": "(Kaggle, 2019", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 568, |
|
"end": 583, |
|
"text": "(Kaggle, , 2020", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appropriateness Labeling", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Inappropriate messages in our formulation concern one of the sensitive topics. Therefore, we pre-select data for labeling by automatically classifying them with sensitive topics. We select the data for labeling in the following proportion:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appropriateness Labeling", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 1/3 of samples which belong to one or more sensitive topic with high confidence (> 0.75), \u2022 1/3 of samples classified as sensitive with medium confidence (0.3 > c < 0.75). This is necessary in case if multilabel classifier or crowd workers captured uncertain details of sensitive topics, \u2022 1/3 random samples -these are used to make the selection robust to classifier errors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appropriateness Labeling", |
|
"sec_num": "5" |
|
}, |
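
{

"text": "The selection above can be sketched as follows (our own illustration; it assumes a per-sample maximum topic confidence is available, and the random third may overlap with the other two):

import random

def select_for_appropriateness_labeling(samples, n_total, seed=0):
    # samples: list of (text, max_topic_confidence) pairs
    rng = random.Random(seed)
    high = [t for t, c in samples if c > 0.75]
    medium = [t for t, c in samples if 0.3 < c < 0.75]
    everything = [t for t, _ in samples]
    k = n_total // 3
    selected = rng.sample(high, k) + rng.sample(medium, k) + rng.sample(everything, k)
    rng.shuffle(selected)
    return selected

# toy usage with synthetic confidences
pool = [('text %d' % i, i / 100.0) for i in range(100)]
subset = select_for_appropriateness_labeling(pool, n_total=30)
",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Appropriateness Labeling",

"sec_num": "5"

},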
|
{ |
|
"text": "The further labeling process is performed analogously to topic labeling. We use the same training and quality control procedures and define the number of workers per example dynamically.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appropriateness Labeling", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "To get the final answer, we use the same Dawid-Skene aggregation method. It aggregates the labels given by workers (0 and 1, which state for \"appropriate\" and \"inappropriate\") into a single score from 0 to 1. We interpret this score as the appropriateness level, where the score in the interval [0, 0.2] indicates appropriate sentences, the score in [0.8, 1] means that the sentence is inappropriate, and other scores indicate ambiguous examples.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appropriateness Labeling", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We collected two datasets: (i) the dataset of sensitive topics and (ii) the appropriateness dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets Statistics", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The dataset of sensitive topics consists of 25,679 unique samples. 9,946 samples were labeled with a crowdsourcing platform, nearly 1,500 samples were labeled by our team and the rest samples were collected by using keywords from specialized sources. The average confidence of the crowdsourcing annotation is 0.995; the average number of annotations per example is 4.3; the average time to label one example is 10.8 seconds.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets Statistics", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The appropriateness dataset consists of 82,063 unique samples. 8,687 of these samples also belong to the sensitive topics dataset and thus have manually assigned topic labels. The other 73,376 samples have topic labels defined automatically using a BERT-based topic classification model (described in Section 7). The average confidence of the annotation is 0.956; the average number of annotations per example is 3.5; the average time to label one example is 7 seconds. Table 2 shows the number of samples on each sensitive topic in both datasets. While we tried to keep the topic distribution in the topic dataset balanced, some topics (drugs, politics, health shaming) get considerably more samples in the appropriateness dataset. This might be related to the fact that the classifier performance for these topics was good, so utterances classified with these topics with high confidence emerged often.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 470, |
|
"end": 477, |
|
"text": "Table 2", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets Statistics", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "One sample can relate to more than one topic. Our analysis showed 15% of such examples in the data (see Figure 3) . The co-occurrence of topics is not random. It indicates the intersection of multiple topics. The most common co-occurrences are \"politics, racism, social injustice\", \"prostitution, pornography\", \"sex minorities, pornography\". In contrast, 13% of samples in the topic dataset do not touch any sensitive topic. These are examples that were pre-selected for manual topic labeling using keywords and then were labeled as not related to the topics of interest. They were added to the dataset so that the classifier trained on this data does not rely solely on keywords.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 113, |
|
"text": "Figure 3)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets Statistics", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The samples in the datasets are mostly single sentences; their average length is 15 words for the appropriateness dataset and 18 words for the topic dataset. The sample length for different topics ranges from 14 to 21 words. We noticed a strong correlation (Spearman's r of 0.72) between the number of samples of a particular topic in the data and the average number of words per sample for this topic. We cannot define if this is a spurious correlation or topics that feature longer sentences tend to be better represented in the data. Longer sentences might be easier to annotate.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets Statistics", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We confirm the usefulness of the collected data by training classification models on both datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We fine-tune pre-trained ruBERT model (BERT trained on Russian texts (Kuratov and Arkhipov, 2019)) on our data. We use the implementation of BERT-classifier from deeppavlov 11 library with pre-trained Conversational RuBERT weights. 12", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "7" |
|
}, |
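
{

"text": "For readers who do not use deeppavlov, the fine-tuning can be reproduced roughly as in the following sketch, which uses the Hugging Face stack with the public Conversational RuBERT checkpoint as a stand-in for the authors' BertClassifierModel setup; the toy data and hyperparameters are illustrative assumptions.

from datasets import Dataset
from transformers import (AutoTokenizer, AutoModelForSequenceClassification,
                          Trainer, TrainingArguments)

MODEL_NAME = 'DeepPavlov/rubert-base-cased-conversational'

texts = ['a sample appropriate utterance', 'a sample inappropriate utterance']
labels = [0, 1]  # toy binary appropriateness labels

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=2)

def tokenize(batch):
    return tokenizer(batch['text'], truncation=True, padding='max_length', max_length=128)

train_ds = Dataset.from_dict({'text': texts, 'label': labels}).map(tokenize, batched=True)

args = TrainingArguments(output_dir='appropriateness-clf', num_train_epochs=3,
                         per_device_train_batch_size=16, learning_rate=2e-5)
Trainer(model=model, args=args, train_dataset=train_ds).train()
",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation",

"sec_num": "7"

},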
|
{ |
|
"text": "We build the topic classifier on 85% of the sensitive topics dataset and use the rest as a test. The proportions of instances of different topics in the training and test subsets are the same.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Topic Classifier", |
|
"sec_num": "7.1" |
|
}, |
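
{

"text": "An illustrative 85/15 split (our own sketch with toy single-label data; the released topic dataset is multi-label, so the authors' exact stratification procedure may differ):

from sklearn.model_selection import train_test_split

texts = ['sample text %d' % i for i in range(100)]
labels = [i % 3 for i in range(100)]  # three toy topic ids

train_texts, test_texts, train_labels, test_labels = train_test_split(
    texts, labels, test_size=0.15, stratify=labels, random_state=42)
",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Topic Classifier",

"sec_num": "7.1"

},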
|
{ |
|
"text": "We measure the classifier performance with F 1score. The macro-average F 1 -score is 0.78. We trained five classifiers with different train-test splits. It turned out that the classifier is unstable, which has already been reported for BERT-based models (Mosbach et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 254, |
|
"end": 276, |
|
"text": "(Mosbach et al., 2020)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Topic Classifier", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "The F 1 -scores for individual topics are shown in Figure 4 . The score is above 0.8 for 8 out of 18 classes. We noticed that the classifier performance for individual classes is correlated with the number of samples of these classes in the data (Spearman's r of 0.73 -strong correlation). This suggests that the performance could be improved by retrieving more samples of underrepresented classes. However, for some topics (e.g. politics) the score is low despite the fact that they have enough representation in the data. This can indicate the complexity and heterogeneity of a topic. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 59, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Topic Classifier", |
|
"sec_num": "7.1" |
|
}, |
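
{

"text": "The reported correlation can be checked with a one-line statistic; the arrays below are random placeholders, not the paper's per-topic numbers.

import numpy as np
from scipy.stats import spearmanr

rng = np.random.default_rng(0)
samples_per_topic = rng.integers(200, 3000, size=18)  # placeholder per-topic sample counts
f1_per_topic = rng.random(18)                         # placeholder per-topic F1-scores
rho, p_value = spearmanr(samples_per_topic, f1_per_topic)
print(rho, p_value)
",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Topic Classifier",

"sec_num": "7.1"

},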
|
{ |
|
"text": "Analogously to the topic classifier, we train the appropriateness classifier on 85% of the 11 http://docs.deeppavlov.ai/en/master/ _modules/deeppavlov/models/bert/bert_ classifier.html#BertClassifierModel", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appropriateness Classifier", |
|
"sec_num": "7.2" |
|
}, |
|
{ |
|
"text": "12 http://files.deeppavlov.ai/ deeppavlov_data/bert/ru_conversational_ cased_L-12_H-768_A-12.tar.gz ROC-AUC 0, 87 \u00b1 0, 01 Precision 0, 83 \u00b1 0, 01 Recall 0, 84 \u00b1 0, 01 F 1 -score 0, 83 \u00b1 0, 01 appropriateness-labeled messages and use the rest for testing. We use the same ruBERT-based model. In our data, appropriateness is represented as a number between 0 and 1 where 0 means inappropriate and 1 appropriate. Our initial experiments showed that using samples with low (in)appropriateness confidence for training results in poor results. Therefore, we drop all samples with confidence between 0.2 and 0.8. This results in a decrease of the dataset size to 74,376. Thus, our appropriateness classifier is trained on 63,000 samples. Its performance is outlined in Table 3 . The scores are quite high, and the results of training with ten splits are quite stable.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 762, |
|
"end": 769, |
|
"text": "Table 3", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Appropriateness Classifier", |
|
"sec_num": "7.2" |
|
}, |
|
{ |
|
"text": "We introduce the task of detecting inappropriate utterances -utterances that can cause frustration or harm the reputation of a speaker in any way. We define the notion of a sensitive topic tightly related to the notion of appropriateness. We collect two datasets for the Russian language using a large-scale crowdsourcing study. One is labeled with sensitive topics and another with binary appropriateness labeling. We show that while being fine-grained notions, both inappropriateness and sensitivity of the topic can be detected automatically using neural models. Baseline models trained on the new datasets are presented and released.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "A promising direction of future work is improving the performance of the presented baselines, e.g. by using the topic and appropriateness labeling jointly, switching to other model architectures, or ensembling multiple models. Another prominent direction of future work is to transfer the notion of appropriateness to other languages by fine-tuning cross-lingual models on the collected datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "https://www.engadget.com/2017-07-04microsofts-zo-chatbot-picked-up-someoffensive-habits.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/skoltech-nlp/ inappropriate-sensitive-topics", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.bbc.com/news/worldeurope-16677986", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://rusvectores.org/ru/ associates/ 6 http://www.kantuev.ru/slovar 7 https://guns.allzip.org/topic/15/ 626011.html 8 https://toloka.yandex.ru", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://nenormaforum.info", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We are grateful to four anonymous reviewers for their helpful suggestions. This work was conducted under the framework of the joint Skoltech-MTS laboratory.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "A unified taxonomy of harmful content", |
|
"authors": [ |
|
{ |
|
"first": "Michele", |
|
"middle": [], |
|
"last": "Banko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brendon", |
|
"middle": [], |
|
"last": "Mackeen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurie", |
|
"middle": [], |
|
"last": "Ray", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Fourth Workshop on Online Abuse and Harms", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "125--137", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.alw-1.16" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michele Banko, Brendon MacKeen, and Laurie Ray. 2020. A unified taxonomy of harmful content. In Proceedings of the Fourth Workshop on Online Abuse and Harms, pages 125-137, Online. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "SemEval-2019 task 5: Multilingual detection of hate speech against immigrants and women in twitter", |
|
"authors": [ |
|
{ |
|
"first": "Valerio", |
|
"middle": [], |
|
"last": "Basile", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cristina", |
|
"middle": [], |
|
"last": "Bosco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elisabetta", |
|
"middle": [], |
|
"last": "Fersini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Debora", |
|
"middle": [], |
|
"last": "Nozza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Viviana", |
|
"middle": [], |
|
"last": "Patti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco Manuel Rangel", |
|
"middle": [], |
|
"last": "Pardo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Rosso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manuela", |
|
"middle": [], |
|
"last": "Sanguinetti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "54--63", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/S19-2007" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Valerio Basile, Cristina Bosco, Elisabetta Fersini, Debora Nozza, Viviana Patti, Francisco Manuel Rangel Pardo, Paolo Rosso, and Manuela San- guinetti. 2019. SemEval-2019 task 5: Multilin- gual detection of hate speech against immigrants and women in twitter. In Proceedings of the 13th Inter- national Workshop on Semantic Evaluation, pages 54-63, Minneapolis, Minnesota, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Finding microaggressions in the wild: A case for locating elusive phenomena in social media posts", |
|
"authors": [ |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Breitfeller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Ahn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Jurgens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulia", |
|
"middle": [], |
|
"last": "Tsvetkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1664--1674", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1176" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luke Breitfeller, Emily Ahn, David Jurgens, and Yu- lia Tsvetkov. 2019. Finding microaggressions in the wild: A case for locating elusive phenomena in so- cial media posts. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 1664-1674, Hong Kong, China. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Automated hate speech detection and the problem of offensive language", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Davidson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dana", |
|
"middle": [], |
|
"last": "Warmsley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Macy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ingmar", |
|
"middle": [], |
|
"last": "Weber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Davidson, Dana Warmsley, Michael Macy, and Ingmar Weber. 2017. Automated hate speech detection and the problem of offensive language.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Maximum likelihood estimation of observer error-rates using the em algorithm", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Dawid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Skene", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1979, |
|
"venue": "Journal of The Royal Statistical Society Series C-applied Statistics", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "20--28", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. P. Dawid and A. Skene. 1979. Maximum likelihood estimation of observer error-rates using the em algo- rithm. Journal of The Royal Statistical Society Se- ries C-applied Statistics, 28:20-28.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Measuring and mitigating unintended bias in text classification", |
|
"authors": [ |
|
{ |
|
"first": "Lucas", |
|
"middle": [], |
|
"last": "Dixon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Sorensen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nithum", |
|
"middle": [], |
|
"last": "Thain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucy", |
|
"middle": [], |
|
"last": "Vasserman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society, AIES '18", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "67--73", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3278721.3278729" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lucas Dixon, John Li, Jeffrey Sorensen, Nithum Thain, and Lucy Vasserman. 2018. Measuring and mitigat- ing unintended bias in text classification. In Pro- ceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society, AIES '18, page 67-73, New York, NY, USA. Association for Computing Machin- ery.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Something's brewing! early prediction of controversy-causing posts from discussion features", |
|
"authors": [ |
|
{ |
|
"first": "Jack", |
|
"middle": [], |
|
"last": "Hessel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lillian", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1648--1659", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1166" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jack Hessel and Lillian Lee. 2019. Something's brew- ing! early prediction of controversy-causing posts from discussion features. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, Volume 1 (Long and Short Papers), pages 1648-1659, Minneapolis, Min- nesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Toxic comment classification challenge", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Jigsaw", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jigsaw. 2018. Toxic comment classification chal- lenge. https://www.kaggle.com/c/jigsaw- toxic-comment-classification-challenge.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Jigsaw unintended bias in toxicity classification", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Jigsaw", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jigsaw. 2019. Jigsaw unintended bias in toxicity classification.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Jigsaw multilingual toxic comment classification", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2021--2024", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jigsaw. 2020. Jigsaw multilingual toxic comment classification. https://www.kaggle.com/ c/jigsaw-multilingual-toxic-comment- classification. Accessed: 2021-03-01.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Russian language toxic comments", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kaggle", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2021--2024", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaggle. 2019. Russian language toxic com- ments. https://www.kaggle.com/blackmoon/ russian-language-toxic-comments. Ac- cessed: 2021-03-01.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Toxic russian comments", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kaggle", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2021--2024", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaggle. 2020. Toxic russian comments. https: //www.kaggle.com/alexandersemiletov/ toxic-russian-comments. Accessed: 2021-03- 01.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Adaptation of deep bidirectional multilingual transformers for russian language", |
|
"authors": [ |
|
{ |
|
"first": "Yuri", |
|
"middle": [], |
|
"last": "Kuratov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mikhail", |
|
"middle": [], |
|
"last": "Arkhipov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuri Kuratov and Mikhail Arkhipov. 2019. Adaptation of deep bidirectional multilingual transformers for russian language.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "On the stability of fine-tuning bert: Misconceptions, explanations, and strong baselines", |
|
"authors": [ |
|
{ |
|
"first": "Marius", |
|
"middle": [], |
|
"last": "Mosbach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maksym", |
|
"middle": [], |
|
"last": "Andriushchenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dietrich", |
|
"middle": [], |
|
"last": "Klakow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marius Mosbach, Maksym Andriushchenko, and Diet- rich Klakow. 2020. On the stability of fine-tuning bert: Misconceptions, explanations, and strong base- lines.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Multilingual and multi-aspect hate speech analysis", |
|
"authors": [ |
|
{ |
|
"first": "Nedjma", |
|
"middle": [], |
|
"last": "Ousidhoum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zizheng", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongming", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yangqiu", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dit-Yan", |
|
"middle": [], |
|
"last": "Yeung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4675--4684", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1474" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nedjma Ousidhoum, Zizheng Lin, Hongming Zhang, Yangqiu Song, and Dit-Yan Yeung. 2019. Multi- lingual and multi-aspect hate speech analysis. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 4675- 4684, Hong Kong, China. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Reducing gender bias in abusive language detection", |
|
"authors": [ |
|
{ |
|
"first": "Ji", |
|
"middle": [], |
|
"last": "Ho Park", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamin", |
|
"middle": [], |
|
"last": "Shin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascale", |
|
"middle": [], |
|
"last": "Fung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2799--2804", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1302" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ji Ho Park, Jamin Shin, and Pascale Fung. 2018. Re- ducing gender bias in abusive language detection. In Proceedings of the 2018 Conference on Em- pirical Methods in Natural Language Processing, pages 2799-2804, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Deeper attention to abusive user content moderation", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Pavlopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prodromos", |
|
"middle": [], |
|
"last": "Malakasiotis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ion", |
|
"middle": [], |
|
"last": "Androutsopoulos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1125--1135", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1117" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Pavlopoulos, Prodromos Malakasiotis, and Ion Androutsopoulos. 2017. Deeper attention to abusive user content moderation. In Proceedings of the 2017 Conference on Empirical Methods in Natural Lan- guage Processing, pages 1125-1135, Copenhagen, Denmark. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Topic-driven toxicity: Exploring the relationship between online toxicity and news topics", |
|
"authors": [ |
|
{ |
|
"first": "Joni", |
|
"middle": [], |
|
"last": "Salminen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sercan", |
|
"middle": [], |
|
"last": "Seng\u00fcn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [], |
|
"last": "Corporan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jung", |
|
"middle": [], |
|
"last": "Soon Gyo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernard", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Jansen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "PLoS One", |
|
"volume": "", |
|
"issue": "2", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1371/journal.pone.0228723" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joni Salminen, Sercan Seng\u00fcn, Juan Corporan, Soon gyo Jung, and Bernard J. Jansen. 2020. Topic-driven toxicity: Exploring the relationship between online toxicity and news topics. PLoS One, 15(2).", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Empirical analysis of multi-task learning for reducing identity bias in toxic comment detection", |
|
"authors": [ |
|
{ |
|
"first": "Ameya", |
|
"middle": [], |
|
"last": "Vaidya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Feng", |
|
"middle": [], |
|
"last": "Mai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Ning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the International AAAI Conference on Web and Social Media", |
|
"volume": "14", |
|
"issue": "", |
|
"pages": "683--693", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ameya Vaidya, Feng Mai, and Yue Ning. 2020. Em- pirical analysis of multi-task learning for reducing identity bias in toxic comment detection. Proceed- ings of the International AAAI Conference on Web and Social Media, 14(1):683-693.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Understanding abuse: A typology of abusive language detection subtasks", |
|
"authors": [ |
|
{ |
|
"first": "Zeerak", |
|
"middle": [], |
|
"last": "Waseem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Davidson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dana", |
|
"middle": [], |
|
"last": "Warmsley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ingmar", |
|
"middle": [], |
|
"last": "Weber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the First Workshop on Abusive Language Online", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "78--84", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-3012" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zeerak Waseem, Thomas Davidson, Dana Warmsley, and Ingmar Weber. 2017. Understanding abuse: A typology of abusive language detection subtasks. In Proceedings of the First Workshop on Abusive Lan- guage Online, pages 78-84, Vancouver, BC, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Hateful symbols or hateful people? predictive features for hate speech detection on twitter", |
|
"authors": [ |
|
{ |
|
"first": "Zeerak", |
|
"middle": [], |
|
"last": "Waseem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the NAACL Student Research Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "88--93", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N16-2013" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zeerak Waseem and Dirk Hovy. 2016. Hateful sym- bols or hateful people? predictive features for hate speech detection on twitter. In Proceedings of the NAACL Student Research Workshop, pages 88-93, San Diego, California. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Demoting racial bias in hate speech detection", |
|
"authors": [ |
|
{ |
|
"first": "Mengzhou", |
|
"middle": [], |
|
"last": "Xia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anjalie", |
|
"middle": [], |
|
"last": "Field", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulia", |
|
"middle": [], |
|
"last": "Tsvetkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Eighth International Workshop on Natural Language Processing for Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7--14", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.socialnlp-1.2" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mengzhou Xia, Anjalie Field, and Yulia Tsvetkov. 2020. Demoting racial bias in hate speech detection. In Proceedings of the Eighth International Work- shop on Natural Language Processing for Social Me- dia, pages 7-14, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Predicting the type and target of offensive posts in social media", |
|
"authors": [ |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shervin", |
|
"middle": [], |
|
"last": "Malmasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Rosenthal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noura", |
|
"middle": [], |
|
"last": "Farra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ritesh", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1415--1420", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1144" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcos Zampieri, Shervin Malmasi, Preslav Nakov, Sara Rosenthal, Noura Farra, and Ritesh Kumar. 2019. Predicting the type and target of offensive posts in social media. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1415-1420, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Demographics should not be the reason of toxicity: Mitigating discrimination in text classifications with instance weighting", |
|
"authors": [ |
|
{ |
|
"first": "Guanhua", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Bai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junqi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kun", |
|
"middle": [], |
|
"last": "Bai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Conghui", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tiejun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4134--4145", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.380" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guanhua Zhang, Bing Bai, Junqi Zhang, Kun Bai, Con- ghui Zhu, and Tiejun Zhao. 2020. Demographics should not be the reason of toxicity: Mitigating discrimination in text classifications with instance weighting. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 4134-4145, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "Interface of appropriateness labeling task. Translation: upper line -text: \"You should give up smoking and urgently consult cardiologist\", middle line -task: \"Read the sentence and indicate whether this phrase generated with chatbot can harm the reputation of the company which created this chatbot?\", possible answers -\"Yes/No\" Distribution of samples by number of topics.", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"text": "F 1 -scores of the BERT-based topic classifier.", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"text": "Examples of appropriate and inappropriate samples related to sensitive topics (translated from Russian).", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF5": { |
|
"text": "Number of samples per topic in sensitive topics and appropriateness datasets.", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF6": { |
|
"text": "Performance of the best BERT-based appropriateness classifier (binary classification).", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |