|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:52:15.749761Z" |
|
}, |
|
"title": "Automatic Detection of Offensive Language in Social Media: Defining Linguistic Criteria to build a Mexican Spanish Dataset", |
|
"authors": [ |
|
{ |
|
"first": "Mar\u00eda", |
|
"middle": [], |
|
"last": "Jos\u00e9 D\u00edaz-Torres", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Universidad de las Am\u00e9ricas Puebla", |
|
"location": { |
|
"settlement": "M\u00e9xico" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Paulina", |
|
"middle": [ |
|
"Alejandra" |
|
], |
|
"last": "Mor\u00e1n-M\u00e9ndez", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Universidad de las Am\u00e9ricas Puebla", |
|
"location": { |
|
"settlement": "M\u00e9xico" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Luis", |
|
"middle": [], |
|
"last": "Villase\u00f1or-Pineda", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Laboratorio de Tecnolog\u00edas del Lenguaje", |
|
"institution": "", |
|
"location": { |
|
"country": "Mexico" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Manuel", |
|
"middle": [], |
|
"last": "Montes-Y-G\u00f3mez", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Laboratorio de Tecnolog\u00edas del Lenguaje", |
|
"institution": "", |
|
"location": { |
|
"country": "Mexico" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [], |
|
"last": "Aguilera", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Laboratorio de Tecnolog\u00edas del Lenguaje", |
|
"institution": "", |
|
"location": { |
|
"country": "Mexico" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Luis", |
|
"middle": [], |
|
"last": "Meneses-Ler\u00edn", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Centre de Recherche en Linguistique Fran\u00e7aise GRAMMATICA, Universit\u00e9 d'Artois", |
|
"institution": "", |
|
"location": { |
|
"country": "France" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Phenomena such as bullying, homophobia, sexism and racism have transcended to social networks, motivating the development of tools for their automatic detection. The challenge becomes greater when speakers make use of popular sayings, colloquial expressions and idioms which may contain vulgar, profane or rude words, but not always have the intention to offend; a situation often found in the Mexican Spanish variant. Under these circumstances, the identification of the offense goes beyond the lexical and syntactic elements of the message. This first work aims to define the main linguistic features of aggressive, offensive and vulgar language in social networks in order to establish linguistic-based criteria to facilitate the identification of abusive language. For this purpose, a Mexican Spanish Twitter corpus was compiled and analyzed. The dataset included words that, despite being rude, need to be considered in context to determine they are part of an offense. Based on the analysis of this corpus, linguistic criteria were defined to determine whether a message is offensive. To simplify the application of these criteria, an easy-to-follow diagram was designed. The paper presents an example of the use of the diagram, as well as the basic statistics of the corpus.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Phenomena such as bullying, homophobia, sexism and racism have transcended to social networks, motivating the development of tools for their automatic detection. The challenge becomes greater when speakers make use of popular sayings, colloquial expressions and idioms which may contain vulgar, profane or rude words, but not always have the intention to offend; a situation often found in the Mexican Spanish variant. Under these circumstances, the identification of the offense goes beyond the lexical and syntactic elements of the message. This first work aims to define the main linguistic features of aggressive, offensive and vulgar language in social networks in order to establish linguistic-based criteria to facilitate the identification of abusive language. For this purpose, a Mexican Spanish Twitter corpus was compiled and analyzed. The dataset included words that, despite being rude, need to be considered in context to determine they are part of an offense. Based on the analysis of this corpus, linguistic criteria were defined to determine whether a message is offensive. To simplify the application of these criteria, an easy-to-follow diagram was designed. The paper presents an example of the use of the diagram, as well as the basic statistics of the corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "As of today, social media platforms such as Facebook, Twitter and YouTube have facilitated and encouraged interpersonal communication. Through them, people interact and share their opinions through posts, messages and comments online. Unfortunately, since these platforms guarantee to some extent the freedom of expression of their users, they can and often use these means to attack or offend other persons. This situation leads to safety issues: online aggression and abuse not only create mental and psychological health problems for the victims but have also been proved to cause self-harm and even suicide (Kumar et al., 2018) . Some of the major challenges for detecting abusive language in social networks are the speed and volume of online communication. Every second, approximately 6,000 tweets are published, which is equivalent to more than 500 million tweets per day 1 , making manual monitoring impossible. The previous scenario has motivated the development of methods for the automatic detection of abusive messages. Current methods are of two main kinds: supervised (Burnap and Williams, 2016; Plaza-del Arco et al., 2019) which require labeled data for learning a classification model, and, unsupervised (Gitari et al., 2015; Wiegand et al., 2018; Guzm\u00e1n-Falc\u00f3n, 2018) , which detect hostile messages by searching for words in a given lexicon of profane words. Both kinds of approaches have their own advantages and disadvantages. In particular, the creation of supervised learning methods for offensive language detection requires of large, accurate, manually annotated resources. Nevertheless, most corpora available are in En-glish (Pamungkas and Patti, 2019) , which greatly hinders this task in low-resource languages. Annotation criteria for this type of datasets have only seldom been detailed (Ousidhoum et al., 2019) , and, moreover, the labeling of offensive and non-offensive messages is commonly a costly and highly subjective task due to several socio-cultural and domain dependent issues. A greater challenge is posed by the richness of colloquial expressions and vulgar language that characterizes communication in social networks, since the identification of offenses goes beyond the lexical and syntactic elements of the message, and requires the annotator to understand the context beyond individual terms. With this motivation, through the present research we sought to define the main linguistic features that characterize abusive language manifested in social networks. As a first step, our work departs from the fact that the language used in social networks is abundant in colloquial expressions, commonly composed of rude or profane words, but they are not used to offend. Hence, the interest of this work is the definition of an annotation scheme with enough elements to discriminate these situations. To this end, we defined the concepts of offensive, aggressive and vulgar language, based on Austin's Speech Acts theory (Austin, 1962) , with the aim of establishing criteria to facilitate their identification and thus define an accurate, fine-grained and linguistic-based annotation scheme.", |
|
"cite_spans": [ |
|
{ |
|
"start": 611, |
|
"end": 631, |
|
"text": "(Kumar et al., 2018)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1082, |
|
"end": 1109, |
|
"text": "(Burnap and Williams, 2016;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1110, |
|
"end": 1138, |
|
"text": "Plaza-del Arco et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1221, |
|
"end": 1242, |
|
"text": "(Gitari et al., 2015;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1243, |
|
"end": 1264, |
|
"text": "Wiegand et al., 2018;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1265, |
|
"end": 1285, |
|
"text": "Guzm\u00e1n-Falc\u00f3n, 2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1652, |
|
"end": 1679, |
|
"text": "(Pamungkas and Patti, 2019)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1818, |
|
"end": 1842, |
|
"text": "(Ousidhoum et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 2964, |
|
"end": 2978, |
|
"text": "(Austin, 1962)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "The task of automatically detecting aggressive content aimed at individuals or communities has recently been studied in different academic forums. However, most of them focus on the English language (\u00c1lvarez-Carmona et al., 2018). In 2017, the 1st Workshop on Abusive Language Online (ALW1) was organized, where different approaches were presented for the detection of abusive language in social networks, focusing particularly on written communications in English and German (Waseem et al., 2017a) . Subsequently, more workshops of the same court emerged, but due to the lack of consensus on a definition for \"offensive language\", the scope of the task was narrowed to more specific and identifiable behaviors. This was the case of the recent First Workshop on Trolling, Aggression and Cyberbullying (TRAC-2018) (Kumar et al., 2018) . In this workshop, the phenomena of online aggression such as trolling and cyberbullying were discussed. By the same token, issues such as racism (Tulkens et al., 2016) , sexism (Lee et al., 2010) , and bullying (Samghabadi et al., 2017) have been studied in this line of research. Along the definitions proposed for these abusive behaviors we can find certain patterns, such as the presence of curse words, discriminatory vocabulary, derogatory adjectives and the explicit mention of others; manifested through names, pronouns, and user tags (Waseem et al., 2017b) . With respect to the efforts made for Mexican Spanish, the last two years, the evaluation forum \"Authorship and Aggressiveness Analysis in Twitter: a case study in Mexican Spanish\" (MEX-A3T) has been held. This forum -which took place within the IberEval 2018 (\u00c1lvarez-Carmona et al., 2018) and IberLEF 2019 (Arag\u00f3n et al., 2019) conferences-evaluated an aggressiveness detection task in Mexican Spanish tweets. The results confirmed the complexity of this task, and the need for well-defined criteria to differentiate offensive, aggressive and vulgar language. Therefore, the goal of the present research was to establish criteria to facilitate the identification of offensive language and thus define a detailed, linguistic-based annotation scheme.", |
|
"cite_spans": [ |
|
{ |
|
"start": 476, |
|
"end": 498, |
|
"text": "(Waseem et al., 2017a)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 813, |
|
"end": 833, |
|
"text": "(Kumar et al., 2018)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 981, |
|
"end": 1003, |
|
"text": "(Tulkens et al., 2016)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1013, |
|
"end": 1031, |
|
"text": "(Lee et al., 2010)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1047, |
|
"end": 1072, |
|
"text": "(Samghabadi et al., 2017)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 1378, |
|
"end": 1400, |
|
"text": "(Waseem et al., 2017b)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1710, |
|
"end": 1731, |
|
"text": "(Arag\u00f3n et al., 2019)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "To collect data, we considered Twitter as the source media since it is open and its anonymity allows people to write judgments or assessments about other people, including offenses or aggressions. The interest of this first work is the definition of criteria to distinguish the offense or the aggression when using the same vocabulary. That is, it is necessary to collect messages that, despite using the same words (i.e. rude words), it is the context that determines whether a word is used to offend, or is part of a colloquial expression that is not intended to offend. To build the corpus, we collected tweets from August to November of 2017. We used some rude words and controversial hashtags to narrow the search. We collected a set of 143 terms that served as seeds for extracting the tweets, which includede words classified as vulgar and non-colloquial in the Diccionario de Mexicanismos de la Academia Mexicana de la Lengua, as well as words and hashtags identified by the Instituto Nacional de las Mujeres as related to violence and sexual harassment against women on Twitter (Guzm\u00e1n-Falc\u00f3n, 2018). Table 1 shows examples of these seed words. To ensure their origin, the tweets were collected considering their geolocation. We considered Mexico City as the center and extracted all tweets that were within a radius of ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1110, |
|
"end": 1117, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Collection", |
|
"sec_num": "3." |
|
}, |
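The collection procedure above combines seed-term matching with a geolocation filter. The sketch below illustrates one way to implement that filter; it is not the authors' collection code. It assumes tweets have already been retrieved together with their coordinates, uses the well-known coordinates of central Mexico City (about 19.43 N, 99.13 W), and keeps tweets whose great-circle (haversine) distance to that point is at most 500 km. The `tweets` list and the reduced seed list are hypothetical placeholders.

```python
import math

# Approximate coordinates of central Mexico City, used as the search origin.
CDMX_LAT, CDMX_LON = 19.4326, -99.1332
RADIUS_KM = 500.0

# A few of the 143 seed terms reported in Table 1 (illustrative subset).
SEED_TERMS = ["luchona", "pendejo", "pendeja", "golfas", "lameculos"]

def haversine_km(lat1, lon1, lat2, lon2):
    """Great-circle distance in kilometers between two (lat, lon) points."""
    r = 6371.0  # mean Earth radius in km
    p1, p2 = math.radians(lat1), math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlmb = math.radians(lon2 - lon1)
    a = math.sin(dphi / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dlmb / 2) ** 2
    return 2 * r * math.asin(math.sqrt(a))

def keep_tweet(text, lat, lon):
    """True if the tweet mentions a seed term and lies within 500 km of CDMX."""
    mentions_seed = any(term in text.lower() for term in SEED_TERMS)
    in_radius = haversine_km(CDMX_LAT, CDMX_LON, lat, lon) <= RADIUS_KM
    return mentions_seed and in_radius

# Hypothetical pre-fetched tweets as (text, latitude, longitude) triples.
tweets = [("no seas pendejo", 19.50, -99.20), ("hola mundo", 40.42, -3.70)]
corpus = [t for t in tweets if keep_tweet(*t)]
print(corpus)  # only the first tweet passes both filters
```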
|
{ |
|
"text": "The creation of the annotation scheme and the annotation task itself were part of an incremental and complementary process. Two linguists from our research team studied the abusive language phenomenon through the literature and analyzed the collected tweets, to arrive to a typology that identified the defining characteristics of vulgar, aggressive and offensive language. Then, the linguists wrote the annotation diagram and used it to classify the corpus. For the purpose of creating said linguistic-based annotation scheme, first, it was necessary to arrive at a definition for the concepts of offensive, aggressive, vulgar language. Having a conceptualization of each term is a critical task, since it allows to establish linguistic criteria for the iden- tification and classification of these linguistic phenomena.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation Scheme", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "Once the theoretical framework on these linguistic manifestations was outlined, we looked for the lexical and semantic elements representative of the aggressive, offensive or vulgar messages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotation Scheme", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "In order to identify the most characteristic features of aggressive, offensive and vulgar language, we first studied the definitions formulated in several academic forums and workshops. Among the proposed conceptualizations, recurrent linguistic characteristics can be found: the presence of rudeness, discriminatory vocabulary, derogatory adjectives and the mention of others, which is manifested through names, pronouns, and user tags (Waseem et al., 2017b) . Beyond these lexical and syntactic elements, the pragmatic aspect of the messages is crucial to qualify them as aggressive, offensive or vulgar. According to the Speech Acts theory (Austin, 1962) , the production of a statement performs three types of actions or acts at the same time: the locutionary act, the linguistic expression itself, its syntactic structure and the literal meaning semantic; the illocutionary act, the force or intention of the expression provided by the speaker; and the perlocutionary act, the consequence or effect of the statement on the interlocutor. The second act is the one that interests the detection of abusive language, since the illocutionary force of a message is its underlying purpose, which could go from asking a question, an invitation, a reminder, to a warning, a promise, or a threat, among many others. This wide range of intentions is delineated in the classification of illocutionary speech acts by (Searle, 1976) . It is important to emphasize that the illocutionary force of a speech act always depends on the context of the expression (Fromkin et al., 2011) , and since tweets provide very little context other than the linguistic expression itself, the annotators must rely on their sociopragmatic knowledge of the language to identify the illocutionary force of the message. That is the reason why linguistic variation must be taken into account for the definition of these concepts. Linguistic variation is the intrinsic characteristic of all languages that refers to the systematic differences in pronunciation, vocabulary and grammar of different social and regional groups of speakers of a language (Holmes and Wilson, 2017) . This is a relevant phenomenon for any natural language processing task, and in the case of abusive language detection it should be considered not only because of the distinctive lexical and syntactic characteristics of the dialect, but also because these patterns convey social meanings (Wardhaugh, 2011) , which would affect the way of expressing aggressiveness. After revising the literature on the subject and analyzing the definitions of other related linguistic manifestations such as hate speech, cyberbullying, and racism, an offensive, aggressive and vulgar language typology was reached:", |
|
"cite_spans": [ |
|
{ |
|
"start": 437, |
|
"end": 459, |
|
"text": "(Waseem et al., 2017b)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 643, |
|
"end": 657, |
|
"text": "(Austin, 1962)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1409, |
|
"end": 1423, |
|
"text": "(Searle, 1976)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1548, |
|
"end": 1570, |
|
"text": "(Fromkin et al., 2011)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 2118, |
|
"end": 2143, |
|
"text": "(Holmes and Wilson, 2017)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 2433, |
|
"end": 2450, |
|
"text": "(Wardhaugh, 2011)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Offensive, Aggressive and Vulgar Language", |
|
"sec_num": "4.1." |
|
}, |
|
{ |
|
"text": "\u2022 Offensive language: aims at insulting or humiliating a group or individual, usually using derogatory or derogatory terms. An example from the corpus is: No es que est\u00e9s gorda, lo gordo se quita. Es tu cara de caballo. This tweet humiliates a woman, makes fun of her body and compares her to an animal.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Offensive, Aggressive and Vulgar Language", |
|
"sec_num": "4.1." |
|
}, |
|
{ |
|
"text": "\u2022 Aggressive language: seeks to harm or hurt a group or individual by referring to or inciting violence. An example from the corpus is: pero estas gorda... aprovecha tu fin pendeja que el lunes te violo. This tweet involves insults and a rape threat.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Offensive, Aggressive and Vulgar Language", |
|
"sec_num": "4.1." |
|
}, |
|
{ |
|
"text": "\u2022 Vulgar language: it involves profanity, with sexual connotation and sometimes double entendre, but may or may not refer to an individual or collective. An example from the corpus is: Martes con de M de Mamando onvre se arreglan las cosas... creo... eso dicen.. This tweet uses obscene vocabulary and is sexually explicit.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Offensive, Aggressive and Vulgar Language", |
|
"sec_num": "4.1." |
|
}, |
|
{ |
|
"text": "Our annotation scheme was designed as a flowchart, for the purpose of supporting abusive language categorization into aggressive, offensive and vulgar in a clear, visual way. It was devised with the goal to be easy to read and useful for annotators without strong linguistics knowledge, to account for the diversity of backgrounds in the field of natural language processing. The typology portrays each concept as a non-exclusive quality of the message or tweet. This way, the tool allows for a better characterization of the texts when considering the possibility of a tweet belonging to one, two or even all classes, which represents more accurately the nature of these messages in social networks. The flowchart presents questions regarding the form and function of the message, about the presence of insults, derogatory, or sexually-charged vocabulary, but most prominently it is concerned on the illocutionary force of the message; that is, the intention and target of the tweet. As shown in Fig. 1 , the labeling process begins with the selection of a tweet, and the first question that asks if the tweet uses coarse language or with a sexual connotation. If the answer is yes, this indicates the message is vulgar, otherwise it is not. Following, the annotator is asked whether the tweet refers to an individual or to a group of people, or not. This question serves to make an early discard of aggressiveness and offensiveness, since these classes, unlike vulgar language, require of a target to qualify as such. If the message does not have a specific referent, the labeling process ends there. On the contrary, if the answer is positive, then the next question concerns aggressiveness, and asks if the tweet incites violence or tries to force the will of its referent. Finally, to determine if the message is offensive, the diagram directs the annotator to observe if the tweet uses pejorative, derogatory or negative intensifiers of a term to refer to its target; if the tweet seeks to humiliate or insult its referent. Be any of these questions answered affirmatively, the tweet shall be labeled as offensive. It should be noted that each of these classifications, vulgar, aggressive, and offensive, are non-exclusive qualities of the tweet. That is the reason why the flowchart continues after every decision, with the exception of the message having no referent. Table 2 shows examples that correspond to each of the categories.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 997, |
|
"end": 1003, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 2376, |
|
"end": 2383, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Diagram Description", |
|
"sec_num": "5." |
|
}, |
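Read as an algorithm, the flowchart amounts to four ordered yes/no questions with an early exit when the tweet has no referent. The following function is a minimal sketch of that reading, not code from the paper; its boolean parameters are hypothetical names standing for the annotator's answers to the diagram's questions, and the returned labels are non-exclusive, as described above.

```python
def annotate(uses_coarse_or_sexual_language: bool,
             has_individual_or_group_referent: bool,
             incites_violence_or_coercion: bool,
             humiliates_or_insults_referent: bool) -> set:
    """Apply the annotation flowchart (Fig. 1) to one tweet.

    Each argument is the annotator's yes/no answer to the corresponding
    question in the diagram; the returned labels are non-exclusive.
    """
    labels = set()
    # Q1: coarse language or sexual connotation -> vulgar.
    if uses_coarse_or_sexual_language:
        labels.add("vulgar")
    # Q2: without a referent, aggressiveness and offensiveness are
    # discarded early and the labeling process ends here.
    if not has_individual_or_group_referent:
        return labels
    # Q3: inciting violence or forcing the referent's will -> aggressive.
    if incites_violence_or_coercion:
        labels.add("aggressive")
    # Q4: derogatory terms, humiliation or insults toward the referent -> offensive.
    if humiliates_or_insults_referent:
        labels.add("offensive")
    return labels

# Example: a rape threat with insults is vulgar, aggressive and offensive.
print(annotate(True, True, True, True))  # {'vulgar', 'aggressive', 'offensive'}
```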
|
{ |
|
"text": "This research work generated two digital linguistic resources: a linguistic annotation scheme for the classification of offensive, aggressive and vulgar language; and a corpus of offensive language in Mexican Spanish. As it was previously explained, the scheme was designed based on an abusive language typology, which served to annotate the dataset. This obtained a Kappa coefficient of interevaluator agreement of 0.91, which means that as a result we had a consistent annotation when making use of the proposed scheme while annotating the corpus with both of the evaluators. Clearly, the high level of agreement is because they labelled the corpus at the time of analysis. A second exercise with new annotators is needed to confirm the applicability of the proposed scheme. Table 3 shows the general characteristics of this corpus: the distribution of the messages in the offensive and nonoffensive classes, as well as the size of their vocabularies. Using this corpus, a first classification exercise was carried out. To do this, a traditional method for text classification was applied 2 . The objective of this exercise was to observe the strong overlap between both classes. As mentioned in previous sections, the collection of messages was done with a single set of seed words. Consequently, the common vocabulary between the two classes is high. However, although many of the messages in the non-offensive class use the same rude words, they are not considered offenses or aggressions. Table 4 shows the results obtained. As it can be seen, the non-offensive class achieves greater F1-measure, an effect expected by the imbalance in the classes. On the other hand, as expected, the classifier does not correctly discriminate between the two classes, because this simple representation (i.e. unigrams) does not consider the entire context. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 777, |
|
"end": 784, |
|
"text": "Table 3", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 1495, |
|
"end": 1502, |
|
"text": "Table 4", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Towards automatic detection of abusive language", |
|
"sec_num": "6." |
|
}, |
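Footnote 2 specifies the baseline configuration: a unigram representation with frequency weights, a frequency threshold of at least 10, and a linear-kernel SVM with C = 1, evaluated with stratified 10-fold cross-validation. The following scikit-learn sketch approximates that setup; it is not the authors' code. The inline `texts`/`labels` lists are placeholders standing in for the 10,475-tweet corpus, and note that CountVectorizer's `min_df` is a document-frequency cutoff that only approximates the footnote's frequency threshold.

```python
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import StratifiedKFold, cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC

# Placeholder data; the real corpus is roughly 29% offensive, 71% non-offensive.
texts = ["ejemplo de tweet ofensivo"] * 30 + ["ejemplo de tweet neutro"] * 70
labels = [1] * 30 + [0] * 70  # 1 = offensive, 0 = non-offensive

# Unigram counts with a frequency cutoff, followed by a linear SVM (C = 1).
clf = make_pipeline(
    CountVectorizer(ngram_range=(1, 1), min_df=10),
    LinearSVC(C=1.0),
)

# Stratified 10-fold cross-validation, as reported in Table 4.
cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
scores = cross_validate(clf, texts, labels, cv=cv, scoring=["accuracy", "f1"])
print("Acc: %.2f +/- %.2f" % (scores["test_accuracy"].mean(),
                              scores["test_accuracy"].std()))
print("F1 (offensive): %.2f" % scores["test_f1"].mean())
```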
|
{ |
|
"text": "This research work focuses on the annotation process of corpora for the detection of abusive language. The proposed annotation scheme provides specific criteria to identify aggressive, offensive and vulgar language based on its linguistic characteristics and intent of the message. This initial scheme took special care to include in the analysis messages that, despite the use of rude words, are not considered offensive. On the other hand, the collected corpus of abusive language is representative of the variant of Mexican Spanish, encouraging the creation of more resources in our language and giving visibility to one of its many dialects. Our contribution encourages the emergence of proposals for automatic methods that will be able to obtain better results thanks to a more accurate dataset, consistent with the reality of this online language phenomenon. Lastly, it should be noted that the diagram will be made available, and our corpus will be made available through the MEX-A3T 2020 forum 3 . Any future participant in the forum will have access to the dataset presented in this work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "7." |
|
}, |
|
{ |
|
"text": "Internet Live Stats, 2019 -www.internetlivestats.com/twitterstatistics", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "A unigram based representation with frequency weights; frequency threshold >= 10; SVM classifier (linear kernel, C = 1).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank CONACyT for partially supporting this work under grants CB-2015-01-257383 and the Thematic Networks program (Language Technologies Thematic Network).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": "8." |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Overview of mex-a3t at ibereval 2018: Authorship and aggressiveness analysis in mexican spanish tweets", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"\u00c1" |
|
], |
|
"last": "Bibliographical Reference\u015b Alvarez-Carmona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Guzm\u00e1n-Falc\u00f3n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Montesy G\u00f3mez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Escalante", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Villasenor-Pineda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Reyes-Meza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Rico-Sulayes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Notebook Papers of 3rd SEPLN Workshop on Evaluation of Human Language Technologies for Iberian Languages (IBEREVAL)", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bibliographical Reference\u015b Alvarez-Carmona, M.\u00c1., Guzm\u00e1n-Falc\u00f3n, E., Montes- y G\u00f3mez, M., Escalante, H. J., Villasenor-Pineda, L., Reyes-Meza, V., and Rico-Sulayes, A. (2018). Overview of mex-a3t at ibereval 2018: Authorship and aggressiveness analysis in mexican spanish tweets. In Notebook Papers of 3rd SEPLN Workshop on Evaluation of Human Language Technologies for Iberian Languages (IBEREVAL), Seville, Spain, volume 6.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Overview of mex-a3t at iberlef 2019: Authorship and aggressiveness analysis in mexican spanish tweets", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Arag\u00f3n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"\u00c1" |
|
], |
|
"last": "\u00c1lvarez-Carmona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Montes-Y G\u00f3mez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Escalante", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Villasenor-Pineda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Moctezuma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Notebook Papers of 1st SE-PLN Workshop on Iberian Languages Evaluation Forum (IberLEF)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arag\u00f3n, M. E.,\u00c1lvarez-Carmona, M.\u00c1., Montes-y G\u00f3mez, M., Escalante, H. J., Villasenor-Pineda, L., and Moctezuma, D. (2019). Overview of mex-a3t at iberlef 2019: Authorship and aggressiveness analysis in mex- ican spanish tweets. In Notebook Papers of 1st SE- PLN Workshop on Iberian Languages Evaluation Forum (IberLEF), Bilbao, Spain.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "How to do things with words", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Austin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1962, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Austin, J. (1962). How to do things with words, 2nd edn, jo urmson and m. Sbasa (eds).", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Us and them: identifying cyber hate on twitter across multiple protected characteristics", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Burnap", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Williams", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "EPJ Data Science", |
|
"volume": "5", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Burnap, P. and Williams, M. L. (2016). Us and them: iden- tifying cyber hate on twitter across multiple protected characteristics. EPJ Data Science, 5(1):11.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "An introduction to language, 9e", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Fromkin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Rodman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Hyams", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fromkin, V., Rodman, R., and Hyams, V. (2011). An intro- duction to language, 9e. Boston, MA: Wadsworth, Cen- gage Learning.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A lexicon-based approach for hate speech detection", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Gitari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Zuping", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Damien", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Long", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Journal of Multimedia and Ubiquitous Engineering", |
|
"volume": "10", |
|
"issue": "4", |
|
"pages": "215--230", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gitari, N. D., Zuping, Z., Damien, H., and Long, J. (2015). A lexicon-based approach for hate speech detection. In- ternational Journal of Multimedia and Ubiquitous Engi- neering, 10(4):215-230.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Detecci\u00f3n de lenguaje ofensivo en Twitter basada en expansi\u00f3n autom\u00e1tica de lex-3 sites.google.com/view/mex-a3t/ icones. Tesis de maestr\u00eda en ciencias computacionales", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Guzm\u00e1n-Falc\u00f3n", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guzm\u00e1n-Falc\u00f3n, E. (2018). Detecci\u00f3n de lenguaje ofen- sivo en Twitter basada en expansi\u00f3n autom\u00e1tica de lex- 3 sites.google.com/view/mex-a3t/ icones. Tesis de maestr\u00eda en ciencias computacionales, Instituto Nacional de Astrof\u00edsica,\u00d3ptica y Electr\u00f3nica.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "An introduction to sociolinguistics", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Holmes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Wilson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Holmes, J. and Wilson, N. (2017). An introduction to soci- olinguistics. Routledge.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Proceedings of the first workshop on trolling, aggression and cyberbullying (trac-2018). In Proceedings of the First Workshop on Trolling, Aggression and Cyberbullying", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Ojha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Malmasi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kumar, R., Ojha, A. K., Zampieri, M., and Malmasi, S. (2018). Proceedings of the first workshop on trolling, aggression and cyberbullying (trac-2018). In Proceed- ings of the First Workshop on Trolling, Aggression and Cyberbullying (TRAC-2018).", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Ambivalent sexism in close relationships:(hostile) power and (benevolent) romance shape relationship ideals", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Fiske", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Glick", |
|
"suffix": "" |
|
}, |
|
{

"first": "Z",

"middle": [],

"last": "Chen",

"suffix": ""

}
|
], |
|
"year": 2010, |
|
"venue": "Sex Roles", |
|
"volume": "62", |
|
"issue": "7-8", |
|
"pages": "583--601", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lee, T. L., Fiske, S. T., Glick, P., and Chen, Z. (2010). Ambivalent sexism in close relationships:(hostile) power and (benevolent) romance shape relationship ideals. Sex Roles, 62(7-8):583-601.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Multilingual and multi-aspect hate speech analysis", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Ousidhoum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D.-Y", |
|
"middle": [], |
|
"last": "Yeung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1908.11049" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ousidhoum, N., Lin, Z., Zhang, H., Song, Y., and Yeung, D.-Y. (2019). Multilingual and multi-aspect hate speech analysis. arXiv preprint arXiv:1908.11049.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Cross-domain and cross-lingual abusive language detection: A hybrid approach with deep learning and a multilingual lexicon", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Pamungkas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Patti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics: Student Research Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "363--370", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pamungkas, E. W. and Patti, V. (2019). Cross-domain and cross-lingual abusive language detection: A hybrid ap- proach with deep learning and a multilingual lexicon. In Proceedings of the 57th Annual Meeting of the Associ- ation for Computational Linguistics: Student Research Workshop, pages 363-370.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Sinai at semeval-2019 task 6: Incorporating lexicon knowledge into svm learning to identify and categorize offensive language in social media", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Plaza-Del Arco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Molina-Gonz\u00e1lez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Mart\u00edn-Valdivia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"A U" |
|
], |
|
"last": "Lopez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "735--738", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Plaza-del Arco, F. M., Molina-Gonz\u00e1lez, M. D., Mart\u00edn- Valdivia, M. T., and Lopez, L. A. U. (2019). Sinai at semeval-2019 task 6: Incorporating lexicon knowledge into svm learning to identify and categorize offensive language in social media. In Proceedings of the 13th International Workshop on Semantic Evaluation, pages 735-738.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Detecting nastiness in social media", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Samghabadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Maharjan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Sprague", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Diaz-Sprague", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Solorio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the First Workshop on Abusive Language Online", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "63--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samghabadi, N. S., Maharjan, S., Sprague, A., Diaz- Sprague, R., and Solorio, T. (2017). Detecting nastiness in social media. In Proceedings of the First Workshop on Abusive Language Online, pages 63-72.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "A classification of illocutionary acts", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Searle", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1976, |
|
"venue": "Language in society", |
|
"volume": "5", |
|
"issue": "1", |
|
"pages": "1--23", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Searle, J. R. (1976). A classification of illocutionary acts. Language in society, 5(1):1-23.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "The automated detection of racist discourse in dutch social media", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Tulkens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Hilte", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Lodewyckx", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Verhoeven", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Daelemans", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Computational Linguistics in the Netherlands Journal", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "3--20", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tulkens, S., Hilte, L., Lodewyckx, E., Verhoeven, B., and Daelemans, W. (2016). The automated detection of racist discourse in dutch social media. Computational Linguistics in the Netherlands Journal, 6:3-20.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "An introduction to sociolinguistics", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Wardhaugh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wardhaugh, R. (2011). An introduction to sociolinguistics, volume 28. John Wiley & Sons.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Proceedings of the first workshop on abusive language online", |
|
"authors": [ |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Waseem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"H K" |
|
], |
|
"last": "Chung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tetreault", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the First Workshop on Abusive Language Online", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Waseem, Z., Chung, W. H. K., Hovy, D., and Tetreault, J. (2017a). Proceedings of the first workshop on abusive language online. In Proceedings of the First Workshop on Abusive Language Online.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Understanding abuse: A typology of abusive language detection subtasks", |
|
"authors": [ |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Waseem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Davidson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Warmsley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Weber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1705.09899" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Waseem, Z., Davidson, T., Warmsley, D., and Weber, I. (2017b). Understanding abuse: A typology of abusive language detection subtasks. arXiv preprint arXiv:1705.09899.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Inducing a lexicon of abusive wordsa feature-based approach", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Wiegand", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Ruppenhofer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Schmidt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Greenberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1046--1056", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wiegand, M., Ruppenhofer, J., Schmidt, A., and Green- berg, C. (2018). Inducing a lexicon of abusive words- a feature-based approach. In Proceedings of the 2018 Conference of the North American Chapter of the Associ- ation for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1046- 1056.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null, |
|
"text": "Annotation flowchart for abusive language categorization 500km. Finally, nearly 10,500 tweets in Mexican Spanish were collected and analyzed to define the annotation scheme." |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"text": "Sample of the vocabulary applied for the recovery of tweets.", |
|
"html": null, |
|
"content": "<table><tr><td>Spanish</td><td>English Translation</td></tr><tr><td>luchona</td><td>hard-working woman (single mother;</td></tr><tr><td/><td>derogatory)</td></tr><tr><td>pendejo(a)</td><td>asshole (masc./fem.)</td></tr><tr><td>prieto(a)</td><td>dark-skinned (masc./fem.; derogatory)</td></tr><tr><td>vergazos</td><td>strong blow (vulgar)</td></tr><tr><td>golfas</td><td>whores</td></tr><tr><td>puta</td><td>slut</td></tr><tr><td>lameculos</td><td>ass kisser</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"text": "Examples showing the use of the proposed scheme. The number in parentheses refers to the question in the annotation flowchart.Lo m\u00e1s rico de coger no es lo que t\u00fa sientes; sino ver al cabr\u00f3n retorcerse de placer... The best part about sex is not the feeling you get, but watching the man shiver of pleasure...", |
|
"html": null, |
|
"content": "<table><tr><td>Message</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"text": "Corpus' distribution.", |
|
"html": null, |
|
"content": "<table><tr><td>Class</td><td colspan=\"3\">Tweets Vocabulary Tweet size</td></tr><tr><td>Non-offensive</td><td>7,460</td><td>13,696</td><td>16.1\u00b15.9</td></tr><tr><td>Offensive</td><td>3,015</td><td>7,365</td><td>16.3\u00b15.8</td></tr><tr><td>Total</td><td>10,475</td><td>17,067</td><td>16.1\u00b15.9</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"text": "Offensive detection results, Acc=0.77\u00b10.06 (stratified 10-fold cross validation).", |
|
"html": null, |
|
"content": "<table><tr><td>Class</td><td>Precision</td><td>Recall</td><td>F1-measure</td></tr><tr><td colspan=\"3\">Non-offensive 0.83\u00b10.05 0.86\u00b10.06</td><td>0.84\u00b10.04</td></tr><tr><td>Offensive</td><td colspan=\"2\">0.63\u00b10.13 0.56\u00b10.18</td><td>0.58\u00b10.14</td></tr></table>", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |