|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T05:58:56.496352Z" |
|
}, |
|
"title": "Arabic Offensive Language on Twitter: Analysis and Experiments", |
|
"authors": [ |
|
{ |
|
"first": "Hamdy", |
|
"middle": [], |
|
"last": "Mubarak", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "HBKU 2\u00d6 zyegin University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ammar", |
|
"middle": [], |
|
"last": "Rashed", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "HBKU 2\u00d6 zyegin University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Kareem", |
|
"middle": [], |
|
"last": "Darwish", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "HBKU 2\u00d6 zyegin University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Younes", |
|
"middle": [], |
|
"last": "Samih", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "HBKU 2\u00d6 zyegin University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Abdelali", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "HBKU 2\u00d6 zyegin University", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Detecting offensive language on Twitter has many applications ranging from detecting/predicting bullying to measuring polarization. In this paper, we focus on building a large Arabic offensive tweet dataset. We introduce a method for building a dataset that is not biased by topic, dialect, or target. We produce the largest Arabic dataset to date with special tags for vulgarity and hate speech. We thoroughly analyze the dataset to determine which topics, dialects, and gender are most associated with offensive tweets and how Arabic speakers use offensive language. Lastly, we conduct many experiments to produce strong results (F1 = 83.2) on the dataset using SOTA techniques.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Detecting offensive language on Twitter has many applications ranging from detecting/predicting bullying to measuring polarization. In this paper, we focus on building a large Arabic offensive tweet dataset. We introduce a method for building a dataset that is not biased by topic, dialect, or target. We produce the largest Arabic dataset to date with special tags for vulgarity and hate speech. We thoroughly analyze the dataset to determine which topics, dialects, and gender are most associated with offensive tweets and how Arabic speakers use offensive language. Lastly, we conduct many experiments to produce strong results (F1 = 83.2) on the dataset using SOTA techniques.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Disclaimer: Due to the nature of the paper, some examples herein contain highly offensive language and hate speech. They don't reflect the views of the authors in any way. This work is an attempt to help fight such speech.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Much recent interest has focused on the detection of offensive language and hate speech in online social media. Offensiveness is often associated with undesirable behaviors such as trolling, cyberbullying, online extremism, political polarization, and propaganda. Thus, offensive language detection is instrumental for a variety of application such as: quantifying polarization (Barber\u00e1 and Sood, 2015; Conover et al., 2011) , trolls and propaganda account detection , hate crimes likelihood estimation (Waseem and Hovy, 2016) ; and predicting conflicts (Chadefaux, 2014) . In this paper, we describe our methodology for building a large dataset of Arabic offensive tweets. Given that roughly 1-2% of all Arabic tweets are offensive (Mubarak and Darwish, 2019) , targeted annotation is essential to efficiently build a large dataset. Since our methodology does not use a seed list of offensive words, it is not biased by topic, target, or dialect. Using our methodology, we tagged a 10,000 Arabic tweet dataset for offensiveness, where offensive tweets account for roughly 19% of the tweets. Further, we labeled tweets as vulgar or hate speech. To date, this is the largest available dataset, which we plan to make publicly available along with annotation guidelines. We use this dataset to characterize Arabic offensive language to ascertain the topics, dialects, and users' gender that are most associated with the use of offensive language. Though we suspect that there are common features that span different languages and cultures, some characteristics of Arabic offensive language are language and culture specific. Thus, we conduct a thorough analysis of how Arab users use offensive language. Next, we use the dataset to train strong Arabic offensive language classifiers using state-of-the-art representations and classification techniques. Specifically, we experiment with static and contextualized embeddings for representation along with a variety of classifiers such as Transformer-based and Support Vector Machine (SVM) classifiers. The contributions of this paper are as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 378, |
|
"end": 402, |
|
"text": "(Barber\u00e1 and Sood, 2015;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 403, |
|
"end": 424, |
|
"text": "Conover et al., 2011)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 503, |
|
"end": 526, |
|
"text": "(Waseem and Hovy, 2016)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 554, |
|
"end": 571, |
|
"text": "(Chadefaux, 2014)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 733, |
|
"end": 760, |
|
"text": "(Mubarak and Darwish, 2019)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We built the largest Arabic offensive language dataset to date that is also labeled for vulgar language and hate speech and is not biased by topic or dialect. We describe the methodology for building it along with annotation guidelines.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We performed thorough analysis to describe the peculiarities of Arabic offensive language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We experimented with SOTA classification techniques to provide strong results on detecting offensive language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Many recent papers have focused on the detection of offensive language, including hate speech (Agrawal and Awekar, 2018; Badjatiya et al., 2017; Davidson et al., 2017; Djuric et al., 2015; Kwok and Wang, 2013; Malmasi and Zampieri, 2017; Nobata et al., 2016; Yin et al., 2009) . Offensive language can be categorized as: vulgar, which include explicit and rude sexual references, pornographic, and hateful, which includes offensive remarks concerning people's race, religion, country, etc. (Jay and Janschewitz, 2008) . Prior works have concentrated on building annotated corpora and training classification models. Concerning corpora, hatespeechdata.com attempts to maintain an updated list of hate speech corpora for multiple languages including Arabic and English. Further, SemEval 2019 ran an evaluation task targeted at detecting offensive language, which focused exclusively on English (Zampieri et al., 2019) . For SemEval 2020, they extended the task to include other languages including Arabic (Zampieri et al., 2020) . As for classification models, most studies used supervised classification at either word level (Kwok and Wang, 2013) , character sequence level (Malmasi and Zampieri, 2017) , and word embeddings (Djuric et al., 2015) . The studies used different classification techniques including Na\u00efve Bayes (Kwok and Wang, 2013) , Support Vector Machines (SVM) (Malmasi and Zampieri, 2017) , and deep learning (Agrawal and Awekar, 2018; Badjatiya et al., 2017; Nobata et al., 2016) classification. The accuracy of the aforementioned system ranged between 76% and 90%. Earlier work looked at the use of sentiment words as features as well as contextual features (Yin et al., 2009) . The work on Arabic offensive language detection is relatively nascent (Abozinadah, 2017; Alakrot et al., 2018; Albadi et al., 2018; Mubarak et al., 2017; Mubarak and Darwish, 2019) . Mubarak et al. (2017) suggested that certain users are more likely to use offensive languages than others, and they used this insight to build a list of offensive Arabic words and to construct a labeled set of 1,100 tweets. Abozinadah (2017) used supervised classification based on a variety of features including user profile features, textual features, and network features. They reported an accuracy of nearly 90%. Alakrot et al. (2018) used supervised classification based on word n-grams to detect offensive language in YouTube comments. They im-proved classification with stemming and achieved a precision of 88%. Albadi et al. (2018) focused on detecting religious hate speech using a recurrent neural network.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 120, |
|
"text": "(Agrawal and Awekar, 2018;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 121, |
|
"end": 144, |
|
"text": "Badjatiya et al., 2017;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 145, |
|
"end": 167, |
|
"text": "Davidson et al., 2017;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 168, |
|
"end": 188, |
|
"text": "Djuric et al., 2015;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 189, |
|
"end": 209, |
|
"text": "Kwok and Wang, 2013;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 210, |
|
"end": 237, |
|
"text": "Malmasi and Zampieri, 2017;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 238, |
|
"end": 258, |
|
"text": "Nobata et al., 2016;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 259, |
|
"end": 276, |
|
"text": "Yin et al., 2009)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 490, |
|
"end": 517, |
|
"text": "(Jay and Janschewitz, 2008)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 892, |
|
"end": 915, |
|
"text": "(Zampieri et al., 2019)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 1003, |
|
"end": 1026, |
|
"text": "(Zampieri et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1124, |
|
"end": 1145, |
|
"text": "(Kwok and Wang, 2013)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1173, |
|
"end": 1201, |
|
"text": "(Malmasi and Zampieri, 2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1224, |
|
"end": 1245, |
|
"text": "(Djuric et al., 2015)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1323, |
|
"end": 1344, |
|
"text": "(Kwok and Wang, 2013)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1377, |
|
"end": 1405, |
|
"text": "(Malmasi and Zampieri, 2017)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1426, |
|
"end": 1452, |
|
"text": "(Agrawal and Awekar, 2018;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1453, |
|
"end": 1476, |
|
"text": "Badjatiya et al., 2017;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 1477, |
|
"end": 1497, |
|
"text": "Nobata et al., 2016)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 1677, |
|
"end": 1695, |
|
"text": "(Yin et al., 2009)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 1768, |
|
"end": 1786, |
|
"text": "(Abozinadah, 2017;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1787, |
|
"end": 1808, |
|
"text": "Alakrot et al., 2018;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1809, |
|
"end": 1829, |
|
"text": "Albadi et al., 2018;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1830, |
|
"end": 1851, |
|
"text": "Mubarak et al., 2017;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 1852, |
|
"end": 1878, |
|
"text": "Mubarak and Darwish, 2019)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1881, |
|
"end": 1902, |
|
"text": "Mubarak et al. (2017)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 2299, |
|
"end": 2320, |
|
"text": "Alakrot et al. (2018)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 2501, |
|
"end": 2521, |
|
"text": "Albadi et al. (2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Arabic is a morphologically rich language with a standard variety called Modern Standard Arabic (MSA), which is typically used in formal communication, and many dialectal varieties that differ from MSA in lexical selection, morphology, phonology, and syntactic structures. In MSA, words are typically derived from a set of thousands of roots by fitting a root into a stem template and the resulting stem may accept a variety of prefixes and suffixes. Though word segmentation, which greatly improves word matching, is quite accurate for MSA (Abdelali et al., 2016) , with accuracy approaching 99%, dialectal segmentation is not sufficiently reliable, with accuracy ranging between 91-95% for different dialects (Samih et al., 2017) . Since dialectal Arabic is ubiquitous in Arabic tweets and many tweets have creative spellings of words, recent work on Arabic offensive language detection used character-level models (Mubarak and Darwish, 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 541, |
|
"end": 564, |
|
"text": "(Abdelali et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 711, |
|
"end": 731, |
|
"text": "(Samih et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 917, |
|
"end": 944, |
|
"text": "(Mubarak and Darwish, 2019)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "3 Data Collection", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our target is to build a large Arabic offensive language dataset that is representative of its appearance on Twitter and is hopefully not biased to specific dialects, topics, or targets. One of the main challenges is that offensive tweets constitute a very small portion of overall tweets. To quantify their proportion, we took 3 random samples of tweets from different days, with each sample composed of 1,000 tweets, and we found that only 1-2% of them were offensive (including pornographic advertisements). This percentage is consistent with previously reported percentages (Mubarak et al., 2017) . Thus, annotating random tweets is grossly inefficient. One way to overcome this problem is to use a seed list of offensive words to filter tweets. However, doing so is problematic, as it would skew the dataset to particular types of offensive language or to specific dialects. Offensiveness is often dialect and country specific.", |
|
"cite_spans": [ |
|
{ |
|
"start": 578, |
|
"end": 600, |
|
"text": "(Mubarak et al., 2017)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Collecting Arabic Offensive Tweets", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "After inspecting many tweets, we observed that many offensive tweets have the vocative particle (\"yA\" -meaning \"O\") 1 , which is mainly used in directing the speech to a specific person or group. The ratio of offensive tweets increases to 5% if a tweet contains one vocative particle and to 19% if it has at least two vocative particles. Users often repeat this particle for emphasis, as in:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Collecting Arabic Offensive Tweets", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "(\"yA Amy yA Hnwnp\" -O my mother, O kind one), which is endearing and nonoffensive, and (\"yA klb yA q*r\" -\"O dog, O dirty one\"), which is offensive. We decided to use this pattern to increase our chances of finding offensive tweets. One of the main advantages of the pattern (\"yA ... yA\") is that it is not associated with any specific topic or genre, and it appears in all Arabic dialects. Though the use of offensive language does not necessitate the appearance of the vocative particle, the particle does not favor any specific offensive expressions and greatly improves our chances of finding offensive tweets. Using Twitter APIs, we collected 660k Arabic tweets having this pattern between April 15 -May 6, 2019. To increase diversity, we sorted the word sequences between the vocative particles and took the most frequent 10,000 unique sequences. For each word sequence, we took a random tweet containing that sequence. Then we annotated those tweets, ending up with 1,915 offensive tweets which represent roughly 19% of all tweets. Each tweet was labeled as: offensive, which could additionally be labeled as vulgar and/or hate speech, or Clean. We describe in greater detail our annotation guidelines, which are compatible with the OffensEval2019 annotation guidelines (Zampieri et al., 2019) . For example, if a tweet has insults or threats targeting a group based on their nationality, ethnicity, gender, political affiliation, religious belief, or other common characteristics, this is considered hate speech (Zampieri et al., 2019) . It is worth mentioning that we also considered insulting groups based on their sport affiliation as a form of hate speech. Often, being a fan of a particular sporting club is considered a part of the personality that rarely changes over time (similar to religious and political affiliations). Many incidents of violence have occurred among fans of rival clubs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1276, |
|
"end": 1299, |
|
"text": "(Zampieri et al., 2019)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 1519, |
|
"end": 1542, |
|
"text": "(Zampieri et al., 2019)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Collecting Arabic Offensive Tweets", |
|
"sec_num": "3.1" |
|
}, |
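{
"text": "To make this procedure concrete, below is a minimal Python sketch of the sampling step described above, assuming the pattern-matched tweets have already been fetched into a list of strings; the regular expression (over Buckwalter transliteration, matching the paper's examples), the helper name sample_for_annotation, and the defaults are illustrative assumptions rather than the authors' released code.\n\nimport random\nimport re\nfrom collections import Counter\n\n# Vocative pattern \"yA ... yA\" (Buckwalter transliteration).\nPATTERN = re.compile(r\"\\byA\\b\\s+(.+?)\\s+\\byA\\b\")\n\ndef sample_for_annotation(tweets, n_seqs=10000, seed=0):\n    # Count the word sequences between two vocative particles and\n    # keep one random tweet for each of the most frequent sequences.\n    random.seed(seed)\n    counts, by_seq = Counter(), {}\n    for t in tweets:\n        m = PATTERN.search(t)\n        if m:\n            seq = m.group(1)\n            counts[seq] += 1\n            by_seq.setdefault(seq, []).append(t)\n    return [random.choice(by_seq[s]) for s, _ in counts.most_common(n_seqs)]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Collecting Arabic Offensive Tweets",
"sec_num": "3.1"
},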
|
{ |
|
"text": "Although we used a generic pattern that is used across dialects and topics, such may not cover all the stylistic diversity of offensive expressions. However, our approach considerably narrows the search space for offensive tweets, which constitransliteration and English translation. tute a small percentage of tweets in general, while being far more generic than using a seed list of offensive words, which may greatly skew the distribution of offensive tweets. For future work, we plan to explore other methods for identifying offensive tweets with greater stylistic diversity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Collecting Arabic Offensive Tweets", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We developed annotation guidelines jointly with an experienced annotator, who is a native Arabic speaker with good knowledge of various Arabic dialects, in accordance to the OffensEval2019 guidelines. Tweets were given one or more of the following four labels: offensive, vulgar, hate speech, or clean. Since the offensive label covers both vulgar and hate speech and vulgarity and hate speech are not mutually exclusive, a tweet can be just offensive or offensive and vulgar and/or hate speech. The annotation adhered to the following guidelines: OFFENSIVE (OFF): Offensive tweets contain explicit or implicit insults or attacks against other people, or inappropriate language, such as: Direct threats or incitement, ex: (\"AHrqwA mqrAt AlmEArDp\" -\"burn opposition headquarters\") and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotating Tweets", |
|
"sec_num": "3.2" |
|
}, |
|
{
"text": "(\"h*A AlmnAfq yjb qtlh\" -\"kill this hypocrite\"). Insults and expressions of contempt, which include: Animal analogies, ex: (\"yA klb\" -\"O dog\") and (\"kl tbn\" -\"eat hay\"); Insult to family, ex: (\"yA rwH Amk\" -\"O mother's soul\"); Sexually-related insults, ex: (\"yA dywv\" -\"O cuckold\"); Damnation, ex: (\"Allh ylEnk\" -\"may God curse you\"); and Attacks on morals, ex: (\"yA kA*b\" -\"O liar\").",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Annotating Tweets",
"sec_num": "3.2"
},
|
{ |
|
"text": "Vulgar tweets are offensive tweets that contain profanity, such as mentions of private parts or sexual-related acts or references.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "VULGAR (VLG):", |
|
"sec_num": null |
|
}, |
|
{
"text": "Hate speech tweets are offensive tweets targeting a group based on common characteristics such as: Race, ex: (\"yA znjy\" -\"O Negro\"); Ethnicity, ex: (\"Alfrs AlAnjAs\" -\"Impure Persians\"); Group or party, ex: (\"Abwk $ywEy\" -\"your father is a communist\"); and Religion, ex: (\"dynk Alq*r\" -\"your filthy religion\").",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "HATE SPEECH (HS):",
"sec_num": null
},
|
{
"text": "Clean tweets do not contain vulgar or offensive language. We noticed that some tweets contain offensive words, but the tweet as a whole should not be considered offensive given the user's intention. This suggests that simple string matching that ignores context may fail in some cases. Examples of such ambiguous cases include: Humor, ex: (\"yA Edwp AlfrHp hhh\" -\"O enemy of happiness hahaha\"); Advice, ex: (\"lA tql lSAHbk yA xnzyr\" -\"don't say to your friend: You are a pig\"); Condition, ex: (\"A*A EArDthm yqwlwn yA Emyl\" -\"if you disagree with them, they call you a spy\"); Condemnation, ex: (\"lmA*A nsb bqwl: yA bqrp?\" -\"Why do we insult others by saying: O cow?\"); Self offense, ex: (\"tEbt mn lsAny Alq*r\" -\"I am tired of my dirty tongue\"); Non-human target, ex: (\"yA bnt Almjnwnp yA kwrp\" -\"O daughter of the crazy one, O football\"); and Quotation from a movie or a story, ex:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "CLEAN (CLN):",
"sec_num": null
},
|
{ |
|
"text": "(\"tAny yA zky! tAny yA fA$l\" -\"again smarty! again O loser\"). For ambiguous expressions, the annotator searched Twitter to observe real sample usages. Table 1 shows the distribution of the annotated tweets. There are 1,915 offensive tweets, including 225 vulgar tweets and 506 hate speech tweets, and 8,085 clean tweets. To validate annotation quality, we asked three additional annotators to annotate two tweet sample sets. The first was a random sample of 100 tweets containing 50 offensive and 50 non-offensive tweets. The Inter-Annotator Agreement (IIA) between the annotators using Fleiss's Kappa coefficient (Fleiss, 1971) was 0.92. The second was general random samples containing 100 tweets each from the dataset, and the IIA with the dataset was: 0.97, 0.96, and 0.97. This high level of agreement gives more confidence in the quality of the annotation. Data can be downloaded from: ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 151, |
|
"end": 158, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "CLEAN (CLN):", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://alt.qcri.org/resources/ OSACT2020-sharedTask-CodaLab-Train-Dev-Test. zip", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CLEAN (CLN):", |
|
"sec_num": null |
|
}, |
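{
"text": "As a hedged illustration of the agreement computation above, the sketch below computes Fleiss's kappa (Fleiss, 1971) with statsmodels, assuming the labels are arranged as one row per tweet and one column per annotator; the tiny ratings matrix is invented for illustration.\n\nimport numpy as np\nfrom statsmodels.stats.inter_rater import aggregate_raters, fleiss_kappa\n\n# One row per tweet, one column per annotator; 0 = clean, 1 = offensive.\nratings = np.array([[1, 1, 1],\n                    [0, 0, 1],\n                    [0, 0, 0],\n                    [1, 1, 0]])\ntable, _ = aggregate_raters(ratings)  # per-tweet counts of each label\nprint(fleiss_kappa(table))            # agreement over the sample",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "CLEAN (CLN):",
"sec_num": null
},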
|
{ |
|
"text": "Given the annotated tweets, we wanted to ascertain the distribution of: types of offensive language, genres or topics where it is used, the dialects used, and the gender of users using such language. Accordingly, the annotator manually examined and tagged all the offensive tweets. Topic: Figure 1 shows the distribution of topics associated with offensive tweets. As the figure shows, sports and politics are most dominant for offensive language including vulgar and hate speech. Dialect: We looked at MSA and four major dialects, namely Egyptian (EGY), Leventine (LEV), Maghrebi (MGR), and Gulf (GLF). Figure 2 shows that 71% of vulgar tweets were written in EGY followed by GLF, which accounted for 13% of vulgar tweets. MSA was not used in any vulgar tweets. As for offensive tweets in general, EGY and GLF were used in 36% and 35% of the offensive tweets respectively. Unlike the case of vulgar language, 15% of the offensive tweets were written in MSA. For hate speech, GLF and EGY were again dominant and MSA constituted 21% of the tweets. This is consistent with findings for other languages, e.g. English and Italian, where vulgarity was more frequently associated with colloquial language (Mattiello, 2005; Maisto et al., 2017) . Gender: Figure 3 shows that the vast majority of offensive tweets, including vulgar and hate speech, were authored by males. Female Twitter users accounted for 14% of offensive tweets in general and 6% and 9% of vulgar and hate speech respectively. Figure 4 shows a detailed categorization of hate speech types, where the top three include insulting groups based on their political ideology, origin, and sport affiliation. Religious hate speech appeared in only 15% of all hate speech tweets. Next, we analyzed all tweets labeled as offensive to better understand how Arabic speakers use offensive language. Here is a breakdown of usage: Direct name calling: The most frequent attack is to call a person an animal name, and the most used (\"klb\" -\"dog\"), (\"HmAr\" -\"donkey\"), and (\"bhym\" -\"beast\"). The second most common was insulting mental abilities using words such as (\"gby\" -\"stupid\") and", |
|
"cite_spans": [ |
|
{ |
|
"start": 1199, |
|
"end": 1216, |
|
"text": "(Mattiello, 2005;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 1217, |
|
"end": 1237, |
|
"text": "Maisto et al., 2017)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 289, |
|
"end": 297, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 604, |
|
"end": 612, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1248, |
|
"end": 1256, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1489, |
|
"end": 1497, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Statistics and User Demographics", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "(\"EbyT\" -\"idiot\"). Culturally, not all animal names are used as insults. For example, animals such as (\"Asd\" -\"lion\"), (\"Sqr\" -\"falcon\"), and (\"gzAl\" -\"gazelle\") are typically used for praise. For other insults, people use: some bird names such as (\"djAjp\" -\"chicken\"), (\"bwmp\" -\"owl\"), and (\"grAb\" -\"crow\");", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Statistics and User Demographics", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "insects such as (\"*bAbp\" -\"fly\"), (\"SrSwr\" -\"cockroach\"), and (\"H$rp\" -\"insect\"); microorganisms such as (\"jrvwmp\" -\"microbe\") and (\"THAlb\" -\"algae\"); inanimate objects such as (\"jzmp\" -\"shoes\") and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Statistics and User Demographics", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "(\"sTl\" -\"bucket\") among other usages. Simile and metaphor: Users use simile and metaphor were they would compare a person to: an animal as in (\"zy Alvwr\" -\"like a bull\"),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Statistics and User Demographics", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "(\"smEny nhyqk\" -\"let me hear your braying\"), and (\"hz dylk\" -\"wag your tail\"); a person with mental or physical disability such as (\"mngwly\" -\"Mongolian (Down syndrome)\"), (\"mEwq\" -\"disabled\"), and (\"qzm\" -\"dwarf\"); and to the opposite gender such as (\"jy$ nwAl\" -\"Nawal's army (Nawal is female name)\") and (\"nAdy zyzy\" -\"Zizi's club (Zizi is a female nickname)\"). Indirect speech: This includes: sarcasm such as (\"A*kY AxwAtk\" -\"smartest one of your siblings\") and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Statistics and User Demographics", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "(\"fylswf AlHmyr\" -\"the donkeys' philosopher\"); questions such as (\"Ayh kl AlgbA dh\" -\"what is all this stupidity\"); and indirect speech such as (\"AlnqA$ mE AlbhAym gyr mvmr\" -\"no use arguing with cattle\"). Wishing Evil: This entails wishing death or major harm to befall someone such as (\"rbnA yAxdk\" -\"May God take (kill) you\"),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Statistics and User Demographics", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "(\"Allh ylEnk\" -\"may God curse you\"), and (\"rwH fy dAhyp\" -equivalent to \"go to hell\").", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Statistics and User Demographics", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Name alteration: One common way to insult others is to change a letter or two in their names to produce new offensive words that rhyme with the original names. Some such examples include changing (\"Aljzyrp\" -\"Aljazeera (channel)\") to (\"Alxnzyrp\" -\"the pig\") and (\"xl-fAn\" -\"Khalfan (person name)\") to (\"xrfAn\" -\"crazed\"). Societal stratification: Some insults are associated with: certain jobs such as (\"bwAb\" -\"doorman\") or (\"xAdm\" -\"servant\"); and specific societal components such (\"bdwy\" -\"bedouin\") and (\"flAH\" -\"farmer\"). Immoral behavior: These insults are associated with negative moral traits or behaviors such as (\"Hqyr\" -\"vile\"), (\"xAyn\" -\"traitor\"), and (\"mnAfq\" -\"hypocrite\"). Sexually related: They include expressions such as (\"xwl\" -\"gay\"), (\"wsxp\" -\"prostitute\"), and (\"ErS\" -\"pimp\"). Figure 5 shows the top words with the highest valance scores for individual words in the offensive tweets. Larger fonts are used to highlight words with highest scores and align as well with the categories mentioned in the breakdown for the offensive languages. We slightly modified the valence score described by (Conover et al., 2011) to magnify its value by multiplying valence with frequency of occurrence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1116, |
|
"end": 1138, |
|
"text": "(Conover et al., 2011)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 802, |
|
"end": 810, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Statistics and User Demographics", |
|
"sec_num": "3.3" |
|
}, |
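{
"text": "As a sketch of this scoring, the snippet below assumes the common form of the valence score from Conover et al. (2011), v(w) = 2*r_off(w)/(r_off(w)+r_cln(w)) - 1, where r_off and r_cln are the relative frequencies of word w in offensive and clean tweets; this exact variant, and the multiplication by raw frequency as the magnification step, are our reading of the description above.\n\nfrom collections import Counter\n\ndef magnified_valence(off_tokens, cln_tokens):\n    off, cln = Counter(off_tokens), Counter(cln_tokens)\n    n_off, n_cln = sum(off.values()), sum(cln.values())\n    scores = {}\n    for w, f in off.items():\n        r_off = f / n_off\n        r_cln = cln[w] / n_cln  # 0 if w never occurs in clean tweets\n        v = 2 * r_off / (r_off + r_cln) - 1\n        scores[w] = v * f  # magnify valence by frequency of occurrence\n    return scores",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Statistics and User Demographics",
"sec_num": "3.3"
},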
|
{ |
|
"text": "We conducted an extensive battery of experiments on the dataset to establish strong Arabic offensive language classification results. Though offensive tweets have finer-grained labels where offensive tweet could also be vulgar and/or hate speech, we conducted coarser-grained classification to determine if a tweet was offensive or not. For classification, we experimented with several tweet representation and classification models. For tweet representations, we used: the count of positive and negative terms, based on a polarity lexicon; static embeddings, namely fastText and Skip-Gram; and deep contextual embeddings, namely BERT base-multilingual and AraBERT (Antoun et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 665, |
|
"end": 686, |
|
"text": "(Antoun et al., 2020)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We performed several text pre-processing steps. First, we tokenized the text using the Farasa Arabic NLP toolkit (Abdelali et al., 2016) . Second, we removed URLs, numbers, and all tweet specific tokens, namely mentions, retweets, and hashtags as they are not part of the language semantic structure, and therefore, not usable in pre-trained embeddings. Third, we performed basic Arabic letter normalization, namely variants of the letter alef to bare alef, ta marbouta to ha, and alef maqsoura to ya. We also separated words that are commonly incorrectly attached such as (\"yAklb\" -\"O dog\"), is split to (\"yA klb\"). Lastly, we normalized letter repetitions to allow for a maximum of 2 repeated letters. For example, the token (\"hhhhh\" -\"hahahahaha\") is normalized to (\"hh\"). We also removed Arabic diacritics and word elongations (kashida).", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 136, |
|
"text": "(Abdelali et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Pre-processing", |
|
"sec_num": "4.1" |
|
}, |
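{
"text": "The normalization steps can be sketched as follows; this is a minimal approximation of the rules above (Farasa tokenization is a separate step), and the regular expressions are ours, not the exact pipeline.\n\nimport re\n\ndef normalize(text):\n    text = re.sub(r\"http\\S+|@\\w+|#\\w+|\\bRT\\b|\\d+\", \" \", text)  # URLs, mentions, hashtags, retweets, numbers\n    text = re.sub(\"[\\u064b-\\u065f\\u0670]\", \"\", text)  # diacritics\n    text = text.replace(\"\\u0640\", \"\")  # kashida (word elongation)\n    text = re.sub(\"[\\u0622\\u0623\\u0625]\", \"\\u0627\", text)  # alef variants -> bare alef\n    text = text.replace(\"\\u0629\", \"\\u0647\")  # ta marbouta -> ha\n    text = text.replace(\"\\u0649\", \"\\u064a\")  # alef maqsoura -> ya\n    text = re.sub(r\"(.)\\1{2,}\", r\"\\1\\1\", text)  # cap letter repetitions at 2\n    return \" \".join(text.split())",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data Pre-processing",
"sec_num": "4.1"
},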
|
{ |
|
"text": "Lexical Features Since offensive words typically have a negative polarity, we wanted to test the effectiveness of using a polarity lexicon in detecting offensive tweets. For the lexicon, we used NileULex (El-Beltagy, 2016), which is an Arabic polarity lexicon containing 3,279 MSA and 2,674 Egyptian terms, out of which 4,256 are negative and 1,697 are positive. We used the counts of terms with positive polarity and terms with negative polarity in tweets as features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representations", |
|
"sec_num": "4.2" |
|
}, |
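{
"text": "A minimal sketch of these two features, assuming the lexicon has already been loaded into sets of positive and negative terms (loading code omitted):\n\ndef polarity_counts(tokens, pos_terms, neg_terms):\n    # Feature vector: [# positive NileULex terms, # negative terms].\n    return [sum(t in pos_terms for t in tokens),\n            sum(t in neg_terms for t in tokens)]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Representations",
"sec_num": "4.2"
},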
|
{ |
|
"text": "We experimented with various static embeddings that were pre-trained on different corpora with different vector dimensionality. We compared pre-trained embeddings to embeddings that were trained on our dataset. For pre-trained embeddings, we used: fastText Egyptian Arabic pre-trained embeddings with vector dimensionality of 300; Ar-aVec skip-gram embeddings (Mohammad et al., 2017) , trained on 66.9M Arabic tweets with 100dimensional vectors; and Mazajak skip-gram embeddings (Abu Farha and Magdy, 2019), trained on 250M Arabic tweets with 300-dimensional vectors. Sentence embeddings were calculated by taking the mean of the embeddings of their tokens. The importance of testing a character level n-gram model like fastText lies in the agglutinative nature of the Arabic language. We trained a new fastText text classification model on our dataset with vectors of 40 dimensions, 0.5 learning rate, 2\u221210 character n-grams as features, for 30 epochs. These hyper-parameters were tuned using a 5-fold cross-validated grid-search.", |
|
"cite_spans": [ |
|
{ |
|
"start": 360, |
|
"end": 383, |
|
"text": "(Mohammad et al., 2017)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Static Embeddings", |
|
"sec_num": null |
|
}, |
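{
"text": "Both uses of static embeddings can be sketched as follows; the file name train.txt is a placeholder, while the hyper-parameters are the tuned values reported above.\n\nimport numpy as np\nimport fasttext\n\ndef sentence_embedding(tokens, vectors, dim=300):\n    # Mean of the token embeddings (vectors: token -> np.ndarray).\n    vecs = [vectors[t] for t in tokens if t in vectors]\n    return np.mean(vecs, axis=0) if vecs else np.zeros(dim)\n\n# fastText classifier trained on our data; train.txt holds lines like\n# \"__label__OFF <tweet>\" or \"__label__CLN <tweet>\".\nmodel = fasttext.train_supervised(\"train.txt\", dim=40, lr=0.5,\n                                  minn=2, maxn=10, epoch=30)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Static Embeddings",
"sec_num": null
},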
|
{ |
|
"text": "We also experimented with pre-trained contextualized embeddings with fine-tuning for down-stream tasks. Recently, deep contextualized language models such as BERT (Bidirectional Encoder Representations from Transformers) (Devlin et al., 2019) , UMLFIT (Howard and Ruder, 2018) , and Ope-nAI GPT (Radford et al., 2018) , have achieved ground-breaking results in many NLP classification and language understanding tasks. In this paper, we fine-tuned BERT base-multilingual (or simply BERT) and AraBERT embeddings to classify Arabic offensive language on Twitter as it eliminates the need for feature engineering. Although Robustly Optimized BERT (RoBERTa) embeddings perform better than (BERT large ) on GLUE (Wang et al., 2018) , RACE (Lai et al., 2017) , and SQuAD (Rajpurkar et al., 2016) tasks, pre-trained multilingual RoBERTa models are not available. BERT is pre-trained on Wikipedia text from 104 languages, and AraBERT is trained on a large Arabic news corpus containing 8.5M articles composed of roughly 2.5B tokens. Both use identical architectures and come with hundreds of millions of parameters. Both contain an encoder with 12 Transformer blocks, hidden size of 768, and 12 self-attention heads. These embedding use BP sub-word segments. Following Devlin et al. (2019) , the classification consists of introducing a dense layer over the final hidden state h corresponding to first token of the sequence, [CLS] , adding a softmax activation on the top of BERT to predict the probability of the l label: p(l|h) = sof tmax(W h), where W is the task-specific weight matrix. During finetuning, all BERT/AraBERT parameters together with W are optimized end-to-end to maximize the log-probability of the correct labels.", |
|
"cite_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 242, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 252, |
|
"end": 276, |
|
"text": "(Howard and Ruder, 2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 295, |
|
"end": 317, |
|
"text": "(Radford et al., 2018)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 707, |
|
"end": 726, |
|
"text": "(Wang et al., 2018)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 734, |
|
"end": 752, |
|
"text": "(Lai et al., 2017)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 765, |
|
"end": 789, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 1261, |
|
"end": 1281, |
|
"text": "Devlin et al. (2019)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1417, |
|
"end": 1422, |
|
"text": "[CLS]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deep Contextualized Embeddings", |
|
"sec_num": null |
|
}, |
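{
"text": "A minimal HuggingFace fine-tuning sketch of this setup, with a dense classification layer and softmax over the final [CLS] state trained end-to-end; the model identifier and the toy batch are illustrative assumptions, not the exact training configuration.\n\nimport torch\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer\n\nname = \"bert-base-multilingual-cased\"  # or an AraBERT checkpoint\ntok = AutoTokenizer.from_pretrained(name)\nmodel = AutoModelForSequenceClassification.from_pretrained(name, num_labels=2)\n\nbatch = tok([\"example tweet\"], padding=True, truncation=True, return_tensors=\"pt\")\nlabels = torch.tensor([1])           # 1 = offensive, 0 = clean\nout = model(**batch, labels=labels)  # p(l|h) = softmax(Wh) over [CLS]\nout.loss.backward()                  # all parameters optimized end-to-end",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Deep Contextualized Embeddings",
"sec_num": null
},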
|
{ |
|
"text": "We explored different classifiers. When using lexical features and pre-trained static embeddings, we primarily used an SVM classifier with a radial basis function kernel. Only when using the Mazajak embeddings, we experimented with other classifiers such as AdaBoost and Logistic regression. The SVM classifier performed the best on static embeddings, and we picked the Mazajak embeddings because they yielded the best results among all static embeddings. We used the Scikit Learn implementations of all the classifiers such as libsvm for the SVM classifier. We also experimented with fast-Text, which trained embeddings on our data. When using contextualized embeddings, we fine-tuned BERT and AraBERT by adding a fully-connected dense layer followed by a softmax classifier, minimizing the binary cross-entropy loss function for the training data. For all experiments, we used the PyTorch 2 implementation by HuggingFace 3 as it provides pre-trained weights and vocabularies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Classification Models", |
|
"sec_num": "4.3" |
|
}, |
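{
"text": "For illustration, a sketch of the main classifier: an SVM with an RBF kernel over sentence embeddings, using the scikit-learn implementation mentioned above; the random matrix stands in for real Mazajak embeddings.\n\nimport numpy as np\nfrom sklearn.svm import SVC\n\nX = np.random.rand(100, 300)           # stand-in for Mazajak sentence embeddings\ny = np.random.randint(0, 2, size=100)  # 1 = offensive, 0 = clean\nclf = SVC(kernel=\"rbf\").fit(X, y)\nprint(clf.predict(X[:5]))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Classification Models",
"sec_num": "4.3"
},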
|
{ |
|
"text": "For all of our experiments, we used 5-fold cross validation with identical folds for all experiments. Table 2 reports on the results of using lexical features, static pre-trained embeddings with an SVM classifier, embeddings trained on our data with fast-Text classifier, and BERT and AraBERT over a dense layer with softmax activation. As the results show, using fine-tuned AraBERT yielded the best results overall, followed closely by Mazajak/SVM, with large improvements in precision over using BERT. The success of AraBERT was surprising given that it was not trained on social media text. Perhaps, pre-training a Transformer model on social media text may improve results further. We suspect that the Mazajak/SVM combination performed better than BERT due to the fact that the Mazajak embeddings, though static, were trained on in-domain data, as opposed to BERT. For completeness, we compared 7 other classifiers with SVM using Mazajak embeddings. As results in Table 3 show, using SVM yielded the best results. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 109, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 968, |
|
"end": 975, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "4.4" |
|
}, |
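{
"text": "The protocol can be sketched as follows: the folds are fixed once and the identical splits are reused for every system; the toy data again stands in for the real features and labels.\n\nimport numpy as np\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.svm import SVC\n\nX = np.random.rand(100, 300)\ny = np.random.randint(0, 2, size=100)\nfolds = list(StratifiedKFold(5, shuffle=True, random_state=42).split(X, y))\nfor tr, te in folds:  # the same folds are reused for all models\n    clf = SVC(kernel=\"rbf\").fit(X[tr], y[tr])\n    p, r, f1, _ = precision_recall_fscore_support(\n        y[te], clf.predict(X[te]), average=\"macro\")",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "4.4"
},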
|
{ |
|
"text": "We inspected the tweets of one fold that were misclassified by the Mazajak/SVM model (36 false positives/121 false negatives) to determine the most common errors. They were as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "Four false positive types:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "\u2022 Gloating: ex. (\"yA hbydp\" -\"O you delusional\") referring to fans of rival sports team for thinking they could win.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "\u2022 Quoting: ex.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "(\"lmA Hd ysb wyqwl yA klb\" -\"when someone swears and says: O dog\").", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "\u2022 Idioms: ex. (\"yA fATr rmDAn yA xAsr dynk\" -\"o you who does not fast Ramadan, you have lost your faith\"), which is a colloquial idiom.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "\u2022 Implicit Sarcasm: ex.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "(\"yA xAyn Ant EAwz t$kk fy Hb Al$Eb llrys\" -\"O traitor, (you) want to question people's love for the president \") where the author is mocking the president's popularity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "Two false negative types:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "\u2022 Mixture of offensiveness and admiration: ex. calling a girl a puppy (\"yA klbwbp\" -\"O puppy\") in a flirtatious manner.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "\u2022 Implicit offensiveness:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "ex. calling for cure while implying sanity:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "(\"wt$fy HkAm bldk mn AlmrD\" -\"and cure rulers of your country from illness\").", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "In this paper we presented a systematic method for building an Arabic offensive language tweet dataset that does not favor specific dialects, topics, or genres. We developed detailed guidelines for tagging the tweets as clean or offensive, including special tags for vulgar tweets and hate speech. We tagged 10,000 tweets, which we plan to release publicly and would constitute the largest available Arabic offensive language dataset. We characterized the offensive tweets in the dataset to determine the topics that illicit such language, the dialects that are most often used, the common modes of offensiveness, and the gender distribution of their authors. We performed this breakdown for offensive tweets in general and for vulgar and hate speech tweets separately. We believe that this is the first detailed analysis of its kind. Lastly, we conducted a large battery of experiments on the dataset, using crossvalidation, to establish a strong system for Arabic offensive language detection. We showed that using an Arabic specific BERT model (AraBERT) and static embeddings trained on tweets produced competitive results on the dataset. For future work, we plan to pursue several directions. First, we want explore target specific offensive language, where attacks against an entity or a group may employ certain expressions that are only offensive within the context of that target and completely innocuous otherwise. Second, we plan to examine the effectiveness of cross dialectal and cross lingual learning of offensive language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Arabic words are provided along with their Buckwalter", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://pytorch.org/ 3 https://github.com/huggingface/ transformers", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Farasa: A fast and furious segmenter for arabic", |
|
"authors": [ |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Abdelali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kareem", |
|
"middle": [], |
|
"last": "Darwish", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nadir", |
|
"middle": [], |
|
"last": "Durrani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hamdy", |
|
"middle": [], |
|
"last": "Mubarak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 conference of the North American chapter of the association for computational linguistics: Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ahmed Abdelali, Kareem Darwish, Nadir Durrani, and Hamdy Mubarak. 2016. Farasa: A fast and furious segmenter for arabic. In Proceedings of the 2016 conference of the North American chapter of the as- sociation for computational linguistics: Demonstra- tions, pages 11-16.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Detecting Abusive Arabic Language Twitter Accounts Using a Multidimensional Analysis Model", |
|
"authors": [ |
|
{ |
|
"first": "Ehab", |
|
"middle": [], |
|
"last": "Abozinadah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ehab Abozinadah. 2017. Detecting Abusive Arabic Language Twitter Accounts Using a Multidimen- sional Analysis Model. Ph.D. thesis, George Mason University.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Mazajak: An online Arabic sentiment analyser", |
|
"authors": [ |
|
{ |
|
"first": "Ibrahim", |
|
"middle": [], |
|
"last": "Abu Farha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Walid", |
|
"middle": [], |
|
"last": "Magdy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Arabic Natural Language Processing Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "192--198", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-4621" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ibrahim Abu Farha and Walid Magdy. 2019. Mazajak: An online Arabic sentiment analyser. In Proceed- ings of the Fourth Arabic Natural Language Process- ing Workshop, pages 192-198, Florence, Italy. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Deep learning for detecting cyberbullying across multiple social media platforms", |
|
"authors": [ |
|
{ |
|
"first": "Sweta", |
|
"middle": [], |
|
"last": "Agrawal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amit", |
|
"middle": [], |
|
"last": "Awekar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "European Conference on Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "141--153", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sweta Agrawal and Amit Awekar. 2018. Deep learn- ing for detecting cyberbullying across multiple so- cial media platforms. In European Conference on Information Retrieval, pages 141-153. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Towards accurate detection of offensive language in online communication in arabic", |
|
"authors": [ |
|
{ |
|
"first": "Azalden", |
|
"middle": [], |
|
"last": "Alakrot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liam", |
|
"middle": [], |
|
"last": "Murray", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikola S", |
|
"middle": [], |
|
"last": "Nikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Procedia computer science", |
|
"volume": "142", |
|
"issue": "", |
|
"pages": "315--320", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Azalden Alakrot, Liam Murray, and Nikola S Nikolov. 2018. Towards accurate detection of offensive lan- guage in online communication in arabic. Procedia computer science, 142:315-320.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Are they our brothers? analysis and detection of religious hate speech in the arabic twittersphere", |
|
"authors": [ |
|
{ |
|
"first": "Nuha", |
|
"middle": [], |
|
"last": "Albadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maram", |
|
"middle": [], |
|
"last": "Kurdi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shivakant", |
|
"middle": [], |
|
"last": "Mishra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "69--76", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nuha Albadi, Maram Kurdi, and Shivakant Mishra. 2018. Are they our brothers? analysis and detec- tion of religious hate speech in the arabic twitter- sphere. In 2018 IEEE/ACM International Confer- ence on Advances in Social Networks Analysis and Mining (ASONAM), pages 69-76. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Arabert: Transformer-based model for arabic language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Wissam", |
|
"middle": [], |
|
"last": "Antoun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fady", |
|
"middle": [], |
|
"last": "Baly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hazem", |
|
"middle": [], |
|
"last": "Hajj", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 4th Workshop on Open-Source Arabic Corpora and Processing Tools, with a Shared Task on Offensive Language Detection", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9--15", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wissam Antoun, Fady Baly, and Hazem Hajj. 2020. Arabert: Transformer-based model for arabic lan- guage understanding. In Proceedings of the 4th Workshop on Open-Source Arabic Corpora and Pro- cessing Tools, with a Shared Task on Offensive Lan- guage Detection, pages 9-15.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Deep learning for hate speech detection in tweets", |
|
"authors": [ |
|
{ |
|
"first": "Pinkesh", |
|
"middle": [], |
|
"last": "Badjatiya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shashank", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manish", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vasudeva", |
|
"middle": [], |
|
"last": "Varma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 26th International Conference on World Wide Web Companion", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "759--760", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pinkesh Badjatiya, Shashank Gupta, Manish Gupta, and Vasudeva Varma. 2017. Deep learning for hate speech detection in tweets. In Proceedings of the 26th International Conference on World Wide Web Companion, pages 759-760. International World Wide Web Conferences Steering Committee.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Follow your ideology: Measuring media ideology on social networks", |
|
"authors": [ |
|
{ |
|
"first": "Pablo", |
|
"middle": [], |
|
"last": "Barber\u00e1", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gaurav", |
|
"middle": [], |
|
"last": "Sood", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Annual Meeting of the European Political Science Association", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pablo Barber\u00e1 and Gaurav Sood. 2015. Follow your ideology: Measuring media ideology on social net- works. In Annual Meeting of the European Political Science Association, Vienna, Austria. Retrieved from http://www. gsood. com/research/papers/mediabias. pdf.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Enriching word vectors with subword information", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "135--146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Associa- tion for Computational Linguistics, 5:135-146.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Early warning signals for war in the news", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Chadefaux", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Journal of Peace Research", |
|
"volume": "51", |
|
"issue": "1", |
|
"pages": "5--18", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Chadefaux. 2014. Early warning signals for war in the news. Journal of Peace Research, 51(1):5-18.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Seminar users in the arabic twitter sphere", |
|
"authors": [ |
|
{ |
|
"first": "Kareem", |
|
"middle": [], |
|
"last": "Darwish", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitar", |
|
"middle": [], |
|
"last": "Alexandrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yelena", |
|
"middle": [], |
|
"last": "Mejova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "International Conference on Social Informatics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "91--108", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kareem Darwish, Dimitar Alexandrov, Preslav Nakov, and Yelena Mejova. 2017. Seminar users in the ara- bic twitter sphere. In International Conference on Social Informatics, pages 91-108. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Automated hate speech detection and the problem of offensive language", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Davidson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dana", |
|
"middle": [], |
|
"last": "Warmsley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Macy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ingmar", |
|
"middle": [], |
|
"last": "Weber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Eleventh International Conference on Web and Social Media (ICWSM)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "512--515", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Davidson, Dana Warmsley, Michael Macy, and Ingmar Weber. 2017. Automated hate speech detection and the problem of offensive language. In Eleventh International Conference on Web and So- cial Media (ICWSM), pages 512-515.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Hate speech detection with comment embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Nemanja", |
|
"middle": [], |
|
"last": "Djuric", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Morris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihajlo", |
|
"middle": [], |
|
"last": "Grbovic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vladan", |
|
"middle": [], |
|
"last": "Radosavljevic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Narayan", |
|
"middle": [], |
|
"last": "Bhamidipati", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 24th international conference on world wide web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "29--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nemanja Djuric, Jing Zhou, Robin Morris, Mihajlo Gr- bovic, Vladan Radosavljevic, and Narayan Bhamidi- pati. 2015. Hate speech detection with comment em- beddings. In Proceedings of the 24th international conference on world wide web, pages 29-30. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "NileULex: A phrase and word level sentiment lexicon for Egyptian and modern standard Arabic", |
|
"authors": [ |
|
{ |
|
"first": "Samhaa", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "El-Beltagy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2900--2905", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samhaa R. El-Beltagy. 2016. NileULex: A phrase and word level sentiment lexicon for Egyptian and mod- ern standard Arabic. In Proceedings of the Tenth In- ternational Conference on Language Resources and Evaluation (LREC'16), pages 2900-2905, Portoro\u017e, Slovenia. European Language Resources Associa- tion (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Measuring nominal scale agreement among many raters", |
|
"authors": [ |
|
{ |
|
"first": "Joseph", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Fleiss", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1971, |
|
"venue": "Psychological bulletin", |
|
"volume": "76", |
|
"issue": "5", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joseph L Fleiss. 1971. Measuring nominal scale agree- ment among many raters. Psychological bulletin, 76(5):378.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Universal language model fine-tuning for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Howard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1031" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeremy Howard and Sebastian Ruder. 2018. Universal language model fine-tuning for text classification. In Proceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), Melbourne, Australia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "The pragmatics of swearing", |
|
"authors": [ |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Jay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristin", |
|
"middle": [], |
|
"last": "Janschewitz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Journal of Politeness Research. Language, Behaviour", |
|
"volume": "4", |
|
"issue": "2", |
|
"pages": "267--288", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Timothy Jay and Kristin Janschewitz. 2008. The prag- matics of swearing. Journal of Politeness Research. Language, Behaviour, Culture, 4(2):267-288.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Bag of tricks for efficient text classification", |
|
"authors": [ |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "427--431", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Armand Joulin, Edouard Grave, Piotr Bojanowski, and Tomas Mikolov. 2017. Bag of tricks for efficient text classification. In Proceedings of the 15th Con- ference of the European Chapter of the Association for Computational Linguistics: Volume 2, Short Pa- pers, pages 427-431. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Locate the hate: Detecting tweets against blacks", |
|
"authors": [ |
|
{ |
|
"first": "Irene", |
|
"middle": [], |
|
"last": "Kwok", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuzhou", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Twenty-seventh AAAI conference on artificial intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Irene Kwok and Yuzhou Wang. 2013. Locate the hate: Detecting tweets against blacks. In Twenty-seventh AAAI conference on artificial intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Race: Large-scale reading comprehension dataset from examinations", |
|
"authors": [ |
|
{ |
|
"first": "Guokun", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qizhe", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanxiao", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guokun Lai, Qizhe Xie, Hanxiao Liu, Yiming Yang, and Eduard Hovy. 2017. Race: Large-scale reading comprehension dataset from examinations.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Mining offensive language on social media", |
|
"authors": [ |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Maisto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Serena", |
|
"middle": [], |
|
"last": "Pelosi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simonetta", |
|
"middle": [], |
|
"last": "Vietri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierluigi", |
|
"middle": [], |
|
"last": "Vitale", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "CLiCit", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alessandro Maisto, Serena Pelosi, Simonetta Vietri, Pierluigi Vitale, and Via Giovanni Paolo II. 2017. Mining offensive language on social media. CLiC- it 2017 11-12 December 2017, Rome, page 252.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Detecting hate speech in social media", |
|
"authors": [ |
|
{ |
|
"first": "Shervin", |
|
"middle": [], |
|
"last": "Malmasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1712.06427" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shervin Malmasi and Marcos Zampieri. 2017. De- tecting hate speech in social media. arXiv preprint arXiv:1712.06427.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "The pervasiveness of slang in standard and non-standard english", |
|
"authors": [ |
|
{ |
|
"first": "Elisa", |
|
"middle": [], |
|
"last": "Mattiello", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Mots Palabras Words", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "7--41", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elisa Mattiello. 2005. The pervasiveness of slang in standard and non-standard english. Mots Palabras Words, 5:7-41.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Aravec: A set of arabic word embedding models for use in arabic nlp", |
|
"authors": [ |
|
{ |
|
"first": "Abu Bakr", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kareem", |
|
"middle": [], |
|
"last": "Eissa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samhaa", |
|
"middle": [], |
|
"last": "El-Beltagy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Procedia Computer Science", |
|
"volume": "117", |
|
"issue": "", |
|
"pages": "256--265", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.procs.2017.10.117" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abu Bakr Mohammad, Kareem Eissa, and Samhaa El- Beltagy. 2017. Aravec: A set of arabic word embed- ding models for use in arabic nlp. Procedia Com- puter Science, 117:256-265.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Arabic offensive language classification on twitter", |
|
"authors": [ |
|
{ |
|
"first": "Hamdy", |
|
"middle": [], |
|
"last": "Mubarak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kareem", |
|
"middle": [], |
|
"last": "Darwish", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Social Informatics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "269--276", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hamdy Mubarak and Kareem Darwish. 2019. Arabic offensive language classification on twitter. In In- ternational Conference on Social Informatics, pages 269-276. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Abusive language detection on arabic social media", |
|
"authors": [ |
|
{ |
|
"first": "Hamdy", |
|
"middle": [], |
|
"last": "Mubarak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kareem", |
|
"middle": [], |
|
"last": "Darwish", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Walid", |
|
"middle": [], |
|
"last": "Magdy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the First Workshop on Abusive Language Online", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "52--56", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hamdy Mubarak, Kareem Darwish, and Walid Magdy. 2017. Abusive language detection on arabic social media. In Proceedings of the First Workshop on Abu- sive Language Online, pages 52-56.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Abusive language detection in online user content", |
|
"authors": [ |
|
{ |
|
"first": "Chikashi", |
|
"middle": [], |
|
"last": "Nobata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Tetreault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Achint", |
|
"middle": [], |
|
"last": "Thomas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yashar", |
|
"middle": [], |
|
"last": "Mehdad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 25th international conference on world wide web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "145--153", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chikashi Nobata, Joel Tetreault, Achint Thomas, Yashar Mehdad, and Yi Chang. 2016. Abusive lan- guage detection in online user content. In Proceed- ings of the 25th international conference on world wide web, pages 145-153. International World Wide Web Conferences Steering Committee.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Improving language understanding by generative pre-training", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Narasimhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Salimans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. 2018. Improving language understanding by generative pre-training. URL https://s3-us-west-2. amazonaws. com/openai- assets/researchcovers/languageunsupervised/language understanding paper. pdf.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Squad: 100,000+ questions for machine comprehension of text", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Lopyrev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. Squad: 100,000+ questions for machine comprehension of text.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Learning from relatives: unified dialectal arabic segmentation", |
|
"authors": [], |
|
"year": 2017, |
|
"venue": "Proceedings of the 21st Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "432--441", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Younes Samih, Mohamed Eldesouki, Mohammed At- tia, Kareem Darwish, Ahmed Abdelali, Hamdy Mubarak, and Laura Kallmeyer. 2017. Learning from relatives: unified dialectal arabic segmentation. In Proceedings of the 21st Conference on Compu- tational Natural Language Learning (CoNLL 2017), pages 432-441.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Glue: A multi-task benchmark and analysis platform for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. 2018. Glue: A multi-task benchmark and analysis platform for natural language understanding.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Hateful symbols or hateful people? predictive features for hate speech detection on twitter", |
|
"authors": [ |
|
{ |
|
"first": "Zeerak", |
|
"middle": [], |
|
"last": "Waseem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the NAACL student research workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "88--93", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zeerak Waseem and Dirk Hovy. 2016. Hateful sym- bols or hateful people? predictive features for hate speech detection on twitter. In Proceedings of the NAACL student research workshop, pages 88-93.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Detection of harassment on web 2.0. Proceedings of the Content Analysis in the WEB", |
|
"authors": [ |
|
{ |
|
"first": "Dawei", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenzhen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liangjie", |
|
"middle": [], |
|
"last": "Hong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Davison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "April", |
|
"middle": [], |
|
"last": "Kontostathis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lynne", |
|
"middle": [], |
|
"last": "Edwards", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "1--7", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dawei Yin, Zhenzhen Xue, Liangjie Hong, Brian D Davison, April Kontostathis, and Lynne Edwards. 2009. Detection of harassment on web 2.0. Pro- ceedings of the Content Analysis in the WEB, 2:1-7.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Semeval-2019 task 6: Identifying and categorizing offensive language in social media (offenseval)", |
|
"authors": [ |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shervin", |
|
"middle": [], |
|
"last": "Malmasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Rosenthal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noura", |
|
"middle": [], |
|
"last": "Farra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ritesh", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1903.08983" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcos Zampieri, Shervin Malmasi, Preslav Nakov, Sara Rosenthal, Noura Farra, and Ritesh Kumar. 2019. Semeval-2019 task 6: Identifying and cate- gorizing offensive language in social media (offen- seval). arXiv preprint arXiv:1903.08983.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Zeses Pitenis, and \u00c7 agr\u0131 \u00c7\u00f6ltekin. 2020. Semeval-2020 task 12: Multilingual offensive language identification in social media (offenseval 2020)", |
|
"authors": [ |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Rosenthal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pepa", |
|
"middle": [], |
|
"last": "Atanasova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georgi", |
|
"middle": [], |
|
"last": "Karadzhov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hamdy", |
|
"middle": [], |
|
"last": "Mubarak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leon", |
|
"middle": [], |
|
"last": "Derczynski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeses", |
|
"middle": [], |
|
"last": "Pitenis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00c7a\u011fr\u0131", |
|
"middle": [], |
|
"last": "\u00c7\u00f6ltekin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2006.07235" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcos Zampieri, Preslav Nakov, Sara Rosenthal, Pepa Atanasova, Georgi Karadzhov, Hamdy Mubarak, Leon Derczynski, Zeses Pitenis, and \u00c7 agr\u0131 \u00c7\u00f6ltekin. 2020. Semeval-2020 task 12: Multilingual offensive language identification in social media (offenseval 2020). arXiv preprint arXiv:2006.07235.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Topic distribution for offensive language and its sub-categories Figure 2: Dialect distribution for offensive language and its sub-categories animals were", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Distribution of Hate Speech Types. Note: A tweet may have more than one type.", |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Tag cloud for words with top valence score among offensive class, e.g. name calling (animals), curses, insults, etc.", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null, |
|
"text": "Distribution of offensive and clean tweets." |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"4\">: Classification performance with different fea-</td></tr><tr><td>tures and models.</td><td/><td/></tr><tr><td>Model</td><td colspan=\"2\">Prec. Recall</td><td>F1</td></tr><tr><td>Decision Tree</td><td>51.2</td><td colspan=\"2\">53.8 52.4</td></tr><tr><td>Random Forest</td><td>82.4</td><td colspan=\"2\">42.4 56.0</td></tr><tr><td>Gaussian NB</td><td>44.9</td><td colspan=\"2\">86.0 59.0</td></tr><tr><td>Perceptron</td><td>75.6</td><td colspan=\"2\">67.7 66.8</td></tr><tr><td>AdaBoost</td><td>74.3</td><td colspan=\"2\">67.0 70.4</td></tr><tr><td>Gradient Boosting</td><td>84.2</td><td colspan=\"2\">63.0 72.1</td></tr><tr><td colspan=\"2\">Logistic Regression 84.7</td><td colspan=\"2\">69.5 76.3</td></tr><tr><td>SVM</td><td>88.6</td><td colspan=\"2\">72.4 79.7</td></tr></table>", |
|
"num": null, |
|
"html": null, |
|
"text": "" |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null, |
|
"text": "Performance of different classification models on Mazajak embeddings." |
|
} |
|
} |
|
} |
|
} |