|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:12:11.145705Z" |
|
}, |
|
"title": "hBert + BiasCorp -Fighting Racism on the Web", |
|
"authors": [ |
|
{ |
|
"first": "Olawale", |
|
"middle": [], |
|
"last": "Onabola", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Montreal Institute for Learning Algorithms (Mila)", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Zhuang", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "CMU", |
|
"institution": "Carnegie Mellon University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Akera", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Montreal Institute for Learning Algorithms (Mila)", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Abdulrahman", |
|
"middle": [], |
|
"last": "Ibraheem", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Montreal Institute for Learning Algorithms (Mila)", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jia", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Toronto", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Dianbo", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Montreal Institute for Learning Algorithms (Mila)", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Montreal Institute for Learning Algorithms (Mila)", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Subtle and overt racism is still present both in physical and online communities today and has impacted many lives in different segments of the society. In this short piece of work, we present how we're tackling this societal issue with Natural Language Processing. We are releasing BiasCorp 12 , a dataset containing 139,090 comments and news segment from three specific sources-Fox News, BreitbartNews and YouTube. The first batch (45,000 manually annotated) is ready for publication. We are currently in the final phase of manually labelling the remaining dataset using Amazon Mechanical Turk. BERT has been used widely in several downstream tasks. In this work, we present hBERT, where we modify certain layers of the pretrained BERT model with the new Hopfield Layer. hBert generalizes well across different distributions with the added advantage of a reduced model complexity. We are also releasing a JavaScript library 3 and a Chrome Extension Application 4 , to help developers make use of our trained model in web applications (say chat application) and for users to identify and report racially biased contents on the web respectively.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Subtle and overt racism is still present both in physical and online communities today and has impacted many lives in different segments of the society. In this short piece of work, we present how we're tackling this societal issue with Natural Language Processing. We are releasing BiasCorp 12 , a dataset containing 139,090 comments and news segment from three specific sources-Fox News, BreitbartNews and YouTube. The first batch (45,000 manually annotated) is ready for publication. We are currently in the final phase of manually labelling the remaining dataset using Amazon Mechanical Turk. BERT has been used widely in several downstream tasks. In this work, we present hBERT, where we modify certain layers of the pretrained BERT model with the new Hopfield Layer. hBert generalizes well across different distributions with the added advantage of a reduced model complexity. We are also releasing a JavaScript library 3 and a Chrome Extension Application 4 , to help developers make use of our trained model in web applications (say chat application) and for users to identify and report racially biased contents on the web respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The internet has evolved to become one of the main sources of textual information for many people. Through social media, reviews, and comment sections across the internet, people are continuously consuming information through text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "With this, racially biased content has become more entrenched within the language of the internet. Racially biased content in this context refers to the attitudes or stereotypes expressed against marginalized races. This is often as a result of implicit bias resulting into hate speech. In this work, we attempt to automatically detect this racially biased content from data collected from the web, including comments from online news outlets such as Fox News and and comments from YouTube videos. We label this dataset with pointers to racial bias and use machine learning techniques to automate this task. Specifically, we implement BERT as a base model to do this. We also implement a browser extension as a tool to help people identify racially biased content in the information they are consuming. We will also be releasing our curated dataset -BiasCorp to allow more research to be done in this direction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "One of the earliest papers to investigate machine learning approaches for the automatic detection of racially-biased online content is (Greevy and Smeaton, 2004) . The paper identified the potential use of bag-of-words, n-grams, and distributions of parts-of-speech tags as features for the task. Their bag-of-words features are informed by ideas from the field of information retrieval, and involve either word frequencies or counts of word occurrences. Using an SVM classifier, for bag-of-words features, they found that the use of frequency of words, rather than number of occurrence of words, yielded greater classification accuracies. The n-grams and parts-of-speech tags techniques were unavailable as of the time of their writing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 135, |
|
"end": 161, |
|
"text": "(Greevy and Smeaton, 2004)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In (Warner and Hirschberg, 2012) , authors followed the definition of (Nockleby and John, 2000) by defining hate speech as \"any communication that disparages a person or a group on the basis of some characteristic such as race, color, ethnicity, gender, sexual orientation, nationality, religion, or other characteristic.\" Their work focused more on detecting anti-Semitic hate speech. For their work, they created a dataset containing hate speech obtained from Yahoo! and the American Jewish Congress. Following the work of (Yarowsky, 1994) , they employed hand-crafted template-based features. Apart from the fact that these features are hand-engineered, a potential drawback is their sheer size: a total of 3,537 features, which is prone to the curse of dimensionality. A counter-intuitive result reported by the paper is that the uni-gram features contributed best to classification accuracies. They used linear-kernel SVMs for classification.", |
|
"cite_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 32, |
|
"text": "(Warner and Hirschberg, 2012)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 70, |
|
"end": 95, |
|
"text": "(Nockleby and John, 2000)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 525, |
|
"end": 541, |
|
"text": "(Yarowsky, 1994)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The work of (C. et al., 2016) dealt with the broad category of abusive language. Authors of the work gave definitions for distinguishing between three categories of abusive language: hate speech which subsumes racial bias, derogatory remarks and profanity. Further, they described reasons why automatic detection of abusive language, which subsumes racial bias, is difficult. Reasons include: clever evasion of detection engines by users via the use of mischievous permutations of words (e.g. Niggah written as Ni99ah); evolution of ethnic slurs with time; role of cultural context in the perception and interpretation of slurs, as a phrase that is considered derogative in one culture might be perfectly neutral in another culture. Towards building their classification model, they employed four categories of features namely, n-grams, lexical features, syntactic/parser features, and word-level as well as comment-level embeddings. They found that character-level n-grams gave the highest contribution to the model's accuracy.", |
|
"cite_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 29, |
|
"text": "(C. et al., 2016)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The authors of (Burnap and Williams, 2016) also developed techniques for detecting multiple hate speech categories including the racially-based category. Towards creating their datasets, they harnessed hate speech event-triggers. For example, to create their racial bias dataset, they collected tweets in a two-week interval following the reelection of Barrack Obama as U.S president. They explored a number of potential features towards building their classification algorithm: bag of words, lexicon of hateful terms, and typed dependencies. In addition, they experimented into classification via SVMs versus classification via random forests, and reported that the former yielded superior performance over the latter. Also, they compared the use of classifiers trained for each hate speech category against the use of a single classifier trained on data spanning all categories. As expected, the specialized classifiers outperformed their multi-category counterpart. (Hasanuzzaman et al., 2017) followed the definition of (Gelber and Stone, 2007) , which states that hate speech is: \"speech or expression which is capable of instilling or inciting hatred of, or prejudice towards, a person or group of people on a specified ground, including race, nationality, ethnicity, country of origin, ethno-religious identity, religion, sexuality, gender identity or gender.\" The main research thrust of their work was to apply demographic embeddings (Bamman et al., 2014) , (Hovy, 2015) , for the task of racial bias detection in tweets. Compared to other works such as (Burnap and Williams, 2016) , for instance, a particularly distinguishing result of (Hasanuzzaman et al., 2017) is how their data extraction procedure is able to arrive at a better balanced ratio of racially-biased to non-racially-biased comments. For example, in the work, 40.58 percent of Canadian tweets were judged racially-biased by human annotators, whereas in (Burnap and Williams, 2016) only about 3.73 percent of the comments in the dataset are racially biased. Classification results using an SVM classifier revealed benefits of their proposed demographic embeddings over traditional features and embeddings. In (Saleh et al., 2020) , the authors explored the detection of hate speech in White supremacist forums. They explored BiLSTM, logistic regression and BERT for their task. Also, they compared the use of domain-agnostic pretrained word embedding (such as GloVe.6B.300d ) versus the use of a domain-aware 300-dimensional word2vec embedding trained on the specific dataset used in the work. Results showed that BERT yields better results than both logistic regression and BiLSTM. Further, results proved the domainaware embeddings to be superior to the pre-trained embeddings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 42, |
|
"text": "(Burnap and Williams, 2016)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 969, |
|
"end": 996, |
|
"text": "(Hasanuzzaman et al., 2017)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1024, |
|
"end": 1048, |
|
"text": "(Gelber and Stone, 2007)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1443, |
|
"end": 1464, |
|
"text": "(Bamman et al., 2014)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1467, |
|
"end": 1479, |
|
"text": "(Hovy, 2015)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1563, |
|
"end": 1590, |
|
"text": "(Burnap and Williams, 2016)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1647, |
|
"end": 1674, |
|
"text": "(Hasanuzzaman et al., 2017)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1930, |
|
"end": 1957, |
|
"text": "(Burnap and Williams, 2016)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 2185, |
|
"end": 2205, |
|
"text": "(Saleh et al., 2020)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The datasets used for training were obtained from discussion channels of online news media by programmed web crawler based on Scrapy framework with all crawled data stored in PostgreSQL database. Since existing comments of online article were generally loaded by asynchronous API accessed by a specific key hidden in the articles before presenting them on website, the web crawler parsed keys for each article after completing a list with URLs of all articles waiting to be further crawled and then matched the keys with their corresponding API to retrieved stored comments for each article.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data curation and processing", |
|
"sec_num": "3.1" |
|
}, |
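
{

"text": "To make the two-stage crawl concrete, the following is a minimal Scrapy sketch of the procedure described above, not the crawler used in this work: the listing URL, the CSS selector, the key-extraction pattern, and the comment-API URL are hypothetical placeholders, since each outlet hides its key differently.

import json
import re

import scrapy


class CommentSpider(scrapy.Spider):
    name = 'comment_spider'
    # Placeholder listing page; the real start URLs depend on the outlet.
    start_urls = ['https://www.example-news-outlet.com/politics']

    def parse(self, response):
        # Stage 1: complete a list of URLs of all articles to be crawled.
        for href in response.css('a.article-link::attr(href)').getall():
            yield response.follow(href, callback=self.parse_article)

    def parse_article(self, response):
        # Stage 2: parse the key hidden in the article page that unlocks
        # the asynchronous comment API for this article.
        match = re.search(r'commentKey\W+(\w+)', response.text)
        if match is None:
            return
        api_url = f'https://comments.example-news-outlet.com/api?key={match.group(1)}'
        yield scrapy.Request(api_url, callback=self.parse_comments,
                             cb_kwargs={'article_url': response.url})

    def parse_comments(self, response, article_url):
        # Retrieved comments are then persisted (in our setup, to PostgreSQL
        # via an item pipeline); here we simply yield them as items.
        for comment in json.loads(response.text).get('comments', []):
            yield {'article_url': article_url, 'text': comment.get('body', '')}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data curation and processing",

"sec_num": "3.1"

},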
|
{ |
|
"text": "First, sentences containing neural racial words from a curated list were selected. Second, the sentiment score of each comment was calculated according to two lookup tables: a combined and augmented (Jockers, 2015) and Rinker's augmented Hu and Liu (Tyler Rinker, 2016 ) (Hu and Liu, 2004) positive/negative word list as sentiment lookup values, and a racial-related English lookup table from Hatebase 5 . To guarantee these two tables influence the sentiment score consistently, the lookup values of the Hatebase table were adjusted by percentage. Then we extracted the data with bottom 20 percent of the sentiment score, and matched them up with other randomly selected comments appearing under the same articles or videos as random control. Finally, equal numbers of random controls are added into the data set, to ensure that approximately half of the data is racially discriminatory.", |
|
"cite_spans": [ |
|
{ |
|
"start": 199, |
|
"end": 214, |
|
"text": "(Jockers, 2015)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 238, |
|
"end": 268, |
|
"text": "Hu and Liu (Tyler Rinker, 2016", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 271, |
|
"end": 289, |
|
"text": "(Hu and Liu, 2004)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data curation and processing", |
|
"sec_num": "3.1" |
|
}, |
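
{

"text": "The selection procedure can be summarized with the sketch below. It is illustrative only: the lookup tables are assumed to be loaded as word-to-value dictionaries, and the Hatebase rescaling factor is a placeholder for the percentage adjustment described above.

import random

def sentiment_score(comment, sentiment_lookup, hatebase_lookup, hatebase_scale=0.01):
    # Sum lookup values over tokens; Hatebase values are rescaled so both
    # tables influence the score consistently.
    tokens = comment.lower().split()
    score = sum(sentiment_lookup.get(t, 0.0) for t in tokens)
    score += sum(hatebase_lookup.get(t, 0.0) * hatebase_scale for t in tokens)
    return score

def build_dataset(comments_by_article, sentiment_lookup, hatebase_lookup):
    scored = [(c, a, sentiment_score(c, sentiment_lookup, hatebase_lookup))
              for a, comments in comments_by_article.items() for c in comments]
    scored.sort(key=lambda item: item[2])
    # Keep the bottom 20 percent of sentiment scores as candidate biased comments.
    candidates = scored[: len(scored) // 5]
    dataset = [(c, a, 'candidate') for c, a, _ in candidates]
    # Match each candidate with a random control from the same article or video,
    # so that roughly half of the final data is non-discriminatory.
    for c, article, _ in candidates:
        pool = [x for x in comments_by_article[article] if x != c]
        if pool:
            dataset.append((random.choice(pool), article, 'control'))
    return dataset",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data curation and processing",

"sec_num": "3.1"

},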
|
{ |
|
"text": "Attention-based Transformer network (Vaswani et al., 2017) has been used widely across different natural language processing tasks. Based on the previous successes of the transformer network, we decided to use the BERT Architecture (Devlin et al., 2019 ) as our base model. Unlike previous variant of the attention-based language models such as (Radford et al., 2018) , BERT learns to jointly conditions on the right and left context of the input representation at all the layers by randomly masking out segments of the input token. This is particularly useful for extracting contextual information from the input representation, and it's very applicable to our use case. We aim to build a variant of the model that can generalize sufficiently well across different data distributions 6 . The notion of sufficiency is evaluated by training, validating and testing our model on data across the different sources. We fine-tune the pretrained BERT model on our curated dataset rather than training from scratch (this choice was based on empirical results). We are releasing a JavaScript library for developers to use our pretrained model in front facing applications such as chat app, to flag down racially biased comments. Consequently, we need to optimize for the model complexity without sacrificing performance gain. BERT has a huge number of parameters / large model size. Other methods have been employed to reduce the complexity without hurting the performance, such as knowledge distillation (Sanh et al., 2019) and quantization (Zafrir et al., 2019) . It has also been proven that pruning the weights of the pretrained model do not necessarily affect the model performance, within acceptable 'thresholds' (Gordon et al., 2020) . In a similar fashion, we aim to reduce the complexity of BERT without sacrificing performance by replacing certain layers with the Hopfield layer (Ramsauer et al., 2020) . Hopfield layer can be used to replace the attentionbased layer of the BERT model; as it has been shown to approximate the functionality of the attention mechanism with a new Energy update rule (modified version of the Hopfield network extended to continuous state representation). The learning dynamics of BERT as shown in (Ramsauer et al., 2020) shows that the attention heads in the higher layers are mostly responsible for extracting task-specific features from the input representation. We replaced the self-attention mechanism in the last X layers of the pretrained BERT model with a Hopfield layer, where X is an hyperparameter. In a similar approach described in (Vaswani et al., 2017) , we use residual connection around the Hopfield sub-layer, followed by layer normalization (Ba et al., 2016) . It has been shown that residual connections help propagate positional information across layers. The replaced Hopfield layer drastically reduced the parameter size of our model. To further improve the performance of the model, we use the Hopfield Pooling layer which acts as both a permutation equivariant layer and pools generated embedding from the modified BERT model. The Hopfield pooling layer also acts as a form of memory to store the hidden state of the last layer in the modified BERT model. Finally, we add a classification layer on top of the pooling layer for the task in question.", |
|
"cite_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 58, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 232, |
|
"end": 252, |
|
"text": "(Devlin et al., 2019", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 345, |
|
"end": 367, |
|
"text": "(Radford et al., 2018)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1497, |
|
"end": 1516, |
|
"text": "(Sanh et al., 2019)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1534, |
|
"end": 1555, |
|
"text": "(Zafrir et al., 2019)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1711, |
|
"end": 1732, |
|
"text": "(Gordon et al., 2020)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 1881, |
|
"end": 1904, |
|
"text": "(Ramsauer et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2230, |
|
"end": 2253, |
|
"text": "(Ramsauer et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2577, |
|
"end": 2599, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2692, |
|
"end": 2709, |
|
"text": "(Ba et al., 2016)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "3.2" |
|
}, |
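
{

"text": "A simplified PyTorch sketch of this architecture is shown below. It is a sketch under stated assumptions, not our released implementation: our model uses the Hopfield layer and Hopfield pooling of (Ramsauer et al., 2020), which the sketch approximates with the core continuous-Hopfield retrieval rule softmax(beta * Q K^T) V; dropping the last num_hf_layers encoder layers and appending Hopfield blocks is a simplification of swapping the attention sublayer in place, and num_classes=6 mirrors the 0-5 bias-score classes used later.

import torch
import torch.nn as nn
from transformers import BertModel


class HopfieldUpdate(nn.Module):
    # One step of the continuous modern-Hopfield retrieval rule,
    # used here as a stand-in for the Hopfield layer.
    def __init__(self, hidden_size, beta=1.0):
        super().__init__()
        self.query = nn.Linear(hidden_size, hidden_size)
        self.key = nn.Linear(hidden_size, hidden_size)
        self.value = nn.Linear(hidden_size, hidden_size)
        self.beta = beta
        self.norm = nn.LayerNorm(hidden_size)

    def forward(self, states):
        q, k, v = self.query(states), self.key(states), self.value(states)
        retrieved = torch.softmax(self.beta * q @ k.transpose(-2, -1), dim=-1) @ v
        # Residual connection around the Hopfield sub-layer, then layer norm.
        return self.norm(states + retrieved)


class HBert(nn.Module):
    def __init__(self, num_classes=6, num_hf_layers=2, beta=1.0):
        super().__init__()
        self.bert = BertModel.from_pretrained('bert-base-uncased')
        hidden = self.bert.config.hidden_size
        # Keep the lower BERT layers; replace the last num_hf_layers
        # encoder layers with Hopfield update blocks.
        self.bert.encoder.layer = self.bert.encoder.layer[:-num_hf_layers]
        self.hopfield_layers = nn.ModuleList(
            HopfieldUpdate(hidden, beta) for _ in range(num_hf_layers))
        # Stand-in for the Hopfield pooling layer: a learned state pattern
        # that attends over (pools) the final hidden states.
        self.pool_query = nn.Parameter(torch.randn(1, 1, hidden))
        self.classifier = nn.Linear(hidden, num_classes)

    def forward(self, input_ids, attention_mask=None):
        states = self.bert(input_ids, attention_mask=attention_mask).last_hidden_state
        for layer in self.hopfield_layers:
            states = layer(states)
        weights = torch.softmax(self.pool_query @ states.transpose(-2, -1), dim=-1)
        pooled = (weights @ states).squeeze(1)
        return self.classifier(pooled)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model Architecture",

"sec_num": "3.2"

},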
|
{ |
|
"text": "Given the disparity between the annotators for each sample in our dataset, averaging the labels with the confidence scores as weights might be noisy. We computed the coefficient of variation CV among annotators for each sample in our dataset. Using the recommended (JUDICE et al., 1999) (Veit et al., 2017) CV of 0.2 for the bias scores would imply dropping 90% of the dataset as seen in 2. In order to fully utilize the dataset and effectively manage the disparity between the annotators, we formulate a loss function L model given by", |
|
"cite_spans": [ |
|
{ |
|
"start": 265, |
|
"end": 286, |
|
"text": "(JUDICE et al., 1999)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 287, |
|
"end": 306, |
|
"text": "(Veit et al., 2017)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "L model = 1/N N i=1 CE p x i , q x i (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where CE p x i , q x i is the cross entropy between p(x i ) and q(x i ) for the ith sample, and N is the size of the dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "CE(p, q) = \u2212 c i=1 p c (x) log( + q c (x)) (2) q c (x)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "is the predicted probability of sample x in class c, equivalently, the output probabilities from the model and is for numerical stability. p c (x) is the probability of sample x in class c, equivalently, p c (x) is a c \u2212 length vector with entries such that c i=1 p c (x) = 1. The entries of p c (x) are the normalized confidence scores of the annotators with indices given by the respective voted classes. As an example, following the algorithm described in 1, for a given sample shown in figure 1; the bias scores of the 3 different annotators with their confidence level is represented with an array of tuples, X where each tuple, (b i , s i ) is the bias score b i with the associated confidence score, s i by annotator i. To calculate p c (x), we first normalize the confidence scores across the 3 different annotators such that 3 i=1 s i = 1. The resulting p c (x) for the entry, S, shown in 1 is X = (4, 4), (3, 3), (2, 5) 0., 0., 0.4167, 0.25, 0.3333, 0.] ", |
|
"cite_spans": [ |
|
{ |
|
"start": 930, |
|
"end": 963, |
|
"text": "0., 0., 0.4167, 0.25, 0.3333, 0.]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Training", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "X norm = (4, 0.3333), (3, 0.25), (2, 0.4167) p c (X) = [", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Training", |
|
"sec_num": "3.3" |
|
}, |
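
{

"text": "Equations (1)-(2) translate directly into code. The following is a minimal PyTorch sketch, assuming the model logits have shape (N, C) and the annotator-derived distributions p are stacked as an (N, C) tensor; the epsilon value is an assumption.

import torch

def soft_label_cross_entropy(logits, p, eps=1e-8):
    # q_c(x): predicted class probabilities from the model's output logits.
    q = torch.softmax(logits, dim=-1)
    # Eq. (2): cross entropy per sample, with eps for numerical stability.
    ce = -(p * torch.log(eps + q)).sum(dim=-1)
    # Eq. (1): average over the N samples.
    return ce.mean()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model Training",

"sec_num": "3.3"

},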
|
{ |
|
"text": "We evaluate the model performance across the validation and test set, given that they are from Step 1:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Task and Metrics", |
|
"sec_num": "3.4" |
|
}, |
|
{

"text": "For reference, Algorithm 1, used above to construct the target distributions, proceeds in three steps. Step 1: initialize $p_c \leftarrow [0.0$ for each of the $C$ classes$]$. Step 2: calculate the normalizing constant $K \leftarrow \sum_{i=1}^{N} s[i]$. Step 3: set the values of $p_c$: for each $i$ in $1..N$, let $class\_index \leftarrow t[i]$ and update $p_c[class\_index] \mathrel{+}= s[i] / K$.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation Task and Metrics",

"sec_num": "3.4"

},
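
{

"text": "A direct Python rendering of Algorithm 1 follows; the function signature is ours, and the worked example reproduces the annotation from Figure 1.

def compute_pc(annotations, num_classes):
    # annotations: one (bias_score, confidence) tuple per annotator.
    p_c = [0.0] * num_classes                     # Step 1: initialize p_c
    K = sum(conf for _, conf in annotations)      # Step 2: normalizing constant
    for score, conf in annotations:               # Step 3: accumulate s[i] / K
        p_c[score] += conf / K
    return p_c

X = [(4, 4), (3, 3), (2, 5)]                      # (b_i, s_i) per annotator
print([round(v, 4) for v in compute_pc(X, num_classes=6)])
# -> [0.0, 0.0, 0.4167, 0.25, 0.3333, 0.0]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation Task and Metrics",

"sec_num": "3.4"

},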
|
{ |
|
"text": "different distributions or sources. The test set contains only comments from YouTube while the validation set was randomly sampled from Fox News and BreitbartNews. The particular choices were due to the fact that the first batch of the dataset used for training contained very relatively few samples from YouTube. We evaluate our approach using two methods; multiclass classification and multiclass-multilabel classification. Using the multiclass approach, for a given sample, k and using the method described previously in calculating the target class, the class with the maximum confidence score was used as the target. We calculate the average precision for each class, AP c and the mean average precision M AP averaged over the entire dataset with size N along the class dimension d as described in (Veit et al., 2017) AP c = The useHopfieldPool variable denotes whether the Hopfield Pooling layer was used The lr, pool num heads, num hf layers, val loss epoch variables in the graph are the learning rate, the number of heads in the Hopfield Pooling Layer (if used), the number of Hopfield Layer and the validation loss respectively", |
|
"cite_spans": [ |
|
{ |
|
"start": 803, |
|
"end": 822, |
|
"text": "(Veit et al., 2017)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Task and Metrics", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": ". With a reduced model complexity, the hBert performs relatively as good as the baseline where P recision(k, c) is the precision for class c for the kth sample and rel(k, c) is an indicator function that is 1 if the predicted and the target class for sample k is positive. We also report the topK accuracy, for k = [1, 3] since we had a max of 3 annotators for each k. Using the multilabel approach, for a given sample, k and using the method described previously in calculating the target class, we take the top k classes as the target classes. We do the same for the predictions (obtained after passing the output logits through a softmax function). We compute the AP c (for each class), mAP, F 1 score, and IoU", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Task and Metrics", |
|
"sec_num": "3.4" |
|
}, |
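
{

"text": "These metrics can be sketched as follows, assuming the prediction probabilities and the annotator-derived target distributions are given as (N, C) arrays. Per-class AP is computed with scikit-learn, and the top-k label construction mirrors the description above; the sketch assumes every class appears as a target at least once.

import numpy as np
from sklearn.metrics import average_precision_score, f1_score, jaccard_score

def evaluate(probs, target_probs, k=3):
    n, c = probs.shape
    # One-hot targets from the maximum-confidence class.
    onehot = (target_probs == target_probs.max(axis=1, keepdims=True)).astype(int)
    ap_per_class = [average_precision_score(onehot[:, j], probs[:, j])
                    for j in range(c)]
    # Top-k accuracy: the argmax target is among the k highest predictions.
    topk_pred = np.argsort(-probs, axis=1)[:, :k]
    topk_acc = float(np.mean([t in row for t, row in
                              zip(target_probs.argmax(axis=1), topk_pred)]))
    # Multilabel view: top-k classes of both target and prediction.
    topk_true = np.argsort(-target_probs, axis=1)[:, :k]
    y_true = np.zeros((n, c), dtype=int)
    y_pred = np.zeros((n, c), dtype=int)
    for i in range(n):
        y_true[i, topk_true[i]] = 1
        y_pred[i, topk_pred[i]] = 1
    return {'AP': ap_per_class, 'mAP': float(np.mean(ap_per_class)),
            'topk_acc': topk_acc,
            'F1': f1_score(y_true, y_pred, average='macro'),
            'IoU': jaccard_score(y_true, y_pred, average='macro')}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation Task and Metrics",

"sec_num": "3.4"

},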
|
{ |
|
"text": "We run a multi-objective hyperparameter search (using Optuna (Akiba et al., 2019) ) optimizing for the following parameters: validation loss, FLOPs (indicative of the model complexity and ultimately the inference time), mAP on the validation and test set, and the Intersection over Union IoU scores (also known as the Jaccard Index) for the topk for k = [1, 3] transformations described above. We use 4 NVidia V100SXM2 (16G memory) GPUs on a single node, with batch size of 32. We reduced the batch size (instead of say 64) because we had to run multiple trials and to avoid the notorious OOM error. For each model configuration, we run 10 trials with 5 epochs each. As seen in 3, the hBert perform relatively better with a reduced model complexity. In 1, the models predictions were more accurate for an increasing k. The hBert perform better than the Baseline for the Top1 accuracy. The F1 scores and Jaccard Index (IoU) for the hBert were relatively higher for k = [1, 3]. The mAP , which is the average of the AP c over the classes, is relatively low because of the low performing classes as seen in 2", |
|
"cite_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 81, |
|
"text": "(Akiba et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Details & Result", |
|
"sec_num": "4.1" |
|
}, |
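
{

"text": "A minimal Optuna sketch of this multi-objective search is shown below; train_and_evaluate is a placeholder for our training loop, and the search ranges are illustrative rather than the exact values used.

import optuna

def objective(trial):
    config = {
        'lr': trial.suggest_float('lr', 1e-6, 1e-3, log=True),
        'num_hf_layers': trial.suggest_int('num_hf_layers', 1, 4),
        'use_hopfield_pool': trial.suggest_categorical('useHopfieldPool', [True, False]),
        'pool_num_heads': trial.suggest_int('pool_num_heads', 1, 8),
    }
    # Placeholder: trains for 5 epochs with batch size 32 and returns
    # validation loss, FLOPs, mAP, and IoU for this configuration.
    val_loss, flops, mAP, iou = train_and_evaluate(config, epochs=5, batch_size=32)
    return val_loss, flops, mAP, iou

study = optuna.create_study(
    directions=['minimize', 'minimize', 'maximize', 'maximize'])
study.optimize(objective, n_trials=10)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training Details & Result",

"sec_num": "4.1"

},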
|
{ |
|
"text": "The data set contains 139,090 rows, and 67.70 percent of their sentiment scores are negative. Their average sentiment score is -0.1422, and the median value is -0.1203, ranging from -3.6206 to 2.1414. 66,998 of them are comments from Fox News, with an average sentiment score of -0.0997 and a median of -0.0884, ranging from -2.8591 to 2.1414. And 63,948 of the data are comments from Breitbart News, with an average sentiment score of -0.1760 and a median of -0.1721, ranging from -3.6206 to 1.3576. And 8,144 of the data are comments from YouTube, with an average sentiment score of -0.2259 and a median of -0.2694, ranging from -3.3000 to 1.4673. In this work, we used the first batch of the dataset; which have been manually annotated using Amazon Mechanical Turk. After pre-processing the input text (removing irrelevant tokens such as mentions), the maximum length was 478 (it was 623 before preprocessing).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data statistics", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In this work we have shown a way to detect racial bias in text. We experimented with a BERT-based model as we aim to reduce model complexity without sacrificing much of the performance. We also discussed the BiasCorp, a manually labelled dataset containing racially biased comments from Fox News, BreitbartNews and YouTube. To enable developers make use of our pretrained hBERT model, we are releasing a Javascript Library, optimized for inference on the edge. A Chrome Extension will also be available for users to help report and identify racially bias text on the web. We also plan to extend this work to other forms of biases such as Gender. In a future work, we plan to further reduce the model complexity by using Gaussian Kernel as described in (Ramsauer et al., 2020) and other quantization tricks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 752, |
|
"end": 775, |
|
"text": "(Ramsauer et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "https://hatebase.org/ 6 distributions here implies different use cases or data environments/sources", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This research was enabled in part by support provided by Calcul Qu\u00e9bec (www.calculquebec.ca) and Compute Canada (www.computecanada.ca)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Optuna: A next-generation hyperparameter optimization framework", |
|
"authors": [ |
|
{ |
|
"first": "Takuya", |
|
"middle": [], |
|
"last": "Akiba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shotaro", |
|
"middle": [], |
|
"last": "Sano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toshihiko", |
|
"middle": [], |
|
"last": "Yanase", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takeru", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masanori", |
|
"middle": [], |
|
"last": "Koyama", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 25rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Takuya Akiba, Shotaro Sano, Toshihiko Yanase, Takeru Ohta, and Masanori Koyama. 2019. Optuna: A next-generation hyperparameter optimization framework. In Proceedings of the 25rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Layer normalization", |
|
"authors": [ |
|
{ |
|
"first": "Jimmy", |
|
"middle": [ |
|
"Lei" |
|
], |
|
"last": "Ba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [ |
|
"Ryan" |
|
], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E. Hinton. 2016. Layer normalization.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Distributed representations of geographically situated language", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Bamman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "828--834", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Bamman, Chris Dyer, and Noah A. Smith. 2014. Distributed representations of geographically situated language. In Proceedings of the Association for Computational Linguistics, pages 828-834.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Us and them: identifying cyber hate on twitter across multiple protected characteristics", |
|
"authors": [ |
|
{ |
|
"first": "Pete", |
|
"middle": [], |
|
"last": "Burnap", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mathew", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "EPJ Data Science", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "1817--1853", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pete Burnap and Mathew Williams. 2016. Us and them: identifying cyber hate on twitter across multiple protected characteristics. EPJ Data Science, 5:1817-1853.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Abusive language detection in online user content", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Nobata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tetreault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Thomas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Mehdad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chang", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 25th international conference on world wide web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "145--153", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nobata C., Tetreault J., Thomas A., Y. Mehdad, and Chang Y. 2016. Abusive language detection in online user content. In Proceedings of the 25th international conference on world wide web, pages 145-153.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Hate Speech and Freedom of Speech in Australia", |
|
"authors": [ |
|
{ |
|
"first": "Katharine", |
|
"middle": [], |
|
"last": "Gelber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adrienne Sarah Ackary", |
|
"middle": [], |
|
"last": "Stone", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Katharine Gelber and Adrienne Sarah Ackary Stone. 2007. Hate Speech and Freedom of Speech in Australia. Federation Press.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Compressing bert: Studying the effects of weight pruning on transfer learning", |
|
"authors": [ |
|
{ |
|
"first": "Mitchell", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Gordon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Duh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Andrews", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mitchell A. Gordon, Kevin Duh, and Nicholas Andrews. 2020. Compressing bert: Studying the effects of weight pruning on transfer learning.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Text categorisation of racist texts using a support vector machine", |
|
"authors": [ |
|
{ |
|
"first": "Edel", |
|
"middle": [], |
|
"last": "Greevy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Smeaton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "JADT 2004 : 7es Journ\u00e9es internationales d'Analyse statistique des Donn\u00e9es Textuelles", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "533--544", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edel Greevy and Alan Smeaton. 2004. Text categorisation of racist texts using a support vector machine. JADT 2004 : 7es Journ\u00e9es internationales d'Analyse statistique des Donn\u00e9es Textuelles, pages 533-544.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Demographic word embeddings for racism detection on twitter", |
|
"authors": [ |
|
{ |
|
"first": "Mohammed", |
|
"middle": [], |
|
"last": "Hasanuzzaman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gael", |
|
"middle": [], |
|
"last": "Dias", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Way", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 8th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "926--936", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammed Hasanuzzaman, Gael Dias, and Andy Way. 2017. Demographic word embeddings for racism detection on twitter. In Proceedings of the 8th International Joint Conference on Natural Language Processing, pages 926-936.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Demographic factors improve classification performance", |
|
"authors": [ |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "752--762", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dirk Hovy. 2015. Demographic factors improve classification performance. In Proceedings of the Association for Computational Linguistics, pages 752-762.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Mining and summarizing customer reviews", |
|
"authors": [ |
|
{ |
|
"first": "Minqing", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "168--177", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/1014052.1014073" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minqing Hu and Bing Liu. 2004. Mining and summarizing customer reviews. pages 168-177.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Syuzhet: Extract Sentiment and Plot Arcs from Text", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Jockers", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew L. Jockers. 2015. Syuzhet: Extract Sentiment and Plot Arcs from Text.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Avalia\u00e7\u00e3o do coeficiente de varia\u00e7\u00e3o na experimenta\u00e7\u00e3o com su\u00ednos", |
|
"authors": [ |
|
{ |
|
"first": "Marcelo", |
|
"middle": [], |
|
"last": "Judice", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Augusto", |
|
"middle": [], |
|
"last": "Muniz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Carvalheiro", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "MARCELO JUDICE, Muniz Augusto, and Roberto Carvalheiro. 1999. Avalia\u00e7\u00e3o do coeficiente de varia\u00e7\u00e3o na experimenta\u00e7\u00e3o com su\u00ednos. Ci\u00eancia e Agrotecnologia, 23.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Hate speech", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Nockleby", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Encyclopedia of the American Constitution", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1277--1279", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nockleby and John. 2000. Hate speech. In Encyclopedia of the American Constitution, pages 1277-1279.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Improving language understanding with unsupervised learning", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Narasimhan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. 2018. Improving language understanding with unsupervised learning.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Detecting white supremacist hate speech using domain specific word embedding with deep learning and bert", |
|
"authors": [ |
|
{ |
|
"first": "Hind", |
|
"middle": [], |
|
"last": "Saleh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Areej", |
|
"middle": [], |
|
"last": "Alhothali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kawthar", |
|
"middle": [], |
|
"last": "Moria", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Submission to Information Processing and Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "533--544", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hind Saleh, Areej Alhothali, and Kawthar Moria. 2020. Detecting white supremacist hate speech using domain specific word embedding with deep learning and bert. In Submission to Information Processing and Management, pages 533-544.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Distilbert, a distilled version of BERT: smaller, faster, cheaper and lighter", |
|
"authors": [ |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. Distilbert, a distilled version of BERT: smaller, faster, cheaper and lighter. CoRR, abs/1910.01108.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "sentimentr: Dictionary based sentiment analysis that considers valence shifters", |
|
"authors": [ |
|
{ |
|
"first": "Vitalie", |
|
"middle": [], |
|
"last": "Spinu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tyler", |
|
"middle": [], |
|
"last": "Rinker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vitalie Spinu Tyler Rinker. 2016. sentimentr: Dictionary based sentiment analysis that considers valence shifters.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Learning from noisy large-scale datasets with minimal supervision", |
|
"authors": [ |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Veit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Neil", |
|
"middle": [], |
|
"last": "Alldrin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gal", |
|
"middle": [], |
|
"last": "Chechik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Krasin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhinav", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Serge", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Belongie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andreas Veit, Neil Alldrin, Gal Chechik, Ivan Krasin, Abhinav Gupta, and Serge J. Belongie. 2017. Learning from noisy large-scale datasets with minimal supervision. CoRR, abs/1701.01619.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Detecting hate speech on the world wide web", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Warner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Hirschberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Second Workshop on Language in Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "19--26", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Warner and J. Hirschberg. 2012. Detecting hate speech on the world wide web. In Proceedings of the Second Workshop on Language in Social Media, pages 19-26.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Decision lists for lexical ambiguity resolution: Application to accent restoration in spanish and french", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Yarowsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Proceedings of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "88--95", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Yarowsky. 1994. Decision lists for lexical ambiguity resolution: Application to accent restoration in spanish and french. In Proceedings of the ACL, pages 88-95.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Q8BERT: quantized 8bit BERT. CoRR", |
|
"authors": [ |
|
{ |
|
"first": "Ofir", |
|
"middle": [], |
|
"last": "Zafrir", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guy", |
|
"middle": [], |
|
"last": "Boudoukh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Izsak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Moshe", |
|
"middle": [], |
|
"last": "Wasserblat", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ofir Zafrir, Guy Boudoukh, Peter Izsak, and Moshe Wasserblat. 2019. Q8BERT: quantized 8bit BERT. CoRR, abs/1910.06188.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Compute p c (x) for a sample x Result: p c (x) Input: An array of target scores t, and array of confidence scores s where s[i] is the confidence score by annotator i for choosing target score t[i] Both arrays are of equal length N where N is the number of annotators. C is the number of classes (equivalently the range/max of possible target scores if scores are integer.)", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Figure 1: Sample annotation", |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Parallel Coordinate Graph for multiple runs/trials across model configurations The model configuration is the Baseline when the target variable (useHopfieldLayers in the graph is False.", |
|
"uris": null |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td>Model</td><td>0</td><td>1</td><td>2</td><td>AP 3</td><td>4</td><td>5</td></tr><tr><td colspan=\"7\">Baseline 0.2205 0.0967 0.1344 0.9564 0.1103 0.2340</td></tr><tr><td>hBert</td><td colspan=\"6\">0.1195 0.1111 0.2132 0.9607 0.5049 0.1914</td></tr></table>", |
|
"text": "Test Metrics for selected trial for each model configuration", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "The Average Precision (AP) for the different classes", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |