|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:06:13.623047Z" |
|
}, |
|
"title": "DeTox at GermEval 2021: Toxic Comment Classification", |
|
"authors": [ |
|
{ |
|
"first": "Mina", |
|
"middle": [], |
|
"last": "Sch\u00fctz", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Darmstadt University of Applied Sciences", |
|
"location": { |
|
"addrLine": "Max-Planck-Stra\u00dfe 2", |
|
"postCode": "64807", |
|
"settlement": "Dieburg" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Christoph", |
|
"middle": [], |
|
"last": "Demus", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Jonas", |
|
"middle": [], |
|
"last": "Pitz", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Darmstadt University of Applied Sciences", |
|
"location": { |
|
"addrLine": "Max-Planck-Stra\u00dfe 2", |
|
"postCode": "64807", |
|
"settlement": "Dieburg" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Nadine", |
|
"middle": [], |
|
"last": "Probol", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Darmstadt University of Applied Sciences", |
|
"location": { |
|
"addrLine": "Max-Planck-Stra\u00dfe 2", |
|
"postCode": "64807", |
|
"settlement": "Dieburg" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Melanie", |
|
"middle": [], |
|
"last": "Siegel", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Darmstadt University of Applied Sciences", |
|
"location": { |
|
"addrLine": "Max-Planck-Stra\u00dfe 2", |
|
"postCode": "64807", |
|
"settlement": "Dieburg" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Labudde", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In this work, we present our approaches on the toxic comment classification task (subtask 1) of the GermEval 2021 Shared Task. For this binary task, we propose three models: a German BERT transformer model; a multilayer perceptron, which was first trained in parallel on textual input and 14 additional linguistic features and then concatenated in an additional layer; and a multilayer perceptron with both feature types as input. We enhanced our pre-trained transformer model by retraining it with over 1 million tweets and fine-tuned it on two additional German datasets of similar tasks. The embeddings of the final finetuned German BERT were taken as the textual input features for our neural networks. Our best models on the validation data were both neural networks, however our enhanced German BERT gained with a F1-score = 0.5895 a higher prediction on the test data.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In this work, we present our approaches on the toxic comment classification task (subtask 1) of the GermEval 2021 Shared Task. For this binary task, we propose three models: a German BERT transformer model; a multilayer perceptron, which was first trained in parallel on textual input and 14 additional linguistic features and then concatenated in an additional layer; and a multilayer perceptron with both feature types as input. We enhanced our pre-trained transformer model by retraining it with over 1 million tweets and fine-tuned it on two additional German datasets of similar tasks. The embeddings of the final finetuned German BERT were taken as the textual input features for our neural networks. Our best models on the validation data were both neural networks, however our enhanced German BERT gained with a F1-score = 0.5895 a higher prediction on the test data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In recent years social media platforms became a popular medium to discuss all kinds of topics with people around the world. Also shops, companies, TV-shows and many more use social media to present their content to followers and discuss it with them. As it is possible to interact almost anonymously on the internet, such social media pages are often confronted with the problem of hate speech and toxic comments targeting single persons or whole groups (Watanabe et al., 2018) . Although hate speech detection has been a top research topic for several years, there exists no satisfactory solution yet (Stru\u00df et al., 2019) . The GermEval Shared Task 2021 (Risch et al., 2021) addresses this topic -especially the side of social media moderators that are responsible to filter such comments -in this years challenge with the following three tasks, where we participate in subtask 1: Over the last years transformer (Vaswani et al., 2017) models like BERT (Bidirectional Encoder Representations with Transformers) (Devlin et al., 2019) became state-of-the-art for many natural language processing (NLP) tasks and regularly outperformed traditional machine learning models and neural networks (Zampieri et al., 2020; Kumar et al., 2020) . Nevertheless, the GermEval Shared Task 2019 showed that traditional machine learning methods can still achieve comparable results to the transformer models if the features are well chosen (Stru\u00df et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 454, |
|
"end": 477, |
|
"text": "(Watanabe et al., 2018)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 602, |
|
"end": 622, |
|
"text": "(Stru\u00df et al., 2019)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 655, |
|
"end": 675, |
|
"text": "(Risch et al., 2021)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 914, |
|
"end": 936, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1012, |
|
"end": 1033, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1190, |
|
"end": 1213, |
|
"text": "(Zampieri et al., 2020;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1214, |
|
"end": 1233, |
|
"text": "Kumar et al., 2020)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1424, |
|
"end": 1444, |
|
"text": "(Stru\u00df et al., 2019)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Therefore, we decided to experiment with standard supervised machine learning models and neural networks, different word embeddings, and pretrained transformer models. We then chose our best performing transformer model, enhanced it with re-training on extracted tweets in German, and finetuned it with additional datasets. The extracted word embeddings by our transformer model were used as an textual input for our neural network architectures besides additional features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our presented work is structured as follows: Section 2 gives an overview of related work. In Section 3 we describe the GermEval 2021 data and the additional data we used for our final models. In Section 4 the feature extraction, the baseline and the final models are described. In Section 5, we show our final results and discuss our models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Toxic speech can be defined as a combination of hate speech and offensive language (D'Sa et al., 2020) or a type of aggressive writing style (Maslej-Kre\u0161\u0148\u00e1kov\u00e1 et al., 2020) . Many recent research uses deep neural networks for such detection tasks in social media content (Georgakopoulos et al., 2018; van Aken et al., 2018) . There has also been some research with transformer models, especially for English social media content. Maslej-Kre\u0161\u0148\u00e1kov\u00e1 et al. (2020) compared multiple transformers and neural networks for the classification of toxic content with different types of preprocessing steps, focussing on word embeddings. However, some related work to our modelling approach has been done by researchers in similar content detection tasks on social media. Sohn and Lee (2019) used, in their study on hate speech detection with transformer models, a similar approach to our proposed models, after they finetuned a multi-channel BERT model: they applied a dropout on the [CLS] token of BERT and added a feed forward layer before the softmax output and calculated the weighted sum of three transformers instead of only one. The [CLS] token is the final hidden vector of BERT used for classification, however it can also be extracted for the models embeddings (Devlin et al., 2019) . This was also done in (Rodr\u00edguez-S\u00e1nchez et al., 2020) for the task of automatic sexism classification, where the authors added features with a feed forward layer on top, however this did not improve their results. They also -in comparison to our concatenation strategy for our multilayer perceptron -created a Bi-LSTM (Bidirectional Long-Short-Term-Memory), where they concatenated the additional extracted features (in this case user and network information) after going through several layers of the neural network with only using textual input. Their work showed that using pre-trained embeddings for neural networks pushes the final classification by 3% (Rodr\u00edguez-S\u00e1nchez et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 102, |
|
"text": "(D'Sa et al., 2020)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 141, |
|
"end": 173, |
|
"text": "(Maslej-Kre\u0161\u0148\u00e1kov\u00e1 et al., 2020)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 272, |
|
"end": 301, |
|
"text": "(Georgakopoulos et al., 2018;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 302, |
|
"end": 324, |
|
"text": "van Aken et al., 2018)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 763, |
|
"end": 782, |
|
"text": "Sohn and Lee (2019)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1132, |
|
"end": 1137, |
|
"text": "[CLS]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1263, |
|
"end": 1284, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1946, |
|
"end": 1978, |
|
"text": "(Rodr\u00edguez-S\u00e1nchez et al., 2020)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The study of Zhao et al. (2021) found that using pre-trained models as an input for neural networks leads to better results than using complex deep neural networks or transformers as a stand-alone architecture. Comparingly, another approach by D'Sa et al. (2020) on hate speech detection analyzed FastText (Bojanowski et al., 2017) and BERT embeddings and used them as the input for deep neural networks without any additional feature ex- traction. They found that fine-tuning transformers without a neural network layer performs better. Those studies show that combining transformers that are fine-tuned for a specific NLP task with neural networks is a promising approach to create better models for predicting toxic comments. Since transformers are usually only used for training on the textual input, the feed forward layers can be concatenated with more extracted features.", |
|
"cite_spans": [ |
|
{ |
|
"start": 13, |
|
"end": 31, |
|
"text": "Zhao et al. (2021)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 306, |
|
"end": 331, |
|
"text": "(Bojanowski et al., 2017)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this section we describe the GermEval 2021 Shared Task dataset as well as the supplementary datasets that we used for fine-tuning our model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The dataset for the GermEval 2021 Shared Task contains 3244 user comments from the Facebook discussion page of a German news broadcast within the first half of 2019. The comments were anonymized and cleared of any references to the show, moderators and users. The dataset was provided with manual annotated labels for each of the subtasks. Table 1 shows that 35.6% of all comments are labeled as Toxic for subtask 1 while 64.4% are labeled as Not Toxic.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 340, |
|
"end": 347, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "GermEval 2021 Data", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Augmentation allows a transformer model to be fine-tuned with additional labeled data (Sch\u00fctz et al., 2021) . In order to augment the GermEval 2021 training data we identified two German datasets that were labeled for hateful or offensive comment classification and shared a similar domain. We assumed that the tasks of identifying hateful and offensive comments should be similar to the task of identifying toxic comments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 107, |
|
"text": "(Sch\u00fctz et al., 2021)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additional Datasets", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 GermEval 2019: Task 2 of GermEval 2019 was a shared task on the identification and categorization of offensive language (Stru\u00df et al., 2019) . For subtask 1 of this shared task a total of 7025 tweets were collected and labeled as either OFFENSE or OTHER with 32.1% of the tweets being labeled the former. The label OFFENSE was given to any comment that was deemed abusive, insulting and/or profane. Comparably to what we would expect from comments about a daily talk show the tweets in this dataset were chosen to cover a broad range of topics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 122, |
|
"end": 142, |
|
"text": "(Stru\u00df et al., 2019)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additional Datasets", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 HASOC 2019: HASOC (Hate Speech and Offensive Content Identification in Indo-European Languages) 2019 was a shared task comparable to GermEval Task 2 but with the addition of providing 3 separate datasets for German, English and Hindi (Mandl et al., 2019) . The German dataset contains a total of 4669 tweets and Facebook posts collected by searching for offensive keywords and hashtags. 11.6% of the entries for subtask 1 are labeled as HOF while the rest is labeled as NOT. The categories HOF and NOT directly correspond to the categories OFFENSE and OTHER from Task 2 of GermEval 2019.", |
|
"cite_spans": [ |
|
{ |
|
"start": 236, |
|
"end": 256, |
|
"text": "(Mandl et al., 2019)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additional Datasets", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "For several unsupervised training steps in our experiments we also collected a total of unlabeled 1,156,458 German tweets of the first half year of 2019 via the Twitter API. Mainly, we focused on general tweets in German, as well as tweets from the Twitter pages of German talk shows and other socially critical TV-formats: \"Hart aber Fair\", \"Maybrit Illner\", \"Anne Will\", \"Markus Lanz\", \"ZDF heute-show\" and \"Maischberger\". With this extra data we expected to enhance the predictions of our models, since the dataset hopefully contains tweets with a similar writing style and domainspecific politically discussed content by that time period.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "German Tweet Corpus:", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In this section the feature extraction methods as well as the baseline we used for comparison, the conducted preprocessing steps, and final models are described. Our baseline models include different combinations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For training some of our models, we used several features as listed in style of social media entries helps to improve the results of similar NLP tasks, such as hate speech and disinformation detection (Robinson et al., 2018; Volkova and Jang, 2018) . For toxic comment classification we considered the word count for each input and extracted the number of punctuation, exclamation, and question marks and their relation to the total number of words per comment. For some features we used additional non-public word lists and libraries and cross-checked them for each entry in the dataset:", |
|
"cite_spans": [ |
|
{ |
|
"start": 201, |
|
"end": 224, |
|
"text": "(Robinson et al., 2018;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 225, |
|
"end": 248, |
|
"text": "Volkova and Jang, 2018)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Extraction", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 \"Sentiment\" features: list of 9,382 words and their sentiment values", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Extraction", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 \"Hate\" features: list of 3,550 words", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Extraction", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Lastly, we counted the number of emojis per comment, determined the emoji word ratio and used the emosent 1 library to compute the average sentiment over all emojis in a comment. We computed the mean values of each feature for both classes and found some significant differences between both categories: for example toxic comments are 22 words longer on average. Besides the length, there is a notable difference in the number of exclamation marks and emojis between toxic and not toxic comments. Contrary to the expectations the sentiment of the comments is in both cases slightly negative and does only differ by 0.0067 on a scale from -1 (most negative) to +1 (most positive). Nevertheless, we used all of the extracted features for our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Extraction", |
|
"sec_num": "4.1" |
|
}, |
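
{

"text": "As an illustration of this feature extraction step, the following Python sketch computes a subset of the listed features per comment. It is only a sketch: the word list is a placeholder for our non-public resources, and the use of the emosent-py helper get_emoji_sentiment_rank is an assumption about that library's interface.\n\n# Sketch: per-comment linguistic and emoji features (illustration only).\n# HATE_WORDS stands in for the non-public 3,550-word list mentioned above.\nHATE_WORDS = {'beispiel'}\n\ndef emoji_sentiments(text):\n    # emosent-py is assumed to expose get_emoji_sentiment_rank(char);\n    # characters that are not emojis raise an exception and are skipped.\n    from emosent import get_emoji_sentiment_rank\n    scores = []\n    for ch in text:\n        try:\n            scores.append(get_emoji_sentiment_rank(ch)['sentiment_score'])\n        except Exception:\n            continue\n    return scores\n\ndef extract_features(text):\n    words = text.split()\n    n_words = max(len(words), 1)\n    scores = emoji_sentiments(text)\n    return {\n        'word_count': len(words),\n        'exclamation_count': text.count('!'),\n        'question_mark_count': text.count('?'),\n        'word_exclamation_ratio': text.count('!') / n_words,\n        'word_question_mark_ratio': text.count('?') / n_words,\n        'hate_word_count': sum(w.lower().strip('.,!?') in HATE_WORDS for w in words),\n        'character_capslock_ratio': sum(c.isupper() for c in text) / max(len(text), 1),\n        'emoji_count': len(scores),\n        'emoji_sentiment': sum(scores) / len(scores) if scores else 0.0,\n        'word_emoji_ratio': len(scores) / n_words,\n    }",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Feature Extraction",

"sec_num": "4.1"

},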
|
{ |
|
"text": "For our baseline we used a Support Vector Machine (SVM) and a sequential neural network (multilayer perceptron, MLP). Additionally, a Robust Soft Learning Vector Quantization (RSLVQ) model was trained and evaluated. RSLVQ is an adaption of the LVQ Model introduced by Kohonen (1997). In these models, class regions are defined by prototype vectors in the vector space, where each class has one or more prototype vectors. In contrast to the basic LVQ, which is a heuristic, RSLVQ can be mathematically verified (Schneider et al., 2009 ). Additionally, we tested three pre-trained transformer (Vaswani et al., 2017 ) models with only using the provided training set by the Ger-mEval 21.", |
|
"cite_spans": [ |
|
{ |
|
"start": 510, |
|
"end": 533, |
|
"text": "(Schneider et al., 2009", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 591, |
|
"end": 612, |
|
"text": "(Vaswani et al., 2017", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Multiple preprocessing steps were applied to the SVM and RSLVQ, and the comments were vectorized. The steps included tokenization, stop word and punctuation removal and lemmatization. Hashtags and mentions were preserved in the data, only the characters \"#\" and \"@\" were removed. Afterwards 200-dimensional FastText word embeddings were trained on the preprocessed training dataset, on our self collected German Tweet corpus, and on the additional data. For the word embeddings, a skip-gram model with a window-size of 5 and a minimum word occurrence of 3 was used. All the word-vectors of every comment were averaged to receive a document vector.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing", |
|
"sec_num": "4.2.1" |
|
}, |
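
{

"text": "A minimal sketch of how these skip-gram FastText embeddings and averaged document vectors could be produced with gensim; the library choice and helper names are assumptions for illustration, not a description of our exact tooling.\n\n# Sketch: 200-dimensional skip-gram FastText embeddings (window size 5,\n# minimum word occurrence 3) and averaged document vectors, using gensim.\nimport numpy as np\nfrom gensim.models import FastText\n\ndef train_fasttext(tokenized_corpus):\n    # tokenized_corpus: list of token lists (preprocessed comments and tweets)\n    return FastText(sentences=tokenized_corpus, vector_size=200,\n                    window=5, min_count=3, sg=1)  # sg=1 selects skip-gram\n\ndef document_vector(model, tokens):\n    # average all word vectors of a comment to obtain one document vector\n    vectors = [model.wv[t] for t in tokens if t in model.wv]\n    if not vectors:\n        return np.zeros(model.wv.vector_size)\n    return np.mean(vectors, axis=0)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Preprocessing",

"sec_num": "4.2.1"

},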
|
{ |
|
"text": "Additionally, a feature vector for every comment, including the features mentioned in Table 2 , was created from the original (not preprocessed) data and concatenated with the document vector.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 93, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Preprocessing", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "In contrast, we did not preprocess the data for the transformer models, since those models capture the context of a sentence and use a already specialized built-in tokenizer (Devlin et al., 2019) . All of our baseline models were evaluated on a stratified 90% training and 10% validation split.", |
|
"cite_spans": [ |
|
{ |
|
"start": 174, |
|
"end": 195, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing", |
|
"sec_num": "4.2.1" |
|
}, |
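
{

"text": "The stratified split and the SVM baseline can be sketched with scikit-learn as follows; the random seed and the function name are illustrative assumptions.\n\n# Sketch: stratified 90/10 split and an RBF-kernel SVM baseline on the\n# concatenated document and feature vectors.\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import f1_score\n\ndef svm_baseline(X, y):\n    # X: document vectors concatenated with the additional feature vectors\n    X_train, X_val, y_train, y_val = train_test_split(\n        X, y, test_size=0.1, stratify=y, random_state=42)\n    clf = SVC(kernel='rbf')  # the linear kernel performed worse in our runs\n    clf.fit(X_train, y_train)\n    return f1_score(y_val, clf.predict(X_val), average='macro')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Preprocessing",

"sec_num": "4.2.1"

},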
|
{ |
|
"text": "The SVM was trained on the training split using a Radial Basis Function (RBF) and a linear kernel. The best results were achieved with the RBF-kernel. In the RSLVQ model the number of prototypes per class was varied having the best results with two prototypes per class. Already pre-trained FastText embeddings were used as an input for the MLP, where we concatenated the extracted features with the textual input during training (MLP-C) and before (MLP-B). Even though the precision and recall were higher compared to the other models, we found inconsistency in the evaluation plots of the metrics of both models -and due to a high loss during validation, it seemed that both MLPs were overfitting. Finally, we fine-tuned a German BERT (Devlin et al., 2019) and DistilBERT (Sanh et al., 2019) model (bert-base-german-cased (Chan et al.) , distilbert-base-german-cased (Chaumond)) provided by the HuggingFace library (Wolf et al., 2020) for 10 epochs, a batch size of 16, a learning rate of 2e-5, Adam (Kingma and Ba, 2015) as an optimizer and a maximum sequence length of 256. The multi-lingual transformer XLM-R (Conneau et al., 2019) was fine-tuned with the same parameters, except a learning rate of 1e-5 instead.", |
|
"cite_spans": [ |
|
{ |
|
"start": 737, |
|
"end": 758, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 774, |
|
"end": 793, |
|
"text": "(Sanh et al., 2019)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 824, |
|
"end": 837, |
|
"text": "(Chan et al.)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 917, |
|
"end": 936, |
|
"text": "(Wolf et al., 2020)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4.2.2" |
|
}, |
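
{

"text": "A hedged sketch of this fine-tuning setup with the HuggingFace Trainer API; dataset preparation is simplified and the output directory name is an assumption.\n\n# Sketch: fine-tuning bert-base-german-cased for binary toxic comment\n# classification with the hyperparameters stated above (simplified).\nfrom transformers import (AutoTokenizer, AutoModelForSequenceClassification,\n                          Trainer, TrainingArguments)\n\ndef fine_tune(train_dataset, eval_dataset, model_name='bert-base-german-cased'):\n    # train_dataset / eval_dataset: datasets already tokenized with\n    # truncation to a maximum sequence length of 256 tokens\n    tokenizer = AutoTokenizer.from_pretrained(model_name)\n    model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)\n    args = TrainingArguments(output_dir='germeval21-bert', num_train_epochs=10,\n                             per_device_train_batch_size=16,\n                             learning_rate=2e-5)  # AdamW optimizer by default\n    trainer = Trainer(model=model, args=args, tokenizer=tokenizer,\n                      train_dataset=train_dataset, eval_dataset=eval_dataset)\n    trainer.train()\n    return trainer",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experiments",

"sec_num": "4.2.2"

},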
|
{ |
|
"text": "In total we submitted three different models for each run as shown in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 78, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "\u2022 Transformer (TAB): We decided to enhance our best transformer model from our baseline by using the additional German tweets for re-training. This has been shown to help boost the classification accuracy as shown in (Sch\u00fctz et al., 2021) . Re-training means that the pre-trained model is further trained in an unsupervised manner, before fine-tuning it for the NLP downstream task. We chose to re-train with the the german-bert-base-cased model for 5 epochs, with a batch size of 32 and a learning rate of 2e-5. Afterwards, we finetuned our re-trained German-BERT model on the GermEval 2021 training data, as well as the additional datasets (GermEval 2019 & HASOC 2019). The augmented dataset contained a total of 24,304 comments, where 5,414 we set as toxic and 18,890 as not toxic as described in section 3. However, we added one more preprocessing step, compared to the transformer baselines, for pre-training and fine-tuning our model, since the authors of the GermEval 2021 changed every username in the comments to \"@USER\". We applied this to the additional German tweets as well as to the GermEval 2019 and HASOC 2019 datasets to align all texts. For the evaluation of our model, we used 10% of the GermEval 2021 training dataset. Our final transformer model, called TAB (tweets-and-Additional-Datasets-BERT) was trained on this augmented data for 10 epochs, a batch size of 16, a learning rate of 2e-5, Adam as an optimizer, and a maximum sequence length of 256.", |
|
"cite_spans": [ |
|
{ |
|
"start": 217, |
|
"end": 238, |
|
"text": "(Sch\u00fctz et al., 2021)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "4.3" |
|
}, |
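
{

"text": "The unsupervised re-training step can be realized as masked language modelling; the following HuggingFace-based sketch shows one plausible setup under that assumption, with dataset preparation simplified and the output path chosen for illustration.\n\n# Sketch: re-training bert-base-german-cased on the collected tweets via\n# masked language modelling before the downstream fine-tuning.\nfrom transformers import (AutoTokenizer, AutoModelForMaskedLM,\n                          DataCollatorForLanguageModeling, Trainer,\n                          TrainingArguments)\n\ndef retrain_on_tweets(tweet_dataset, model_name='bert-base-german-cased'):\n    # tweet_dataset: tokenized tweets with usernames replaced by @USER\n    tokenizer = AutoTokenizer.from_pretrained(model_name)\n    model = AutoModelForMaskedLM.from_pretrained(model_name)\n    collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True)\n    args = TrainingArguments(output_dir='tab-retrained', num_train_epochs=5,\n                             per_device_train_batch_size=32, learning_rate=2e-5)\n    trainer = Trainer(model=model, args=args, data_collator=collator,\n                      train_dataset=tweet_dataset)\n    trainer.train()\n    trainer.save_model('tab-retrained')  # starting point for fine-tuning TAB",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Models",

"sec_num": "4.3"

},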
|
{ |
|
"text": "\u2022 Multi-Layer Perceptron (TAB-MLP): For our second and third run, we used the MLP model we created for the baseline. Its architecture consists of 5 dense layers, a dropout of 0.2, ReLU (Rectified Linear Unit) as an activation function and sigmoid for our final classification layer. Since the FastText embeddings seemed to overfit the model, we extracted the already fine-tuned word embeddings of the TAB model via the [CLS] token of each input. Lastly, the additional extracted features were normalized and used for two different training strategies:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "\u2022 TAB-MLP-B: the model was fed with the text input as well as the features combined as one input vector for training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "\u2022 TAB-MLP-C: the model was trained on the textual input for 3 layers, the numerical features for 1 layer, and then concatenated in the 4th layer as shown in Figure 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 165, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "4.3" |
|
}, |
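
{

"text": "A minimal Keras sketch of the TAB-MLP-C variant, assuming that the textual input is the 768-dimensional [CLS] embedding extracted from TAB and that the hidden layer sizes, which are not specified above, are chosen freely for illustration.\n\n# Sketch: TAB-MLP-C style model with three dense layers on the textual\n# branch, one on the feature branch, concatenation in the 4th layer and a\n# sigmoid classification layer (hidden sizes are illustrative assumptions).\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\ndef build_tab_mlp_c(embedding_dim=768, n_features=14):\n    text_in = keras.Input(shape=(embedding_dim,), name='cls_embedding')\n    x = layers.Dense(256, activation='relu')(text_in)\n    x = layers.Dropout(0.2)(x)\n    x = layers.Dense(128, activation='relu')(x)\n    x = layers.Dense(64, activation='relu')(x)        # 3 dense layers on text\n    feat_in = keras.Input(shape=(n_features,), name='features')\n    f = layers.Dense(16, activation='relu')(feat_in)  # 1 dense layer on features\n    merged = layers.concatenate([x, f])               # concatenated in the 4th layer\n    out = layers.Dense(1, activation='sigmoid')(merged)\n    model = keras.Model(inputs=[text_in, feat_in], outputs=out)\n    model.compile(optimizer=keras.optimizers.SGD(learning_rate=1e-2),\n                  loss='binary_crossentropy', metrics=['accuracy'])\n    return model\n\nWith this model, the 25 training epochs and batch size of 32 described below correspond to a call like model.fit([cls_vectors, feature_vectors], labels, epochs=25, batch_size=32).",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Models",

"sec_num": "4.3"

},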
|
{ |
|
"text": "Both models were trained for 25 epochs, a batch size of 32, a learning rate of 1e-2, and Stochastic Gradient Descent (SGD) as an optimizer. After plotting the curves of the evaluation metrics and comparing them with the FastText embeddings (Table 3) we found that the MLP did not seem to overfit with the already pre-trained TAB embeddings. Since we used a sigmoid activation function in our classification layer, we set a threshold for the predictions on the test set at 0.7, after calculating the mean and median value for each of our neural networks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "4.3" |
|
}, |
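
{

"text": "For completeness, a small sketch of how the 0.7 threshold is applied to the sigmoid outputs and how the reported metrics can be computed; the function and variable names are illustrative.\n\n# Sketch: applying the 0.7 decision threshold to the sigmoid outputs and\n# computing precision, recall and the macro-averaged F1-score.\nimport numpy as np\nfrom sklearn.metrics import precision_score, recall_score, f1_score\n\ndef evaluate(probabilities, y_true, threshold=0.7):\n    y_pred = (np.asarray(probabilities) >= threshold).astype(int)\n    return {'precision': precision_score(y_true, y_pred),\n            'recall': recall_score(y_true, y_pred),\n            'macro_f1': f1_score(y_true, y_pred, average='macro')}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Models",

"sec_num": "4.3"

},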
|
{ |
|
"text": "All of our models were evaluated with precision, recall, and a macro-averaged F1-score as shown in Table 4 . The final results on the test data show that the transformer model gained by far the best results with its F1-score of 0.5895, even if it is still not as high as the value we expected after our training validation. Our neural networks TAB-MLP-B and TAB-MLP-C performed significantly worse on the test data, especially with regard to their high F1score on the validation split.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 106, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Therefore, we explored whether we set the threshold too high for our predictions on the test data. Even though we experimented with setting the threshold to different values, we found that the predictions did not improve significantly (only \u2248 0.01), which shows that the neural networks probably overfitted on one class. We suspect this is also the reason for the very high validation recall in comparison to the precision. We plotted the confusion matrix for each model, shown in Table 5 , which shows that both neural networks had a high count of false positives. In contrast to that, TAB had an issue with the false negatives. Therefore, we conclude several possible reasons why our neural networks did not perform well on the test set:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 481, |
|
"end": 488, |
|
"text": "Table 5", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 the size of the dense layers, type of activation function and dropout have to be adjusted.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 the additional features have no positive impact on the models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 another embedding strategy for the transformer models carries more information than the extraction of the [CLS] token. A possible solution could be a concatenation of a number of hidden layer outputs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this work we presented our submitted models for the GermEval Shared Task 2021 on toxic comment classification. We decided to combine standard supervised methods with transformers and textual features, and to enhance the models with additional training data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Our best model was a German BERT that was re-trained on over 1.5 million additional German tweets from the first half year of 2019 and finetuned with two augmented datasets from similar tasks, such as hate speech and offensive language detection, as well as the GermEval 2021 training data. Even though our two multilayer perceptronswhich were trained on the extracted word embeddings by our transformer -showed better evaluation results during validation, our BERT model still had a more robust prediction on the test set. For future work, we will further explore the combination of sequential neural networks and word embeddings by transformers and test several extraction and concatenation strategies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "This work is enhanced by the Darmstadt University of Applied Sciences in collaboration with the Fraunhofer Institute for Secure Information Technology. The Darmstadt University supported this work with the research in Information Science (https://sis.h-da.de/). Additionally, this contribution has been funded by the project \"DeTox\" (Cybersecurity research funding of the Hessian Ministry of the Interior and Sports).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Proceedings of the GermEval 2021 Shared Task on the Identification of Toxic, Engaging, and Fact-Claiming Comments co-located with KONVENS", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://pypi.org/project/emosent-py/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Challenges for toxic comment classification: An in-depth error analysis", |
|
"authors": [ |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Betty Van Aken", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ralf", |
|
"middle": [], |
|
"last": "Risch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Krestel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "L\u00f6ser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2nd Workshop on Abusive Language Online (ALW2)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "33--42", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Betty van Aken, Julian Risch, Ralf Krestel, and Alexan- der L\u00f6ser. 2018. Challenges for toxic comment clas- sification: An in-depth error analysis. In Proceed- ings of the 2nd Workshop on Abusive Language On- line (ALW2), pages 33-42, Brussels, Belgium. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Enriching word vectors with subword information", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "135--146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Associa- tion for Computational Linguistics, 5:135-146.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "BERT and fastText embeddings for automatic detection of toxic speech", |
|
"authors": [ |
|
{ |
|
"first": "Ashwin", |
|
"middle": [], |
|
"last": "Geet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D'", |
|
"middle": [], |
|
"last": "Sa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Irina", |
|
"middle": [], |
|
"last": "Illina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dominique", |
|
"middle": [], |
|
"last": "Fohr", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Organization of Knowledge and Advanced Technologies\" (OCTA)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--5", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashwin Geet D'Sa, Irina Illina, and Dominique Fohr. 2020. BERT and fastText embeddings for automatic detection of toxic speech. In 2020 International Multi-Conference on: \"Organization of Knowledge and Advanced Technologies\" (OCTA), pages 1-5.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Convolutional neural networks for toxic comment classification", |
|
"authors": [ |
|
{ |
|
"first": "Spiros", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Georgakopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Sotiris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aristidis", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Tasoulis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vassilis", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Vrahatis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Plagianakos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 10th Hellenic Conference on Artificial Intelligence, SETN '18", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Spiros V. Georgakopoulos, Sotiris K. Tasoulis, Aris- tidis G. Vrahatis, and Vassilis P. Plagianakos. 2018. Convolutional neural networks for toxic comment classification. In Proceedings of the 10th Hellenic Conference on Artificial Intelligence, SETN '18, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "3rd International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In 3rd Inter- national Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Learning vector quantization", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Teuvo Kohonen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Self-Organizing Maps", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "203--217", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Teuvo Kohonen. 1997. Learning vector quantization. In Self-Organizing Maps, pages 203-217. Springer Berlin Heidelberg.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Evaluating aggression identification in social media", |
|
"authors": [ |
|
{ |
|
"first": "Ritesh", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Atul", |
|
"middle": [], |
|
"last": "Kr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shervin", |
|
"middle": [], |
|
"last": "Ojha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Malmasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Second Workshop on Trolling, Aggression and Cyberbullying", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--5", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ritesh Kumar, Atul Kr. Ojha, Shervin Malmasi, and Marcos Zampieri. 2020. Evaluating aggression iden- tification in social media. In Proceedings of the Second Workshop on Trolling, Aggression and Cy- berbullying, pages 1-5, Marseille, France. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Overview of the HASOC track at FIRE 2019: Hate speech and offensive content identification in indo-european languages", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Mandl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandip", |
|
"middle": [], |
|
"last": "Modha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prasenjit", |
|
"middle": [], |
|
"last": "Majumder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daksh", |
|
"middle": [], |
|
"last": "Patel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohana", |
|
"middle": [], |
|
"last": "Dave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chintak", |
|
"middle": [], |
|
"last": "Mandlia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Patel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 11th Forum for Information Retrieval Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "14--17", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Mandl, Sandip Modha, Prasenjit Majumder, Daksh Patel, Mohana Dave, Chintak Mandlia, and Aditya Patel. 2019. Overview of the HASOC track at FIRE 2019: Hate speech and offensive content identification in indo-european languages. In Pro- ceedings of the 11th Forum for Information Re- trieval Evaluation, pages 14-17.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Comparison of deep learning models and various text preprocessing techniques for the toxic comments classification", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Viera Maslej-Kre\u0161\u0148\u00e1kov\u00e1", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Sarnovsk\u00fd", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Krist\u00edna", |
|
"middle": [], |
|
"last": "Butka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Machov\u00e1", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Applied Sciences", |
|
"volume": "", |
|
"issue": "23", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Viera Maslej-Kre\u0161\u0148\u00e1kov\u00e1, Martin Sarnovsk\u00fd, Peter Butka, and Krist\u00edna Machov\u00e1. 2020. Compari- son of deep learning models and various text pre- processing techniques for the toxic comments clas- sification. Applied Sciences, 10(23).", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Overview of the GermEval 2021 shared task on the identification of toxic, engaging, and fact-claiming comments", |
|
"authors": [ |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Risch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anke", |
|
"middle": [], |
|
"last": "Stoll", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lena", |
|
"middle": [], |
|
"last": "Wilms", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Wiegand", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the GermEval 2021 Shared Task on the Identification of Toxic, Engaging, and Fact-Claiming Comments colocated with KONVENS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--12", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julian Risch, Anke Stoll, Lena Wilms, and Michael Wiegand. 2021. Overview of the GermEval 2021 shared task on the identification of toxic, engaging, and fact-claiming comments. In Proceedings of the GermEval 2021 Shared Task on the Identification of Toxic, Engaging, and Fact-Claiming Comments co- located with KONVENS, pages 1-12.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Hate speech detection on twitter: Feature engineering v.s. feature selection", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Robinson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ziqi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Tepper", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "The Semantic Web: ESWC 2018 Satellite Events", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "46--49", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Robinson, Ziqi Zhang, and Jonathan Tepper. 2018. Hate speech detection on twitter: Feature engineering v.s. feature selection. In The Seman- tic Web: ESWC 2018 Satellite Events, pages 46-49, Cham. Springer International Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Automatic classification of sexism in social networks: An empirical study on Twitter data", |
|
"authors": [ |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Rodr\u00edguez-S\u00e1nchez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jorge", |
|
"middle": [], |
|
"last": "Carrillo-De Albornoz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Plaza", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "IEEE Access", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "219563--219576", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Francisco Rodr\u00edguez-S\u00e1nchez, Jorge Carrillo-de Al- bornoz, and Laura Plaza. 2020. Automatic classi- fication of sexism in social networks: An empiri- cal study on Twitter data. IEEE Access, 8:219563- 219576.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter", |
|
"authors": [ |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.01108" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Distance learning in discriminative vector quantization", |
|
"authors": [ |
|
{ |
|
"first": "Petra", |
|
"middle": [], |
|
"last": "Schneider", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Biehl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barbara", |
|
"middle": [], |
|
"last": "Hammer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Neural Computation", |
|
"volume": "21", |
|
"issue": "10", |
|
"pages": "2942--2969", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Petra Schneider, Michael Biehl, and Barbara Hammer. 2009. Distance learning in discriminative vector quantization. Neural Computation, 21(10):2942- 2969.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Automatic sexism detection with multilingual transformer models", |
|
"authors": [ |
|
{ |
|
"first": "Mina", |
|
"middle": [], |
|
"last": "Sch\u00fctz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaqueline", |
|
"middle": [], |
|
"last": "Boeck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daria", |
|
"middle": [], |
|
"last": "Liakhovets", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Djordje", |
|
"middle": [], |
|
"last": "Slijep\u010devi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armin", |
|
"middle": [], |
|
"last": "Kirchknopf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manuel", |
|
"middle": [], |
|
"last": "Hecht", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Bogensperger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sven", |
|
"middle": [], |
|
"last": "Schlarb", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Schindler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Zeppelzauer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2106.04908" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mina Sch\u00fctz, Jaqueline Boeck, Daria Liakhovets, Djordje Slijep\u010devi\u0107, Armin Kirchknopf, Manuel Hecht, Johannes Bogensperger, Sven Schlarb, Alexander Schindler, and Matthias Zeppelzauer. 2021. Automatic sexism detection with mul- tilingual transformer models. arXiv preprint arXiv:2106.04908.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "MC-BERT4HATE: Hate speech detection using multichannel BERT for different languages and translations", |
|
"authors": [ |
|
{ |
|
"first": "Hajung", |
|
"middle": [], |
|
"last": "Sohn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hyunju", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "2019 International Conference on Data Mining Workshops (ICDMW)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "551--559", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hajung Sohn and Hyunju Lee. 2019. MC- BERT4HATE: Hate speech detection using multi- channel BERT for different languages and transla- tions. In 2019 International Conference on Data Mining Workshops (ICDMW), pages 551-559.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Overview of GermEval task 2, 2019 shared task on the identification of offensive language", |
|
"authors": [ |
|
{ |
|
"first": "Julia", |
|
"middle": [ |
|
"Maria" |
|
], |
|
"last": "Stru\u00df", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melanie", |
|
"middle": [], |
|
"last": "Siegel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josep", |
|
"middle": [], |
|
"last": "Ruppenhofer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Wiegand", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manfred", |
|
"middle": [], |
|
"last": "Klenner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 15th Conference on Natural Language Processing (KONVENS 2019)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "354--365", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julia Maria Stru\u00df, Melanie Siegel, Josep Ruppen- hofer, Michael Wiegand, and Manfred Klenner. 2019. Overview of GermEval task 2, 2019 shared task on the identification of offensive language. In Proceedings of the 15th Conference on Natural Lan- guage Processing (KONVENS 2019), pages 354- 365, Erlangen, Germany. German Society for Com- putational Linguistics & Language Technology.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141 ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Gar- nett, editors, Advances in Neural Information Pro- cessing Systems 30, pages 5998-6008. Curran Asso- ciates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Misleading or falsification: Inferring deceptive strategies and types in online news and social media", |
|
"authors": [ |
|
{ |
|
"first": "Svitlana", |
|
"middle": [], |
|
"last": "Volkova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jin", |
|
"middle": [ |
|
"Yea" |
|
], |
|
"last": "Jang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Companion Proceedings of the The Web Conference", |
|
"volume": "18", |
|
"issue": "", |
|
"pages": "575--583", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Svitlana Volkova and Jin Yea Jang. 2018. Misleading or falsification: Inferring deceptive strategies and types in online news and social media. In Compan- ion Proceedings of the The Web Conference 2018, WWW '18, page 575-583, Republic and Canton of Geneva, CHE. International World Wide Web Con- ferences Steering Committee.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Hate speech on Twitter: A pragmatic approach to collect hateful and offensive expressions and perform hate speech detection", |
|
"authors": [ |
|
{ |
|
"first": "Hajime", |
|
"middle": [], |
|
"last": "Watanabe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mondher", |
|
"middle": [], |
|
"last": "Bouazizi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomoaki", |
|
"middle": [], |
|
"last": "Ohtsuki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "IEEE Access", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "13825--13835", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hajime Watanabe, Mondher Bouazizi, and Tomoaki Ohtsuki. 2018. Hate speech on Twitter: A pragmatic approach to collect hateful and offensive expressions and perform hate speech detection. IEEE Access, 6:13825-13835.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Transformers: State-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R\u00e9mi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joe", |
|
"middle": [], |
|
"last": "Davison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Shleifer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clara", |
|
"middle": [], |
|
"last": "Patrick Von Platen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yacine", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Jernite", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Canwen", |
|
"middle": [], |
|
"last": "Plu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Teven", |
|
"middle": [ |
|
"Le" |
|
], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sylvain", |
|
"middle": [], |
|
"last": "Scao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mariama", |
|
"middle": [], |
|
"last": "Gugger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quentin", |
|
"middle": [], |
|
"last": "Drame", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Lhoest", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "38--45", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R\u00e9mi Louf, Morgan Funtow- icz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander M. Rush. 2020. Transformers: State-of-the-art natural language pro- cessing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Zeses Pitenis, and \u00c7 agr\u0131 \u00c7\u00f6ltekin. 2020. Semeval-2020 task 12: Multilingual offensive language identification in social media (offenseval 2020)", |
|
"authors": [ |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Rosenthal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pepa", |
|
"middle": [], |
|
"last": "Atanasova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georgi", |
|
"middle": [], |
|
"last": "Karadzhov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hamdy", |
|
"middle": [], |
|
"last": "Mubarak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leon", |
|
"middle": [], |
|
"last": "Derczynski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2006.07235" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcos Zampieri, Preslav Nakov, Sara Rosenthal, Pepa Atanasova, Georgi Karadzhov, Hamdy Mubarak, Leon Derczynski, Zeses Pitenis, and \u00c7 agr\u0131 \u00c7\u00f6ltekin. 2020. Semeval-2020 task 12: Multilingual offensive language identification in social media (offenseval 2020). arXiv preprint arXiv:2006.07235.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "A Comparative Study of Using Pre-Trained Language Models for Toxic Comment Classification", |
|
"authors": [ |
|
{ |
|
"first": "Zhixue", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ziqi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Hopfgartner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Association for Computing Machinery", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "500--507", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhixue Zhao, Ziqi Zhang, and Frank Hopfgartner. 2021. A Comparative Study of Using Pre-Trained Lan- guage Models for Toxic Comment Classification, page 500-507. Association for Computing Machin- ery, New York, NY, USA.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Subtask 1: toxic comment classification \u2022 Subtask 2: engaging comment classification \u2022 Subtask 3: fact-claiming", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"text": "Experimental setup for training our submitted models (Green: datasets; grey: processing steps; blue: transformer re-training & fine-tuning steps; yellow: final models).", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"text": "Architecture of TAB-MLP-C.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"text": "", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>: Class distribution for subtask 1 of the Ger-</td></tr><tr><td>mEval 2021 dataset. Percentages show the proportion</td></tr><tr><td>of toxic and non-toxic comments in the training and test</td></tr><tr><td>set.</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"text": "It has been shown that adding more specific features about the writing", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>Feature</td><td colspan=\"2\">Toxic Not Toxic</td></tr><tr><td>word count</td><td>201</td><td>179</td></tr><tr><td>punctuation count</td><td>7.41</td><td>6.84</td></tr><tr><td>exclamation count</td><td>0.69</td><td>0.31</td></tr><tr><td>question mark count</td><td>0.48</td><td>0.36</td></tr><tr><td>word punctuation ratio</td><td>0.0111</td><td>0.0138</td></tr><tr><td>word exclamation ratio</td><td>0.0027</td><td>0.0021</td></tr><tr><td colspan=\"2\">word question mark ratio 0.0020</td><td>0.0030</td></tr><tr><td>hate word count</td><td>0.32</td><td>0.24</td></tr><tr><td>hate word count ratio</td><td>0.0017</td><td>0.0014</td></tr><tr><td>character capslock ratio</td><td>0.0306</td><td>0.0168</td></tr><tr><td>sentiment</td><td>-0.0147</td><td>-0.0080</td></tr><tr><td>emoji count</td><td>0.49</td><td>0.13</td></tr><tr><td>emoji sentiment</td><td>0.0424</td><td>0.0191</td></tr><tr><td>word emoji ratio</td><td>0.0457</td><td>0.0227</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"text": "", |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"text": "Baseline results on the validation split of the GermEval 2021 training data.", |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF7": { |
|
"text": "Results of our proposed models on the validation (Val) split of the training set and the test data (T).", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>Model</td><td colspan=\"3\">TP TN FP FN</td></tr><tr><td>TAB</td><td>61</td><td>554 40</td><td>289</td></tr><tr><td colspan=\"4\">TAB-MLP-B 144 180 414 206</td></tr><tr><td colspan=\"4\">TAB-MLP-C 122 241 353 228</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF8": { |
|
"text": "Confusion matrix for each of our submitted models (TP: true positives, TN: true negatives, FP: false positives, FN: false negatives).", |
|
"html": null, |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |