|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:06:08.420134Z" |
|
}, |
|
"title": "AIT FHSTP at GermEval 2021: Automatic Fact Claiming Detection with Multilingual Transformer Models", |
|
"authors": [ |
|
{ |
|
"first": "Jaqueline", |
|
"middle": [], |
|
"last": "B\u00f6ck", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Daria", |
|
"middle": [], |
|
"last": "Liakhovets", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Austrian Institute of Technology GmbH", |
|
"location": { |
|
"addrLine": "Giefinggasse 4", |
|
"postCode": "1210", |
|
"settlement": "Vienna", |
|
"country": "Austria" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Mina", |
|
"middle": [], |
|
"last": "Sch\u00fctz", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Austrian Institute of Technology GmbH", |
|
"location": { |
|
"addrLine": "Giefinggasse 4", |
|
"postCode": "1210", |
|
"settlement": "Vienna", |
|
"country": "Austria" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Armin", |
|
"middle": [], |
|
"last": "Kirchknopf", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Djordje", |
|
"middle": [], |
|
"last": "Slijep\u010devi\u0107", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Zeppelzauer", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Schindler", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Austrian Institute of Technology GmbH", |
|
"location": { |
|
"addrLine": "Giefinggasse 4", |
|
"postCode": "1210", |
|
"settlement": "Vienna", |
|
"country": "Austria" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Spreading ones opinion on the internet is becoming more and more important. A problem is that in many discussions people often argue with supposed facts. This year's Ger-mEval 2021 focuses on this topic by incorporating a shared task on the identification of fact-claiming comments. This paper presents the contribution of the AIT FHSTP team at the GermEval 2021 benchmark for task 3: \"identifying fact-claiming comments in social media texts\". Our methodological approaches are based on transformers and incorporate 3 different models: multilingual BERT, GottBERT and XML-RoBERTa. To solve the fact claiming task, we fine-tuned these transformers with external data and the data provided by the GermEval task organizers. Our multilingual BERT model achieved a precision-score of 72.71%, a recall of 72.96% and an F1-Score of 72.84% on the GermEval test set. Our fine-tuned XML-RoBERTa model achieved a precision-score of 68.45%, a recall of 70.11% and a F1-Score of 69.27%. Our best model is GottBERT (i.e., a BERT transformer pretrained on German texts) fine-tuned on the Ger-mEval 2021 data. This transformer achieved a precision of 74.13%, a recall of 75.11% and an F1-Score of 74.62% on the test set.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Spreading ones opinion on the internet is becoming more and more important. A problem is that in many discussions people often argue with supposed facts. This year's Ger-mEval 2021 focuses on this topic by incorporating a shared task on the identification of fact-claiming comments. This paper presents the contribution of the AIT FHSTP team at the GermEval 2021 benchmark for task 3: \"identifying fact-claiming comments in social media texts\". Our methodological approaches are based on transformers and incorporate 3 different models: multilingual BERT, GottBERT and XML-RoBERTa. To solve the fact claiming task, we fine-tuned these transformers with external data and the data provided by the GermEval task organizers. Our multilingual BERT model achieved a precision-score of 72.71%, a recall of 72.96% and an F1-Score of 72.84% on the GermEval test set. Our fine-tuned XML-RoBERTa model achieved a precision-score of 68.45%, a recall of 70.11% and a F1-Score of 69.27%. Our best model is GottBERT (i.e., a BERT transformer pretrained on German texts) fine-tuned on the Ger-mEval 2021 data. This transformer achieved a precision of 74.13%, a recall of 75.11% and an F1-Score of 74.62% on the test set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Today's social media platforms allow any individual to share information and opinions easily and quickly across a wide audience with almost no restrictions. However, not only obviously offensive comments, but also comments and posts with false information are becoming a serious problem on the Internet. The sheer amount of available information and content generated every day makes it impossible to verify all information. Thus, misinformation and false information can easily spread and influence people and their decisions, which has a strong impact on our society.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "As a workshop part of the KONVENS 2021 (Konferenz zur Verarbeitung nat\u00fcrlicher Sprache / Conference of Natural Language Processing) the GermEval 2021 focuses on the problem of fact claiming, i.e., the identification of content in social media that contains potential facts that need to be checked (Risch et al., 2021) . The identification of such fact claiming content is a first step in the information verification process to separate relevant from irrelevant information for fact checking. Our team participated in the fact claiming task (task 3: identification of fact-claiming comments) of GermEval 2021 and this paper presents our methodology and the results. To solve the task we fine-tuned (supervised) the pre-trained transformer models with the original GermEval 2021 data and external data, i.e., the ClaimBuster dataset (Arslan et al., 2020) . The employed datasets and our general approach are described in Section 2. A detailed description of the transformer-based mod-els is provided in Section 3. In Section 4, our experimental setup is introduced. The results can be found in Section 5 followed by a brief discussion and conclusion in Section 6.", |
|
"cite_spans": [ |
|
{ |
|
"start": 297, |
|
"end": 317, |
|
"text": "(Risch et al., 2021)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 832, |
|
"end": 853, |
|
"text": "(Arslan et al., 2020)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The GermEval 2021 provided one labeled dataset for all three tasks (task 1 and 2 not considered in our contribution). The data for task 3 contained approx. 1/3 of content that mentions claimed facts and 2/3 with no claimed facts. We applied three pre-trained transformer models (Vaswani et al., 2017) to encode and classify the content for this task, namely: German OSCAR text trained BERT (GottBERT) (Scheible et al., 2020) , multilingual BERT (mBERT) (Devlin et al., 2019b) and XLM-RoBERTa (XLM-R) . Transformers are usually pre-trained on a large general corpus and can be used for many natural language processing (NLP) downstream tasks, which makes them especially useful for small training corpora (Liu et al., 2019) . Compared to mBERT and XLM-R, which are both pre-trained on multilingual data, GottBERT is the only one that was trained on one language (German) only. We fine-tuned these models in a supervised manner for binary classification into fact claiming comments and non fact claiming comments. Since we employ two multilingual models, we chose to fine-tune one of those (mBERT) on the GermEval 2021 data and an additional dataset. In comparison, we fine-tuned our second multilingual model (XLM-R) and the German GottBERT model using only the training data provided by the GermEval 2021 shared task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 278, |
|
"end": 300, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 401, |
|
"end": 424, |
|
"text": "(Scheible et al., 2020)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 453, |
|
"end": 475, |
|
"text": "(Devlin et al., 2019b)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 704, |
|
"end": 722, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodological Approach", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The applied method is derived from our approach (Sch\u00fctz et al., 2021a) presented in the EX-IST 2021 challenge. The first shared task on sEXism Identification in Social neTworks (EXIST) at IberLEF 2021 (Rodr\u00edguez-S\u00e1nchez et al., 2021; Montes et al., 2021) , covering a wide spectrum of sexist content and aims to differentiate different types of sexist content. In our EXIST 2021 contribution a comparable set of transformer models and processing steps were applied (Sch\u00fctz et al., 2021a) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 70, |
|
"text": "(Sch\u00fctz et al., 2021a)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 201, |
|
"end": 233, |
|
"text": "(Rodr\u00edguez-S\u00e1nchez et al., 2021;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 234, |
|
"end": 254, |
|
"text": "Montes et al., 2021)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 465, |
|
"end": 487, |
|
"text": "(Sch\u00fctz et al., 2021a)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodological Approach", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The data provided by the organizers of GermEval 2021 is an annotated dataset consisting of over 3,244 German Facebook comments on a political talk show of a German television broadcaster and user discussions from February to July 2019. The dataset was annotated and standardized. Links to users were anonymized with @USER, links to the show were replaced with @MEDIUM and links to the moderator were replaced with @MODER-ATOR. The original dataset was provided in CSV format. A subset of user comments from two shows were used for the train data. The comments in the test data were drawn from other shows. The dataset contained 1,103 (34%) instances which were labeled as fact claiming and 2,141 (66%) instances without any fact claims. The provided dataset is described in more detail in the GermEval 2021 overview paper (Risch et al., 2021) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 822, |
|
"end": 842, |
|
"text": "(Risch et al., 2021)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "GermEval 2021 Data & Preprocessing", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "In initial experiments, we applied different preprocessing strategies to the dataset. We tested our models on a processed version where all links in the dataset were replaced with @MEDIUM, since not every link was connected to the show that was the source of the data. Similarly, as an additional step for our multilingual models, we replaced all emojis with their English translations 1 . However, the two preprocessing steps had a slightly negative impact of 1% on average for mBERT, while they had a clearly positive impact of 3% for XLM-R. Therefore, we used the preprocessed training data only for the XLM-R model. Similarly, the replacement of links did not have a positive influence for the monolingual GottBERT model, where we also used the unpreprocessed comments as an input for training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "GermEval 2021 Data & Preprocessing", |
|
"sec_num": "2.1" |
|
}, |
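
{

    "text": "To make these two steps concrete, the following minimal Python sketch (assuming the emoji package from footnote 1 and a simple URL regular expression; the function is illustrative rather than our exact pipeline) replaces links with @MEDIUM and emojis with their English text descriptions:\n\nimport re\n\nimport emoji  # https://pypi.org/project/emoji/\n\nURL_PATTERN = re.compile(r\"https?://\\S+\")\n\ndef preprocess(comment: str) -> str:\n    # Replace every link with the @MEDIUM placeholder used in the dataset.\n    comment = URL_PATTERN.sub(\"@MEDIUM\", comment)\n    # Replace emojis with their English descriptions, e.g. ':red_heart:'.\n    return emoji.demojize(comment)",

    "cite_spans": [],

    "ref_spans": [],

    "eq_spans": [],

    "section": "GermEval 2021 Data & Preprocessing",

    "sec_num": "2.1"

},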
|
{ |
|
"text": "We did not use conventional preprocessing steps, e.g., stop-word removal, lemmatization, or stemming, because transform models do not need these due to their ability to capture more context in their word embeddings through improved pretraining capabilities and multi-head attention mechanisms (Vaswani et al., 2017; Devlin et al., 2019a; Liu et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 293, |
|
"end": 315, |
|
"text": "(Vaswani et al., 2017;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 316, |
|
"end": 337, |
|
"text": "Devlin et al., 2019a;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 338, |
|
"end": 355, |
|
"text": "Liu et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "GermEval 2021 Data & Preprocessing", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "As external data we use the ClaimBuster (Arslan et al., 2020) dataset, which consists of English statements from all U.S. presidential debates from 1960-2016. The original part of this dataset consists of 23,533 records. In total, 32,072 sentences were spoken in these debates. The presidential candidates spoke 26,322 sentences, debate moderators spoke 4,292 sentences and 1,319 sentences were spoken by the questioners. Sentences from the moderators and the questioners were discarded and only the sentences spoken by the presidential candidates were considered for creating the ClaimBuster dataset. Moreover, sentences shorter then 5 words were also removed (2,789 sentences). The resulting dataset (crowdsourced.csv) was annotated by recruited participants (mostly university students). In addition, three experts labeled a subset of this dataset containing 1,032 sentences to create a groundtruth dataset (groundtruth.csv). The provided ClaimBuster dataset consists of three CSV files:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "External Data", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 all sentences.csv (32,072 sentences): all sentences of the debates", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "External Data", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 crowdsourced.csv (23,533 sentences): sentences of presidential candidates longer than 5 words, labeled by recruited participants", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "External Data", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 groundtruth.csv (1,032 sentences): sentences of presidential candidates longer than 5 words, labeled by experts For the GermEval 2021 challenge we used only the groundtruth.csv file to ensure high-quality data. The records in the file are annotated as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "External Data", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 non-factual statement (NFS)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "External Data", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 unimportant factual statement (UFS)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "External Data", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u2022 check-worthy factual statement (CFS)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "External Data", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Referring to the original paper (Arslan et al., 2020), the dataset is imbalanced in terms of class distribution: 23.87% belong to CFS, 10.45% to UFS and 65.68% to NFS. The instances (sentences) are annotated as numerical categories (\"-1\", \"0\", \"1\"). In order to match the ClaimBuster data with the original GermEval 2021 data, it was necessary to get an overview of the sentences first and afterwards match the labels to a unified format. Therefore, the comments with the labels \"0\" and \"-1\" have been mapped to \"0\" (not claiming). The instances labeled as \"1\" were not changed and thus assigned to the class of fact claiming comments. In a next step, we translated the whole dataset into German using the Google Translator API. The translation of the dataset was only used for the mBERT model, since in former work (Sch\u00fctz et al., 2021a) it was shown that using additional data for this exact model can improve the predictions on a similar NLP downstream task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 816, |
|
"end": 838, |
|
"text": "(Sch\u00fctz et al., 2021a)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "External Data", |
|
"sec_num": "2.2" |
|
}, |
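
{

    "text": "A minimal sketch of this label unification, assuming the ClaimBuster verdict column is named 'Verdict' (an assumption about the layout of groundtruth.csv, not verified against the released file):\n\nimport pandas as pd\n\ndf = pd.read_csv(\"groundtruth.csv\")\n\n# As implied by the mapping above: \"-1\" (NFS) and \"0\" (UFS) are merged into the\n# GermEval class 0 (not fact-claiming); \"1\" (CFS) becomes class 1 (fact-claiming).\ndf[\"label\"] = (df[\"Verdict\"] == 1).astype(int)",

    "cite_spans": [],

    "ref_spans": [],

    "eq_spans": [],

    "section": "External Data",

    "sec_num": "2.2"

},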
|
{ |
|
"text": "In total we used three different architectures, which are all based on the original transformer (Vaswani et al., 2017) model:", |
|
"cite_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 118, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "mBERT is a multilingual transformer based on the original structure of BERT (Bidirectional Encoder Representations from Transformers) (Devlin et al., 2019a). However, BERT was only trained on English data in comparison to the multilingual model which was additionally trained on Wikipedia data in 100 languages (Devlin et al., 2019b) . BERT in general consists -unlike the original transformer with its encoder / decoder architecture (Vaswani et al., 2017) -only of an encoder and is pre-trained using two different strategies: Masked Language Modeling (MLM) and Next Sentence Prediction (NSP) (Devlin et al., 2019a) . MLM masks words with a specific pattern in a sequence that the model has to predict using its bidirectionality and multi-headed attention (reading a sentence from left-to-right and right-to-left). NSP is the task of predicting the following sentence in the text input (Devlin et al., 2019a) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 311, |
|
"end": 333, |
|
"text": "(Devlin et al., 2019b)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 434, |
|
"end": 456, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 594, |
|
"end": 616, |
|
"text": "(Devlin et al., 2019a)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 887, |
|
"end": 909, |
|
"text": "(Devlin et al., 2019a)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "3" |
|
}, |
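
{

    "text": "As a brief illustration of the MLM objective (not part of our classification pipeline), the HuggingFace fill-mask pipeline can be run on the multilingual BERT checkpoint; the example sentence is arbitrary:\n\nfrom transformers import pipeline\n\nfill_mask = pipeline(\"fill-mask\", model=\"bert-base-multilingual-cased\")\n\n# The model predicts the token hidden behind [MASK] from both directions of context.\nfor prediction in fill_mask(\"Berlin is the [MASK] of Germany.\"):\n    print(prediction[\"token_str\"], prediction[\"score\"])",

    "cite_spans": [],

    "ref_spans": [],

    "eq_spans": [],

    "section": "Models",

    "sec_num": "3"

},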
|
{ |
|
"text": "GottBERT: is a monolingual transformer model, which is based on RoBERTa (Robustly Optimized BERT Pretraining Approach) (Liu et al., 2019) . The latter used the BERT architecture, but was trained with more data over a longer time period. Additionally, NSP was not used for pre-training the model and MLM was changed from static to dynamic, where they use a different mask pattern for every sequence during training instead of the same as in BERT. RoBERTa outperforms BERT in several NLP downstream tasks (Liu et al., 2019) . Since the original RoBERTa model was only trained on English data, GottBERT was trained from scratch, with the same parameters as the German BERT version, on the German data of the OSCAR corpus (Scheible et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 137, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 503, |
|
"end": 521, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 718, |
|
"end": 741, |
|
"text": "(Scheible et al., 2020)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "XLM-R: is a self-supervised cross-lingual model that was -similarly as mBERT -trained with monolingual CommonCrawl data in 100 languages . The architecture is based on RoBERTa (similarly as GottBERT) in combination with the multilingual XLM transformer (Conneau and Lample, 2019) . XLM uses more language modeling approaches (Conneau and Lample, 2019) than RoBERTa and is only trained monolingually with MLM (Conneau et al., 2019). XLM-R outperforms mBERT on multiple tasks . Evaluation results showed that XLM-R especially works well for languages with less available data in comparison to other models .", |
|
"cite_spans": [ |
|
{ |
|
"start": 253, |
|
"end": 279, |
|
"text": "(Conneau and Lample, 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The three models do not only differ in the number of languages that they were trained on: BERT and RoBERTa have different pre-training strategies, whereas the strategy of RoBERTa are used by GottBERT as well as XLM-R. As more training data is used, the vocabulary increases, resulting in longer pre-training and fine-tuning intervals. This usually has a positive influence on the performance of downstream tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "4 Experimental Setup Figure 1 provides an overview of our experimental setup and the training strategies used to solve the fact claiming task. The two main approaches take two distinct parts of input data, i.e., only Ger-mEval 2021 data or in addition ClaimBuster data as input. To evaluate the proposed methods we performed experiments by utilizing the following pretrained transformer models provided by the Hug-gingFace (Wolf et al., 2020) library: mBERT (Devlin et al., 2019a) , Gottbert 2 , and XLM-R 3 (Conneau et al., 2019). The experimental setup for the three models is described in detail below.", |
|
"cite_spans": [ |
|
{ |
|
"start": 423, |
|
"end": 442, |
|
"text": "(Wolf et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 458, |
|
"end": 480, |
|
"text": "(Devlin et al., 2019a)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 29, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "3" |
|
}, |
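
{

    "text": "The three checkpoints can be loaded from the HuggingFace hub for binary classification as sketched below; the GottBERT and XLM-R identifiers follow footnotes 2 and 3, while the mBERT identifier is inferred from the cased multilingual model named in Section 4.1:\n\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer\n\nCHECKPOINTS = {\n    \"mBERT\": \"bert-base-multilingual-cased\",\n    \"GottBERT\": \"uklfr/gottbert-base\",  # footnote 2\n    \"XLM-R\": \"xlm-roberta-base\",  # footnote 3\n}\n\nmodels = {}\nfor name, checkpoint in CHECKPOINTS.items():\n    tokenizer = AutoTokenizer.from_pretrained(checkpoint)\n    # Two output labels: fact-claiming vs. not fact-claiming.\n    model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)\n    models[name] = (tokenizer, model)",

    "cite_spans": [],

    "ref_spans": [],

    "eq_spans": [],

    "section": "Experimental Setup",

    "sec_num": "4"

},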
|
{ |
|
"text": "The cased multilingual BERT transformer (Devlin et al., 2019a) was fine-tuned on the original Ger-mEval 2021 data as well as the additional English ClaimBuster data (Arslan et al., 2020) and its German translations. Note that since the model is multilingual, we expect the English ClaimBuster data to have a positive impact on model training. Both datasets were not subject to any further preprocessing. The model was trained for 4 epochs with a learning rate of 1e-5, batch size of 8 and a maximum sequence length of 284.", |
|
"cite_spans": [ |
|
{ |
|
"start": 40, |
|
"end": 62, |
|
"text": "(Devlin et al., 2019a)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "mBERT", |
|
"sec_num": "4.1" |
|
}, |
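
{

    "text": "The stated configuration maps onto the HuggingFace Trainer roughly as follows; this is a sketch rather than our exact code, and model, train_dataset, and val_dataset are assumed to be the loaded mBERT classifier and datasets tokenized with truncation at max_length=284:\n\nfrom transformers import Trainer, TrainingArguments\n\nargs = TrainingArguments(\n    output_dir=\"mbert-germeval21\",  # illustrative output path\n    num_train_epochs=4,\n    learning_rate=1e-5,\n    per_device_train_batch_size=8,\n)\n\ntrainer = Trainer(model=model, args=args,\n                  train_dataset=train_dataset, eval_dataset=val_dataset)\ntrainer.train()",

    "cite_spans": [],

    "ref_spans": [],

    "eq_spans": [],

    "section": "mBERT",

    "sec_num": "4.1"

},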
|
{ |
|
"text": "We fine-tuned the German RoBERTa model (Got-tBERT) (Scheible et al., 2020) using the GermEval 2 https://huggingface.co/uklfr/ gottbert-base 3 https://huggingface.co/ xlm-roberta-base 2021 data without any additional preprocessing. We trained the model for 4 epochs with a learning rate of 3e-5 and one more epoch with a learning rate of 1e-5, with a batch size of 8, a maximum sequence length of 128, weight decay of 0.01 and 500 warm-up steps.", |
|
"cite_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 74, |
|
"text": "(Scheible et al., 2020)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "GottBERT", |
|
"sec_num": "4.2" |
|
}, |
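
{

    "text": "The two-stage schedule (4 epochs at 3e-5, then one more epoch at 1e-5) can be approximated by two successive Trainer runs on the same model object; whether the original implementation proceeded exactly this way is an assumption of this sketch:\n\nfrom transformers import Trainer, TrainingArguments\n\ncommon = dict(per_device_train_batch_size=8, weight_decay=0.01, warmup_steps=500)\n\n# Stage 1: 4 epochs at learning rate 3e-5.\nstage1 = TrainingArguments(output_dir=\"gottbert-stage1\", num_train_epochs=4,\n                           learning_rate=3e-5, **common)\nTrainer(model=model, args=stage1, train_dataset=train_dataset).train()\n\n# Stage 2: one further epoch at learning rate 1e-5.\nstage2 = TrainingArguments(output_dir=\"gottbert-stage2\", num_train_epochs=1,\n                           learning_rate=1e-5, **common)\nTrainer(model=model, args=stage2, train_dataset=train_dataset).train()",

    "cite_spans": [],

    "ref_spans": [],

    "eq_spans": [],

    "section": "GottBERT",

    "sec_num": "4.2"

},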
|
{ |
|
"text": "The XLM-R model was trained on the preprocessed (replacing links with @MEDIUM and replacing emojis with their translated text) GermEval 2021 training data. We finetuned the model for 10 epochs, with a batch size of 16, a maximum sequence length of 256, a learning rate of 2e-5 without warm-up steps and Adam as an optimizer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "XLM-R", |
|
"sec_num": "4.3" |
|
}, |
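
{

    "text": "Since plain Adam is specified here rather than the Trainer default, a minimal PyTorch training loop is a natural way to express this run; in this sketch, train_loader is an assumed DataLoader over the preprocessed comments, tokenized with truncation at max_length=256 and batched with size 16:\n\nimport torch\n\noptimizer = torch.optim.Adam(model.parameters(), lr=2e-5)\n\nmodel.train()\nfor epoch in range(10):\n    for batch in train_loader:\n        optimizer.zero_grad()\n        outputs = model(**batch)  # batch holds input_ids, attention_mask, labels\n        outputs.loss.backward()\n        optimizer.step()",

    "cite_spans": [],

    "ref_spans": [],

    "eq_spans": [],

    "section": "XLM-R",

    "sec_num": "4.3"

},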
|
{ |
|
"text": "The validation and test results in terms of precision, recall, and macro-averaged F1-score are presented in Table 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 115, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Run 1: The mBERT seems to generalize well, as the F1-score on the test set of 72.84% is at a similar performance level as on the validation set (76.09%). This result on the test set is the second highest achieved in our experiments. Table 1 : Accuracy (A), precision (P), recall (R), and macro-averaged F1-scores (F1) for the GermEval 2021. Abbreviation \"val\" stands for our validation set and \"test\" for the official benchmark test set. The performance measures are expressed in percent (%).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 233, |
|
"end": 240, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Run 2: The fine-tuned GottBERT on the Ger-mEval 2021 data achieved an overall F1-score of 74.62% on the test set (78.90% on the validation set). These results speak for the generalization ability of this network because the test performance is at a similar performance level as on the validation set. This result is the highest obtained for all our models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Run 3: The XLM-R fine-tuned on the Ger-mEval 2021 data achieved the lowest F1-score on the test set (69.27%). This approach seems to exhibit a strong overfitting behavior, as the results on the validation set are considerably higher (F1-score of 76.73%).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In conclusion, the GottBERT model (run 2) achieves the highest results in our experiments. These results indicate that the model that is pretrained on German data allows for a better modeling of the semantics of the task than a multilingual model. All other models are also beyond the zerorule baseline which is at 66% for the test set. A more detailed analysis of the results shows that all three models consistently predicted the same class in 560 cases (corresponds to approx. 60% of the test set). In the following, two examples are given for both classes:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Example 1 \"@USER Sie w\u00fcrden wahrscheinlich auch einen Kriegstreiber/in w\u00e4hlen, wenn es gegen Trump ginge, warten sie es ab , vielleicht geht ihr Wunsch ja in Erf\u00fcllung....\" The ground truth and predictions of all models for this example are \"0\" (not fact claiming).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Example 2 \"@USER , ich glaube,Sie verkrnnen gr\u00fcndlich die Situation. Deutschland mischt sich nicht ein, weil die letzte Einmischung in der Ukraine noch nicht bereinigt ist. Es geht nicht ums Milit\u00e4r\" The ground truth and predictions of all models for this example are \"1\" (fact claiming).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Furthermore, all three models consistently predicted the wrong class in 107 cases ( corresponds to approx. 11% of the test set). In the following, two examples are given for both classes:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Example 1 \"Hackt nicht nimmer auf den Fussball rum. Bei allem Sportarten sind wieder Zuschauer erlaubt. Hygienekonzept vorausgesetzt.\" The ground truth is \"1\" (fact claiming) and predictions of all models are \"0\" (not fact claiming).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Example 2 \"Biden gewinnt, Corona wird weggehen, Amerika wird reich,k alle bekommen ARbeit und die Welt wird sch\u00f6n. Also was sollst.\" The ground truth is \"0\" (not fact claiming) and predictions of all models are \"1\" (fact claiming).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In the remaining 273 cases (corresponds to approx. 29% of the test set), one of the models did not predict the same as the others. mBERT and Got-tBERT predicted equally in 100 cases (70 correctly and 30 incorrectly). GottBERT and XLM-R predicted equally in 100 cases (47 correctly and 53 incorrectly). mBERT and XLM-R predicted equally in 73 cases (28 correctly and 45 incorrectly). These results show that even though both pairs mBERT and GottBERT on one side and mBERT and XLM-R on the other side predict equally in most cases (100), mBERT and GottBERT predict correctly in significantly more cases (70).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
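
{

    "text": "This consistency analysis can be reproduced with a few NumPy operations; a sketch, assuming preds_mbert, preds_gottbert, preds_xlmr, and y_true are 0/1 arrays over the test comments:\n\nimport numpy as np\n\n# Cases in which all three models output the same class.\nagree_all = (preds_mbert == preds_gottbert) & (preds_gottbert == preds_xlmr)\ncorrect = preds_mbert == y_true  # the shared prediction wherever agree_all holds\nprint(\"all agree, correct:\", int(np.sum(agree_all & correct)))  # 560 cases\nprint(\"all agree, wrong:\", int(np.sum(agree_all & ~correct)))  # 107 cases\n\n# Pairwise agreement among the cases with one dissenting model.\npair_mb_gb = (preds_mbert == preds_gottbert) & ~agree_all\nprint(\"only mBERT = GottBERT:\", int(np.sum(pair_mb_gb)))  # 100 cases",

    "cite_spans": [],

    "ref_spans": [],

    "eq_spans": [],

    "section": "Results",

    "sec_num": "5"

},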
|
{ |
|
"text": "In this paper, we described our submission to the \"Fact-Claiming Comment Classification\" task of the GermEval 2021. In our experiments GottBERT, a transformer-based machine learning model pretrained on German data only, achieved the best results, leading to an F1-score of 74.62% on the test set. For the multilingual transformer models, we obtained better results with mBERT (potentially because it was trained with an additional dataset) than with XLM-R, which seems to have slightly overfitted on the training data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion & Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Future work will focus on evaluating the different models and approaches in more detail and to investigate how they specifically adapt to the underlying data. We will further investigate how the use of external data impacts the performance of all three investigated models, especially GottBERT, which seem to be the most promising option. Due to the similarity of the presented approaches in this challenge and our previous submission to the EXIST 2021 challenge, see (Sch\u00fctz et al., 2021a) , we plan to perform comparisons on how the applied models converge with respect to the different datasets, semantic concepts and downstream tasks addressed in the benchmarks. Furthermore, we will analyze whether the findings from this comparison can be applied to related tasks such as disinformation detection (Sch\u00fctz et al., 2021b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 468, |
|
"end": 490, |
|
"text": "(Sch\u00fctz et al., 2021a)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 803, |
|
"end": 825, |
|
"text": "(Sch\u00fctz et al., 2021b)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion & Future Work", |
|
"sec_num": "6" |
|
}, |
|
|
{ |
|
"text": "https://pypi.org/project/emoji/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
},

{

    "text": "https://huggingface.co/uklfr/gottbert-base",

    "cite_spans": [],

    "ref_spans": [],

    "eq_spans": [],

    "section": "",

    "sec_num": null

},

{

    "text": "https://huggingface.co/xlm-roberta-base",

    "cite_spans": [],

    "ref_spans": [],

    "eq_spans": [],

    "section": "",

    "sec_num": null

}
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This contribution has been funded by the FFG Project \"Defalsif-AI\" (Austrian security research programme KIRAS of the Federal Ministry of Agriculture, Regions and Tourism(BMLRT), grant no. 879670) and the FFG Project \"Big Data Analytics\" (grant no. 866880).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": "7" |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Claimbuster: A benchmark dataset of check-worthy factual claims", |
|
"authors": [ |
|
{ |
|
"first": "Naeemul", |
|
"middle": [], |
|
"last": "Fatma Arslan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengkai", |
|
"middle": [], |
|
"last": "Hassan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Tremayne", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fatma Arslan, Naeemul Hassan, Chengkai Li, and Mark Tremayne. 2020. Claimbuster: A benchmark dataset of check-worthy factual claims.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Crosslingual language model pretraining", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau and Guillaume Lample. 2019. Cross- lingual language model pretraining. In Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019a. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Multilingual BERT (mBERT)", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2010--2016", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019b. Multilingual BERT (mBERT). Accessed: 2010-06-02.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "2021. Proceedings of the iberian languages evaluation forum", |
|
"authors": [ |
|
{ |
|
"first": "Manuel", |
|
"middle": [], |
|
"last": "Montes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Rosso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julio", |
|
"middle": [], |
|
"last": "Gonzalo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ezra", |
|
"middle": [], |
|
"last": "Arag\u00f3n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rodrigo", |
|
"middle": [], |
|
"last": "Agerri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miguel\u00e1ngel\u00e1lvarez", |
|
"middle": [], |
|
"last": "Carmona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elena\u00e1lvarez", |
|
"middle": [], |
|
"last": "Mellado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jorge", |
|
"middle": [], |
|
"last": "Carrillo De Albornoz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luis", |
|
"middle": [], |
|
"last": "Chiruzzo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Larissa", |
|
"middle": [], |
|
"last": "Freitas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Helena", |
|
"middle": [ |
|
"G\u00f3mez" |
|
], |
|
"last": "Adorno", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoan", |
|
"middle": [], |
|
"last": "Guti\u00e9rrez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manuel Montes, Paolo Rosso, Julio Gonzalo, Ezra Arag\u00f3n, Rodrigo Agerri, Miguel\u00c1ngel\u00c1lvarez Carmona, Elena\u00c1lvarez Mellado, Jorge Car- rillo de Albornoz, Luis Chiruzzo, Larissa Fre- itas, Helena G\u00f3mez Adorno, Yoan Guti\u00e9rrez, Salud Mar\u00eda Jim\u00e9nez Zafra, Salvador Lima, Flor Miriam Plaza de Arco, and Mariona Taul\u00e9 (eds.). 2021. Proceedings of the iberian languages evalu- ation forum (iberlef 2021). In CEUR Workshop Pro- ceedings.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Overview of the GermEval 2021 shared task on the identification of toxic, engaging, and fact-claiming comments", |
|
"authors": [ |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Risch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anke", |
|
"middle": [], |
|
"last": "Stoll", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lena", |
|
"middle": [], |
|
"last": "Wilms", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Wiegand", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the GermEval 2021 SharedTask on the Identification of Toxic, Engaging, and Fact-Claiming Comments colocated with KONVENS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--12", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julian Risch, Anke Stoll, Lena Wilms, and Michael Wiegand. 2021. Overview of the GermEval 2021 shared task on the identification of toxic, engaging, and fact-claiming comments. In Proceedings of the GermEval 2021 SharedTask on the Identification of Toxic, Engaging, and Fact-Claiming Comments co- located with KONVENS, pages 1-12.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Overview of exist 2021: sexism identification in social networks", |
|
"authors": [ |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Rodr\u00edguez-S\u00e1nchez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jorge", |
|
"middle": [], |
|
"last": "Carrillo De Albornoz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Plaza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julio", |
|
"middle": [], |
|
"last": "Gonzalo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Rosso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miriam", |
|
"middle": [], |
|
"last": "Comet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trinidad", |
|
"middle": [], |
|
"last": "Donoso", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Procesamiento del Lenguaje Natural", |
|
"volume": "", |
|
"issue": "0", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Francisco Rodr\u00edguez-S\u00e1nchez, Jorge Carrillo de Al- bornoz, Laura Plaza, Julio Gonzalo, Paolo Rosso, Miriam Comet, and Trinidad Donoso. 2021. Overview of exist 2021: sexism identification in so- cial networks. Procesamiento del Lenguaje Natural, 67(0).", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Got-tBERT: a pure german language model", |
|
"authors": [ |
|
{ |
|
"first": "Raphael", |
|
"middle": [], |
|
"last": "Scheible", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabian", |
|
"middle": [], |
|
"last": "Thomczyk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patric", |
|
"middle": [], |
|
"last": "Tippmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Jaravine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Boeker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Raphael Scheible, Fabian Thomczyk, Patric Tippmann, Victor Jaravine, and Martin Boeker. 2020. Got- tBERT: a pure german language model. CoRR, abs/2012.02110.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Automatic sexism detection with multilingual transformer models", |
|
"authors": [ |
|
{ |
|
"first": "Mina", |
|
"middle": [], |
|
"last": "Sch\u00fctz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaqueline", |
|
"middle": [], |
|
"last": "Boeck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daria", |
|
"middle": [], |
|
"last": "Liakhovets", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Djordje", |
|
"middle": [], |
|
"last": "Slijep\u010devi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armin", |
|
"middle": [], |
|
"last": "Kirchknopf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manuel", |
|
"middle": [], |
|
"last": "Hecht", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Bogensperger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sven", |
|
"middle": [], |
|
"last": "Schlarb", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Schindler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Zeppelzauer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2106.04908" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mina Sch\u00fctz, Jaqueline Boeck, Daria Liakhovets, Djordje Slijep\u010devi\u0107, Armin Kirchknopf, Manuel Hecht, Johannes Bogensperger, Sven Schlarb, Alexander Schindler, and Matthias Zeppelzauer. 2021a. Automatic sexism detection with mul- tilingual transformer models. arXiv preprint arXiv:2106.04908.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Automatic fake news detection with pre-trained transformer models", |
|
"authors": [ |
|
{ |
|
"first": "Mina", |
|
"middle": [], |
|
"last": "Sch\u00fctz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Schindler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melanie", |
|
"middle": [], |
|
"last": "Siegel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kawa", |
|
"middle": [], |
|
"last": "Nazemi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Pattern Recognition. ICPR International Workshops and Challenges. ICPR 2021. Lecture Notes in Computer Sciences", |
|
"volume": "12667", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mina Sch\u00fctz, Alexander Schindler, Melanie Siegel, and Kawa Nazemi. 2021b. Automatic fake news detection with pre-trained transformer models. In Pattern Recognition. ICPR International Workshops and Challenges. ICPR 2021. Lecture Notes in Com- puter Sciences, volume 12667, Cham. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141 ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Gar- nett, editors, Advances in Neural Information Pro- cessing Systems 30, pages 5998-6008. Curran Asso- ciates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Transformers: State-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Clara", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yacine", |
|
"middle": [], |
|
"last": "Jernite", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Plu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Canwen", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Teven", |
|
"middle": [ |
|
"Le" |
|
], |
|
"last": "Scao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sylvain", |
|
"middle": [], |
|
"last": "Gugger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mariama", |
|
"middle": [], |
|
"last": "Drame", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quentin", |
|
"middle": [], |
|
"last": "Lhoest", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "38--45", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander M. Rush. 2020. Transformers: State-of-the-art natural language pro- cessing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "Overview of the setup of our submitted runs including the used models and data.", |
|
"type_str": "figure", |
|
"uris": null |
|
} |
|
} |
|
} |
|
} |