|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:35:07.315761Z" |
|
}, |
|
"title": "UoB at ProfNER 2021: Data Augmentation for Classification Using Machine Translation", |
|
"authors": [ |
|
{

"first": "Frances",

"middle": [],

"last": "Laureano De Leon",

"suffix": "",

"affiliation": {

"laboratory": "",

"institution": "University of Birmingham",

"location": {}

},

"email": ""

},
|
{ |
|
"first": "Harish", |
|
"middle": [ |
|
"Tayyar" |
|
], |
|
"last": "Madabushi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Birmingham", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Birmingham", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper describes the participation of the UoB-NLP team in the ProfNER-ST shared subtask 7a. The task was aimed at detecting the mention of professions in social media text. Our team experimented with two methods of improving the performance of pretrained models: Specifically, we experimented with data augmentation through translation and the merging of multiple language inputs to meet the objective of the task. While the best performing model on the test data consisted of mBERT fine-tuned on augmented data using back-translation, the improvement is minor possibly because multilingual pre-trained models such as mBERT already have access to the kind of information provided through backtranslation and bilingual data.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper describes the participation of the UoB-NLP team in the ProfNER-ST shared subtask 7a. The task was aimed at detecting the mention of professions in social media text. Our team experimented with two methods of improving the performance of pretrained models: Specifically, we experimented with data augmentation through translation and the merging of multiple language inputs to meet the objective of the task. While the best performing model on the test data consisted of mBERT fine-tuned on augmented data using back-translation, the improvement is minor possibly because multilingual pre-trained models such as mBERT already have access to the kind of information provided through backtranslation and bilingual data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The increase of user-generated content online has allowed researchers to extract information for studies on a variety of subjects, namely tracking infectious diseases and promoting public health (Wakamiya et al., 2018; Fine et al., 2020) . Consequently the emergence of COVID-19 has resulted in a rapid increase of information related to the virus on social media platforms (Zhao et al., 2020 ). ProfNER, a task under Social Media Mining for Health Applications (SMM4H) workshop (Magge et al., 2021) , requires the identification of occupations that might be particularly affected, either mentally or physically, by the exposure to COVID-19. The task organisers give participants tweets in Spanish and English. The English tweets were translated by means of a machine translation system. Of the tweets provided, 24% contain a mention of an occupation .", |
|
"cite_spans": [ |
|
{ |
|
"start": 195, |
|
"end": 218, |
|
"text": "(Wakamiya et al., 2018;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 219, |
|
"end": 237, |
|
"text": "Fine et al., 2020)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 374, |
|
"end": 392, |
|
"text": "(Zhao et al., 2020", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 479, |
|
"end": 499, |
|
"text": "(Magge et al., 2021)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction and Motivation", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Classifiers are dependent on the size and quality of the training data (Wei and Zou, 2020) , and are sensitive to class imbalance. We hypothesise that increasing the number of examples in the positive class using data augmentation techniques will successfully increase the performance of trained models. This work describes the training of four classifiers using pre-trained BERT models to detect the mention of occupations in tweets. In addition to training two baseline models, BERT-Base and mBERT, we train one model on augmented textual data, mBERT-Aug, and another model on bilingual data. We compare these models to each other and to fine-tuned pre-trained BERT models, described in Section 3.2. The small increase in F1 scores over the baselines, which is inconsistent across our validation and test experiments leads us to conclude that back-translation and bilingual data input are ineffective as methods of addressing class imbalance in pre-trained models, especially multi-lingual models (See Section 4). Our models were trained using the data provided by the task organisers for subtask 7a. Results are discussed in Section 4.", |
|
"cite_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 90, |
|
"text": "(Wei and Zou, 2020)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction and Motivation", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Augmenting textual data is challenging because it can introduce label noise and must be done before training a model (Shleifer, 2019) . Among techniques developed for text augmentation is synonym replacement, random insertion, swap and deletion, as presented by Wei and Zou (2020) . Shleifer (2019) uses back-translation, to translate the data in a second language and then back to the source language. They train their model on a binary classification task in a setting where low amounts of labelled data are available. Work continues to be done in back-translation for classification, as there is little research otherwise (Shleifer, 2019) . In this work, we use back-translation as a tool for augmenting the text data for the positive class. This work contributes to the field of generating synthetic data for text classification. Others have tried to add features to models to increase performance (Whang and Vosoughi, 2020; Lu et al., 2020) , we attempt to bring together representations in different languages so as to maximise the information available to the models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 133, |
|
"text": "(Shleifer, 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 262, |
|
"end": 280, |
|
"text": "Wei and Zou (2020)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 625, |
|
"end": 641, |
|
"text": "(Shleifer, 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 902, |
|
"end": 928, |
|
"text": "(Whang and Vosoughi, 2020;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 929, |
|
"end": 945, |
|
"text": "Lu et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
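A minimal sketch of back-translation-based augmentation as used in this work, in Python. The `translate(text, src, dest)` callable is an assumed stand-in for whatever machine-translation service is available (Section 3.2 uses the Google Translate API); this is an illustration, not the authors' exact code:

```python
# Sketch of back-translation augmentation for the positive class.
# `translate` is a hypothetical stand-in for an MT call; it is an
# assumption, not part of this work's released code.

def back_translate(text, translate, src="es", pivot="en"):
    """Translate `text` into a pivot language and back into the source."""
    pivot_text = translate(text, src=src, dest=pivot)   # es -> en
    return translate(pivot_text, src=pivot, dest=src)   # en -> es

def augment_positive_class(tweets, labels, translate):
    """Generate extra examples for the positive (occupation-mention)
    class only, so augmentation also rebalances the dataset."""
    extra = []
    for tweet, label in zip(tweets, labels):
        if label == 1:
            paraphrase = back_translate(tweet, translate)
            if paraphrase != tweet:  # keep only new surface forms
                extra.append((paraphrase, label))
    return extra
```

Because only positive-class tweets are paraphrased, the technique addresses class imbalance directly rather than simply enlarging the corpus.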
|
{ |
|
"text": "This section describes our experimental design. The code, models, and hyper-parameters are available on our team's GitHub repository for the task 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Overview and Experimental Set-Up", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Punctuation, hashtags, twitter handles, emojis and URL's were all removed from the English and Spanish tweets. Tweets were tokenised using the Hugging Face Transformers library (Wolf et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 177, |
|
"end": 196, |
|
"text": "(Wolf et al., 2020)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing", |
|
"sec_num": "3.1" |
|
}, |
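A minimal sketch of this preprocessing step; the regular expressions are illustrative assumptions rather than the exact rules used, and `bert-base-multilingual-cased` is the standard mBERT tokenizer checkpoint:

```python
import re
from transformers import AutoTokenizer

# Illustrative cleaning rules approximating the preprocessing described
# above (punctuation, hashtags, handles, emojis, and URLs removed); the
# exact patterns are assumptions, not the authors' code.
URL_RE = re.compile(r"https?://\S+")
HANDLE_RE = re.compile(r"@\w+")
HASHTAG_RE = re.compile(r"#\w+")
SYMBOL_RE = re.compile(r"[^\w\s]")  # drops punctuation and emoji characters

def clean_tweet(text):
    for pattern in (URL_RE, HANDLE_RE, HASHTAG_RE, SYMBOL_RE):
        text = pattern.sub(" ", text)
    return " ".join(text.split())  # collapse leftover whitespace

# Tokenisation with the Hugging Face Transformers library (Wolf et al., 2020).
tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
encoded = tokenizer(clean_tweet("Mi hermana es enfermera en el hospital #COVID19"),
                    truncation=True, padding="max_length", max_length=128)
```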
|
{ |
|
"text": "We trained four classifiers: mBERT-base, BERTbase, mBERT-Aug, and bilingual models. We utilised pre-trained mBERT-base and BERT-base to conduct our experiments (Devlin et al., 2019) using both the Spanish and English training data. Our team fine-tuned mBERT and BERT-base to use as a baseline for our experiments. We finetuned both models with the 6,000 train tweets provided by the task organisers; mBERT was trained on Spanish tweets and BERT-base on English tweets. Our augmented data model is mBERT-Aug, which we trained on 6,000 Spanish tweets, and an additional 1,393 back-translated tweets. The additional tweets consist of the English data belonging to the positive class, which were translated back into Spanish using Google Translate API. We also train a bilingual model, by concatenating the output of the two transformer models. We trained this model on both the Spanish and English tweets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 181, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "3.2" |
|
}, |
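A minimal PyTorch sketch of the bilingual model described above: each tweet is encoded in Spanish by mBERT and in English by BERT-base, the two [CLS] representations are concatenated, and a linear head performs the binary classification. The pooling choice and head size are assumptions, not confirmed details of the implementation:

```python
import torch
import torch.nn as nn
from transformers import AutoModel

class BilingualClassifier(nn.Module):
    """Concatenates mBERT (Spanish) and BERT-base (English) [CLS] vectors
    so the classifier sees both language views of the same tweet."""

    def __init__(self, num_labels=2):
        super().__init__()
        self.es_encoder = AutoModel.from_pretrained("bert-base-multilingual-cased")
        self.en_encoder = AutoModel.from_pretrained("bert-base-uncased")
        hidden = (self.es_encoder.config.hidden_size
                  + self.en_encoder.config.hidden_size)
        self.classifier = nn.Linear(hidden, num_labels)

    def forward(self, es_inputs, en_inputs):
        # Take the [CLS] token embedding from each encoder.
        es_cls = self.es_encoder(**es_inputs).last_hidden_state[:, 0]
        en_cls = self.en_encoder(**en_inputs).last_hidden_state[:, 0]
        # Merge the two language views and classify.
        return self.classifier(torch.cat([es_cls, en_cls], dim=-1))
```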
|
{ |
|
"text": "The bilingual model obtains the best results on the validation data, while mBERT-Aug is the best scoring model on the test data, with a F-1 score of 0.83. Table 1 and Table 2 summarise the results.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 174, |
|
"text": "Table 1 and Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We perform experiments after the evaluation period to obtain results on the test data for BERT-base and the bilingual model. We do this to compare the results of all models on the test data. We find 1 https://github.com/francesita/ ProfnerTask7a that neither the addition of augmented data, nor combining representations in different languages significantly improves the results, with the bilingual model performing better on the validation data, and the mBERT-Aug performing better on the test data. We believe that a reason the BERT-base and bilingual models have lower scores on the test data is due to the quality of the machine translation system that we used whereas the validation data was provided by the task organisers. For example, Ultima Hora in Spanish was translated as last minute, when it should have been translated as breaking news in the context it was used in. Another example is consellera which translates to advisor was not translated at all in some tweets. While these methods of data augmentation provide a small improvement, fine-tuned pre-trained BERT models are quite robust. Training on parallel corpora gave these models everything that could be extracted through back-translation and bilingual data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our work presents experiments with pre-trained transformer based models to perform binary classification on an imbalanced dataset. We hypothesised that the use of data augmentation and parallel inputs in multiple languages will provide a method of addressing class imbalance (Section 1). However, our experiments showed that neither of these methods are particularly powerful in this regard (Section 4). In the future, we will continue to experiment with other techniques to handle imbalanced classes, such as one-class classification and reinforcement learning-based networks to generate text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Un- derstanding.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Assessing population-level symptoms of anxiety, depression, and suicide risk in real time using NLP applied to social media data", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Fine", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Crutchley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jenny", |
|
"middle": [], |
|
"last": "Blase", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Carroll", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Glen", |
|
"middle": [], |
|
"last": "Coppersmith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Fourth Workshop on Natural Language Processing and Computational Social Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.nlpcss-1.6" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Fine, Patrick Crutchley, Jenny Blase, Joshua Carroll, and Glen Coppersmith. 2020. Assessing population-level symptoms of anxiety, depression, and suicide risk in real time using NLP applied to so- cial media data. In Proceedings of the Fourth Work- shop on Natural Language Processing and Compu- tational Social Science, Stroudsburg, PA, USA. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "VGCN-BERT: Augmenting BERT with Graph Embedding for Text Classification", |
|
"authors": [ |
|
{ |
|
"first": "Zhibin", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pan", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian Yun", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), volume 12035 LNCS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "369--382", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-030-45439-5{_}25" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhibin Lu, Pan Du, and Jian Yun Nie. 2020. VGCN- BERT: Augmenting BERT with Graph Embedding for Text Classification. In Lecture Notes in Com- puter Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioin- formatics), volume 12035 LNCS, pages 369-382. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Overview of the sixth social media mining for health applications (# smm4h) shared tasks at naacl 2021", |
|
"authors": [ |
|
{ |
|
"first": "Arjun", |
|
"middle": [], |
|
"last": "Magge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Flores", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilseyar", |
|
"middle": [], |
|
"last": "Alimova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammed", |
|
"middle": [ |
|
"Ali" |
|
], |
|
"last": "Al-Garadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Miranda-Escalada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zulfat", |
|
"middle": [], |
|
"last": "Miftahutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eul\u00e0lia", |
|
"middle": [], |
|
"last": "Farr\u00e9-Maduell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salvador", |
|
"middle": [ |
|
"Lima" |
|
], |
|
"last": "L\u00f3pez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Banda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karen", |
|
"middle": [ |
|
"O" |
|
], |
|
"last": "Connor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abeed", |
|
"middle": [], |
|
"last": "Sarker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Tutubalina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Krallinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Davy", |
|
"middle": [], |
|
"last": "Weissenbacher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graciela", |
|
"middle": [], |
|
"last": "Gonzalez-Hernandez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the Sixth Social Media Mining for Health Applications Workshop & Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arjun Magge, Ari Klein, Ivan Flores, Ilseyar Al- imova, Mohammed Ali Al-garadi, Antonio Miranda- Escalada, Zulfat Miftahutdinov, Eul\u00e0lia Farr\u00e9- Maduell, Salvador Lima L\u00f3pez, Juan M Banda, Karen O'Connor, Abeed Sarker, Elena Tutubalina, Martin Krallinger, Davy Weissenbacher, and Gra- ciela Gonzalez-Hernandez. 2021. Overview of the sixth social media mining for health applications (# smm4h) shared tasks at naacl 2021. In Proceedings of the Sixth Social Media Mining for Health Appli- cations Workshop & Shared Task.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "The profner shared task on automatic recognition of professions and occupation mentions in social media: systems, evaluation, guidelines, embeddings and corpora", |
|
"authors": [ |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Miranda-Escalada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eul\u00e0lia", |
|
"middle": [], |
|
"last": "Farr\u00e9-Maduell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salvador", |
|
"middle": [ |
|
"Lima" |
|
], |
|
"last": "L\u00f3pez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vicent", |
|
"middle": [], |
|
"last": "Briva-Iglesias", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marvin", |
|
"middle": [], |
|
"last": "Ag\u00fcero-Torales", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luis", |
|
"middle": [], |
|
"last": "Gasc\u00f3-S\u00e1nchez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Krallinger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the Sixth Social Media Mining for Health Applications Workshop & Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antonio Miranda-Escalada, Eul\u00e0lia Farr\u00e9-Maduell, Sal- vador Lima L\u00f3pez, Vicent Briva-Iglesias, Marvin Ag\u00fcero-Torales, Luis Gasc\u00f3-S\u00e1nchez, and Martin Krallinger. 2021. The profner shared task on automatic recognition of professions and occupa- tion mentions in social media: systems, evaluation, guidelines, embeddings and corpora. In Proceed- ings of the Sixth Social Media Mining for Health Ap- plications Workshop & Shared Task.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Low Resource Text Classification with ULMFit and Backtranslation", |
|
"authors": [ |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Shleifer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sam Shleifer. 2019. Low Resource Text Classifi- cation with ULMFit and Backtranslation. CoRR, abs/1903.09244.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Twitter-Based Influenza Detection After Flu Peak via Tweets With Indirect Information: Text Mining Study", |
|
"authors": [ |
|
{ |
|
"first": "Shoko", |
|
"middle": [], |
|
"last": "Wakamiya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yukiko", |
|
"middle": [], |
|
"last": "Kawai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eiji", |
|
"middle": [], |
|
"last": "Aramaki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "JMIR Public Health and Surveillance", |
|
"volume": "4", |
|
"issue": "3", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.2196/publichealth.8627" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shoko Wakamiya, Yukiko Kawai, and Eiji Aramaki. 2018. Twitter-Based Influenza Detection After Flu Peak via Tweets With Indirect Information: Text Mining Study. JMIR Public Health and Surveil- lance, 4(3).", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "EDA: Easy data augmentation techniques for boosting performance on text classification tasks", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "EMNLP-IJCNLP 2019 -2019 Conference on Empirical Methods in Natural Language Processing and 9th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/d19-1670" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Wei and Kai Zou. 2020. EDA: Easy data aug- mentation techniques for boosting performance on text classification tasks. In EMNLP-IJCNLP 2019 -2019 Conference on Empirical Methods in Natu- ral Language Processing and 9th International Joint Conference on Natural Language Processing, Pro- ceedings of the Conference.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Dartmouth CS at WNUT-2020 Task 2: Informative COVID-19 Tweet Classification Using BERT", |
|
"authors": [ |
|
{ |
|
"first": "Dylan", |
|
"middle": [], |
|
"last": "Whang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soroush", |
|
"middle": [], |
|
"last": "Vosoughi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Sixth Workshop on Noisy Usergenerated Text (W-NUT 2020)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "480--484", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.wnut-1.72" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dylan Whang and Soroush Vosoughi. 2020. Dart- mouth CS at WNUT-2020 Task 2: Informative COVID-19 Tweet Classification Using BERT. In Proceedings of the Sixth Workshop on Noisy User- generated Text (W-NUT 2020), pages 480-484, Stroudsburg, PA, USA. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Transformers: State-of-the-Art Natural Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Remi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joe", |
|
"middle": [], |
|
"last": "Davison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Shleifer", |
|
"suffix": "" |
|
}, |
|
{

"first": "Patrick",

"middle": [],

"last": "Von Platen",

"suffix": ""

},

{

"first": "Clara",

"middle": [],

"last": "Ma",

"suffix": ""

},

{

"first": "Yacine",

"middle": [],

"last": "Jernite",

"suffix": ""

},

{

"first": "Julien",

"middle": [],

"last": "Plu",

"suffix": ""

},

{

"first": "Canwen",

"middle": [],

"last": "Xu",

"suffix": ""

},

{

"first": "Teven",

"middle": [

"Le"

],

"last": "Scao",

"suffix": ""

},

{

"first": "Sylvain",

"middle": [],

"last": "Gugger",

"suffix": ""

},

{

"first": "Mariama",

"middle": [],

"last": "Drame",

"suffix": ""

},

{

"first": "Quentin",

"middle": [],

"last": "Lhoest",

"suffix": ""

},

{

"first": "Alexander",

"middle": [],

"last": "Rush",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-demos.6" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, Remi Louf, Morgan Funtow- icz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Trans- formers: State-of-the-Art Natural Language Process- ing. In Proceedings of the 2020 Conference on Em- pirical Methods in Natural Language Processing: System Demonstrations, Stroudsburg, PA, USA. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Exploring Occupation Differences in Reactions to COVID-19 Pandemic on Twitter. Data and Information Management", |
|
"authors": [ |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haixu", |
|
"middle": [], |
|
"last": "Xi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengzhi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.2478/dim-2020-0032" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi Zhao, Haixu Xi, and Chengzhi Zhang. 2020. Explor- ing Occupation Differences in Reactions to COVID- 19 Pandemic on Twitter. Data and Information Man- agement, 0(0).", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null, |
|
"text": "ProfNER Task 7a Test Results" |
|
} |
|
} |
|
} |
|
} |