|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:35:04.421626Z" |
|
}, |
|
"title": "Medication Mention Detection in Tweets Using ELECTRA Transformers and Decision Trees", |
|
"authors": [ |
|
{ |
|
"first": "Lung-Hao", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Pervasive Artificial Intelligence Research (PAIR) Labs", |
|
"institution": "National Central University", |
|
"location": { |
|
"country": "Taiwan, Taiwan" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Po-Han", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Pervasive Artificial Intelligence Research (PAIR) Labs", |
|
"institution": "National Central University", |
|
"location": { |
|
"country": "Taiwan, Taiwan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hao-Chuan", |
|
"middle": [], |
|
"last": "Kao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Pervasive Artificial Intelligence Research (PAIR) Labs", |
|
"institution": "National Central University", |
|
"location": { |
|
"country": "Taiwan, Taiwan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ting-Chun", |
|
"middle": [], |
|
"last": "Hung", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Pervasive Artificial Intelligence Research (PAIR) Labs", |
|
"institution": "National Central University", |
|
"location": { |
|
"country": "Taiwan, Taiwan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Po-Lei", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Pervasive Artificial Intelligence Research (PAIR) Labs", |
|
"institution": "National Central University", |
|
"location": { |
|
"country": "Taiwan, Taiwan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kuo-Kai", |
|
"middle": [], |
|
"last": "Shyu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Pervasive Artificial Intelligence Research (PAIR) Labs", |
|
"institution": "National Central University", |
|
"location": { |
|
"country": "Taiwan, Taiwan" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This study describes our proposed model design for the SMM4H 2020 Task 1. We fine-tune ELECTRA transformers using our trained SVM filter for data augmentation, along with decision trees to detect medication mentions in tweets. Our best F1-score of 0.7578 exceeded the mean score 0.6646 of all 15 submitting teams.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This study describes our proposed model design for the SMM4H 2020 Task 1. We fine-tune ELECTRA transformers using our trained SVM filter for data augmentation, along with decision trees to detect medication mentions in tweets. Our best F1-score of 0.7578 exceeded the mean score 0.6646 of all 15 submitting teams.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The Social Media Mining for Health Applications (SMM4H) shared task involves natural language processing challenges for using social media data for health research. We participated in the SMM4H 2020 Task 1, focusing on automatic classification of tweets that mention medications (Klein et al., 2020 ). This binary classification task involves distinguishing tweets that mention a medication or dietary supplement (annotated as '1') from those that do not (annotated as '0'). This task was first organized in 2018 using a data set containing an artificially balanced distribution of the two classes (Weissenbacher et al., 2018) . Several approaches have been presented to address this binary classification task (Coltekin and Rama, 2018; Xherija, 2018; Wu et al., 2018) . However, this year's task is more challenging. The data set represents a natural, highly imbalanced distribution of the two classes from tweets posted by 112 women during pregnancy, with only approximately 0.2% of the tweets mentioning a medication (Sarker et al., 2017; Weissenbacher et al., 2019) . This paper describes the NCUEE (National Central University, Dept. of Electrical Engineering) system for the SMM4H 2020 Task 1. To deal with highly imbalanced distribution, the support vector machine trained using the training data is used as a filter to crawl and select more tweets for data augmentation. We then fine-tune the pre-trained ELECTRA transformers (Clark et al., 2020) , using our augmented data for medication mention detection. In addition, we train the decision tree as a supplementary classifier. Finally, the integrated set of testing instances are detected as a positive class from ELECTRA and decision trees are regraded as label '1' and the remaining cases as label '0' to form our submissions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 279, |
|
"end": 298, |
|
"text": "(Klein et al., 2020", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 598, |
|
"end": 626, |
|
"text": "(Weissenbacher et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 711, |
|
"end": 736, |
|
"text": "(Coltekin and Rama, 2018;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 737, |
|
"end": 751, |
|
"text": "Xherija, 2018;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 752, |
|
"end": 768, |
|
"text": "Wu et al., 2018)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1020, |
|
"end": 1041, |
|
"text": "(Sarker et al., 2017;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1042, |
|
"end": 1069, |
|
"text": "Weissenbacher et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1434, |
|
"end": 1454, |
|
"text": "(Clark et al., 2020)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In addition to the training data provided by task organizers, we crawl and select highly related tweets for data augmentation. We manually check small positive tweets to pick up textual terms that may refer to medications, and then use these terms as seeds for query expansions. The pre-trained Word2Vec embedding from Twitter data is used to look up word vectors and compare their cosine similarities. The top 10 similar terms of seeds are collected, where expanded terms are kept if the document frequency (DF) of an expanded term in the positive class exceeds that in the negative class. Each expanded term, along with the query term 'pregnant' is regarded as an individual query to search for possibly related tweets from Twitter. To automatically label highly positive cases, we train the support vector machine (SVM) 132 using the provided training data and select crawled tweets predicted to be positive cases by the SVM. Finally, we construct an augmented data set including the original training sets for neural computing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The NCUEE System", |
|
"sec_num": "2" |
|
}, |
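
{

"text": "A minimal sketch of the query expansion and SVM filtering described above follows. It is illustrative rather than our exact implementation: the gensim Twitter GloVe model stands in for the Word2Vec tweet embeddings, the SVM features are assumed to be TF-IDF, and seeds, pos_df, neg_df, train_texts, train_labels and crawled_tweets are hypothetical placeholders.\n\nimport gensim.downloader as api\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.svm import LinearSVC\n\n# Stand-in for the pre-trained Word2Vec tweet embeddings (assumption).\nkv = api.load('glove-twitter-200')\n\ndef expand_seeds(seeds, pos_df, neg_df, topn=10):\n    # Collect the top-10 most similar terms per seed; keep a term only if\n    # its document frequency (DF) in the positive class exceeds that in\n    # the negative class (pos_df/neg_df map term -> DF count).\n    expanded = set()\n    for seed in seeds:\n        if seed not in kv:\n            continue\n        for term, _sim in kv.most_similar(seed, topn=topn):\n            if pos_df.get(term, 0) > neg_df.get(term, 0):\n                expanded.add(term)\n    # Pair each kept term with the query term 'pregnant' to form queries.\n    return [term + ' pregnant' for term in sorted(expanded)]\n\n# SVM filter trained on the provided training data.\nsvm_filter = make_pipeline(TfidfVectorizer(), LinearSVC())\nsvm_filter.fit(train_texts, train_labels)\n\n# Keep only crawled tweets that the SVM predicts to be positive.\naugmented_pos = [t for t in crawled_tweets if svm_filter.predict([t])[0] == 1]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The NCUEE System",

"sec_num": "2"

},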
|
{ |
|
"text": "ELECTRA (Efficiently Learning as Encoder that Classifiers Token Replacements Accurately) is a new pre-training approach that aims to match or exceed the downstream performance of an MLM (Masked Language Modeling) pre-trained model while using less computational loading (Clark et al., 2020) . ELECTRA trains two transformer models: the generator, which replaces the tokens in a sequence for training a masked language model; and the discriminator, which tries to identify which tokens in the sequence were replaced by the generator. We use pre-trained ELECTRA transformers and fine-tune them using our augmented data to detect medication mentions in tweets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 270, |
|
"end": 290, |
|
"text": "(Clark et al., 2020)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The NCUEE System", |
|
"sec_num": "2" |
|
}, |
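
{

"text": "A minimal sketch of fine-tuning ELECTRA for this binary classification task with HuggingFace Transformers follows. The checkpoint identifier and the data variables train_texts and train_labels are illustrative assumptions; the hyper-parameter values are those reported in Section 3.\n\nimport torch\nfrom transformers import ElectraTokenizerFast, ElectraForSequenceClassification, Trainer, TrainingArguments\n\n# ELECTRA-Large discriminator checkpoint on HuggingFace (assumed identifier).\nMODEL = 'google/electra-large-discriminator'\ntokenizer = ElectraTokenizerFast.from_pretrained(MODEL)\nmodel = ElectraForSequenceClassification.from_pretrained(MODEL, num_labels=2)\n\nclass TweetDataset(torch.utils.data.Dataset):\n    # Wraps tokenized tweets and their 0/1 medication-mention labels.\n    def __init__(self, texts, labels):\n        self.enc = tokenizer(texts, truncation=True, padding=True)\n        self.labels = labels\n    def __len__(self):\n        return len(self.labels)\n    def __getitem__(self, i):\n        item = {k: torch.tensor(v[i]) for k, v in self.enc.items()}\n        item['labels'] = torch.tensor(self.labels[i])\n        return item\n\n# Hyper-parameters reported in the evaluation section.\nargs = TrainingArguments(output_dir='electra-smm4h', per_device_train_batch_size=16, gradient_accumulation_steps=16, learning_rate=1e-5, num_train_epochs=6)\ntrainer = Trainer(model=model, args=args, train_dataset=TweetDataset(train_texts, train_labels))\ntrainer.train()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The NCUEE System",

"sec_num": "2"

},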
|
{ |
|
"text": "According to our empirical results from the validation set, the decision tree (DT) classifiers usually achieved a high degree of precision if the discriminated features had been extracted and learned, but very low recall if the testing cases were significantly different from the trained ones. Hence, we use the same trained SVM as a filter to select the positive cases (predicted as '1' ) of from the 2018 task training data that may be closely similar to the positive tweets in this task and include these in an augmented set. We then adopt the TF-IDF (Term Frequency-Inverse Document Frequency) weighting method to extract discriminated features of positive tweets from this augmented set and use them with the original training data to train the decision trees.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The NCUEE System", |
|
"sec_num": "2" |
|
}, |
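
{

"text": "A minimal sketch of this decision-tree branch follows. Because the exact feature set is not specified, fitting the TF-IDF vectorizer on the SVM-selected augmented positives together with the original training data is an assumption, and augmented_texts, augmented_labels, train_texts, train_labels and test_texts are hypothetical placeholders.\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.tree import DecisionTreeClassifier\n\n# TF-IDF features over the augmented positives plus the original training data.\nvectorizer = TfidfVectorizer()\nX_train = vectorizer.fit_transform(augmented_texts + train_texts)\ny_train = augmented_labels + train_labels\n\n# Decision tree as the supplementary high-precision classifier.\ndt = DecisionTreeClassifier(random_state=0)\ndt.fit(X_train, y_train)\n\n# At test time, reuse the fitted vocabulary.\ndt_pred = dt.predict(vectorizer.transform(test_texts))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The NCUEE System",

"sec_num": "2"

},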
|
{ |
|
"text": "Finally, based on our error analysis, we found the decision tree classifier was partially complementary to ELECTRA. So, the integrated set of testing instances predicted as the positive class from the ELEC-TRA transformers and decision trees are labeled '1', otherwise '0 ' in our submissions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The NCUEE System", |
|
"sec_num": "2" |
|
}, |
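
{

"text": "The label-union rule amounts to a logical OR over the two classifiers' predictions; in this sketch, electra_pred and dt_pred are assumed to be 0/1 arrays produced by the models above.\n\nimport numpy as np\n\n# A tweet is labeled '1' if either the fine-tuned ELECTRA model or the\n# decision tree predicts it as positive; otherwise it is labeled '0'.\nfinal_pred = np.where((np.asarray(electra_pred) == 1) | (np.asarray(dt_pred) == 1), 1, 0)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The NCUEE System",

"sec_num": "2"

},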
|
{ |
|
"text": "We picked up 112 seeds from 181 positive tweets to further expand the dataset by 72 unique terms via cosine similarity through the pre-trained Word2Vec embeddings of the tweets. Without using the SVM as a filter, we have 57,678 positive tweets and 105,273 negative tweets. With SVM, we have 32,619 positive tweets and 65,238 negative tweets. The distribution of tweets after data augmentation (DA) was still remained imbalanced, with a positive to negative ratio close to 1:2. The pre-trained ELECTRA-Large was downloaded from HuggingFace (Wolf et al., 2019) . The hyper-parameters used for fine-tuning ELECTRA are as follows: batch size 16; gradient accumulation steps 16; learning rate 1e-5; and number of training epochs 6. Table 1 shows the results on the validation and test sets. The evaluation metric is the F1-score for the positive class (i.e. tweets that mention medications). For the test set, compared with submission 1 that does not use SVM as a filter for data augmentation and submission 3 that adds the prediction result of the RoBERTa transformer (Liu et al., 2019) , our submission 2 achieved the best F1-score of 0.7578. Their relative ranks were identical to those of the validation set. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 539, |
|
"end": 558, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1064, |
|
"end": 1082, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 727, |
|
"end": 734, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3" |
|
}, |
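
{

"text": "For reference, a minimal computation of the positive-class F1-score follows; y_true and final_pred are assumed 0/1 arrays of gold labels and final predictions.\n\nfrom sklearn.metrics import f1_score\n\n# F1-score for the positive class (tweets that mention medications).\nf1 = f1_score(y_true, final_pred, pos_label=1)\nprint('positive-class F1: %.4f' % f1)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation",

"sec_num": "3"

},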
|
{ |
|
"text": "This study describes the NCUEE system participating in the SMM4H 2020 Task 1 for medication mention detection, including system design, implementation and evaluation. Our best F1-score of 0.7578 exceeded the mean score 0.6646 for all 15 teams with at least one submissions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "4" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This study is partially supported by the Ministry of Science and Technology, Taiwan under the grant MOST 108-2218-E-008-017-MY3 and MOST 108-2634-F-008-003-through Pervasive Artificial Intelligence Research (PAIR) Labs, Taiwan.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Discovering cohorts of pregnant women from social media for safety surveillance analysis", |
|
"authors": [ |
|
{ |
|
"first": "Abeed", |
|
"middle": [], |
|
"last": "Sarker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pramod", |
|
"middle": [], |
|
"last": "Chandrashekar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arjun", |
|
"middle": [], |
|
"last": "Magge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haitao", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graciela", |
|
"middle": [], |
|
"last": "Gonzalez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Journal of Medical Internet Research", |
|
"volume": "19", |
|
"issue": "10", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abeed Sarker, Pramod Chandrashekar, Arjun Magge, Haitao Cai, Ari Klein and Graciela Gonzalez. 2017. Discov- ering cohorts of pregnant women from social media for safety surveillance analysis. Journal of Medical Internet Research, 19(10):e361.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Overview of the fifth Social Media Mining for Health Applications (#SMM4H) Shared Tasks at COLING 2020", |
|
"authors": [ |
|
{ |
|
"first": "Ari", |
|
"middle": [ |
|
"Z" |
|
], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilseyar", |
|
"middle": [], |
|
"last": "Alimova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Flores", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arjun", |
|
"middle": [], |
|
"last": "Magge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zulfat", |
|
"middle": [], |
|
"last": "Miftahutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anne-Lyse", |
|
"middle": [], |
|
"last": "Minard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karen", |
|
"middle": [ |
|
"O" |
|
], |
|
"last": "Connor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abeed", |
|
"middle": [], |
|
"last": "Sarker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Tutubalina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Davy", |
|
"middle": [], |
|
"last": "Weissenbacher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graciela", |
|
"middle": [], |
|
"last": "Gonzalez-Hernandez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Fifth Social Media Mining for Health Applications (#SMM4H) Workshop & Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ari Z. Klein, Ilseyar Alimova, Ivan Flores, Arjun Magge, Zulfat Miftahutdinov, Anne-Lyse Minard, Karen O'Con- nor, Abeed Sarker, Elena Tutubalina, Davy Weissenbacher, and Graciela Gonzalez-Hernandez. 2020. Overview of the fifth Social Media Mining for Health Applications (#SMM4H) Shared Tasks at COLING 2020. In Pro- ceedings of the Fifth Social Media Mining for Health Applications (#SMM4H) Workshop & Shared Task.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Drug-use identification from tweets with word and character n-grams", |
|
"authors": [ |
|
{ |
|
"first": "Cagri", |
|
"middle": [], |
|
"last": "Coltekin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Taraka", |
|
"middle": [], |
|
"last": "Rama", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 3 rd Social Media Mining for Health Applications (SMM4H) Workshop and Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "52--53", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cagri Coltekin and Taraka Rama. 2018. Drug-use identification from tweets with word and character n-grams. In Proceedings of the 3 rd Social Media Mining for Health Applications (SMM4H) Workshop and Shared Task, pages 52-53, Brussels, Belgium.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Detecting tweets mentioning drug name and adverse drug reaction with hierarchical tween representation and multi-head self-attention", |
|
"authors": [ |
|
{ |
|
"first": "Chuhan", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fangzhao", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junxin", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sixing", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yongfeng", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xing", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 3 rd Social Media Mining for Health Applications (SMM4H) Workshop and Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "34--37", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chuhan Wu, Fangzhao Wu, Junxin Liu, Sixing Wu, Yongfeng Huang and Xing Xie. 2018. Detecting tweets men- tioning drug name and adverse drug reaction with hierarchical tween representation and multi-head self-atten- tion. In Proceedings of the 3 rd Social Media Mining for Health Applications (SMM4H) Workshop and Shared Task, pages 34-37, Brussels, Belgium.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Deep neural networks ensemble for detecting medication mentions in tweets", |
|
"authors": [ |
|
{ |
|
"first": "Davy", |
|
"middle": [], |
|
"last": "Weissenbacher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abeed", |
|
"middle": [], |
|
"last": "Sarker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{

"first": "Karen",

"middle": [],

"last": "O'Connor",

"suffix": ""

},

{

"first": "Arjun",

"middle": [],

"last": "Magge",

"suffix": ""

},

{

"first": "Graciela",

"middle": [],

"last": "Gonzalez-Hernandez",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "Journal of the American Medical Informatics Association", |
|
"volume": "26", |
|
"issue": "12", |
|
"pages": "1618--1626", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Davy Weissenbacher, Abeed Sarker, Ari Klein, Karen O'Connorm Arjun Magge and Graciela Gonzalez-Hernan- dez. 2019. Deep neural networks ensemble for detecting medication mentions in tweets. Journal of the Ameri- can Medical Informatics Association, 26(12):1618-1626.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Overview of the third social media mining for health (SMM4H) shared tasks at EMNLP", |
|
"authors": [ |
|
{ |
|
"first": "Davy", |
|
"middle": [], |
|
"last": "Weissenbacher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abeed", |
|
"middle": [], |
|
"last": "Sarker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Paul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graciela", |
|
"middle": [], |
|
"last": "Gonzalez-Hernandez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 3 rd Social Media Mining for Health Applications (SMM4H) Workshop and Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "13--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Davy Weissenbacher, Abeed Sarker, Michael Paul and Graciela Gonzalez-Hernandez. 2018. Overview of the third social media mining for health (SMM4H) shared tasks at EMNLP 2018. In Proceedings of the 3 rd Social Media Mining for Health Applications (SMM4H) Workshop and Shared Task, pages 13-16, Brussels, Belgium.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "ELECTRA: pre-training text encoders as discriminators rather than generators", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 8th International Conference on Learning Representations (ICLR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--18", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Clark, Minh-Thang Luong, Quoc V. Le and Christopher D. Manning. 2020. ELECTRA: pre-training text encoders as discriminators rather than generators. In Proceedings of the 8th International Conference on Learn- ing Representations (ICLR) , pages 1-18.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Classification of medication-related tweets using stacked bidirectional LSTMs with contextaware attention", |
|
"authors": [ |
|
{ |
|
"first": "Orest", |
|
"middle": [], |
|
"last": "Xherija", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 3 rd Social Media Mining for Health Applications (SMM4H) Workshop and Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "38--42", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Orest Xherija. 2018. Classification of medication-related tweets using stacked bidirectional LSTMs with context- aware attention. In Proceedings of the 3 rd Social Media Mining for Health Applications (SMM4H) Workshop and Shared Task, pages 38-42, Brussels, Belgium.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Mariama Drame, Quentin Lhoest, and Alexander M. Rush. 2019. HuggingFace's transformers: state-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Remi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joe", |
|
"middle": [], |
|
"last": "Davison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Shleifer", |
|
"suffix": "" |
|
}, |
|
{

"first": "Patrick",

"middle": [],

"last": "von Platen",

"suffix": ""

},

{

"first": "Clara",

"middle": [],

"last": "Ma",

"suffix": ""

},

{

"first": "Yacine",

"middle": [],

"last": "Jernite",

"suffix": ""

},

{

"first": "Julien",

"middle": [],

"last": "Plu",

"suffix": ""

},

{

"first": "Canwen",

"middle": [],

"last": "Xu",

"suffix": ""

},

{

"first": "Teven",

"middle": [

"Le"

],

"last": "Scao",

"suffix": ""

},

{

"first": "Sylvain",

"middle": [],

"last": "Gugger",

"suffix": ""

},

{

"first": "Mariama",

"middle": [],

"last": "Drame",

"suffix": ""

},

{

"first": "Quentin",

"middle": [],

"last": "Lhoest",

"suffix": ""

},

{

"first": "Alexander",

"middle": [

"M"

],

"last": "Rush",

"suffix": ""

}
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.03771" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alex- ander M. Rush. 2019. HuggingFace's transformers: state-of-the-art natural language processing, arXiv:1910.03771.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "RoBERTa: a robustly optimized BERT pretraining approach. Computing Research Repository", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer and Veselin Stoyanov. 2019. RoBERTa: a robustly optimized BERT pretraining approach. Compu- ting Research Repository, arXiv:1907.11692. version 2.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": {} |
|
} |
|
} |