|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:35:16.785540Z" |
|
}, |
|
"title": "Classification, Extraction, and Normalization : CASIA_Unisound Team at the Social Media Mining for Health 2021 Shared Tasks", |
|
"authors": [ |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "National Laboratory of Pattern Recognition", |
|
"institution": "Chinese Academy of Sciences", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Zhucong", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "National Laboratory of Pattern Recognition", |
|
"institution": "Chinese Academy of Sciences", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Zhen", |
|
"middle": [], |
|
"last": "Gan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "National Laboratory of Pattern Recognition", |
|
"institution": "Chinese Academy of Sciences", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Baoli", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "National Laboratory of Pattern Recognition", |
|
"institution": "Chinese Academy of Sciences", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yubo", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "National Laboratory of Pattern Recognition", |
|
"institution": "Chinese Academy of Sciences", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Kun", |
|
"middle": [], |
|
"last": "Niu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Beijing University of Posts and Telecommunications", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Beijing University of Chemical Technology", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Kang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "National Laboratory of Pattern Recognition", |
|
"institution": "Chinese Academy of Sciences", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "National Laboratory of Pattern Recognition", |
|
"institution": "Chinese Academy of Sciences", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yafei", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Beijing Unisound Information Technology Co", |
|
"location": { |
|
"settlement": "Ltd" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Weifeng", |
|
"middle": [], |
|
"last": "Chong", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Beijing Unisound Information Technology Co", |
|
"location": { |
|
"settlement": "Ltd" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Shengping", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Beijing Unisound Information Technology Co", |
|
"location": { |
|
"settlement": "Ltd" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This is the system description of the CA-SIA_Unisound team for Task 1, Task 7b, and Task 8 of the sixth Social Media Mining for Health Applications (SMM4H) shared task in 2021. To address two shared challenges among those tasks, the colloquial text and the imbalance annotation, we apply customized pre-trained language models and propose various training strategies. Experimental results show the effectiveness of our system. Moreover, we got an F1-score of 0.87 in task 8, which is the highest among all participates.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This is the system description of the CA-SIA_Unisound team for Task 1, Task 7b, and Task 8 of the sixth Social Media Mining for Health Applications (SMM4H) shared task in 2021. To address two shared challenges among those tasks, the colloquial text and the imbalance annotation, we apply customized pre-trained language models and propose various training strategies. Experimental results show the effectiveness of our system. Moreover, we got an F1-score of 0.87 in task 8, which is the highest among all participates.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Enormous data in social media has drawn much attention in medical applications. With the rapid development of health language processing, effective systems in mining health information from social media were built to assist pharmacy, diagnosis, nursing, and so on (Paul et al., 2016) (Yang et al., 2012) (Zhou et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 264, |
|
"end": 283, |
|
"text": "(Paul et al., 2016)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 284, |
|
"end": 303, |
|
"text": "(Yang et al., 2012)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 304, |
|
"end": 323, |
|
"text": "(Zhou et al., 2018)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The health language processing lab at the University of Pennsylvania organized the Social Media Mining for Health Applications (SMM4H) shared task 2021 (mag), which provided an opportunity for fair competition among state-of-the-art health information mining systems customized in the social media domain. We participated in task 1, subtask b of task 7, and task 8.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Task 1 consists of three subtasks in a cascade manner: (1) identifying whether a tweet mentions adverse drug effect; (2) mark the exact position that mentions ADE in the tweet; (3) normalization ADE mentions to standard terms. Subtask b of task 7 (Miranda-Escalada et al., 2021) is designed to identify professions and occupations (ProfNER) in Spanish tweets during the COVID-19 outbreak. Task 8 is targeting the classification of self-reported breast cancer posts on Twitter.", |
|
"cite_spans": [ |
|
{ |
|
"start": 247, |
|
"end": 278, |
|
"text": "(Miranda-Escalada et al., 2021)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 331, |
|
"end": 340, |
|
"text": "(ProfNER)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The ubiquitous two challenges of all the SMM4H shared tasks are (1) how to properly model the colloquial text in tweets; (2) avoid prediction bias caused by learning from unbalanced annotated data. The tweet's text, mixing with informal spelling, various emojis, usernames mentioned, and hyperlinks, will hinder the real semantic comprehension by a common pre-trained language model. Meanwhile, medical concepts are imbalanced in the real world due to the imbalanced morbidity of various diseases, and this phenomenon is also reflected in social media data. Training with imbalanced data will induce the model to pay much attention to the major classes and neglect the tail classes, which hinders the model's robustness and generalization.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To address the challenges above, we utilize a language model pre-trained on tweet data as the backbone and introduce multiple data construction methods in the training process. In the following, we will describe our methods and corresponding experiments for each task separately. At last, we summary this competition and discuss future directions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Adverse drug effect (ADE) is among the leading cause of morbidity and mortality. The collection of those adverse effects is crucial in prescribing and new drug research.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task 1: English ADE Tweets Mining", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "MedDRA Term vyvanse completely gets rid of my appetite. not quite sure how to feel about this. Table 1 : An example of tweets labeled ADE in Task 1. The ADE span is colored red, and the corresponding MedDRA term id is 10003028. This task's objective is to find the tweet containing ADE, locate the span, and finally map the span to concepts in standard terms.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 102, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Tweet", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The goal of this subtask is to distinguish whether a tweet mentions adverse drug effects. As shown in Table 1 , \"rid of my appetite\" is an ADE mention, so this tweet is labeled on \"ADE\". In this dataset, the training set consists of 17385 tweets (16150 NoADE and 1235 ADE tweets), the validation set consists of 914 labeled tweets (849 NoADE and 65 ADE tweets), and the test set consists of 10984 tweets. Since only about 7% of the tweets contain ADEs, we target this class imbalance issue with a customized pseudo data construction strategy.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 109, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Classification", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Pseudo Data: A human may differentiate ADE tweets by some complaints trigger words like verb \"feel\" \"think\" or some negative sentiment words like \"gets rid of\", but a more precise way is discerning ADE mention. The mention in the tweet indicating ADE is a colloquial MedDRA term, and they express the same semantic. We construct ADE tweet for training in two ways: (1) randomly inserting the text description of a standard term in a tweet; (2) regarding the text description of a standard term as an ADE tweet. With those pseudo training data, a model should pay more attention to ADE mention in a tweet and more robust to diversified and unseen context.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "2.1.1" |
|
}, |
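{

"text": "To make the construction concrete, the following is a minimal sketch (our illustration, not the authors' released code) of the two pseudo-data strategies, assuming tweets is a list of training tweet strings and term_descriptions is a list of MedDRA term description strings:

import random

def build_pseudo_ade_tweets(tweets, term_descriptions, n_samples):
    # Construct pseudo ADE tweets by (1) inserting a term description
    # into a real tweet or (2) using the description itself as a tweet.
    pseudo = []
    for _ in range(n_samples):
        term = random.choice(term_descriptions)
        if random.random() < 0.5:
            words = random.choice(tweets).split()
            pos = random.randrange(len(words) + 1)
            text = ' '.join(words[:pos] + [term] + words[pos:])
        else:
            text = term
        pseudo.append({'text': text, 'label': 'ADE'})
    return pseudo",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Method",

"sec_num": "2.1.1"

},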
|
{ |
|
"text": "Model: We apply the BERTweet (Nguyen et al., 2020), a RoBERTa (Liu et al., 2019) language model pre-trained on Twitter data, to encode tweet text and make a binary prediction according to the corresponding pooling vector.", |
|
"cite_spans": [ |
|
{ |
|
"start": 62, |
|
"end": 80, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "2.1.1" |
|
}, |
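{

"text": "A minimal sketch of this architecture with the Hugging Face transformers interface (the checkpoint name and head are our assumptions, not necessarily the authors' exact configuration):

import torch
from transformers import AutoModel, AutoTokenizer

class TweetClassifier(torch.nn.Module):
    # BERTweet encoder with a binary head over the pooled vector.
    def __init__(self, name='vinai/bertweet-base'):
        super().__init__()
        self.encoder = AutoModel.from_pretrained(name)
        self.head = torch.nn.Linear(self.encoder.config.hidden_size, 2)

    def forward(self, input_ids, attention_mask):
        out = self.encoder(input_ids=input_ids, attention_mask=attention_mask)
        return self.head(out.pooler_output)  # logits over {NoADE, ADE}

tokenizer = AutoTokenizer.from_pretrained('vinai/bertweet-base')
model = TweetClassifier()
batch = tokenizer(['vyvanse completely gets rid of my appetite'], return_tensors='pt')
logits = model(batch['input_ids'], batch['attention_mask'])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Method",

"sec_num": "2.1.1"

},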
|
{ |
|
"text": "We set the batch size to 32 and using AdamW (Loshchilov and Hutter, 2018) optimizer for optimizing. For BERTweet parameters, we set a learning rate of 3e-5, the weight of L2 normalization is 0.01; for other parameters, we set the learning rate to 3e-4, the weight of L2 normalization is 0. We finetune all models using 5-fold cross-validation on the training set for 50 epochs. The amount of pseudo data is equal to 85.80% of the origin training data to balance the two classes. The experimental results are shown in Table 2 , and indicate the advantage of our data construction strategies.", |
|
"cite_spans": [ |
|
{ |
|
"start": 44, |
|
"end": 73, |
|
"text": "(Loshchilov and Hutter, 2018)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 517, |
|
"end": 524, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "2.1.2" |
|
}, |
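{

"text": "The two learning rates and L2 regularization weights can be realized with AdamW parameter groups; a sketch, assuming the encoder/head attribute naming of the classifier sketch above:

from torch.optim import AdamW

def build_optimizer(model):
    # Two parameter groups: BERTweet weights vs. everything else.
    enc = [p for n, p in model.named_parameters() if n.startswith('encoder')]
    rest = [p for n, p in model.named_parameters() if not n.startswith('encoder')]
    return AdamW([
        {'params': enc, 'lr': 3e-5, 'weight_decay': 0.01},  # BERTweet parameters
        {'params': rest, 'lr': 3e-4, 'weight_decay': 0.0},  # other parameters
    ])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experiments",

"sec_num": "2.1.2"

},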
|
{ |
|
"text": "This subtask aims to extract ADE entities from English Twitter texts containing ADE. The dataset includes training set, validation set, and test set containing 17385, 915, and 10984 tweets respectively. The proportion of tweets involving ADE mentions in the training set and the validation set is about 7.1%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extraction", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Preprocessing: To reflect real semantic properly, we preprocess tweets in customized manners. 1Since most user names are outside the vocabulary, We change all user names behind @ to \"user\". (2) There are some escape characters in the Twitter text, such as \""\", \"&\", \"<\", \">\", and we replace them with the original characters: \"\"\", \"&\", \"<\", \">\" respectively. Training: During the training stage, We use a five-fold cross-training fusion system, which include 7 different pre-training models. We ensemble them through average weighted voting to weaken the fluctuations of performance of single model. Model: We use seven pre-training models: bertweet-base, bertweet-covid19-base-cased, bertweet-covid19-base-uncased, bert-base-cased, bert-base-uncased, bert-large-cased, and bert-largeuncased.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "2.2.1" |
|
}, |
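{

"text": "A sketch of these two preprocessing steps (our illustration; the team's exact rules may differ):

import html
import re

def preprocess_tweet(text):
    # (1) replace every user name behind @ with the generic token 'user'
    text = re.sub(r'@\w+', '@user', text)
    # (2) unescape HTML entities such as &quot; &amp; &lt; &gt;
    return html.unescape(text)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Method",

"sec_num": "2.2.1"

},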
|
{ |
|
"text": "The models we choose and their learning rates are shown in Table 3 . Each model has two learning rates, the former is the learning rate of BERT, and the latter is the learning rate of BiLSTM (Ma and Hovy, 2016)+CRF(Lafferty et al., 2001 ). Each BERT model is finetuned for 50 epochs with the dropout (Srivastava et al., 2014) We set the batch size of bert-large-cased and bertlarge-uncased to 8, and the others are 64. The experimental results are shown in Table 4 . The Recall of our result is close to two percentage points higher than the average, but our Precision is about 11 percentage points lower than the average. Therefore, our model recalls more correct entities, but it also recalls a lot of wrong entities. So this may be a direction in which our method can be optimized.", |
|
"cite_spans": [ |
|
{

"start": 191,

"end": 210,

"text": "(Ma and Hovy, 2016)",

"ref_id": "BIBREF7"

},

{

"start": 217,

"end": 240,

"text": "(Lafferty et al., 2001)",

"ref_id": "BIBREF4"

},
|
{ |
|
"start": 300, |
|
"end": 325, |
|
"text": "(Srivastava et al., 2014)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 66, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 457, |
|
"end": 464, |
|
"text": "Table 4", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "2.2.2" |
|
}, |
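{

"text": "The average weighted voting over the per-fold, per-model predictions can be sketched at the token level as follows (uniform weights and the tie-breaking rule are our assumptions):

from collections import Counter

def vote_tags(per_model_tags, weights=None):
    # per_model_tags: one BIO tag sequence per (model, fold) prediction,
    # all aligned to the same tokens; returns the fused tag sequence.
    weights = weights or [1.0] * len(per_model_tags)
    fused = []
    for position in zip(*per_model_tags):
        scores = Counter()
        for tag, w in zip(position, weights):
            scores[tag] += w
        fused.append(scores.most_common(1)[0][0])
    return fused",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experiments",

"sec_num": "2.2.2"

},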
|
{ |
|
"text": "MedDRA (Brown et al., 1999) is a rich and highly specific standardized medical terminology to facilitate sharing regulatory information internationally for medical products used by humans. This subtask aims to normalize ADE mention to standard Med-DRA term based on the result of span detection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Normalization", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Our model's inference process consists of a classification phase and a compare phase, responsible for recall and rank, respectively. We train the above two phrases with shared parameters and optimizing with the combined supervising signal.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "Recall: In view of the representation process of ADE's mention could be benefited from its context, we utilize BERTweet for complete tweet representation. Since we have a specific position of mention in a tweet from subtask b, we first truncate mention's representations and calculate out the mean vector as the mention representation. Next, we calculate the dot product between mention representation and term embedding. Each vector in the term embedding is initialized according to its corresponding mean BERTweet representation of standard term text description. Finally, a softmax operation is added to convert the dot product value to conditional probabilities. A cross-entropy loss function responsible for supervising this process.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "2.3.1" |
|
}, |
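{

"text": "A sketch of the recall phase as described above (tensor names and shapes are our assumptions):

import torch
import torch.nn.functional as F

def recall_loss(token_states, span, term_embedding, gold_term_id):
    # token_states: (seq_len, hidden) BERTweet outputs for one tweet
    # span: (start, end) token indices of the ADE mention from subtask b
    # term_embedding: (n_terms, hidden), each row initialized to the mean
    #   BERTweet representation of a MedDRA term's text description
    # gold_term_id: LongTensor of shape (1,) with the gold term index
    start, end = span
    mention = token_states[start:end].mean(dim=0)  # mean-pooled mention vector
    logits = term_embedding @ mention              # dot product with every term
    # softmax + cross-entropy supervise the recall phase
    return logits, F.cross_entropy(logits.unsqueeze(0), gold_term_id)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Method",

"sec_num": "2.3.1"

},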
|
{ |
|
"text": "Rank: Since the MedDRA term's description is a normalized expression of its corresponding ADE mention, the global semantic of a tweet should remain unchanged after exchanging the colloquial ADE mention and correct term description. On the contrary, the global semantic should have an offset after exchanging with a wrong term. Based on the above assumption, we add an additional supervising signal. A tweet's global representation is obtained from BERTweet's mean pooling vector. The model calculates triplet loss among the following global representations: (a) origin tweet (b) replace the mention with target term's description (c) replace the mention with a wrong term's description. The wrong term is firstly obtained by random selection from the whole term set, and with the procedures of the training process, it is randomly selected from the classification model's top K prediction. The triplet loss intends to maximize the similarity of the global representation of (a) and (b); meanwhile, it minimizes the similarity of (a) and (c).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "2.3.1" |
|
}, |
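{

"text": "A sketch of the triplet objective over the three mean-pooled global representations (the margin value is our assumption):

import torch
import torch.nn.functional as F

def rank_triplet_loss(anchor, positive, negative, margin=1.0):
    # anchor:   global representation of the original tweet (a)
    # positive: tweet with the mention replaced by the gold term description (b)
    # negative: tweet with the mention replaced by a wrong term description (c)
    sim_pos = F.cosine_similarity(anchor, positive, dim=-1)
    sim_neg = F.cosine_similarity(anchor, negative, dim=-1)
    # push sim(a, b) above sim(a, c) by at least the margin
    return torch.clamp(margin - sim_pos + sim_neg, min=0).mean()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Method",

"sec_num": "2.3.1"

},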
|
{ |
|
"text": "Inference: In the inference stage, first, we obtain the top K terms based on the prediction of the recall procedure. Then we exchange the candidate K terms with the mention in the origin tweet and calculate the similarity of global representation with the origin tweet. The similarity score is the base of term ranking. Finally, we retain the top 1 as the final prediction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "2.3.1" |
|
}, |
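{

"text": "The two-stage inference can then be sketched as follows (the similarity callback stands in for the global-representation comparison above):

import torch

def predict_term(recall_logits, global_sim, k=10):
    # recall_logits: (n_terms,) scores from the recall phase
    # global_sim(term_id) -> similarity between the original tweet and the
    #   tweet with its mention replaced by that term's description
    candidates = torch.topk(recall_logits, k).indices.tolist()
    return max(candidates, key=global_sim)  # retain the top 1 after reranking",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Method",

"sec_num": "2.3.1"

},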
|
{ |
|
"text": "Our hyperparameter setting is identical to subtask a. Besides, we set K to 10, and for the combination of cross-entropy loss and triplet loss, we set equal weights. The experimental results are shown in Table 5 , and indicate the advantage of the comparebased rank procedure.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 203, |
|
"end": 210, |
|
"text": "Table 5", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "2.3.2" |
|
}, |
|
{ |
|
"text": "3 Task 7: ProfNER for Spanish Tweets", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "2.3.2" |
|
}, |
|
{ |
|
"text": "This subtask aims to detect the spans of professions and occupations entities in each Spanish tweet. The corpus contains four categories, but participants will only be evaluated to predict two of them: PROFESSION [profession] and SITUA-CION_LABORAL [working status]. The dataset includes a training set, validation set, and test set containing 6000, 2000, and 27000 tweets, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extraction", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Preprocessing: According to the characteristics of the competition's Spanish Twitter data and the competition requirements, we preprocess data to improve the model's ability to capture text information. (1) Since most user names are outside the vocabulary, We change all user names behind @ to \"usuario\". (2) The corpus contains four kinds of labels, but we will only be evaluated in the prediction of 2 of them: PROFESSION and SITUA-CION_LABORAL, so we removed the other two labels ACTIVIDAD and FIGURATIVA.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "3.1.1" |
|
}, |
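{

"text": "A sketch of both preprocessing steps (category spellings follow the task description above; the corpus tag strings may differ):

import re

KEEP = {'PROFESSION', 'SITUACION_LABORAL'}  # evaluated categories

def preprocess_spanish(text, bio_tags):
    # (1) replace every user name behind @ with the generic token 'usuario'
    text = re.sub(r'@\w+', '@usuario', text)
    # (2) map ACTIVIDAD and FIGURATIVA entity tags to the outside tag 'O'
    bio_tags = [t if t == 'O' or t.split('-', 1)[-1] in KEEP else 'O'
                for t in bio_tags]
    return text, bio_tags",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Method",

"sec_num": "3.1.1"

},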
|
{ |
|
"text": "Training: Similar to subtask b of task 1, we make predictions on the multiple trained models and perform a simple voting scheme to get the final result.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "Model: We use three BERT-based (Devlin et al., 2018) pre-training models: bert-base-spanishwwm-cased, bert-spanish-cased-finetuned-ner, and bert-spanish-cased-finetuned-pos.", |
|
"cite_spans": [ |
|
{ |
|
"start": 31, |
|
"end": 52, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "3.1.1" |
|
}, |
|
{ |
|
"text": "For this subtask, each BERT model is finetuned for 50 epochs with the learning rate of 5e-5 using AdamW optimizer, and for the BiLSTM+CRF module, our learning rate is 5e-3, and the batch size is 64. The experimental results are shown in Table 6 . The Model_ensemble0(noLSTM) is the result of the fusion of fifteen models without the BiLSTM modules, and The Model_ensemble1(LSTM) is the result of the fusion of fifteen models with the BiL-STM modules. The Ours is the final result, which is the voting fusion result of 30 models. From the experimental results, we can see that the F1 score of the fusion record on the validation set is superior, but the test set score has dropped. According to our https://huggingface.co/dccuchile/ bert-base-spanish-wwm-cased https://huggingface.co/mrm8488 analysis, this is probably related to a large amount of test data.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 237, |
|
"end": 244, |
|
"text": "Table 6", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "3.1.2" |
|
}, |
|
{ |
|
"text": "The adverse patient-centered outcomes (PCOs) caused by hormone therapy would lead to breast cancer patients discontinuing their long-term treatments (Fayanju et al., 2016) . The research on PCOs is beneficial to reducing the risk of cancer recurrence. However, PCOs are not detectable through laboratory tests and are sparsely documented in electronic health records. Social media is a promising resource, and we can extract PCOs from the tweet with breast cancer self-reporting (Freedman et al., 2016) . First and foremost, the PCO extraction system requires the accurate detection of selfreported breast cancer patients. This task's objective is to identify tweets in the self-reports category. In this dataset, the training set consists of 3513 tweets (898 self-report and 2615 non-relevant tweets), the validation set consists of 302 tweets (77 self-report and 225 non-relevant tweets), and the test set consists of 1204 tweets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 149, |
|
"end": 171, |
|
"text": "(Fayanju et al., 2016)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 479, |
|
"end": 502, |
|
"text": "(Freedman et al., 2016)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task 8: Self-reported Patient Detection", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Preprocessing: We preprocess the data to fit the tokenizer of the pre-trained RoBERTa model BERTweet, which is customized in tweet data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "(1) The BERTweet's tokenizer transform the URL string in tweet to a unified special token by matching \"http\" or \"www\". For the tokenizer to effectively identify the URL, we insert \"http://\" before \"pic.twitter.com\" in tweets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "(2) The emoji in tweets is expressed as UTF-8 bytes code in string form. We match the \"\\x\" and transform the code into its corresponding emoji. Training: Although the generalization ability of the pre-trained language model finetuned in text classification tasks has been proved, it could still seize the wrong correction between specific tokens and the target label, turn out to neglect the crucial semantic. As shown at the top of Table 7 , \"I had breast cancer\" is convincing evidence to a positive prediction. A model can make the right decision on the example at the bottom of Table 7 only if it takes the context into consideration. To avoid this wrong correction and improve our model's robustness, we apply two strategies on the training stage exert in data level and model level, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 433, |
|
"end": 440, |
|
"text": "Table 7", |
|
"ref_id": "TABREF9" |
|
}, |
|
{ |
|
"start": 582, |
|
"end": 589, |
|
"text": "Table 7", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "(1) Noise: Each word in a tweet has a probability p to be replaced by a random word, and the target label has a probability p to reverse.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "4.1" |
|
}, |
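{

"text": "A sketch of the noise strategy, assuming a binary 0/1 label and a vocabulary list to sample replacement words from:

import random

def add_noise(words, label, vocab, p=0.025):
    # each word is replaced by a random vocabulary word with probability p
    noisy = [random.choice(vocab) if random.random() < p else w for w in words]
    # the target label is flipped with probability p
    if random.random() < p:
        label = 1 - label
    return noisy, label",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Method",

"sec_num": "4.1"

},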
|
{ |
|
"text": "(2) FGM: Following the fast gradient method (Miyato et al., 2016) , we move the input one step further in the direction of rising loss, which will make the model loss rise in the fastest direction, thus forming an attack. In response, the model needs to find more robust parameters in the optimization process to deal with attacks against samples.", |
|
"cite_spans": [ |
|
{ |
|
"start": 44, |
|
"end": 65, |
|
"text": "(Miyato et al., 2016)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "4.1" |
|
}, |
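{

"text": "A common realization of FGM perturbs the word embedding matrix; a minimal sketch (attribute names are assumptions; epsilon follows the setting reported below):

import torch

class FGM:
    # Fast gradient method: perturb the embeddings one step in the direction
    # of rising loss, backprop the adversarial loss, then restore the weights.
    def __init__(self, model, epsilon=0.5):
        self.model, self.epsilon, self.backup = model, epsilon, {}

    def attack(self, emb_name='word_embeddings'):
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name and param.grad is not None:
                self.backup[name] = param.data.clone()
                norm = torch.norm(param.grad)
                if norm != 0:
                    param.data.add_(self.epsilon * param.grad / norm)

    def restore(self, emb_name='word_embeddings'):
        for name, param in self.model.named_parameters():
            if emb_name in name and name in self.backup:
                param.data = self.backup[name]
        self.backup = {}

# per batch: loss.backward(); fgm.attack(); adv_loss.backward(); fgm.restore();
# optimizer.step(); optimizer.zero_grad()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Method",

"sec_num": "4.1"

},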
|
{ |
|
"text": "Model: Similar to subtask a in Task 1, we apply the BERTweet to encode tweet text and make a binary prediction according to the corresponding pooling vector.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We set the batch size to 32 and using AdamW optimizer for optimizing. For BERTweet parameters, we set a learning rate of 3e-5, the weight of L2 normalization is 0.01; for other parameters, we set the learning rate to 3e-4, the weight of L2 normalization is 0. We set the noise rate to 0.025 and the epsilon of FGM to 0.5. We finetune all models using 5-fold cross-validation on the training set for 50 epochs. The experimental results are shown in Table 8 . Our method has obtained the highest F1 score in this task. Furthermore, the ablation results indicate the advantage of the customized data preprocessing procedure and the robust training strategies.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 448, |
|
"end": 455, |
|
"text": "Table 8", |
|
"ref_id": "TABREF11" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "This work explores various customized methods in tasks of classification, extraction, and normalization of health information from social media. We have empirically evaluated different variants of our system and demonstrated the effectiveness of the proposed methods. As future work, we intend to introduce the medical domain's knowledge graph to improve our system further.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "5" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work is supported by the National Key RD Program of China (2020AAA0106400), the National Natural Science Foundation of China ( No.61806201) and the Key Research Program of the Chinese Academy of Sciences (Grant NO. ZDBS-SSW-JSC006).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "The medical dictionary for regulatory activities (meddra)", |
|
"authors": [ |
|
{

"first": "Elliot",

"middle": [

"G"

],

"last": "Brown",

"suffix": ""

},

{

"first": "Louise",

"middle": [],

"last": "Wood",

"suffix": ""

},

{

"first": "Sue",

"middle": [],

"last": "Wood",

"suffix": ""

}
|
], |
|
"year": 1999, |
|
"venue": "Drug safety", |
|
"volume": "20", |
|
"issue": "2", |
|
"pages": "109--117", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elliot G Brown, Louise Wood, and Sue Wood. 1999. The medical dictionary for regulatory activities (meddra). Drug safety, 20(2):109-117.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Valuebased breast cancer care: a multidisciplinary approach for defining patient-centered outcomes. Annals of surgical oncology", |
|
"authors": [ |
|
{

"first": "Oluwadamilola",

"middle": [

"M"

],

"last": "Fayanju",

"suffix": ""

},

{

"first": "Tinisha",

"middle": [

"L"

],

"last": "Mayo",

"suffix": ""

},

{

"first": "Tracy",

"middle": [

"E"

],

"last": "Spinks",

"suffix": ""

},

{

"first": "Seohyun",

"middle": [],

"last": "Lee",

"suffix": ""

},

{

"first": "Carlos",

"middle": [

"H"

],

"last": "Barcenas",

"suffix": ""

},

{

"first": "Benjamin",

"middle": [

"D"

],

"last": "Smith",

"suffix": ""

},

{

"first": "Sharon",

"middle": [

"H"

],

"last": "Giordano",

"suffix": ""

},

{

"first": "Rosa",

"middle": [

"F"

],

"last": "Hwang",

"suffix": ""

},

{

"first": "Richard",

"middle": [

"A"

],

"last": "Ehlers",

"suffix": ""

},

{

"first": "Jesse",

"middle": [

"C"

],

"last": "Selber",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "23", |
|
"issue": "", |
|
"pages": "2385--2390", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oluwadamilola M Fayanju, Tinisha L Mayo, Tracy E Spinks, Seohyun Lee, Carlos H Barcenas, Ben- jamin D Smith, Sharon H Giordano, Rosa F Hwang, Richard A Ehlers, Jesse C Selber, et al. 2016. Value- based breast cancer care: a multidisciplinary ap- proach for defining patient-centered outcomes. An- nals of surgical oncology, 23(8):2385-2390.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Learning from social media: utilizing advanced data extraction techniques to understand barriers to breast cancer treatment", |
|
"authors": [ |
|
{

"first": "Rachel",

"middle": [

"A"

],

"last": "Freedman",

"suffix": ""

},

{

"first": "Kasisomayajula",

"middle": [],

"last": "Viswanath",

"suffix": ""

},

{

"first": "Ines",

"middle": [],

"last": "Vaz-Luis",

"suffix": ""

},

{

"first": "Nancy",

"middle": [

"L"

],

"last": "Keating",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "Breast cancer research and treatment", |
|
"volume": "158", |
|
"issue": "2", |
|
"pages": "395--405", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rachel A Freedman, Kasisomayajula Viswanath, Ines Vaz-Luis, and Nancy L Keating. 2016. Learning from social media: utilizing advanced data extrac- tion techniques to understand barriers to breast can- cer treatment. Breast cancer research and treatment, 158(2):395-405.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Conditional random fields: Probabilistic models for segmenting and labeling sequence data", |
|
"authors": [ |
|
{

"first": "John",

"middle": [

"D"

],

"last": "Lafferty",

"suffix": ""

},

{

"first": "Andrew",

"middle": [],

"last": "McCallum",

"suffix": ""

},

{

"first": "Fernando",

"middle": [

"CN"

],

"last": "Pereira",

"suffix": ""

}
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the Eighteenth International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "282--289", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John D Lafferty, Andrew McCallum, and Fernando CN Pereira. 2001. Conditional random fields: Prob- abilistic models for segmenting and labeling se- quence data. In Proceedings of the Eighteenth In- ternational Conference on Machine Learning, pages 282-289.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Roberta: A robustly optimized bert pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Fixing weight decay regularization in adam", |
|
"authors": [ |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Loshchilov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Hutter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilya Loshchilov and Frank Hutter. 2018. Fixing weight decay regularization in adam.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "End-to-end sequence labeling via bi-directional lstm-cnns-crf", |
|
"authors": [ |
|
{ |
|
"first": "Xuezhe", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1064--1074", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xuezhe Ma and Eduard Hovy. 2016. End-to-end se- quence labeling via bi-directional lstm-cnns-crf. In Proceedings of the 54th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 1064-1074.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "The profner shared task on automatic recognition of occupation mentions in social media: systems, evaluation, guidelines, embeddings and corpora", |
|
"authors": [ |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Miranda-Escalada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eul\u00e0lia", |
|
"middle": [], |
|
"last": "Farr\u00e9-Maduell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salvador", |
|
"middle": [ |
|
"Lima" |
|
], |
|
"last": "L\u00f3pez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luis", |
|
"middle": [], |
|
"last": "Gasc\u00f3-S\u00e1nchez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vicent", |
|
"middle": [], |
|
"last": "Briva-Iglesias", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marvin", |
|
"middle": [], |
|
"last": "Ag\u00fcero-Torales", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Krallinger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the Sixth Social Media Mining for Health Applications Workshop & Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antonio Miranda-Escalada, Eul\u00e0lia Farr\u00e9-Maduell, Sal- vador Lima L\u00f3pez, Luis Gasc\u00f3-S\u00e1nchez, Vicent Briva-Iglesias, Marvin Ag\u00fcero-Torales, and Martin Krallinger. 2021. The profner shared task on auto- matic recognition of occupation mentions in social media: systems, evaluation, guidelines, embeddings and corpora. In Proceedings of the Sixth Social Media Mining for Health Applications Workshop & Shared Task.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Adversarial training methods for semi-supervised text classification", |
|
"authors": [ |
|
{ |
|
"first": "Takeru", |
|
"middle": [], |
|
"last": "Miyato", |
|
"suffix": "" |
|
}, |
|
{

"first": "Andrew",

"middle": [

"M"

],

"last": "Dai",

"suffix": ""

},

{

"first": "Ian",

"middle": [],

"last": "Goodfellow",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1605.07725" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Takeru Miyato, Andrew M Dai, and Ian Good- fellow. 2016. Adversarial training methods for semi-supervised text classification. arXiv preprint arXiv:1605.07725.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Bertweet: A pre-trained language model for english tweets", |
|
"authors": [ |
|
{

"first": "Dat",

"middle": [

"Quoc"

],

"last": "Nguyen",

"suffix": ""

},

{

"first": "Thanh",

"middle": [],

"last": "Vu",

"suffix": ""

},

{

"first": "Anh",

"middle": [

"Tuan"

],

"last": "Nguyen",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2005.10200" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dat Quoc Nguyen, Thanh Vu, and Anh Tuan Nguyen. 2020. Bertweet: A pre-trained language model for english tweets. arXiv preprint arXiv:2005.10200.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Social media mining for public health monitoring and surveillance", |
|
"authors": [ |
|
{

"first": "Michael",

"middle": [

"J"

],

"last": "Paul",

"suffix": ""

},

{

"first": "Abeed",

"middle": [],

"last": "Sarker",

"suffix": ""

},

{

"first": "John",

"middle": [

"S"

],

"last": "Brownstein",

"suffix": ""

},

{

"first": "Azadeh",

"middle": [],

"last": "Nikfarjam",

"suffix": ""

},

{

"first": "Matthew",

"middle": [],

"last": "Scotch",

"suffix": ""

},

{

"first": "Karen",

"middle": [

"L"

],

"last": "Smith",

"suffix": ""

},

{

"first": "Graciela",

"middle": [],

"last": "Gonzalez",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "Biocomputing 2016: Proceedings of the Pacific symposium", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "468--479", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael J Paul, Abeed Sarker, John S Brownstein, Azadeh Nikfarjam, Matthew Scotch, Karen L Smith, and Graciela Gonzalez. 2016. Social media mining for public health monitoring and surveillance. In Biocomputing 2016: Proceedings of the Pacific sym- posium, pages 468-479. World Scientific.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research", |
|
"authors": [ |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Krizhevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "15", |
|
"issue": "", |
|
"pages": "1929--1958", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. 2014. Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research, 15(1):1929-1958.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Social media mining for drug safety signal detection", |
|
"authors": [ |
|
{

"first": "Christopher",

"middle": [

"C"

],

"last": "Yang",

"suffix": ""

},

{

"first": "Haodong",

"middle": [],

"last": "Yang",

"suffix": ""

},

{

"first": "Ling",

"middle": [],

"last": "Jiang",

"suffix": ""

},

{

"first": "Mi",

"middle": [],

"last": "Zhang",

"suffix": ""

}
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 international workshop on Smart health and wellbeing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "33--40", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher C Yang, Haodong Yang, Ling Jiang, and Mi Zhang. 2012. Social media mining for drug safety signal detection. In Proceedings of the 2012 international workshop on Smart health and wellbe- ing, pages 33-40.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Harnessing social media for health information management. Electronic commerce research and applications", |
|
"authors": [ |
|
{ |
|
"first": "Lina", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongsong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{

"first": "Christopher",

"middle": [

"C"

],

"last": "Yang",

"suffix": ""

},

{

"first": "Yu",

"middle": [],

"last": "Wang",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "27", |
|
"issue": "", |
|
"pages": "139--151", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lina Zhou, Dongsong Zhang, Christopher C Yang, and Yu Wang. 2018. Harnessing social media for health information management. Electronic commerce re- search and applications, 27:139-151.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"num": null, |
|
"text": "Results on the SMM4H Task 1a test set.", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"text": "of 0.3 using AdamW(Loshchilov and Hutter, 2018) optimizer.", |
|
"type_str": "table", |
|
"content": "<table><tr><td>Model</td><td>Learning Rate</td></tr><tr><td>bertweet-base+BiLSTM+CRF</td><td>[5e-5, 5e-3]</td></tr><tr><td>bertweet-covid19-base-cased+</td><td/></tr><tr><td>BiLSTM+CRF</td><td>[5e-5, 5e-3]</td></tr><tr><td>bertweet-covid19-base-uncased+</td><td/></tr><tr><td>BiLSTM+CRF</td><td>[5e-5, 5e-3]</td></tr><tr><td>bert-base-cased+BiLSTM+CRF</td><td>[5e-5, 5e-3]</td></tr><tr><td>bert-base-uncased+BiLSTM+CRF</td><td>[4e-5, 4e-3]</td></tr><tr><td>bert-large-cased+CRF</td><td>[1e-5, 1e-3]</td></tr><tr><td>bert-large-uncased+CRF</td><td>[7e-6, 7e-4]</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"text": "Implementation details of our models of the SMM4H Task 1b.", |
|
"type_str": "table", |
|
"content": "<table><tr><td>Model</td><td colspan=\"2\">Precision Recal F1</td></tr><tr><td>Ours</td><td>0.381</td><td>0.475 0.42</td></tr><tr><td colspan=\"2\">Average scores 0.493</td><td>0.458 0.42</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"text": "Results on the SMM4H Task 1b test set.", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null |
|
}, |
|
"TABREF6": { |
|
"num": null, |
|
"text": "Results on the SMM4H Task 1c test set, * denotes the results of our method based on our best prediction in subtask b.", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"text": "Results on the SMM4H Task 7b Validation and test set.", |
|
"type_str": "table", |
|
"content": "<table><tr><td>Tweet</td><td>Label</td></tr><tr><td>Excellent cause! I hope you are doing well. I had</td><td/></tr><tr><td>breast cancer too. I'm into my 3rd year of</td><td>S</td></tr><tr><td>Tamoxifen.</td><td/></tr><tr><td>OH MY GOD i just remembered my dream from</td><td/></tr><tr><td>my nap earlier i understand now why i felt so bad when i woke up i literally dreamt that i had breast</td><td>NR</td></tr><tr><td>cancer</td><td/></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF9": { |
|
"num": null, |
|
"text": "Two examples of tweets and corresponding labels in Task 8.", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null |
|
}, |
|
"TABREF11": { |
|
"num": null, |
|
"text": "Results on the SMM4H Task 8 test set.", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |