|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:21:41.709978Z" |
|
}, |
|
"title": "Target Concept Guided Medical Concept Normalization in Noisy User-Generated Texts", |
|
"authors": [ |
|
{ |
|
"first": "Katikapalli", |
|
"middle": [], |
|
"last": "Subramanyam", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "NIT Trichy", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sivanesan", |
|
"middle": [], |
|
"last": "Sangeetha", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "NIT Trichy", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Medical concept normalization (MCN) i.e., mapping of colloquial medical phrases to standard concepts is an essential step in analysis of medical social media text. The main drawback in existing state-of-the-art approach (Kalyan and Sangeetha, 2020b) is learning target concept vector representations from scratch which requires more training instances. Our model is based on RoBERTa and target concept embeddings. In our model, we integrate a) target concept information in the form of target concept vectors generated by encoding target concept descriptions using SRoBERTa, state-of-the-art RoBERTa based sentence embedding model and b) domain lexicon knowledge by enriching target concept vectors with synonym relationship knowledge using retrofitting algorithm. It is the first attempt in MCN to exploit both target concept information as well as domain lexicon knowledge in the form of retrofitted target concept vectors. Our model outperforms all the existing models with an accuracy improvement up to 1.36% on three standard datasets. Further, our model when trained only on mapping lexicon synonyms achieves up to 4.87% improvement in accuracy.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Medical concept normalization (MCN) i.e., mapping of colloquial medical phrases to standard concepts is an essential step in analysis of medical social media text. The main drawback in existing state-of-the-art approach (Kalyan and Sangeetha, 2020b) is learning target concept vector representations from scratch which requires more training instances. Our model is based on RoBERTa and target concept embeddings. In our model, we integrate a) target concept information in the form of target concept vectors generated by encoding target concept descriptions using SRoBERTa, state-of-the-art RoBERTa based sentence embedding model and b) domain lexicon knowledge by enriching target concept vectors with synonym relationship knowledge using retrofitting algorithm. It is the first attempt in MCN to exploit both target concept information as well as domain lexicon knowledge in the form of retrofitted target concept vectors. Our model outperforms all the existing models with an accuracy improvement up to 1.36% on three standard datasets. Further, our model when trained only on mapping lexicon synonyms achieves up to 4.87% improvement in accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Medical concept normalization (MCN) involves learning a model which can assign medical concept from a standard lexicon for the given health related mention. Table 1 shows few examples of concept mentions and corresponding standard concepts from SNOMED-CT lexicon. Normalizing medical concepts finds application in tasks like questions answering, pharmacovigilance, knowledge graph construction etc. In this work, we deal with medical concept normalization in noisy usergenerated texts like tweets and online discussion forum posts. With the rising popularity of social media platforms, common public are using these platforms to share information. For example, in twitter people share their health experiences and in websites like AskAPatient.com, public post reviews for the drugs they consume. This valuable health information available in social media platforms can be exploited in applications like pharmacovigilance, public health monitoring etc (Kalyan and Sangeetha, 2020c) . In general, most of the common public express their health related concerns in an informal way using colloquial language. For example, 'dizziness' is expressed as 'head spinning a little' and 'diarrhoea' is expressed as 'bathroom with runs' (Limsopatham and Collier, 2016; Lee et al., 2017) . As social media text is highly noisy with irregular grammar and colloquial words, medical concept normalization in social media text is more challenging.", |
|
"cite_spans": [ |
|
{ |
|
"start": 951, |
|
"end": 980, |
|
"text": "(Kalyan and Sangeetha, 2020c)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1224, |
|
"end": 1255, |
|
"text": "(Limsopatham and Collier, 2016;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1256, |
|
"end": 1273, |
|
"text": "Lee et al., 2017)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 164, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Standard Concept lowering of energy lack of energy ( SNOMED ID: 248274002) felt weak asthenia (SNOMED ID: 13791008) very severe pain in arms pain in upper limb (SNOMED ID: 102556003) only wanted to sleep hypersomnia (SNOMED ID: 77692006) Table 1 : Examples of concept mentions and corresponding standard concepts from Systematized Nomenclature of Medicine -Clinical Terms (SNOMED CT) lexicon.", |
|
"cite_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 182, |
|
"text": "(SNOMED ID: 102556003)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 238, |
|
"end": 245, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Concept Mention", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Most of the existing work in medical concept normalization in social media text ignore valuable target concept knowledge (Limsopatham and Collier, 2016; Lee et al., 2017; Han et al., 2017; Belousov et al., 2017) . Recently researchers (Tutubalina et al., 2018; Miftahutdinov and Tutubalina, 2019; Pattisapu et al., 2020; Kalyan and Sangeetha, 2020b) focused on exploiting target concept knowledge in normalizing concepts. The drawbacks in these recent works in integrating target concept knowledge in deep learning based medical concept normalization systems are \u2022 Tutubalina et al. (2018) and Miftahutdinov and Tutubalina (2019) exploit target concept knowledge in the form of cosine similarity between tf-idf based vector representations of concept mentions in social media text and concept descriptions from UMLS. However, tf-idf based cosine similarity features between concept mentions and concept descriptions are not effective as concept mentions are noisy, descriptive and colloquial in nature while concept descriptions are expressed in formal language.", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 152, |
|
"text": "(Limsopatham and Collier, 2016;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 153, |
|
"end": 170, |
|
"text": "Lee et al., 2017;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 171, |
|
"end": 188, |
|
"text": "Han et al., 2017;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 189, |
|
"end": 211, |
|
"text": "Belousov et al., 2017)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 235, |
|
"end": 260, |
|
"text": "(Tutubalina et al., 2018;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 296, |
|
"text": "Miftahutdinov and Tutubalina, 2019;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 297, |
|
"end": 320, |
|
"text": "Pattisapu et al., 2020;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 321, |
|
"end": 349, |
|
"text": "Kalyan and Sangeetha, 2020b)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 563, |
|
"end": 589, |
|
"text": "\u2022 Tutubalina et al. (2018)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 594, |
|
"end": 629, |
|
"text": "Miftahutdinov and Tutubalina (2019)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "\u2022 Pattisapu et al. (2020) choose appropriate target concept based on cosine similarity between concept mention and graph embedding based target concept vectors. Here concept mentions are encoded using RoBERTa and then transformed to target concepts embedding space using two fully connected layers. However, a) the quality of graph embedding based target concept vectors depends on the comprehensiveness of mapping lexicon which limits the application of this approach ( e.g., MedDRA is less comprehensive compared to SNOMED-CT (Bodenreider, 2009) ) b) graph embedding methods used by Pattisapu et al. (2020) generate target concept vectors based on network structure only and completely ignore other information like concept text description and c) when mapping lexicon used is different across datasets, it requires more time and resources to generate target concept vectors using graph embedding methods for each dataset (Kalyan and Sangeetha, 2020b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 2, |
|
"end": 25, |
|
"text": "Pattisapu et al. (2020)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 528, |
|
"end": 547, |
|
"text": "(Bodenreider, 2009)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 585, |
|
"end": 608, |
|
"text": "Pattisapu et al. (2020)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 924, |
|
"end": 953, |
|
"text": "(Kalyan and Sangeetha, 2020b)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "\u2022 Kalyan and Sangeetha (2020b) learn the vector representations of concept mentions and concepts jointly. The authors randomly assign values to target concept vectors and update them at the time of training. However, learning concept vectors from scratch requires more number of training instances. With less number of training instances, this approach results in poor performance which we illustrate in Section 6.1. This is the current state-of-theart approach in medical concept normalization in social media text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "Our proposed model overcomes the drawbacks in existing work in utilizing target concept knowledge and answers the following two research questions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "\u2022 RQ1 -How to effectively integrate target concept knowledge in deep learning based medical concept normalization system?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "\u2022 RQ2 -How to utilize domain lexicon knowledge in medical concept normalization?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "Figure 1: SNOMED-CT Concept and its synonyms. Here, 'Drowsy' is concept description and '271782001' is concept-id.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "As shown in Figure 1 , every concept has concept-id, description and set of synonyms. To address RQ1, we represent each target concept using fixed length dense vector which is generated by encoding target concept description using SRoBERTa. SRoBERTa (Reimers and Gurevych, 2019) is Siamese network based Sentence RoBERTa model trained on NLI+Multi NLI and STS datasets. It is state-ofthe-art sentence embedding model which encodes sequence of words into dense fixed length vectors in a way that sequences which are in close meaning are also close in embedding pace . To address RQ2, we retrofit target concept vectors produced by SRoBERTa using synonyms from mapping lexicon. Retrofitting algorithm (Faruqui et al., 2015) enriches concept vectors with synonym relationship knowledge from domain lexicon. In our model we encode a) input concept mentions using RoBERTa and b) target concepts using SRoBERTa and enrich them with synonym relationship knowledge. We compute similarity vector in which each value is equal to cosine similarity between vectors of concept mentions and all the target concepts. Finally, the cosine similarity values are normalized and the target concept with maximum similarity is chosen. During training, the vectors of target concepts are not updated. We evaluate our model on three standard MCN datasets CADEC, PsyTAR and SMM4H2017 and achieve accuracy improvements up to 1.36%. Further, our model when trained only using mapping lexicon synonyms achieves up to 4.87% improvement in accuracy. The key aspects of our work are", |
|
"cite_spans": [ |
|
{ |
|
"start": 250, |
|
"end": 278, |
|
"text": "(Reimers and Gurevych, 2019)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 699, |
|
"end": 721, |
|
"text": "(Faruqui et al., 2015)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 20, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "\u2022 A simple approach to integrate both target concept information and domain lexicon knowledge in medical concept normalization in the form of retrofitted target concept vectors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "\u2022 Our model achieves state-of-the-art performance on three standard medical concept normalization datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "\u2022 Our model when trained using mapping lexicon synonyms only, achieves up to 4.87% improvement in accuracy which shows that our approach to generate target concept vectors is better than graph embedding based approach (Pattisapu et al., 2020) or learning from scratch (Kalyan and Sangeetha, 2020b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 218, |
|
"end": 242, |
|
"text": "(Pattisapu et al., 2020)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 268, |
|
"end": 297, |
|
"text": "(Kalyan and Sangeetha, 2020b)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "2 Related Work", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "Traditional concept normalization systems used string matching (Aronson, 2001; McCallum et al., 2005; Tsuruoka et al., 2007) or machine learning approaches (Leaman et al., 2013; Leaman and Lu, 2014) . These methods perform poorly in case of instances with no words in common between concept mention and concept description. With the introduction of embedding models like Word2vec (Mikolov et al., 2013) In recent times, unsupervised pre-trained models like BERT (Devlin et al., 2019) , RoBERTa (Liu et al., 2019) achieved significant improvements in most of the natural language processing tasks. Most of the recent work in medical concept normalization (Miftahutdinov and Tutubalina, 2019; Kalyan and Sangeetha, 2020a; Pattisapu et al., 2020; Kalyan and Sangeetha, 2020b) in social media text is based on BERT and RoBERTa. Miftahutdinov and Tutubalina (2019) experimented with BERT and cosine similarity based semantic features, Kalyan and Sangeetha (2020a) experimented with various general and domain specific BERT models combined with highway network layer. Pattisapu et al. (2020) normalize medical concepts using RoBERTa and graph embedding based concept vectors while approach of Kalyan and Sangeetha (2020b) involves learning the vectors representations of target concepts along with input concept mentions. Our approach is similar to (Kalyan and Sangeetha, 2020b) by choosing target concept which has maximum cosine similarity with the input concept mention. However unlike Kalyan and Sangeetha (2020b) method which learns target concept vectors from scratch, we use retrofitted target concept vectors which are generated using SRoBERTa and then enriched with synonym relationship knowledge from domain lexicon. It is the first work to exploit both target concept information and domain lexicon knowledge effectively in MCN in the form of retrofitted target concept vectors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 63, |
|
"end": 78, |
|
"text": "(Aronson, 2001;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 79, |
|
"end": 101, |
|
"text": "McCallum et al., 2005;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 102, |
|
"end": 124, |
|
"text": "Tsuruoka et al., 2007)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 156, |
|
"end": 177, |
|
"text": "(Leaman et al., 2013;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 178, |
|
"end": 198, |
|
"text": "Leaman and Lu, 2014)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 380, |
|
"end": 402, |
|
"text": "(Mikolov et al., 2013)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 462, |
|
"end": 483, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 494, |
|
"end": 512, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 654, |
|
"end": 690, |
|
"text": "(Miftahutdinov and Tutubalina, 2019;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 691, |
|
"end": 719, |
|
"text": "Kalyan and Sangeetha, 2020a;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 720, |
|
"end": 743, |
|
"text": "Pattisapu et al., 2020;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 744, |
|
"end": 772, |
|
"text": "Kalyan and Sangeetha, 2020b)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 930, |
|
"end": 958, |
|
"text": "Kalyan and Sangeetha (2020a)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1062, |
|
"end": 1085, |
|
"text": "Pattisapu et al. (2020)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1187, |
|
"end": 1215, |
|
"text": "Kalyan and Sangeetha (2020b)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1343, |
|
"end": 1372, |
|
"text": "(Kalyan and Sangeetha, 2020b)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Medical Concept Normalization", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Sentence embeddings encode sequence of words into dense fixed size vector. Some of the popular approaches are averaging word vectors, encoderdecoder based skip thought (Kiros et al., 2015) , InferSent (Conneau et al., 2017) which is Siamese BiLSTM+max pooling trained on SNLI, transformer based Universal Sentence Encoder (Cer et al., 2018) . Recently, Reimers and Gurevych (2019) proposed SRoBERTa, Siamese network based Sentence RoBERTa model and it is trained on NLI + MultiNLI datasets followed by STS dataset. It is a state-of-the-art sentence embedding model which encodes sequence of words into dense fixed length vector in a way that sequences which are close in meaning are also close in embedding pace.", |
|
"cite_spans": [ |
|
{ |
|
"start": 168, |
|
"end": 188, |
|
"text": "(Kiros et al., 2015)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 201, |
|
"end": 223, |
|
"text": "(Conneau et al., 2017)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 322, |
|
"end": 340, |
|
"text": "(Cer et al., 2018)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 353, |
|
"end": 380, |
|
"text": "Reimers and Gurevych (2019)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Embeddings", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Vector representations generated by neural embedding models are rich in syntactic and semantic information but lack valuable relationship knowledge from semantic lexicons. To enrich vector representations with relationship knowledge, Faruqui et al. (2015) proposed retrofitting algorithm. It is simply a post-processing step and can be applied to vectors generated using any embedding model. It learns", |
|
"cite_spans": [ |
|
{ |
|
"start": 234, |
|
"end": 255, |
|
"text": "Faruqui et al. (2015)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Retrofitting algorithm", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "retrofitted concept vectors {v 1 , v 2 , v 3 , .., v n } from concept vectors {v 1 ,v 2 ,v 3 , .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Retrofitting algorithm", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "..,v n } by iteratively minimizing distance between (i) retrofitted vector v i and its counterpartv i and (ii) retrofitted vector v i and all its neighbors v j . The objective function is", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Retrofitting algorithm", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "n i=1 \uf8ee \uf8f0 \u03b1 i v i \u2212v i 2 + (i,j)\u2208E \u03b2 ij v i \u2212 v j 2 \uf8f9 \uf8fb", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Retrofitting algorithm", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "(1) Here retrofitted vectors v i are initialized with values of concept vectorsv i and then updated iteratively by minimizing the objective function.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Retrofitting algorithm", |
|
"sec_num": "2.3" |
|
}, |
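
{

"text": "The update below is a minimal NumPy sketch of this iteration, assuming the uniform weighting alpha_i = 1 and beta_ij = 1/degree(i) used by Faruqui et al. (2015); synonym_edges is a hypothetical adjacency map built from the synonym relations of the lexicon, not part of the original paper.\n\nimport numpy as np\n\ndef retrofit(hat_v, synonym_edges, n_iters=10):\n    # hat_v: dict mapping concept id -> original SRoBERTa vector (np.ndarray)\n    # synonym_edges: dict mapping concept id -> list of synonym concept ids\n    v = {c: vec.copy() for c, vec in hat_v.items()}  # initialise with the original vectors\n    for _ in range(n_iters):\n        for c, neighbours in synonym_edges.items():\n            if not neighbours:\n                continue\n            beta = 1.0 / len(neighbours)\n            # closed-form minimiser of the objective for v_c with all other vectors held fixed\n            v[c] = (hat_v[c] + beta * sum(v[n] for n in neighbours)) / (1.0 + beta * len(neighbours))\n    return v",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Retrofitting algorithm",

"sec_num": "2.3"

},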
|
{ |
|
"text": "Our proposed model is evaluated on three standard MCN datasets of noisy user-generated texts. Out of these, CADEC (Karimi et al., 2015) and PsyTAR (Zolnoori et al., 2019) datasets contain concept mentions gathered from user-generated AskAPatient.com reviews and SMM4H2017 (Sarker et al., 2018) contains adverse drug reaction (ADR) mentions extracted from twitter. CADEC: Karimi et.al released CSIRO Adverse Drug Event Corpus (CADEC) having user posted drug reviews gathered from AskAPatient (Karimi et al., 2015) . The annotators manually identified concept mentions and mapped them to SNOMED-CT concepts which resulted in a corpus of 6754 concept mentions and 1029 SNOMED-CT codes. As 66% of instances are common in train and test splits in the random folds of this dataset released by Limsopatham and Collier (2016) , Tutubalina et al. (2018) split this dataset into five folds 1 with no overlap.", |
|
"cite_spans": [ |
|
{ |
|
"start": 114, |
|
"end": 135, |
|
"text": "(Karimi et al., 2015)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 147, |
|
"end": 170, |
|
"text": "(Zolnoori et al., 2019)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 272, |
|
"end": 293, |
|
"text": "(Sarker et al., 2018)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 491, |
|
"end": 512, |
|
"text": "(Karimi et al., 2015)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 787, |
|
"end": 817, |
|
"text": "Limsopatham and Collier (2016)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 820, |
|
"end": 844, |
|
"text": "Tutubalina et al. (2018)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "PsyTAR: Zolnoori et al. (2019) released PsyTAR corpus which includes 887 user generated psychiatric drug reviews collected from AskAPatient. This dataset includes manually identified 6556 concept phrases which are mapped to 618 concepts in SNOMED-CT. Zolnoori et al. (2019) released random folds of this dataset. However, 56% of instances are common in train and test in these folds. So, Miftahutdinov and Tutubalina (2019) create custom folds of this dataset 2 to reduce the overlap between train and test sets. SMM4H017: Sarker et al. (2018) released this dataset 3 of ADR mentions for subtask3 of SMM4H2017 shared task organized by Health Language Processing Lab @ University of Pennsilvaniya. Initially, twets containing generic and trade names of drugs were collected. Then, ADR mentions were manually identified and mapped to Med-DRA concepts. In this corpus, train set consists of 6500 ADR phrases and 472 unique MedDRA codes, test set consists of 2500 ADR phrases and 254 MedDRA codes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 30, |
|
"text": "Zolnoori et al. (2019)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 251, |
|
"end": 273, |
|
"text": "Zolnoori et al. (2019)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 388, |
|
"end": 423, |
|
"text": "Miftahutdinov and Tutubalina (2019)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The significant overlap between train and test sets in random folds of CADEC and PsyTAR datasets can result in bias and contribute to high performance of model (Lee et al., 2017; Kalyan and Sangeetha, 2020a) . So, we evaluate our approach on custom folds of PsyTAR and CADEC datasets in addition to SMM4H2017 dataset, like the recent previous works (Pattisapu et al., 2020; Kalyan and Sangeetha, 2020b) 4 Methodology", |
|
"cite_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 178, |
|
"text": "(Lee et al., 2017;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 179, |
|
"end": 207, |
|
"text": "Kalyan and Sangeetha, 2020a)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 349, |
|
"end": 373, |
|
"text": "(Pattisapu et al., 2020;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 374, |
|
"end": 402, |
|
"text": "Kalyan and Sangeetha, 2020b)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Our model is based on RoBERTa and target concept embeddings. Initially we compute vector representations of input phrase and concepts in standard lexicon using RoBERTa and SRoBERTa respectively. We further enrich target concept vectors with synonym relationship from domain lexicon using retrofitting algorithm. Then, we find cosine similarity between vectors of concept mention and all the target concepts. Finally, the concept mention is mapped to concept with maximum similarity. Figure 2 gives an overview of our proposed model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 483, |
|
"end": 491, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model Description", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We use SRoBERTa, state-of-the-art sentence embedding model to compute target concept representations and then inject synonym relationship using retrofitting algorithm to get target concept vector e c \u2208 R h . e c = Retrof it(SRoBERT a(concept)) (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Concept Representation", |
|
"sec_num": null |
|
}, |
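
{

"text": "The snippet below is a minimal sketch of this step using the sentence-transformers library, assuming the 'roberta-base-nli-stsb-mean-tokens' checkpoint named later in Section 4.5; the concept descriptions listed are illustrative examples taken from Figure 1 and Table 1, and retrofitting is applied to the resulting vectors afterwards.\n\nfrom sentence_transformers import SentenceTransformer\n\nencoder = SentenceTransformer('roberta-base-nli-stsb-mean-tokens')\nconcept_descriptions = ['drowsy', 'lack of energy', 'pain in upper limb']\nconcept_vectors = encoder.encode(concept_descriptions)  # (num_concepts, 768) numpy array\n# e_c = Retrofit(concept_vectors) then injects synonym knowledge as in Equation 2",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Target Concept Representation",

"sec_num": null

},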
|
{ |
|
"text": "Learning quality representation of concept mentions is a key step in medical concept normalization. We use RoBERTa, which is an improved version of BERT with large training batch sizes and more ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Concept Mention Representation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We find similarity vector based on cosine similarity between vectors of input pharse and concepts in standard lexicon. Finally, we normalize all the cosine similarity values using softmax which result in normalized similarity vector\u015d \u2208 R C .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Concept Mention Representation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "s = [\u015d i ] C i=1", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Concept Mention Representation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Here C represents total number of unique target concepts in the dataset, s i = Sof tmax(f (e m , e ci )) where the function f() represents cosine similarity and e ci represents vector of the concept c i . We train the model using AdamW optimizer (Loshchilov and Hutter, 2019) which minimize cross entropy loss (L CE ) between normalized similarity vector\u015d and the ground truth vector s. During training, we freeze the vectors of target concepts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 246, |
|
"end": 275, |
|
"text": "(Loshchilov and Hutter, 2019)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Concept Mention Representation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L CE = \u2212 1 K K i=1 C j=1 s i j log(\u015d i j )", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Concept Mention Representation", |
|
"sec_num": null |
|
}, |
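
{

"text": "The following is a minimal PyTorch sketch of the similarity computation and training step described above, not the authors' released code; e_m denotes the RoBERTa mention vectors and concept_vectors the frozen retrofitted target concept matrix, following the paper's notation.\n\nimport torch\nimport torch.nn.functional as F\n\ndef similarity_scores(e_m, concept_vectors):\n    # e_m: (batch, h) mention vectors, concept_vectors: (C, h) frozen target concept vectors\n    return F.cosine_similarity(e_m.unsqueeze(1), concept_vectors.unsqueeze(0), dim=-1)  # (batch, C)\n\ndef training_step(e_m, concept_vectors, gold_ids, optimizer):\n    logits = similarity_scores(e_m, concept_vectors.detach())  # target concept vectors stay frozen\n    loss = F.cross_entropy(logits, gold_ids)  # softmax over similarities + cross-entropy (Equation 5)\n    loss.backward()\n    optimizer.step()\n    optimizer.zero_grad()\n    return loss.item()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Concept Mention Representation",

"sec_num": null

},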
|
{ |
|
"text": "We do basic pre-processing steps like lower-casing, removing non-ASCII and special characters in concept mention and concept descriptions. We remove unnecessary words like 'nos', 'unspecified' and 'finding' in concept descriptions. In case of concept mentions, we do additional pre-processing steps like removing repeating characters (e.g., sooo much \u2192 so much) , replacing medical acronyms 4", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "( 'ra' \u2192 'rheumatoid arthritis') and contractions (isn't \u2192 is not) with full forms. Pattisapu et al. (2020) treat synonyms in mapping lexicon as concept mention and augment the training set with the labeled instances generated from synonyms. However, we augment training set with synonyms of less frequently occurring concepts only. In case of CADEC and Psy-TAR datasets, we use synonyms from the mapping lexicon SNOMED-CT. In case of SMM4H2017 dataset, we use synonyms from UMLS Metathesaurus as synonyms are very few in number in Med-DRA. For each concept in MedDRA, we find the corresponding concept unique identifier(CUI) in UMLS and then gather all the associated synonyms excluding non-English synonyms.", |
|
"cite_spans": [ |
|
{ |
|
"start": 84, |
|
"end": 107, |
|
"text": "Pattisapu et al. (2020)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In case of retrofitting algorithm, we choose number of iterations = 10 as suggested by the authors. Further, we use the implementation 5 provided by the authors. As there is no official validation set in case of all the three datasets, we use 10% of the augmented training set for validation. We find optimal hyperparameter values by performing random search over the range of hyperparameter values. During training, we freeze target concept vectors. We implement all our models in PyTorch using transformers package from huggingface (Wolf et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 534, |
|
"end": 553, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In case of all the three datasets, standard evaluation metric is accuracy (Miftahutdinov and Tutubalina, 2019; Pattisapu et al., 2020; Kalyan and Sangeetha, 2020b) . In case of CADEC and PsyTAR datasets which are multi-fold, reported accuracy is average accuracy across all five folds.", |
|
"cite_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 110, |
|
"text": "(Miftahutdinov and Tutubalina, 2019;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 111, |
|
"end": 134, |
|
"text": "Pattisapu et al., 2020;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 135, |
|
"end": 163, |
|
"text": "Kalyan and Sangeetha, 2020b)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Here, we compare our approach with the following existing methods.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison with existing methods", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Hierarchical Character- LSTM Han et al. (2017) use hierarchical character level LSTM to normalize the concept mentions. Intially, they generate character level word representations using LSTM over embeddings of characters and their classes and then apply bidirectional LSTM over these word representations to generate contextual word vectors. Finally, vector obtained by max-pooling of contextual word vectors is given to fully connected softmax layer.", |
|
"cite_spans": [ |
|
{ |
|
"start": 24, |
|
"end": 46, |
|
"text": "LSTM Han et al. (2017)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison with existing methods", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "Multinomial LR Belousov et al. (2017) generate concept mention vector representation as average of three weighted vectors of words in the concept mention. Here, word weights are based on inverse document frequencies of words and word vectors are obtained as average of GoogleNews, twitter and drugtwitter embeddings. With these mention representations as input, Multinomial Logistic Regression classifier assigns the concepts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 37, |
|
"text": "LR Belousov et al. (2017)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison with existing methods", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "BERT + Cosine Semantic Features Miftahutdinov and Tutubalina (2019) generate representation of concept mention using BERT. To integrate target concept knowledge , the authors generate semantic features based on cosine similarity between tfidf vector representations of concept mention and all the target concepts in the dataset. Finally, the output of BERT and cosine semantic features are concatenated and given to fully connected softmax layer which assigns the concepts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison with existing methods", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "BERT + Highway Network Layer Kalyan and Sangeetha (2020a) experiment with various general and domain specific BERT models for medical concept normalization. The output of BERT model is passed through highway network layer to eliminate the unnecessary information and then passed through fully connected softmax layer to get the target concept.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison with existing methods", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "RoBERTa + Graph based Concept Vectors Pattisapu et al. (2020) generate target concept vectors using graph embedding algorithms. They train RoBERTa based model which embeds input concept mention into the embedding space of target concept vectors. For a given input phrase, the nearest standard concept in embedding space is assigned.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison with existing methods", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "RoBERTa + Random Concept Vectors Kalyan and Sangeetha (2020b) propose a model based on RoBERTa which jointly learns the representations of concept mention and the standard concepts. The authors randomly initialize the target concept vectors and then they are updated during training. The standard concept with maximum cosine similarity with input phrase is chosen.", |
|
"cite_spans": [ |
|
{ |
|
"start": 33, |
|
"end": 61, |
|
"text": "Kalyan and Sangeetha (2020b)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison with existing methods", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "RoBERTa We generate the representations of input concept mention using RoBERTa. We experiment with both variants of RoBERTa namely RoBERTa-base and RoBERTa-large. In both the cases, the size of concept mention vector is equal to the hidden vector size i.e., 768 in case of RoBERTabase and 1024 in case of RoBERTa-large.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "+ Concept Vectors (CV) We generate target concept vectors by encoding their descriptions using SRoBERTa. In case of a) RoBERTa-base model, we use target concept vectors generated by 'robertabase-nli-stsb-mean-tokens' and b) RoBERTa-large model, we use target concepts generated by 'roberta-large-nli-stsb-mean-tokens'.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "+ Retrofitted Concept Vectors(RCV) We enrich target concepts generated by SRoBERTa with synonym relationship knowledge from mapping lexicon using retrofitting algorithm.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "Our proposed model is evaluated on the standard MCN datasets CADEC, PsyTAR and SMM4H2017. The performance of our model and existing models is presented in Table 2 . From Table 2 , we notice that our proposed model achieves the best results of 86.40%, 85.04% and 91.73% across CADEC, PsyTAR and SMM4H2017 datasets. Our model outperforms existing methods with accuracy improvement up to 1.36%. The existing stateof-the-art model Kalyan and Sangeetha (2020b) learns target concept vectors from scratch and so it requires more number of training instances. Our model outperforms the approach of Kalyan and Sangeetha (2020b) (i) up to 1.9% in case of base version and (ii) up to 1.36% in case of large version. The use of retrofitted concept vectors improved performance only in case of SMM4H2017. The performance of retrofitted concept vectors depends on the number of available synonyms for each concept. Table 2 : Performance of our mdoel and existing methods on CADEC, PsyTAR and SMM4H2017 datasets. \u03c0 -model based on Roberta-base and \u03a0 -model based on Roberta-large. \u03b3 -concept vectors generated using SRoBERTa and \u03b4 -concept vectors generated using SRoBERTa and then retrofitted using synonym relationship from domain lexicon.", |
|
"cite_spans": [ |
|
{ |
|
"start": 428, |
|
"end": 456, |
|
"text": "Kalyan and Sangeetha (2020b)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 162, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 170, |
|
"end": 178, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 903, |
|
"end": 910, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The synonyms for SMM4H2017 are gathered from UMLS Metathesaurus and as they are more number in number compared to SNOMED-CT synonyms, retrofitted concept vectors improve accuracy only in case of SMM4H2017 (Roberta-large). In future, we would like to see whether using UMLS synonyms instead of SNOMED-CT synonyms improve performance in case of CADEC and PsyTAR datasets also.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "6 Analysis and Discussion", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "There will be a set of synonyms for each concept in mapping lexicon. Table 3 shows some of the concepts and corresponding synonyms from SNOMED-CT lexicon. We consider each synonym as user-generated concept mention and generate labeled instances from mapping lexicon synonyms.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 69, |
|
"end": 76, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Training only on mapping lexicon synonyms", |
|
"sec_num": "6.1" |
|
}, |
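
{

"text": "A minimal sketch of building these labeled instances is shown below; 'lexicon' is a hypothetical mapping from concept id to its synonym strings, such as the SNOMED-CT entries in Table 3, and the synonyms in the example comment are illustrative rather than taken from the lexicon.\n\ndef synonym_instances(lexicon):\n    # each (synonym, concept id) pair becomes one training instance\n    return [(synonym, concept_id)\n            for concept_id, synonyms in lexicon.items()\n            for synonym in synonyms]\n\n# e.g. {'271782001': ['drowsy', 'sleepiness', 'somnolent']} ->\n# [('drowsy', '271782001'), ('sleepiness', '271782001'), ('somnolent', '271782001')]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training only on mapping lexicon synonyms",

"sec_num": "6.1"

},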
|
{ |
|
"text": "To show the performance of our model in the absence of human annotated instances in training set, we train our model using labeled instances generated from mapping lexicon synonyms and then evaluate our model on the corresponding test set. Tabel 4 shows the performance of our model and existing models across three datasets. As reported in the table, our model outperforms existing methods with accuracy improvement up to 4.46% and 4.87% across CADEC and PsyTAR datasets respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training only on mapping lexicon synonyms", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "From Table 4 , we infer that among the three approaches, Kalyan and Sangeetha (2020b) achieved the lowest performance in case of CADEC and PsyTAR datasets. When compared to Kalyan and Sangeetha (2020b) , the performance of a) Pattisapu et al. (2020) is 9.42% and 9.87% higher b) our approach is 13.88% and 14.74% higher across CADEC and PsyTAR datasets respectively. Kalyan and Sangeetha (2020b) learn the vector representations of concept mentions and concepts jointly. The authors randomly assigned values to target concepts and then updated them during training. However, learning concept vectors from scratch requires more number of training instances. As the number of training instances generated from synonyms is less in number, this approach results in poor performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 85, |
|
"text": "Kalyan and Sangeetha (2020b)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 173, |
|
"end": 201, |
|
"text": "Kalyan and Sangeetha (2020b)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 367, |
|
"end": 395, |
|
"text": "Kalyan and Sangeetha (2020b)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 12, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Training only on mapping lexicon synonyms", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "In case of SMM4H2017, Kalyan and Sangeetha (2020b) achieved the best performance of 63.28% which is 2.55% more than our approach. Here as the number of training instances generated from synonyms is more in number, Kalyan and Sangeetha (2020b) outperformed our approach. This shows that learning target concept vectors from scratch is effective only when training instances are more in number.", |
|
"cite_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 50, |
|
"text": "Kalyan and Sangeetha (2020b)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 214, |
|
"end": 242, |
|
"text": "Kalyan and Sangeetha (2020b)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training only on mapping lexicon synonyms", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Here we analyse the reasons for the wrong predictions given by our best performing model. For this, we check all the failure cases in CADEC dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Failure Analysis", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Our model failed to handle the concept mentions which are misspelled words of ground truth con- Table 4 : Performance of our model and existing methods when trained only on mapping lexicon synonyms. \u03c0 -model based on Roberta-base and \u03a0 -model based on Roberta-large. \u03b3 -concept vectors generated using SRoBERTa. \u03b4 -concept vectors generated using SRoBERTa and then retrofitted using synonym relationship from domain lexicon.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 103, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Failure Analysis", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "cepts. For example, the concept mention 'insomina is mapped to 'nausea -422587007 ' instead of the ground truth concept 'insomnia -193462001'. Similarly, the concept mentions 'naseua', 'fatique', 'insommnia', 'diziness', 'nausia' and 'diarreah' are not mapped to the ground truth concepts 'nausea -422587007', 'fatigue -84229001 ', 'insomnia -193462001 ', 'dizziness -404640003', 'nausea -422587007' and 'diarrhea -62315008' respectively. Here, all the concept mentions are misspelled words of the ground truth concepts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 332, |
|
"end": 438, |
|
"text": "'insomnia -193462001 ', 'dizziness -404640003', 'nausea -422587007' and 'diarrhea -62315008' respectively.", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Failure Analysis", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "In some of the cases, our model assigned concepts which are more specific than the ground truth concepts. For example, our model mapped the concepts mentions 'pain so bad', 'so much pain', 'worse pain' and 'pain bad' to the concept 'severe pain -76948002' rather than the ground truth 'pain -22253000'. Here we observe that in case of all these concept mentions, the concept 'severe pain' is more specific and hence appropriate compared to the ground truth 'pain'.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Failure Analysis", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "In few cases, our model assigned concepts which are closely related to the ground truth concept. For example, the concept mention 'difficult to concentrate is assigned to the concept 'unable to con-centrate -60032008' instead of the ground truth concept 'poor concentration -26329005'. Here the predicted and ground truth concepts are closely related. Similarly, 'could not walk across the room' is assigned to 'unable to walk -282145008' instead of 'walking disability -228158008'.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Failure Analysis", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "One more case in which our model failed is when the concept mention is an abbreviation of the ground truth concept. For example, the concept mention 'ibu' is assigned to the concept 'ubidecarenone' and the ground truth concept is 'ibuprofen. Here, 'ibu' is an abbreviation of 'ibuprofen'.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Failure Analysis", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "In case of SMM4H2017 dataset, we find the corresponding CUI for each MedDRA concept and include all the associated synonyms excluding non-English synonyms. Here the limitation is that, some CUIs can be mapped to more than one Med-DRA concept. For example, 'C0020649 (Hypotension)' can be mapped to both the MedDRA concepts '10021097 (Hypotension)' and '10005734 (Blood pressure decreased)'. Similarly, 'C0036974 (Shock)' can be mapped to both the MedDRA concepts '10009192 (Circulatory collapse)' and '10034567 (Peripheral circulatory failure)'.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "Here, we propose a model based on RoBERTa and target concept embeddings to normalize concepts in medical related user-generated texts. Our model integrates target concept knowledge as well domain lexicon knowledge in a simple and novel way. The existing state-of-the-art approach (Kalyan and Sangeetha, 2020b) exploits target concept knowledge by learning vector representations of target concepts from scratch. As target concept vectors are learned from scratch, this approach requires more training instances and it performs poorly with less number of training instances. Our model exploits target concept information and domain lexicon knowledge in the form of retrofitted target concept vectors. We encode target concepts using SRoBERTa and enrich these concept vectors with synonym relationship knowledge from standard lexicon using retrofitting algorithm. Our model outperforms all the existing methods and achieves significant improvements on three standard datasets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 280, |
|
"end": 309, |
|
"text": "(Kalyan and Sangeetha, 2020b)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "https://cutt.ly/Gi6kka6 2 https://doi.org/10.5281/zenodo.3236318", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://data.mendeley.com/datasets/rxwfb3tysd/2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Gathered from UMLS Methathesaurus, Wikipedia and https://www.acronymslist.com/cat/medical-acronyms.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/mfaruqui/retrofitting", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Effective mapping of biomedical text to the umls metathesaurus: the metamap program", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Alan R Aronson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the AMIA Symposium", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alan R Aronson. 2001. Effective mapping of biomed- ical text to the umls metathesaurus: the metamap program. In Proceedings of the AMIA Symposium, page 17. American Medical Informatics Associa- tion.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Using an ensemble of generalised linear and deep learning models in the smm4h 2017 medical concept normalisation task", |
|
"authors": [ |
|
{ |
|
"first": "Maksim", |
|
"middle": [], |
|
"last": "Belousov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Dixon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Goran", |
|
"middle": [], |
|
"last": "Nenadic", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "SMM4H@ AMIA", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "54--58", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maksim Belousov, William Dixon, and Goran Nenadic. 2017. Using an ensemble of generalised linear and deep learning models in the smm4h 2017 medical concept normalisation task. In SMM4H@ AMIA, pages 54-58.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Using snomed ct in combination with meddra for reporting signal detection and adverse drug reactions reporting", |
|
"authors": [ |
|
{ |
|
"first": "Olivier", |
|
"middle": [], |
|
"last": "Bodenreider", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "AMIA Annual Symposium Proceedings", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Olivier Bodenreider. 2009. Using snomed ct in com- bination with meddra for reporting signal detec- tion and adverse drug reactions reporting. In AMIA Annual Symposium Proceedings, volume 2009, page 45. American Medical Informatics As- sociation.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Universal sentence encoder for english", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinfei", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sheng-Yi", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Hua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicole", |
|
"middle": [], |
|
"last": "Limtiaco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rhomni", |
|
"middle": [], |
|
"last": "St John", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Constant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mario", |
|
"middle": [], |
|
"last": "Guajardo-Cespedes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Tar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "169--174", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Cer, Yinfei Yang, Sheng-yi Kong, Nan Hua, Nicole Limtiaco, Rhomni St John, Noah Constant, Mario Guajardo-Cespedes, Steve Yuan, Chris Tar, et al. 2018. Universal sentence encoder for english. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 169-174.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Supervised learning of universal sentence representations from natural language inference data", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lo\u00efc", |
|
"middle": [], |
|
"last": "Barrault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "670--680", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau, Douwe Kiela, Holger Schwenk, Lo\u00efc Barrault, and Antoine Bordes. 2017. Supervised learning of universal sentence representations from natural language inference data. In Proceedings of the 2017 Conference on Empirical Methods in Natu- ral Language Processing, pages 670-680.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Retrofitting word vectors to semantic lexicons", |
|
"authors": [ |
|
{ |
|
"first": "Manaal", |
|
"middle": [], |
|
"last": "Faruqui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jesse", |
|
"middle": [], |
|
"last": "Dodge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sujay", |
|
"middle": [], |
|
"last": "Kumar Jauhar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah A", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1606--1615", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manaal Faruqui, Jesse Dodge, Sujay Kumar Jauhar, Chris Dyer, Eduard Hovy, and Noah A Smith. 2015. Retrofitting word vectors to semantic lexicons. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 1606-1615.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Team uknlp: Detecting adrs, classifying medication intake messages, and normalizing adr mentions on twitter", |
|
"authors": [ |
|
{ |
|
"first": "Sifei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tung", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Rios", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramakanth", |
|
"middle": [], |
|
"last": "Kavuluru", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "SMM4H@ AMIA", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "49--53", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sifei Han, Tung Tran, Anthony Rios, and Ramakanth Kavuluru. 2017. Team uknlp: Detecting adrs, classi- fying medication intake messages, and normalizing adr mentions on twitter. In SMM4H@ AMIA, pages 49-53.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Bertmcn: Mapping colloquial phrases to standard medical concepts using bert and highway network", |
|
"authors": [ |
|
{ |
|
"first": "Katikapalli", |
|
"middle": [], |
|
"last": "Subramanyam Kalyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Sangeetha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Katikapalli Subramanyam Kalyan and S Sangeetha. 2020a. Bertmcn: Mapping colloquial phrases to standard medical concepts using bert and highway network. Technical report, EasyChair.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Medical concept normalization in user generated texts by learning target concept embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Katikapalli", |
|
"middle": [], |
|
"last": "Subramanyam Kalyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Sangeetha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2006.04014" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Katikapalli Subramanyam Kalyan and S Sangeetha. 2020b. Medical concept normalization in user gen- erated texts by learning target concept embeddings. arXiv preprint arXiv:2006.04014.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Secnlp: A survey of embeddings in clinical natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Katikapalli", |
|
"middle": [], |
|
"last": "Subramanyam Kalyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Sangeetha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Journal of biomedical informatics", |
|
"volume": "101", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Katikapalli Subramanyam Kalyan and S Sangeetha. 2020c. Secnlp: A survey of embeddings in clinical natural language processing. Journal of biomedical informatics, 101:103323.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Cadec: A corpus of adverse drug event annotations", |
|
"authors": [ |
|
{ |
|
"first": "Sarvnaz", |
|
"middle": [], |
|
"last": "Karimi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alejandro", |
|
"middle": [], |
|
"last": "Metke-Jimenez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Madonna", |
|
"middle": [], |
|
"last": "Kemp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Journal of biomedical informatics", |
|
"volume": "55", |
|
"issue": "", |
|
"pages": "73--81", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarvnaz Karimi, Alejandro Metke-Jimenez, Madonna Kemp, and Chen Wang. 2015. Cadec: A corpus of adverse drug event annotations. Journal of biomedi- cal informatics, 55:73-81.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Skip-thought vectors", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yukun", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Russ", |

"middle": [ |

"R" |

], |

"last": "Salakhutdinov", |

"suffix": "" |

}, |

{ |

"first": "Richard", |

"middle": [], |

"last": "Zemel", |

"suffix": "" |

}, |

{ |

"first": "Raquel", |

"middle": [], |

"last": "Urtasun", |

"suffix": "" |

}, |

{ |

"first": "Antonio", |

"middle": [], |

"last": "Torralba", |

"suffix": "" |

}, |

{ |

"first": "Sanja", |

"middle": [], |

"last": "Fidler", |

"suffix": "" |

} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3294--3302", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan Kiros, Yukun Zhu, Russ R Salakhutdinov, Richard Zemel, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. 2015. Skip-thought vectors. In Advances in neural information processing systems, pages 3294-3302.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Dnorm: disease name normalization with pairwise learning to rank", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Leaman", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Rezarta", |

"middle": [], |

"last": "Islamaj Dogan", |

"suffix": "" |

}, |

{ |

"first": "Zhiyong", |

"middle": [], |

"last": "Lu", |

"suffix": "" |

} |
|
], |
|
"year": 2013, |
|
"venue": "Bioinformatics", |
|
"volume": "29", |
|
"issue": "22", |
|
"pages": "2909--2917", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert Leaman, Rezarta Islamaj Dogan, and Zhiy- ong Lu. 2013. Dnorm: disease name normaliza- tion with pairwise learning to rank. Bioinformatics, 29(22):2909-2917.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Automated disease normalization with low rank approximations", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Leaman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyong", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of BioNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "24--28", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert Leaman and Zhiyong Lu. 2014. Automated disease normalization with low rank approximations. In Proceedings of BioNLP 2014, pages 24-28.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Medical concept normalization for online user-generated texts", |
|
"authors": [ |
|
{ |
|
"first": "Kathy", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Sadid A", |

"middle": [], |

"last": "Hasan", |

"suffix": "" |

}, |

{ |

"first": "Oladimeji", |

"middle": [], |

"last": "Farri", |

"suffix": "" |

}, |

{ |

"first": "Alok", |

"middle": [], |

"last": "Choudhary", |

"suffix": "" |

}, |

{ |

"first": "Ankit", |

"middle": [], |

"last": "Agrawal", |

"suffix": "" |

} |
|
], |
|
"year": 2017, |
|
"venue": "2017 IEEE International Conference on Healthcare Informatics (ICHI)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "462--469", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kathy Lee, Sadid A Hasan, Oladimeji Farri, Alok Choudhary, and Ankit Agrawal. 2017. Medical con- cept normalization for online user-generated texts. In 2017 IEEE International Conference on Health- care Informatics (ICHI), pages 462-469. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Normalising medical concepts in social media texts by learning semantic representation", |
|
"authors": [ |
|
{ |
|
"first": "Nut", |
|
"middle": [], |
|
"last": "Limsopatham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nigel", |
|
"middle": [], |
|
"last": "Collier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nut Limsopatham and Nigel Collier. 2016. Normalis- ing medical concepts in social media texts by learn- ing semantic representation. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Roberta: A robustly optimized bert pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Decoupled weight decay regularization", |
|
"authors": [ |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Loshchilov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Hutter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilya Loshchilov and Frank Hutter. 2019. Decoupled weight decay regularization. In International Con- ference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A conditional random field for discriminatively-trained finite-state string edit distance", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kedar", |
|
"middle": [], |
|
"last": "Bellare", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the Twenty-First Conference on Uncertainty in Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "388--395", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew McCallum, Kedar Bellare, and Fernando Pereira. 2005. A conditional random field for discriminatively-trained finite-state string edit dis- tance. In Proceedings of the Twenty-First Confer- ence on Uncertainty in Artificial Intelligence, pages 388-395.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Deep neural models for medical concept normalization in user-generated texts", |
|
"authors": [ |
|
{ |
|
"first": "Zulfat", |
|
"middle": [], |
|
"last": "Miftahutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Tutubalina", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics: Student Research Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "393--399", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zulfat Miftahutdinov and Elena Tutubalina. 2019. Deep neural models for medical concept normaliza- tion in user-generated texts. In Proceedings of the 57th Annual Meeting of the Association for Com- putational Linguistics: Student Research Workshop, pages 393-399.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Efficient estimation of word representations in vector space", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "1st International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Efficient estimation of word represen- tations in vector space. In 1st International Con- ference on Learning Representations, ICLR 2013, Scottsdale, Arizona, USA, May 2-4, 2013, Workshop Track Proceedings.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Medical Concept Normalization by Encoding Target Knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Nikhil", |
|
"middle": [], |
|
"last": "Pattisapu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sangameshwar", |
|
"middle": [], |
|
"last": "Patil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Girish", |
|
"middle": [], |
|
"last": "Palshikar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vasudeva", |
|
"middle": [], |
|
"last": "Varma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Machine Learning for Health NeurIPS Workshop", |
|
"volume": "116", |
|
"issue": "", |
|
"pages": "246--259", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikhil Pattisapu, Sangameshwar Patil, Girish Palshikar, and Vasudeva Varma. 2020. Medical Concept Nor- malization by Encoding Target Knowledge. In Proceedings of the Machine Learning for Health NeurIPS Workshop, volume 116 of Proceedings of Machine Learning Research, pages 246-259. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Deep contextualized word representations", |
|
"authors": [ |
|
{ |

"first": "Matthew E", |

"middle": [], |

"last": "Peters", |

"suffix": "" |

}, |

{ |

"first": "Mark", |

"middle": [], |

"last": "Neumann", |

"suffix": "" |

}, |

{ |

"first": "Mohit", |

"middle": [], |

"last": "Iyyer", |

"suffix": "" |

}, |

{ |

"first": "Matt", |

"middle": [], |

"last": "Gardner", |

"suffix": "" |

}, |

{ |

"first": "Christopher", |

"middle": [], |

"last": "Clark", |

"suffix": "" |

}, |

{ |

"first": "Kenton", |

"middle": [], |

"last": "Lee", |

"suffix": "" |

}, |

{ |

"first": "Luke", |

"middle": [], |

"last": "Zettlemoyer", |

"suffix": "" |

} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2227--2237", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew E Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. In Proceedings of NAACL-HLT, pages 2227-2237.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Sentencebert: Sentence embeddings using siamese bertnetworks", |
|
"authors": [ |
|
{ |
|
"first": "Nils", |
|
"middle": [], |
|
"last": "Reimers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3973--3983", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence- bert: Sentence embeddings using siamese bert- networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 3973-3983.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Data and systems for medication-related text classification and concept normalization from twitter: insights from the social media mining for health (smm4h)-2017 shared task", |
|
"authors": [ |
|
{ |
|
"first": "Abeed", |
|
"middle": [], |
|
"last": "Sarker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maksim", |
|
"middle": [], |
|
"last": "Belousov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jasper", |
|
"middle": [], |
|
"last": "Friedrichs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Hakala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Farrokh", |
|
"middle": [], |
|
"last": "Mehryary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sifei", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tung", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Rios", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramakanth", |
|
"middle": [], |
|
"last": "Kavuluru", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Journal of the American Medical Informatics Association", |
|
"volume": "25", |
|
"issue": "10", |
|
"pages": "1274--1283", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abeed Sarker, Maksim Belousov, Jasper Friedrichs, Kai Hakala, Svetlana Kiritchenko, Farrokh Mehryary, Sifei Han, Tung Tran, Anthony Rios, Ramakanth Kavuluru, et al. 2018. Data and sys- tems for medication-related text classification and concept normalization from twitter: insights from the social media mining for health (smm4h)-2017 shared task. Journal of the American Medical Informatics Association, 25(10):1274-1283.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Deep contextualized medical concept normalization in social media text", |
|
"authors": [ |
|
{ |

"first": "Kalyan", |

"middle": [], |

"last": "Katikapalli Subramanyam", |

"suffix": "" |

}, |

{ |

"first": "S", |

"middle": [], |

"last": "Sangeetha", |

"suffix": "" |

} |
|
], |
|
"year": 2020, |
|
"venue": "Third International Conference on Computing and Network Communications (CoCoNet'19)", |
|
"volume": "171", |
|
"issue": "", |
|
"pages": "1353--1362", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.procs.2020.04.145" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kalyan Katikapalli Subramanyam and S Sangeetha. 2020. Deep contextualized medical concept normal- ization in social media text. Procedia Computer Sci- ence, 171:1353 -1362. Third International Confer- ence on Computing and Network Communications (CoCoNet'19).", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Learning string similarity measures for gene/protein name dictionary look-up using logistic regression", |
|
"authors": [ |
|
{ |
|
"first": "Yoshimasa", |
|
"middle": [], |
|
"last": "Tsuruoka", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "John", |

"middle": [], |

"last": "McNaught", |

"suffix": "" |

}, |

{ |

"first": "Jun'ichi", |

"middle": [], |

"last": "Tsujii", |

"suffix": "" |

}, |

{ |

"first": "Sophia", |

"middle": [], |

"last": "Ananiadou", |

"suffix": "" |

} |
|
], |
|
"year": 2007, |
|
"venue": "Bioinformatics", |
|
"volume": "23", |
|
"issue": "20", |
|
"pages": "2768--2774", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoshimasa Tsuruoka, John McNaught, Jun'i; chi Tsujii, and Sophia Ananiadou. 2007. Learning string sim- ilarity measures for gene/protein name dictionary look-up using logistic regression. Bioinformatics, 23(20):2768-2774.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Medical concept normalization in social media posts with recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Tutubalina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zulfat", |
|
"middle": [], |
|
"last": "Miftahutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Nikolenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Valentin", |
|
"middle": [], |
|
"last": "Malykh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Journal of biomedical informatics", |
|
"volume": "84", |
|
"issue": "", |
|
"pages": "93--102", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elena Tutubalina, Zulfat Miftahutdinov, Sergey Nikolenko, and Valentin Malykh. 2018. Medical concept normalization in social media posts with recurrent neural networks. Journal of biomedical informatics, 84:93-102.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Huggingface's transformers: Stateof-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R\u00e9mi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R\u00e9mi Louf, Morgan Funtow- icz, et al. 2019. Huggingface's transformers: State- of-the-art natural language processing. ArXiv, pages arXiv-1910.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "A systematic approach for developing a corpus of patient reported adverse drug events: a case study for ssri and snri medications", |
|
"authors": [ |
|
{ |
|
"first": "Maryam", |
|
"middle": [], |
|
"last": "Zolnoori", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kin", |
|
"middle": [ |
|
"Wah" |
|
], |
|
"last": "Fung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Patrick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Fontelo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hadi", |
|
"middle": [], |
|
"last": "Kharrazi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Faiola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi Shuan Shirley", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christina", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Eldredge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jake", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Conway", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Journal of biomedical informatics", |
|
"volume": "90", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maryam Zolnoori, Kin Wah Fung, Timothy B Patrick, Paul Fontelo, Hadi Kharrazi, Anthony Faiola, Yi Shuan Shirley Wu, Christina E Eldredge, Jake Luo, Mike Conway, et al. 2019. A systematic ap- proach for developing a corpus of patient reported adverse drug events: a case study for ssri and snri medications. Journal of biomedical informatics, 90:103091.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"text": "Overview of our proposed model for medical concept normalization in noisy user-generated texts. e m -RoBERTa encoded input concept mention, s -similarity vector computed based on cosine similarity between the vectors of input phrase and concepts in standard lexicon,\u015d -normalized cosine similarity vector.training corpus, to compute input concept mention representation e m \u2208 R h . e m = RoBERT a(mention)", |
|
"type_str": "figure" |
|
}, |
|
"TABREF3": { |
|
"content": "<table><tr><td>Method</td><td colspan=\"3\">CADEC PsyTAR SMM4H2017</td></tr><tr><td colspan=\"2\">Existing Methods</td><td/><td/></tr><tr><td>(Pattisapu et al., 2020)</td><td>64.80</td><td>58.4</td><td>-</td></tr><tr><td>(Kalyan and Sangeetha, 2020b) \u03c0</td><td>51.55</td><td>45.77</td><td>55.75</td></tr><tr><td colspan=\"2\">(Kalyan and Sangeetha, 2020b) \u03a0 55.38</td><td>48.53</td><td>63.28</td></tr><tr><td/><td>Ours</td><td/><td/></tr><tr><td>Roberta-base + CV \u03b3</td><td>62.44</td><td>59.47</td><td>58.78</td></tr><tr><td>Roberta-base + RCV \u03b4</td><td>63.73</td><td>60.14</td><td>57.31</td></tr><tr><td>Roberta-large + CV \u03b3</td><td>69.26</td><td>63.06</td><td>58.82</td></tr><tr><td>Roberta-large + RCV \u03b4</td><td>69.14</td><td>63.27</td><td>60.73</td></tr></table>", |
|
"html": null, |
|
"text": "Concepts and their synonyms from SNOMED-CT lexicon", |
|
"type_str": "table", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |