|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T03:12:13.634883Z" |
|
}, |
|
"title": "Transfer learning applied to text classification in Spanish radiological reports", |
|
"authors": [ |
|
{ |
|
"first": "Pilar", |
|
"middle": [], |
|
"last": "L\u00f3pez-\u00dabeda", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Universidad de Ja\u00e9n", |
|
"location": { |
|
"addrLine": "Campus Las Lagunillas", |
|
"postCode": "23071", |
|
"settlement": "Ja\u00e9n", |
|
"country": "Spain" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Manuel", |
|
"middle": [], |
|
"last": "Carlos D\u00edaz-Galiano", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Universidad de Ja\u00e9n", |
|
"location": { |
|
"addrLine": "Campus Las Lagunillas", |
|
"postCode": "23071", |
|
"settlement": "Ja\u00e9n", |
|
"country": "Spain" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Alfonso Ure\u00f1a-L\u00f3pez", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Universidad de Ja\u00e9n", |
|
"location": { |
|
"addrLine": "Campus Las Lagunillas", |
|
"postCode": "23071", |
|
"settlement": "Ja\u00e9n", |
|
"country": "Spain" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Maria-Teresa", |
|
"middle": [], |
|
"last": "Mart\u00edn-Valdivia", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Universidad de Ja\u00e9n", |
|
"location": { |
|
"addrLine": "Campus Las Lagunillas", |
|
"postCode": "23071", |
|
"settlement": "Ja\u00e9n", |
|
"country": "Spain" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Teodoro", |
|
"middle": [], |
|
"last": "Mart\u00edn-Noguerol", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Luna", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Pre-trained text encoders have rapidly advanced the state-of-the-art on many Natural Language Processing tasks. This paper presents the use of transfer learning methods applied to the automatic detection of codes in radiological reports in Spanish. Assigning codes to a clinical document is a popular task in NLP and in the biomedical domain. These codes can be of two types: standard classifications (e.g. ICD-10) or specific to each clinic or hospital. In this study we show a system using specific radiology clinic codes. The dataset is composed of 208,167 radiology reports labeled with 89 different codes. The corpus has been evaluated with three methods using the BERT model applied to Spanish: Multilingual BERT, BETO and XLM. The results are interesting obtaining 70% of F1-score with a pre-trained multilingual model.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Pre-trained text encoders have rapidly advanced the state-of-the-art on many Natural Language Processing tasks. This paper presents the use of transfer learning methods applied to the automatic detection of codes in radiological reports in Spanish. Assigning codes to a clinical document is a popular task in NLP and in the biomedical domain. These codes can be of two types: standard classifications (e.g. ICD-10) or specific to each clinic or hospital. In this study we show a system using specific radiology clinic codes. The dataset is composed of 208,167 radiology reports labeled with 89 different codes. The corpus has been evaluated with three methods using the BERT model applied to Spanish: Multilingual BERT, BETO and XLM. The results are interesting obtaining 70% of F1-score with a pre-trained multilingual model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Radiology reports are text records taken by radiologists that detail the interpretation of a certain imaging modality exam including a description of radiological findings that could be the answer to a specific clinical question (patient's symptoms, clinical signs or specific syndromes). Structured text information in image reports can be applied in many scenarios, including clinical decision support (Demner-Fushman et al., 2009) , detection of critical reports (Hripcsak et al., 2002) , labeling of medical images (Dreyer et al., 2005; Hassanpour et al., 2017; Yadav et al., 2013) , among other. Natural language processing (NLP) has shown promise in automating the classification of free narrative text. In the NLP area this process is named Automatic Text Classification techniques (ATC). ATC is an automated process of assigning set of predefined categories to plain text documents (Witten and Frank, 2002) . The health care system employs a large number of categorization and classification systems to assist data management for a variety of tasks, including patient care, record storage and retrieval, statistical analysis, insurance and billing (Crammer et al., 2007; Scheurwegs et al., 2017; Wang et al., 2016) . One of these classification systems is the International Classification of Diseases, Ten Version (ICD-10 1 ). In 2017 a challenge was born at CLEF where the aim of the task was to automatically assign ICD-10 codes to the text content of death certificates in different languages such as English, French (N\u00e9v\u00e9ol et al., 2017) , Hungarian, Italian (N\u00e9v\u00e9ol et al., 2018) or German (D\u00f6rendahl et al., 2019) . Regarding ATC, many techniques have been applied and studied. In traditional machine learning the most common algorithms known in the radiology community are: Naive Bayes, decision trees, logistic regression and SVM (Wang and Summers, 2012; Wei et al., 2005; Perotte et al., 2014) . On the other hand, Recurrent Neural Networks (RNN) are 1 https://icd.who.int/browse10/2016/en used for sequence learning, where both input and output are word and label sequences, respectively. There are several studies related to RNN using Long Short-Term Memory (LSTM) (Tutubalina and Miftahutdinov, 2017) or CNN with an attention layer (Mullenbach et al., 2018) . Finally, researchers have shown the value of transfer learningpre-training a neural network model on a known task and then performing fine-tuning -using the trained neural network as the basis of a new purpose-specific model. BERT model is one of the best known models nowadays. BERT has also been used for multi-class classification with ICD-10 (Amin et al., 2019) obtaining good results with minimal effort. This study is in the initial phase and it is focuses on automatic code assignment in Spanish, so it can also be an automatic multi-class classification task. The main contributions of this paper can be summarized as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 404, |
|
"end": 433, |
|
"text": "(Demner-Fushman et al., 2009)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 466, |
|
"end": 489, |
|
"text": "(Hripcsak et al., 2002)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 519, |
|
"end": 540, |
|
"text": "(Dreyer et al., 2005;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 541, |
|
"end": 565, |
|
"text": "Hassanpour et al., 2017;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 566, |
|
"end": 585, |
|
"text": "Yadav et al., 2013)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 890, |
|
"end": 914, |
|
"text": "(Witten and Frank, 2002)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1156, |
|
"end": 1178, |
|
"text": "(Crammer et al., 2007;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1179, |
|
"end": 1203, |
|
"text": "Scheurwegs et al., 2017;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1204, |
|
"end": 1222, |
|
"text": "Wang et al., 2016)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1528, |
|
"end": 1549, |
|
"text": "(N\u00e9v\u00e9ol et al., 2017)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1571, |
|
"end": 1592, |
|
"text": "(N\u00e9v\u00e9ol et al., 2018)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1603, |
|
"end": 1627, |
|
"text": "(D\u00f6rendahl et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1846, |
|
"end": 1870, |
|
"text": "(Wang and Summers, 2012;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 1871, |
|
"end": 1888, |
|
"text": "Wei et al., 2005;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 1889, |
|
"end": 1910, |
|
"text": "Perotte et al., 2014)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 2184, |
|
"end": 2220, |
|
"text": "(Tutubalina and Miftahutdinov, 2017)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 2252, |
|
"end": 2277, |
|
"text": "(Mullenbach et al., 2018)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "\u2022 We analyse the performance of the three transfer learning architectures using the BERT models in Spanish: Multilingual BERT, BETO and XLM.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "\u2022 We achieve encouraging results for a collection of Spanish radiological reports.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "\u2022 We also investigate the fine-tuning parameters for BERT, including pre-process of long text, layerwise learning rate, batch sizes and number of epochs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1." |
|
}, |
|
{ |
|
"text": "Dataset is composed of 208,167 anonymized Computed Tomography (CT) examinations. This clinical corpus has been provided by the HT m\u00e9dica. Each report contains relevant information such as: reason for consultation, information regarding the hospital where the CT scan was conducted, type of scan (contrast or non-contrast), and location of the scan (body part). Each radiology report requires a unique code from the 89 available codes. These labels are assigned according to the area where the scan was performed, the type of contrast (contrast or non-contrast) and other clinical indications such as fractures, trauma, inflammation, tumors, and so on. Figure 1 shows the most common codes in the dataset and the number of documents in which each label appears. We can see that the TX4, TC4 and TX5 codes are the ones that appear most frequently in the corpus. A weakness of the collection is that the text written by the specialists is in capital letters. Therefore, we pre-process the text by changing it to lower case.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 652, |
|
"end": 660, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Medical collection", |
|
"sec_num": "2." |
|
}, |
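
{

"text": "As a hedged illustration of this lower-casing step, the following minimal Python sketch shows the kind of normalization applied (the sample reports and the function name are hypothetical, not taken from the original system):\n\ndef preprocess_report(text: str) -> str:\n    # reports are written in capital letters, so normalize to lower case\n    return text.lower().strip()\n\nreports = [\"TC DE TORAX CON CONTRASTE\", \"TC CRANEAL SIN CONTRASTE\"]\nprocessed = [preprocess_report(r) for r in reports]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Medical collection",

"sec_num": "2."

},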
|
{ |
|
"text": "Training, dev and test set The dataset was divided up to carry out the experimentation: 60% of the collection was used for the training set (124,899 documents), the development set was composed of 41,6434 documents (20%) and the remaining 20% for the test set (41,634 documents). The sections of the CT examinations considered for this study were: the reason for the consultation, the location of the scan and the type of contrast used, avoiding hospital information because most of the examinations were done in the same hospital.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Medical collection", |
|
"sec_num": "2." |
|
}, |
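
{

"text": "The 60/20/20 split described above can be reproduced with a minimal sketch, assuming scikit-learn is available (the placeholder reports and codes below are illustrative, not from the corpus):\n\nfrom sklearn.model_selection import train_test_split\n\ntexts = [\"informe %d\" % i for i in range(10)]  # placeholder reports\ncodes = [\"TX4\", \"TC4\"] * 5  # placeholder study codes\n\n# 60% train, then the remaining 40% split evenly into dev and test\nX_train, X_rest, y_train, y_rest = train_test_split(texts, codes, test_size=0.4, random_state=0)\nX_dev, X_test, y_dev, y_test = train_test_split(X_rest, y_rest, test_size=0.5, random_state=0)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Medical collection",

"sec_num": "2."

},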
|
{ |
|
"text": "Transfer learning (Thrun, 1996) is an approach, by which a system can apply knowledge learned from previous tasks to a new task domain. This theory is inspired from the idea that people can intuitively use their previously learned experience to define and solve new problems. For the automatic assignment of codes in Spanish, we have applied three transfer learning approaches based on BERT 2 . BERT (Bidirectional Encoder Representations from Transformers) (Devlin et al., 2018 ) is designed to pre-train deep 2 https://github.com/google-research/bert bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. BERT uses a popular attention mechanism called transformer (Vaswani et al., 2017) that takes into account the context of words. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create a multi-class classification model. This layer assigns a single code to a document.", |
|
"cite_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 31, |
|
"text": "(Thrun, 1996)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 458, |
|
"end": 478, |
|
"text": "(Devlin et al., 2018", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 732, |
|
"end": 754, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Code assignment methods", |
|
"sec_num": "3." |
|
}, |
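
{

"text": "A minimal sketch of this fine-tuning setup, assuming the Hugging Face transformers library with PyTorch (the model name and the example report are illustrative; the paper does not specify its implementation):\n\nfrom transformers import AutoTokenizer, AutoModelForSequenceClassification\n\ntokenizer = AutoTokenizer.from_pretrained(\"bert-base-multilingual-cased\")\nmodel = AutoModelForSequenceClassification.from_pretrained(\n    \"bert-base-multilingual-cased\", num_labels=89)  # one logit per clinic code\n\ninputs = tokenizer(\"tc de torax con contraste\", truncation=True, max_length=256, return_tensors=\"pt\")\nlogits = model(**inputs).logits  # shape: (1, 89)\npredicted_code = logits.argmax(dim=-1)  # index of the assigned code",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Code assignment methods",

"sec_num": "3."

},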
|
{ |
|
"text": "In order to categorize radiology reports in Spanish, we have used three pre-trained models described below:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Code assignment methods", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "Multilingual (henceforth, M-BERT) follows the same model architecture and training procedure as BERT using data from Wikipedia in 104 languages (Pires et al., 2019) . In M-BERT, the WordPiece modeling strategy allows the model to share embedding across languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 144, |
|
"end": 164, |
|
"text": "(Pires et al., 2019)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Code assignment methods", |
|
"sec_num": "3." |
|
}, |
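
{

"text": "For illustration, the WordPiece segmentation that M-BERT shares across languages can be inspected as follows (a sketch assuming the Hugging Face transformers library; the input phrase is a made-up example):\n\nfrom transformers import AutoTokenizer\n\ntok = AutoTokenizer.from_pretrained(\"bert-base-multilingual-cased\")\npieces = tok.tokenize(\"informe radiologico de torax\")  # subword units drawn from the shared multilingual vocabulary",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Code assignment methods",

"sec_num": "3."

},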
|
{ |
|
"text": "BETO is a BERT model trained on a big Spanish corpus. BETO 3 is of size similar to a BERT for English and was trained with the Whole Word Masking technique (Cui et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 156, |
|
"end": 174, |
|
"text": "(Cui et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Code assignment methods", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "XLM uses a pre-processing technique and a duallanguage training mechanism with BERT in order to learn relations between words in different languages (Lample and Conneau, 2019) . XLM presents a new training technique of BERT for multilingual classification tasks and the use of BERT as initialization of machine translation models. In this study we show the performance of two XLM models: XLM trained with 17 languages (XLM-17) and trained with 100 languages (XLM-100)", |
|
"cite_spans": [ |
|
{ |
|
"start": 149, |
|
"end": 175, |
|
"text": "(Lample and Conneau, 2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Code assignment methods", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "4. Experiments and evaluation", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Code assignment methods", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "In this step, we need to make decisions about the hyperparameters for the BERT model. We use the BERT model with a hidden size of 768, 12 transformer blocks and 12 self-attention heads. For the optimizer, we leverage the adam optimizer which performs very well for NLP data and for BERT models in particular. For the purposes of fine-tuning, the authors recommend choosing from the following values: batch size, learning rate, max sequence and number of epoch. Table 4 .1. illustrates the hyperparameters and their tested options, finally in each column we can see the model used and its selected parameter.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 461, |
|
"end": 468, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Fine-tuning pre-trained parameters", |
|
"sec_num": "4.1." |
|
}, |
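
{

"text": "As a hedged sketch, the Table 1 hyperparameters could be expressed with the Hugging Face Trainer API (an assumption; the paper does not name its training toolkit). The values shown are those selected for BETO:\n\nfrom transformers import TrainingArguments\n\nargs = TrainingArguments(\n    output_dir=\"out\",\n    per_device_train_batch_size=16,  # tested: 16, 32, 64\n    learning_rate=2e-5,  # tested: 2e-5, 3e-5\n    num_train_epochs=5,  # tested: 3, 4, 5\n)\n# The maximum sequence length (256 here; tested: 256, 512) is applied at tokenization time.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Fine-tuning pre-trained parameters",

"sec_num": "4.1."

},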
|
{ |
|
"text": "In this section we present the results obtained by applying each BERT model. Since the corpus of radiological reports is in Spanish, we have applied the available models for this language in transfer learning. The metrics used to carry out the experiments are the measures popularly known in the NLP community, namely macro-precision, macro-recall and macro-averaged F1score. M-BERT. XLM mixes several languages but it is enough to learn in the radiology reports and to detect the correct code. XLM-100 obtains the best precision (75%) and XLM-17 the best recall (69.7%). The best F1-score was also obtained with XLM-17 getting 70%. Performing a brief analysis of the mislabeled codes, we found that the 23 worst-labeled codes had 2,443 documents to be trained, which is 1.96% of the total training set. In addition, the average number of training documents is 106, so they do not have enough information to learn. According to the evaluation of each code, Figure 2 shows the number of codes and their result ranges using the F1 score. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 957, |
|
"end": 965, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.2." |
|
}, |
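
{

"text": "The macro-averaged metrics reported above can be computed as in the following sketch, assuming scikit-learn (y_true and y_pred are placeholder label lists, not real system output):\n\nfrom sklearn.metrics import precision_recall_fscore_support\n\ny_true = [\"TX4\", \"TC4\", \"TX5\", \"TX4\"]\ny_pred = [\"TX4\", \"TX5\", \"TX5\", \"TC4\"]\nprecision, recall, f1, _ = precision_recall_fscore_support(\n    y_true, y_pred, average=\"macro\", zero_division=0)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Results",

"sec_num": "4.2."

},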
|
{ |
|
"text": "Our project is in a beginner's state and has limitations that need to be improved in the future. The limitations we found are shown below:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and future work", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "\u2022 Occasionally, the texts of the radiological reports are longer than allowed in the BERT model (max sequence of 512).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and future work", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "\u2022 The texts provided by the specialists are in capital letters, we pre-process the text by changing it to lower case.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and future work", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "\u2022 There are codes with few examples for training, so the system fails to classify.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and future work", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "We plan to make future improvements to the automatic classification system. These improvements can be summarized in the following points:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and future work", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "\u2022 We will perform a deep error analysis and see the behavior of each model applied to our corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and future work", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "\u2022 We will analyze why XLM has achieved better results than BETO, being XLM trained for different languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and future work", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "\u2022 Strategies with embeddings to obtain the representation vector of each word will be used in future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and future work", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "\u2022 We will make changes to the model, for example, adding new layers or concatenating new features extracted from the corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and future work", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "\u2022 We will improve BERT's vocabulary to find more words related to the biomedical domain.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and future work", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "BioBERT (Lee et al., 2019) currently exists for English, we could make an adaptation or create a similar model with Spanish.", |
|
"cite_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 26, |
|
"text": "(Lee et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and future work", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "\u2022 There are parts of the text that are more important than others, for example the location of the exploration, in the future we plan to detect these features so that the model learns better.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Limitations and future work", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "In this study we conducted a multi-class task to detect codes in radiology reports written in Spanish. We have carried out experiments that are the state-of-the-art pre-training for NLP: BERT model. We apply different approaches using this model such as Multilingual BERT, BETO and XLM. Recent advances in transfer learning model have opened another way to extract features and classify medical documents. We have a collection of over 200,000 CT scans and each text can have 89 possible codes. Each code is associated with the document for a reason. The most important reasons include: location of the body where the CT scan was performed or a previous finding or disease. Using the XLM algorithm trained with 17 different languages we obtain a 70% of F1-score, detecting that the worst predictions are those codes that have scarce examples to train.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6." |
|
}
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "This study is at an early stage so we have described limitations and future work to further improve the code assignment task", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "This study is at an early stage so we have described limi- tations and future work to further improve the code assign- ment task.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "This work has been partially supported by the Fondo Europeo de Desarrollo Regional (FEDER), LIVING-LANG project (RTI2018-094653-B-C21), under the Spanish Government", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "This work has been partially supported by the Fondo Eu- ropeo de Desarrollo Regional (FEDER), LIVING-LANG project (RTI2018-094653-B-C21), under the Spanish Gov- ernment.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Mlt-dfki at clef ehealth 2019: Multi-label classification of icd-10 codes", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Amin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Dunfield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Vechkaeva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Chapman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Wixted", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amin, S., Neumann, G., Dunfield, K., Vechkaeva, A., Chapman, K. A., and Wixted, M. K. (2019). Mlt-dfki at clef ehealth 2019: Multi-label classification of icd-10 codes with bert. CLEF (Working Notes).", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Automatic code assignment to medical text", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Crammer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Dredze", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Ganchev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Talukdar", |
|
"suffix": "" |
|
}, |
|
{

"first": "S",

"middle": [],

"last": "Carroll",

"suffix": ""

}
|
], |
|
"year": 2007, |
|
"venue": "Biological, translational, and clinical language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "129--136", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Crammer, K., Dredze, M., Ganchev, K., Talukdar, P., and Carroll, S. (2007). Automatic code assignment to med- ical text. In Biological, translational, and clinical lan- guage processing, pages 129-136.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Pre-training with whole word masking for chinese bert", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Che", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.08101" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cui, Y., Che, W., Liu, T., Qin, B., Yang, Z., Wang, S., and Hu, G. (2019). Pre-training with whole word masking for chinese bert. arXiv preprint arXiv:1906.08101.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "What can natural language processing do for clinical decision support", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Demner-Fushman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Chapman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Journal of biomedical informatics", |
|
"volume": "42", |
|
"issue": "5", |
|
"pages": "760--772", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Demner-Fushman, D., Chapman, W. W., and McDonald, C. J. (2009). What can natural language processing do for clinical decision support? Journal of biomedical in- formatics, 42(5):760-772.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M.-W", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Devlin, J., Chang, M.-W., Lee, K., and Toutanova, K. (2018). Bert: Pre-training of deep bidirectional trans- formers for language understanding. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Overview of the clef ehealth 2019 multilingual information extraction", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "D\u00f6rendahl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Leich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Hummel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Sch\u00f6nfelder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Grune", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Working Notes of CLEF 2019 -Conference and Labs of the Evaluation Forum", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D\u00f6rendahl, A., Leich, N., Hummel, B., Sch\u00f6nfelder, G., and Grune, B. (2019). Overview of the clef ehealth 2019 multilingual information extraction. In Working Notes of CLEF 2019 -Conference and Labs of the Evaluation Fo- rum.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Application of recently developed computer algorithm for automatic classification of unstructured radiology reports: validation study", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Dreyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Kalra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Maher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Hurier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Asfaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Schultz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Halpern", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Thrall", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Radiology", |
|
"volume": "234", |
|
"issue": "2", |
|
"pages": "323--329", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dreyer, K. J., Kalra, M. K., Maher, M. M., Hurier, A. M., Asfaw, B. A., Schultz, T., Halpern, E. F., and Thrall, J. H. (2005). Application of recently developed computer al- gorithm for automatic classification of unstructured radi- ology reports: validation study. Radiology, 234(2):323- 329.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Performance of a machine learning classifier of knee mri reports in two large academic radiology practices: a tool to estimate diagnostic yield", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Hassanpour", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Langlotz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Amrhein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Befera", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Lungren", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "American Journal of Roentgenology", |
|
"volume": "208", |
|
"issue": "4", |
|
"pages": "750--753", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hassanpour, S., Langlotz, C. P., Amrhein, T. J., Befera, N. T., and Lungren, M. P. (2017). Performance of a machine learning classifier of knee mri reports in two large academic radiology practices: a tool to estimate diagnostic yield. American Journal of Roentgenology, 208(4):750-753.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Use of natural language processing to translate clinical information from a database of 889,921 chest radiographic reports", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Hripcsak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Austin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"O" |
|
], |
|
"last": "Alderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Friedman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Radiology", |
|
"volume": "224", |
|
"issue": "1", |
|
"pages": "157--163", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hripcsak, G., Austin, J. H., Alderson, P. O., and Fried- man, C. (2002). Use of natural language processing to translate clinical information from a database of 889,921 chest radiographic reports. Radiology, 224(1):157-163.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Crosslingual language model pretraining", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1901.07291" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lample, G. and Conneau, A. (2019). Cross- lingual language model pretraining. arXiv preprint arXiv:1901.07291.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Biobert: pre-trained biomedical language representation model for biomedical text mining", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Yoon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "So", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Kang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1901.08746" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lee, J., Yoon, W., Kim, S., Kim, D., Kim, S., So, C. H., and Kang, J. (2019). Biobert: pre-trained biomedical language representation model for biomedical text min- ing. arXiv preprint arXiv:1901.08746.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Explainable prediction of medical codes from clinical text", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Mullenbach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Wiegreffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Duke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Eisenstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1802.05695" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mullenbach, J., Wiegreffe, S., Duke, J., Sun, J., and Eisen- stein, J. (2018). Explainable prediction of medical codes from clinical text. arXiv preprint arXiv:1802.05695.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Clef ehealth 2017 multilingual information extraction task overview: Icd10 coding of death certificates in english and french", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "N\u00e9v\u00e9ol", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Robert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Anderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Grouin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Lavergne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Rey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Rondet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Zweigenbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "CLEF (Working Notes)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "N\u00e9v\u00e9ol, A., Robert, A., Anderson, R., Cohen, K. B., Grouin, C., Lavergne, T., Rey, G., Rondet, C., and Zweigenbaum, P. (2017). Clef ehealth 2017 multilin- gual information extraction task overview: Icd10 cod- ing of death certificates in english and french. In CLEF (Working Notes).", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Clef ehealth 2018 multilingual information extraction task overview: Icd10 coding of death certificates in french, hungarian and italian", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "N\u00e9v\u00e9ol", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Robert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Grippo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Morgand", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Orsi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Pelikan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Ramadier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Rey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Zweigenbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "CLEF (Working Notes)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "N\u00e9v\u00e9ol, A., Robert, A., Grippo, F., Morgand, C., Orsi, C., Pelikan, L., Ramadier, L., Rey, G., and Zweigenbaum, P. (2018). Clef ehealth 2018 multilingual information extraction task overview: Icd10 coding of death certifi- cates in french, hungarian and italian. In CLEF (Working Notes).", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Diagnosis code assignment: models and evaluation metrics", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Perotte", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Pivovarov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Natarajan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Weiskopf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Wood", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Elhadad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Journal of the American Medical Informatics Association", |
|
"volume": "21", |
|
"issue": "2", |
|
"pages": "231--237", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Perotte, A., Pivovarov, R., Natarajan, K., Weiskopf, N., Wood, F., and Elhadad, N. (2014). Diagnosis code as- signment: models and evaluation metrics. Journal of the American Medical Informatics Association, 21(2):231- 237.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "How multilingual is multilingual bert? arXiv preprint", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Pires", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Schlinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Garrette", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.01502" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pires, T., Schlinger, E., and Garrette, D. (2019). How multilingual is multilingual bert? arXiv preprint arXiv:1906.01502.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Selecting relevant features from the electronic health record for clinical code prediction", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Scheurwegs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Cule", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Luyckx", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Luyten", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Daelemans", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Journal of biomedical informatics", |
|
"volume": "74", |
|
"issue": "", |
|
"pages": "92--103", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Scheurwegs, E., Cule, B., Luyckx, K., Luyten, L., and Daelemans, W. (2017). Selecting relevant features from the electronic health record for clinical code prediction. Journal of biomedical informatics, 74:92-103.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Is learning the n-th thing any easier than learning the first?", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Thrun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "640--646", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thrun, S. (1996). Is learning the n-th thing any easier than learning the first? In Advances in neural information processing systems, pages 640-646.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "An encoderdecoder model for icd-10 coding of death certificates", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Tutubalina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Miftahutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1712.01213" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tutubalina, E. and Miftahutdinov, Z. (2017). An encoder- decoder model for icd-10 coding of death certificates. arXiv preprint arXiv:1712.01213.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, \u0141., and Polosukhin, I. (2017). Attention is all you need. In Advances in neural infor- mation processing systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Machine learning and radiology", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Summers", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Medical image analysis", |
|
"volume": "16", |
|
"issue": "5", |
|
"pages": "933--951", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wang, S. and Summers, R. M. (2012). Machine learning and radiology. Medical image analysis, 16(5):933-951.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Diagnosis code assignment using sparsity-based disease correlation embedding", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Long", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Q", |
|
"middle": [ |
|
"Z" |
|
], |
|
"last": "Sheng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "IEEE Transactions on Knowledge and Data Engineering", |
|
"volume": "28", |
|
"issue": "12", |
|
"pages": "3191--3202", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wang, S., Chang, X., Li, X., Long, G., Yao, L., and Sheng, Q. Z. (2016). Diagnosis code assignment us- ing sparsity-based disease correlation embedding. IEEE Transactions on Knowledge and Data Engineering, 28(12):3191-3202.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "A study on several machine-learning methods for classification of malignant and benign clustered microcalcifications", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Nishikawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "IEEE transactions on medical imaging", |
|
"volume": "24", |
|
"issue": "3", |
|
"pages": "371--380", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei, L., Yang, Y., Nishikawa, R. M., and Jiang, Y. (2005). A study on several machine-learning methods for classification of malignant and benign clustered mi- crocalcifications. IEEE transactions on medical imag- ing, 24(3):371-380.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Data mining: practical machine learning tools and techniques with java implementations", |
|
"authors": [ |
|
{ |
|
"first": "I", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Witten", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Frank", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Acm Sigmod Record", |
|
"volume": "31", |
|
"issue": "1", |
|
"pages": "76--77", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Witten, I. H. and Frank, E. (2002). Data mining: practical machine learning tools and techniques with java imple- mentations. Acm Sigmod Record, 31(1):76-77.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Automated outcome classification of emergency department computed tomography imaging reports", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Yadav", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Sarioglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H.-A", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Academic Emergency Medicine", |
|
"volume": "20", |
|
"issue": "8", |
|
"pages": "848--854", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yadav, K., Sarioglu, E., Smith, M., and Choi, H.-A. (2013). Automated outcome classification of emergency depart- ment computed tomography imaging reports. Academic Emergency Medicine, 20(8):848-854.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Most common labels and their frequency in the collection." |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Results obtained in the F1-score and number of codes evaluated." |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "shows the results achieved and we can see that the results are encouraging, having a large list of codes to assign. XLM gets the best results by upgrading to BETO and", |
|
"html": null, |
|
"content": "<table><tr><td/><td>Parameter</td><td>Options</td><td colspan=\"4\">M-BERT BETO XLM-100 XLM-17</td></tr><tr><td/><td>Batch size</td><td colspan=\"2\">[16, 32, 64]</td><td>32</td><td>16</td><td>16</td><td>16</td></tr><tr><td/><td colspan=\"3\">Max sequence [256, 512]</td><td>256</td><td>256</td><td>256</td><td>256</td></tr><tr><td/><td>Learning rate</td><td colspan=\"2\">[2e-5, 3e-5]</td><td>3e-5</td><td>2e-5</td><td>2e-5</td><td>2e-5</td></tr><tr><td/><td>Epoch</td><td>[3, 4, 5]</td><td/><td>4</td><td>5</td><td>5</td><td>5</td></tr><tr><td/><td colspan=\"6\">Table 1: Hyperparameters tested and options chosen in each model.</td></tr><tr><td colspan=\"4\">Pre-trainined Model Precision Recall F1-score</td><td/><td/></tr><tr><td>M-BERT</td><td>65.41</td><td>62.07</td><td>62.33</td><td/><td/></tr><tr><td>BETO</td><td>69.86</td><td>65.34</td><td>66.34</td><td/><td/></tr><tr><td>XLM-100</td><td>75.05</td><td>69.10</td><td>70.64</td><td/><td/></tr><tr><td>XML-17</td><td>74.83</td><td>69.79</td><td>70.84</td><td/><td/></tr></table>" |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Results obtained for code assignment in radiological reports.", |
|
"html": null, |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |