ACL-OCL / Base_JSON /prefixC /json /clinicalnlp /2020.clinicalnlp-1.22.json
Benjamin Aw
Add updated pkl file v3
6fa4bc9
{
"paper_id": "2020",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T12:27:16.439519Z"
},
"title": "Cancer Registry Information Extraction via Transfer Learning",
"authors": [
{
"first": "You-Chen",
"middle": [],
"last": "Zhang",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Kaohsiung University of Science and Technology",
"location": {
"settlement": "Kaohsiung",
"country": "Taiwan R.O.C"
}
},
"email": ""
},
{
"first": "Ti-Hao",
"middle": [],
"last": "Wang",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "China Medical University Hospital",
"location": {
"country": "Taiwan R.O.C"
}
},
"email": ""
},
{
"first": "Yi-Hsin",
"middle": [],
"last": "Yang",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Health Research Institutes",
"location": {
"settlement": "Tainan",
"country": "Taiwan, R.O.C"
}
},
"email": ""
},
{
"first": "Yan-Jie",
"middle": [],
"last": "Lin",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Health Research Institutes",
"location": {
"settlement": "Miaoli",
"country": "Taiwan R.O.C"
}
},
"email": ""
},
{
"first": "Chung-Yang",
"middle": [],
"last": "Wu",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Kaohsiung University of Science and Technology",
"location": {
"settlement": "Kaohsiung",
"country": "Taiwan R.O.C"
}
},
"email": ""
},
{
"first": "Yu-Cheng",
"middle": [],
"last": "Chang",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Kaohsiung University of Science and Technology",
"location": {
"settlement": "Kaohsiung",
"country": "Taiwan R.O.C"
}
},
"email": ""
},
{
"first": "Pin-Jou",
"middle": [],
"last": "Lu",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Kaohsiung University of Science and Technology",
"location": {
"settlement": "Kaohsiung",
"country": "Taiwan R.O.C"
}
},
"email": ""
},
{
"first": "Chih-Jen",
"middle": [],
"last": "Huang",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Kaohsiung Medical University",
"location": {
"country": "Taiwan R.O.C"
}
},
"email": ""
},
{
"first": "Yu-Tsang",
"middle": [],
"last": "Wang",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Kaohsiung Medical University",
"location": {
"country": "Taiwan R.O.C"
}
},
"email": ""
},
{
"first": "Sheau-Fang",
"middle": [],
"last": "Yang",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Kaohsiung Medical University",
"location": {
"country": "Taiwan R.O.C"
}
},
"email": ""
},
{
"first": "Kuan-Chung",
"middle": [],
"last": "Hsiao",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Health Research Institutes",
"location": {
"settlement": "Tainan",
"country": "Taiwan, R.O.C"
}
},
"email": ""
},
{
"first": "Ko-Jiunn",
"middle": [],
"last": "Liu",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Health Research Institutes",
"location": {
"settlement": "Tainan",
"country": "Taiwan, R.O.C"
}
},
"email": ""
},
{
"first": "Li-Tzong",
"middle": [],
"last": "Chen",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Health Research Institutes",
"location": {
"settlement": "Tainan",
"country": "Taiwan, R.O.C"
}
},
"email": ""
},
{
"first": "Tsang-Wu",
"middle": [],
"last": "Liu",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "I-Shou",
"middle": [],
"last": "Chang",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Health Research Institutes",
"location": {
"settlement": "Tainan",
"country": "Taiwan, R.O.C"
}
},
"email": ""
},
{
"first": "Kun-San",
"middle": [
"Clifford"
],
"last": "Chao",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Health Research Institutes",
"location": {
"settlement": "Tainan",
"country": "Taiwan, R.O.C"
}
},
"email": ""
},
{
"first": "Hong-Jie",
"middle": [],
"last": "Dai",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "National Kaohsiung University of Science and Technology",
"location": {
"settlement": "Kaohsiung",
"country": "Taiwan R.O.C"
}
},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "A cancer registry is a critical and massive database for which various types of domain knowledge are needed and whose maintenance requires labor-intensive data curation. In order to facilitate the curation process for building a high-quality and integrated cancer registry database, we compiled a cross-hospital corpus and applied neural network methods to develop a natural language processing system for extracting cancer registry variables buried in unstructured pathology reports. The performance of the developed networks was compared with various baselines using standard micro-precision, recall and F-measure. Furthermore, we conducted experiments to study the feasibility of applying transfer learning to rapidly develop a well-performing system for processing reports from different sources that might be presented in different writing styles and formats. The results demonstrate that the transfer learning method enables us to develop a satisfactory system for a new hospital with only a few annotations and suggest more opportunities to reduce the burden of cancer registry curation.",
"pdf_parse": {
"paper_id": "2020",
"_pdf_hash": "",
"abstract": [
{
"text": "A cancer registry is a critical and massive database for which various types of domain knowledge are needed and whose maintenance requires labor-intensive data curation. In order to facilitate the curation process for building a high-quality and integrated cancer registry database, we compiled a cross-hospital corpus and applied neural network methods to develop a natural language processing system for extracting cancer registry variables buried in unstructured pathology reports. The performance of the developed networks was compared with various baselines using standard micro-precision, recall and F-measure. Furthermore, we conducted experiments to study the feasibility of applying transfer learning to rapidly develop a well-performing system for processing reports from different sources that might be presented in different writing styles and formats. The results demonstrate that the transfer learning method enables us to develop a satisfactory system for a new hospital with only a few annotations and suggest more opportunities to reduce the burden of cancer registry curation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Cancer is a main cause of mortality worldwide and has been the leading cause of death over several decades in our country. A cancer registry system has been established by Taiwan Society of Cancer Registry and supported by Ministry of Health and Welfare (MOHW) over 40 years. How to extract massive data concisely and maintain high quality continuously are critical issues and burdens of healthcare system. However, the maintenance of an individual cancer registry from patient healthcare trajectories needs different types of domain knowledge which is pronouncedly both labor-intensive and time-consuming. In addition, how to validate and integrate between different hospitals or between local healthcare resource and national database are crucial topics.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "To facilitate the integration of models for a specific cancer, applying information technology tools to improve acquisition and classification of patients' healthcare trajectories can enable more accurate phenotyping of cancer information. Nevertheless, addressing the issues needs more cooperation both on information technology and medical expertise. In order to assist integration among the institutes, a national project was established under the Cancer Center Support Grant Program (CCSG) supported by MOHW. As the coordinator of this project, we conducted research studies and cooperated with several hospitals to establish a platform to work out a model system based on existing cancer data.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "One major goal of this project is to apply natural language processing (NLP) techniques to automatically analyze unstructured data including surgical reports, pathology reports, oncology clinical notes, and laboratory findings that may not be easy to acquire or share across hospitals for specific cancers. Pathology reports are usually abundant and contain operative findings, general tumor information, pathological assessment, cancer staging, and end-results which need to be extracted and classified clearly. In the pilot study, we focus on tasks including the collection and deidentification of pathology reports, data annotation for developing and evaluating deep learning-based NLP systems to extract cancer registry variables from different hospital sites.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "To standardize the annotation of pathology material for developing our NLP system, the variables and their definitions were defined by the consensus from expertise committee composed of hospital investigators and annotators. Furthermore, we applied transfer learning and conducted experiments to examine the performance of the developed neural networks on the cross-hospital pathology materials to gain insights on how effective and concise transfer learning can be. The results not only enable us to understand which layers of the developed network convey the most important parameters for transfer but also let us know how many annotations are needed for training a system for a new hospital to achieve reasonable performance.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In the presented study, we primarily focused on the colorectal cancer, which is the third leading cause of cancer-specific death in Taiwan. We cooperated with two medical centers, namely China Medical University Hospital (CMUH) and Kaohsiung Medical University Chung-Ho Memorial Hospital (KMUH), to collect colorectal pathology reports and the data were excluded non-tumor reports as well as the reports without cancer registration data for compiling our corpora. Table 1 shows the grouping and the number of the collected datasets.",
"cite_spans": [],
"ref_spans": [
{
"start": 464,
"end": 471,
"text": "Table 1",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Datasets",
"sec_num": "2.1"
},
{
"text": "In order to produce high quality annotations for developing our system, we established a NLP working group focusing on the construction of high-quality corpora. For our purpose, the annotation process was conducted by eight annotators based on an annotation guideline developed by consulting the committee composed of hospital investigators and cancer registrars. According to the standard of American Joint Committee on Cancer, nine cancer registry variables were defined for extraction in order to achieving a better understanding and unified effects on pathological materials. an identical set of 100 reports randomly selected from the collected datasets. All of them used the annotation tool ( Figure 1 ) developed by our collaborator to conduct their annotations. We then measured their inter-annotation agreement by Kappa statistic (Viera & Garrett, 2005) .",
"cite_spans": [
{
"start": 838,
"end": 861,
"text": "(Viera & Garrett, 2005)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [
{
"start": 698,
"end": 706,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Corpus Construction",
"sec_num": "2.2"
},
{
"text": "Afterwards a labeling meeting was organized to discuss issues and concerns encountered during the annotation process and the annotation guideline was adjusted according to the conclusion of the meeting. The above process was conducted iterative until they achieved an agreement above substantial. Finally, the remaining unlabeled datasets were evenly distributed to all annotators for labeling. The same annotation process was applied individually for the data collected from the two hospitals.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Corpus Construction",
"sec_num": "2.2"
},
{
"text": "The aforementioned 100 annotation data generated by all annotators individually on the same reports were collected as the test set for evaluating the performance of the developed systems. They were combined by voting; only those annotations that were annotated by more than four annotators at the same time were kept. The other reports evenly annotated by annotators were collected as the training sets.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Corpus Construction",
"sec_num": "2.2"
},
{
"text": "For a given pathology report, our clinical toolkit (Dai, Syed-Abdul, Chen, & Wu, 2015) was employed to segment sentences and generate tokens based on MedPost (Smith, Rindflesch, & Wilbur, 2004) . The numerical normalization method proposed by Tsai et al. (2006) was employed to reduce variations in numerical parts of each token. We then formulated the problem as a sequential labeling task and applied the IOB-2 tag scheme to encode the span information generated by annotators. All sequences including those that did not contain any annotations were included in the training set to train a neural sequence labeling network model whose architecture is briefly described as follows.",
"cite_spans": [
{
"start": 51,
"end": 86,
"text": "(Dai, Syed-Abdul, Chen, & Wu, 2015)",
"ref_id": "BIBREF0"
},
{
"start": 158,
"end": 193,
"text": "(Smith, Rindflesch, & Wilbur, 2004)",
"ref_id": "BIBREF8"
},
{
"start": 243,
"end": 261,
"text": "Tsai et al. (2006)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Cancer Registry Information Extraction with Different Approaches",
"sec_num": "2.3"
},
{
"text": "The input of the network is the pre-processed sequence of tokens in a pathology report and the output being the sequence of labels for each token. The input tokens was represented as a vector by concatenating the pre-trained word representations obtained by using GloVe (Pennington, Socher, & Manning, 2014) and RoBERTa (Liu et al., 2019) . The parameters of the concatenated vectors were kept fixed during the training process. Description likes: well differentiated, and undifferentiated",
"cite_spans": [
{
"start": 270,
"end": 307,
"text": "(Pennington, Socher, & Manning, 2014)",
"ref_id": "BIBREF7"
},
{
"start": 320,
"end": 338,
"text": "(Liu et al., 2019)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Cancer Registry Information Extraction with Different Approaches",
"sec_num": "2.3"
},
{
"text": "The concatenated representation was then feed to a fully connected layer (denoted as FC1) along with a variational dropout before passing the embeddings into the bidirectional long-short term memory (BiLSTM) network with one layer consisting of 256 hidden nodes. The output of the BiLSTM layer goes through another fully connected layer (denoted as FC2) to generate an output of a size equal to the number of the classes, which becomes the input of the inference layer in which a conditional random field (CRF) layer was used to model the dependencies between labels in neighborhoods with the Viterbi loss to jointly decode the best chain of labels for the given sequence.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Cancer Registry Information Extraction with Different Approaches",
"sec_num": "2.3"
},
{
"text": "In addition to the aforementioned architecture, we implemented the following baselines for comparison:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Cancer Registry Information Extraction with Different Approaches",
"sec_num": "2.3"
},
{
"text": "Dictionary-based approach: For a given token, output the most frequent assigned tag estimated on the training sets.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Cancer Registry Information Extraction with Different Approaches",
"sec_num": "2.3"
},
{
"text": "Formulate the task as a token-based classification task and apply SVM with a polylinear kernel to learn a classification model.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Support vector machine (SVM):",
"sec_num": null
},
{
"text": "The normalized word features with a context window of three along with transition features were used for training a CRF model. BiLSTM: Similar to the aforementioned network architecture, but a linear layer was used instead of the CRF layer as the output layer.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "CRF:",
"sec_num": null
},
{
"text": "All of the above neural networks were implemented by using PyTorch trained on NVidia Tesla P-100 GPUs. CRF was implemented by using CRF++ 1 and scikit-learn 2 were used for the remaining implementations.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "CRF:",
"sec_num": null
},
{
"text": "Transfer learning (Pan & Yang, 2009) aims to learn a better model on a target domain by leveraging the knowledge previously learned from a source domain. In this study, the transductive transfer learning technology was applied by transferring the parameters in different layers of the BiLSTM-CRF model trained on the dataset of the source hospital to the target hospital by retraining the model with transferred parameters on the target hospital's dataset via fine-tuning. In our experiments, we didn't freeze any layers but finetuned all transferred parameters in different layers.",
"cite_spans": [
{
"start": 18,
"end": 36,
"text": "(Pan & Yang, 2009)",
"ref_id": "BIBREF6"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Transfer Learning for Extracting Information between Different Hospitals",
"sec_num": "2.4"
},
{
"text": "We conducted three experiments to study the characteristics of the compiled corpora and the effectiveness of the developed models on the compiled corpora. The first compared the proposed model with the aforementioned baseline methods.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiment Configurations",
"sec_num": "2.5"
},
{
"text": "The second examined the effectiveness of transfer learning and the last checked the robustness of the developed models under the evaluation of crosscorpus. The standard micro-precision (P), recall (R) and F-measure (F) were used to evaluate the models' outputs against the gold annotations. For training the neural networks in all of our experiments, we randomly kept 50 reports in the training sets as the validation sets to determine the best performed models during the training process. The validation sets were not used in training. The mini-batch gradient descent along with the stochastic gradient descent algorithm (with a learning rate of 0.1, a momentum of 0.9 and a weight decay of 10 -5 ) was used for optimizing the parameters. Unless specifically described, the batch size and epoch were set to 2,048 and 150 respectively in the following experiments. The training process was early stopped if the learning rate was lower than 10 -5 . For consistency, we used the same set of hyper-parameters and a fixed random seed across all experiments.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiment Configurations",
"sec_num": "2.5"
},
{
"text": "A total of 2,008 reports collected from the two hospitals were annotated. The final Kappa values estimated for CMUH and KMUH are 0.802 (substantial) and 0.914 (almost perfect) respectively. Table 3 shows the detail statistics of the compiled corpora. As one can see that the size of KMUH is much larger than that of CMUH. Although the size of the KMUH corpus is much larger than that of CMUH, the annotations for pathological M is much less in KMUH. It's because that pathological M stage need the other reports (e.g., image reports from other examination division) to conclude the outcome, the pathological M stage was shown inconclusive results on the current pathological data frequently in KMUH.",
"cite_spans": [],
"ref_spans": [
{
"start": 190,
"end": 197,
"text": "Table 3",
"ref_id": "TABREF3"
}
],
"eq_spans": [],
"section": "Corpus Statistics",
"sec_num": "3.1"
},
{
"text": "In the first experiment, we trained the developed models on the two training sets separately and evaluated their performance on the test sets of the two hospitals. The results were illustrated in Table 4 . In general, the developed models performed better on the KMUH test set which may be owing to the larger numbers of training samples. The CRF model achieved a comparable F-score on the KMUH test set but its F-score is lower than that of BiLSTM-CRF by 0.214 on the CMUH test set. Table 5 shows the detail results for the nine annotation types of the BiLSTM-CRF model on the two test sets. Overall, the developed networks demonstrated promising F-scores for all items.",
"cite_spans": [],
"ref_spans": [
{
"start": 196,
"end": 204,
"text": "Table 4",
"ref_id": "TABREF4"
},
{
"start": 485,
"end": 492,
"text": "Table 5",
"ref_id": "TABREF5"
}
],
"eq_spans": [],
"section": "Performance Comparison with Different Methods",
"sec_num": "3.2"
},
{
"text": "In this experiment, we would like to gain insights on what extent transfer learning improves the performance on the cross-hospital datasets. We used KMUH as the source dataset since its size is larger than that of CMUH. We conducted experiments to examine the effect of transfer knowledge learned from KMUH to CMUH by 1) analyzing the importance of each layer of the developed neural networks, and 2) quantifying the performance gain by varying the sizes (20%~100%) of the CMUH training set when we fine-tuned the model pre-trained on KMUH. Note that because the size of the 20% CMUH dataset is quite small, we reduced the batch size to 512 for this case. Figure 2 shows the results. Here \"Non-transfer\" refers to that we only used the reduced sizes of the CMUH training set to develop the BiLSTM-CRF models without relying on any pre-trained parameters. \"FC1\" initialized the learned parameters of the FC1 layer of the BiLSTM-CRF model by adopting the pre-trained parameters on the KMUH corpus, \"BiLSTM\" further included the learned parameters of the BiLSTM layer of the source model and so on. Consider the comparable results achieved by CRF models, we also include the configuration \"Non-transfer-CRF\" in which we trained several CRF models by using the corresponding reduced CMUH datasets.",
"cite_spans": [],
"ref_spans": [
{
"start": 656,
"end": 664,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "The Effect of Transfer Learning",
"sec_num": "3.3"
},
{
"text": "In Figure 2 , we can observe that with more numbers of the training samples used, the performance can be apparently improved for the 'Non-transfer' models. However, the improvement for the CRF models is relatively flat comparing with that of the neural networks. On the other hand, even with only 20% of the CMUH training set, the models learned with transferred parameters achieved satisfactory F-scores, which outperformed the 'Non-transfer' models trained on more training samples (being equal or less than 60%) of the full CMUH training set. The above results give us an insight that we can exploit the parameters of the neural networks learned from source hospitals to rapidly develop a reliable system relying on a small annotated dataset to boost the annotation process in the new hospital for creating and evaluating a customized system.",
"cite_spans": [],
"ref_spans": [
{
"start": 3,
"end": 11,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "The Effect of Transfer Learning",
"sec_num": "3.3"
},
{
"text": "The results shown in Figure 2 also reveal the importance of parameters of each layer of the developed model in the manner of transfer learning. We can observe that transferring parameters of all layers in general leading to slightly better F-scores, but transferring the parameters of the first layer only is almost as efficient as transferring all. The result is consistent with the observations of other previous works (Giorgi & Bader, 2018 , 2020 Lee, Dernoncourt, & Szolovits, 2018) and the hypothesis that the lower layers of a neural network learn generic features and the higher layers learn task-specific (or we can say that hospital-specific) features.",
"cite_spans": [
{
"start": 421,
"end": 442,
"text": "(Giorgi & Bader, 2018",
"ref_id": "BIBREF2"
},
{
"start": 443,
"end": 449,
"text": ", 2020",
"ref_id": "BIBREF3"
},
{
"start": 450,
"end": 486,
"text": "Lee, Dernoncourt, & Szolovits, 2018)",
"ref_id": "BIBREF4"
}
],
"ref_spans": [
{
"start": 21,
"end": 29,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "The Effect of Transfer Learning",
"sec_num": "3.3"
},
{
"text": "To assess the performance of the developed model in a more realistic setup, we conducted cross- n / a n / a n / a datasets experiments. For this purpose, we used the dataset from one hospital for training, and the dataset from another for testing. The experiments provide an estimate of the cross-hospital generalization ability of the developed models. Table 6 shows the results. Given that both corpora were annotated by the same annotators under the same annotation guideline, we can still see the generality of the developed models is not well; a larger drop in performance can be found on both datasets. The results exhibited that the format and the writing styles of the descriptive pathology in surgical biopsy reports across hospitals are heterogeneous in real-world scenarios.",
"cite_spans": [],
"ref_spans": [
{
"start": 354,
"end": 361,
"text": "Table 6",
"ref_id": "TABREF6"
}
],
"eq_spans": [],
"section": "Cross-corpus Evaluations",
"sec_num": "3.4"
},
{
"text": "We also estimated the performance of the transferred model on its source dataset in Table 6 . The result illustrates an apparent drop of F-score from 0.976 to 0.762 on the KMUH test set. The results demonstrated that the developed systems suffered the catastrophic forgetting problem (French, 1999) which is now known to be a challenge for artificial neural networks when the network is trained sequentially on multiple tasks because the weights in the network that are important for the original task are now changed to meet the objectives of the new task.",
"cite_spans": [
{
"start": 284,
"end": 298,
"text": "(French, 1999)",
"ref_id": "BIBREF1"
}
],
"ref_spans": [
{
"start": 84,
"end": 91,
"text": "Table 6",
"ref_id": "TABREF6"
}
],
"eq_spans": [],
"section": "Cross-corpus Evaluations",
"sec_num": "3.4"
},
{
"text": "In this work, we investigated the feasibility of applying transfer learning via neural networks on the task of extraction cancer registry information from cross-hospital pathology reports. Because the writing styles and formats of the pathology reports is different in each hospital, to estimate the requirements of the number of annotated datasets when we migrate from one hospital to the others and iteratively improve the effectiveness of the developed systems, we conducted experiments to quantify the impact of transfer learning on the datasets collected from two hospitals. From the evaluations of the results, we confirmed that when transfer learning is adopted, the model pre-trained on a source hospital can be trained with fewer annotations of the target hospital and achieve satisfactory performance as when the full training set of the target hospital is used. The results suggest us to apply the transfer learning techniques for developing a customized system for a new hospital with only a few annotations. We will develop method to estimate the required numbers of annotations based on the language properties of the narrative reports and the characteristics of the developed neural networks. Furthermore, our experiment results also reveal challenges requiring to be addressed including the generalizability and catastrophic forgetting problem, which should be addressed in the future.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions",
"sec_num": "4"
},
{
"text": "https://taku910.github.io/crfpp/",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "https://scikit-learn.org/",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Recognition and Evaluation of Clinical Section Headings in Clinical Documents Using Token-Based Formulation with Conditional Random Fields",
"authors": [
{
"first": "H.-J",
"middle": [],
"last": "Dai",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Syed-Abdul",
"suffix": ""
},
{
"first": "C.-W",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "C.-C",
"middle": [],
"last": "Wu",
"suffix": ""
}
],
"year": 2015,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dai, H.-J., Syed-Abdul, S., Chen, C.-W., & Wu, C.-C. (2015). Recognition and Evaluation of Clinical Section Headings in Clinical Documents Using Token-Based Formulation with Conditional Random Fields. BioMed Research International, 2015.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Catastrophic forgetting in connectionist networks",
"authors": [
{
"first": "R",
"middle": [
"M"
],
"last": "French",
"suffix": ""
}
],
"year": 1999,
"venue": "Trends in cognitive sciences",
"volume": "3",
"issue": "4",
"pages": "128--135",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "French, R. M. (1999). Catastrophic forgetting in connectionist networks. Trends in cognitive sciences, 3(4), 128-135.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Transfer learning for biomedical named entity recognition with neural networks",
"authors": [
{
"first": "J",
"middle": [
"M"
],
"last": "Giorgi",
"suffix": ""
},
{
"first": "G",
"middle": [
"D"
],
"last": "Bader",
"suffix": ""
}
],
"year": 2018,
"venue": "Bioinformatics",
"volume": "34",
"issue": "23",
"pages": "4087--4094",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Giorgi, J. M., & Bader, G. D. (2018). Transfer learning for biomedical named entity recognition with neural networks. Bioinformatics, 34(23), 4087-4094.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Towards reliable named entity recognition in the biomedical domain",
"authors": [
{
"first": "J",
"middle": [
"M"
],
"last": "Giorgi",
"suffix": ""
},
{
"first": "G",
"middle": [
"D"
],
"last": "Bader",
"suffix": ""
}
],
"year": 2020,
"venue": "Bioinformatics",
"volume": "36",
"issue": "1",
"pages": "280--286",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Giorgi, J. M., & Bader, G. D. (2020). Towards reliable named entity recognition in the biomedical domain. Bioinformatics, 36(1), 280-286.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Transfer Learning for Named-Entity Recognition with Neural Networks",
"authors": [
{
"first": "J",
"middle": [
"Y"
],
"last": "Lee",
"suffix": ""
},
{
"first": "F",
"middle": [],
"last": "Dernoncourt",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Szolovits",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lee, J. Y., Dernoncourt, F., & Szolovits, P. (2018). Transfer Learning for Named-Entity Recognition with Neural Networks. Paper presented at the Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018).",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Roberta: A robustly optimized bert pretraining approach",
"authors": [
{
"first": "Y",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Ott",
"suffix": ""
},
{
"first": "N",
"middle": [],
"last": "Goyal",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Du",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Joshi",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": ".",
"middle": [
"."
],
"last": "Stoyanov",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "",
"suffix": ""
}
],
"year": 2019,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1907.11692"
]
},
"num": null,
"urls": [],
"raw_text": "Liu, Y., Ott, M., Goyal, N., Du, J., Joshi, M., Chen, D., . . . Stoyanov, V. (2019). Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "A survey on transfer learning",
"authors": [
{
"first": "S",
"middle": [
"J"
],
"last": "Pan",
"suffix": ""
},
{
"first": "Q",
"middle": [],
"last": "Yang",
"suffix": ""
}
],
"year": 2009,
"venue": "IEEE Transactions on knowledge and data engineering",
"volume": "22",
"issue": "10",
"pages": "1345--1359",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Pan, S. J., & Yang, Q. (2009). A survey on transfer learning. IEEE Transactions on knowledge and data engineering, 22(10), 1345-1359.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Glove: Global vectors for word representation",
"authors": [
{
"first": "J",
"middle": [],
"last": "Pennington",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Socher",
"suffix": ""
},
{
"first": "C",
"middle": [
"D"
],
"last": "Manning",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the Empirical Methods in Natural Language Processing",
"volume": "12",
"issue": "",
"pages": "1532--1543",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Pennington, J., Socher, R., & Manning, C. D. (2014). Glove: Global vectors for word representation. Proceedings of the Empiricial Methods in Natural Language Processing (EMNLP 2014), 12, 1532- 1543.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "MedPost: A Part of Speech Tagger for BioMedical Text",
"authors": [
{
"first": "L",
"middle": [],
"last": "Smith",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Rindflesch",
"suffix": ""
},
{
"first": "W",
"middle": [
"J"
],
"last": "Wilbur",
"suffix": ""
}
],
"year": 2004,
"venue": "Bioinformatics",
"volume": "20",
"issue": "14",
"pages": "2320--2321",
"other_ids": {
"DOI": [
"10.1093/bioinformatics/bth227"
]
},
"num": null,
"urls": [],
"raw_text": "Smith, L., Rindflesch, T., & Wilbur, W. J. (2004). MedPost: A Part of Speech Tagger for BioMedical Text. Bioinformatics, 20(14), 2320-2321. doi:10.1093/bioinformatics/bth227",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "NERBio: using selected word conjunctions, term normalization, and global patterns to improve biomedical named entity recognition",
"authors": [
{
"first": "R",
"middle": [
"T"
],
"last": "Tsai",
"suffix": ""
},
{
"first": ".-H",
"middle": [],
"last": "Sung",
"suffix": ""
},
{
"first": "C.-L",
"middle": [],
"last": "Dai",
"suffix": ""
},
{
"first": "H.-J",
"middle": [],
"last": "Hung",
"suffix": ""
},
{
"first": "H.-C",
"middle": [],
"last": "Sung",
"suffix": ""
},
{
"first": "T.-Y",
"middle": [],
"last": "Hsu",
"suffix": ""
},
{
"first": "W.-L",
"middle": [],
"last": "",
"suffix": ""
}
],
"year": 2006,
"venue": "BMC Bioinformatics",
"volume": "7",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tsai, R. T.-H., Sung, C.-L., Dai, H.-J., Hung, H.-C., Sung, T.-Y., & Hsu, W.-L. (2006). NERBio: using selected word conjunctions, term normalization, and global patterns to improve biomedical named entity recognition. BMC Bioinformatics, 7(Suppl 5), S11.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Understanding interobserver agreement: the kappa statistic",
"authors": [
{
"first": "A",
"middle": [
"J"
],
"last": "Viera",
"suffix": ""
},
{
"first": "J",
"middle": [
"M"
],
"last": "Garrett",
"suffix": ""
}
],
"year": 2005,
"venue": "Fam med",
"volume": "37",
"issue": "5",
"pages": "360--363",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Viera, A. J., & Garrett, J. M. (2005). Understanding interobserver agreement: the kappa statistic. Fam med, 37(5), 360-363.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"uris": null,
"num": null,
"type_str": "figure",
"text": "An example pathology report and the annotation tool used for annotation."
},
"TABREF1": {
"content": "<table><tr><td>Source</td><td>CMUH</td><td>KMUH</td></tr><tr><td># of Reports</td><td>393</td><td>1615</td></tr><tr><td>Training Set</td><td>293</td><td>1515</td></tr><tr><td>Test set</td><td>100</td><td>100</td></tr><tr><td>Period</td><td colspan=\"2\">2007~2013 2009~2015</td></tr></table>",
"num": null,
"html": null,
"text": "Datasets collected from two medical centers for this study.",
"type_str": "table"
},
"TABREF2": {
"content": "<table><tr><td colspan=\"2\">Type Description</td><td/><td>Example</td></tr><tr><td/><td colspan=\"2\">Stage classifications including clinical,</td><td/></tr><tr><td>SC</td><td>pathological,</td><td>post-therapy/neoadjuvant</td><td>p., yp., rp., a., c.</td></tr><tr><td/><td colspan=\"2\">therapy, retreatment/recurrence and autopsy</td><td/></tr><tr><td>T</td><td colspan=\"2\">Size or contiguous extension of the primary tumor</td><td>Primary tumor (T): Tx, T0, Tis, T1, T1, T2, T3, T4a, T4b</td></tr><tr><td>N</td><td colspan=\"2\">The absence, or presence and extent of cancer in the regional draining lymph nodes</td><td>Regional lymph nodes (N): Nx, N0, N1a, N1b, N1c, N2, N2a, N2b</td></tr><tr><td>M</td><td colspan=\"2\">The absence or presence of distant spread or metastases</td><td>Distant Metastasis (M): M0, M1, M1a, M1b</td></tr><tr><td>NE</td><td colspan=\"2\">Regional lymph nodes examined</td><td>Any numeric values</td></tr><tr><td>PN</td><td colspan=\"2\">Regional lymph nodes positive</td><td>Any numeric values</td></tr><tr><td>TS</td><td>Size of tumor</td><td/><td>Any numeric values</td></tr><tr><td>H</td><td>Histology</td><td/><td>Adenocarcinoma</td></tr><tr><td>G</td><td colspan=\"2\">Tumor grade; a measure of how abnormal the cancer cells look under the microscope.</td><td/></tr></table>",
"num": null,
"html": null,
"text": "The nine cancer registry variables defined for this study.",
"type_str": "table"
},
"TABREF3": {
"content": "<table><tr><td/><td/><td>CMUH</td><td/><td/><td>KMUH</td><td/></tr><tr><td>Type</td><td colspan=\"5\">Training Test Total Training Test</td><td>Total</td></tr><tr><td>Histology</td><td>558</td><td>136</td><td>694</td><td>4,517</td><td colspan=\"2\">273 4,790</td></tr><tr><td>Grade</td><td>519</td><td>140</td><td>659</td><td>4,410</td><td colspan=\"2\">265 4,675</td></tr><tr><td>Numbers of examined nodes</td><td>623</td><td>189</td><td>812</td><td>2,021</td><td colspan=\"2\">153 2,174</td></tr><tr><td>Numbers of positive nodes</td><td>554</td><td>177</td><td>731</td><td>2,021</td><td colspan=\"2\">153 2,175</td></tr><tr><td>Staging classification</td><td>670</td><td>161</td><td>831</td><td>1,441</td><td colspan=\"2\">99 1,540</td></tr><tr><td>Pathological T</td><td>400</td><td>122</td><td>522</td><td>1,440</td><td colspan=\"2\">99 1,539</td></tr><tr><td>Pathological N</td><td>380</td><td>122</td><td>502</td><td>898</td><td>61</td><td>959</td></tr><tr><td>Pathological M</td><td>373</td><td>124</td><td>497</td><td>6</td><td>0</td><td>6</td></tr><tr><td>Tumor size</td><td>1,112</td><td colspan=\"2\">383 1,495</td><td>1,606</td><td colspan=\"2\">115 1,721</td></tr><tr><td>Numbers of reports</td><td>293</td><td>100</td><td>393</td><td>1,515</td><td colspan=\"2\">100 1,615</td></tr><tr><td>Numbers of sentences</td><td colspan=\"3\">21 229 5,699 26,928</td><td colspan=\"3\">63,887 3,966 67,853</td></tr><tr><td>Numbers of sentences with annotations</td><td>2,348</td><td colspan=\"2\">596 2,944</td><td>9,007</td><td colspan=\"2\">578 9,585</td></tr><tr><td>Numbers of annotations</td><td colspan=\"3\">5,189 1,554 6,743</td><td colspan=\"3\">18,360 1,218 19,578</td></tr><tr><td/><td/><td>0.95</td><td/><td/><td/><td/></tr><tr><td/><td/><td>0.93</td><td/><td/><td/><td/></tr><tr><td/><td/><td>0.91</td><td/><td/><td/><td/></tr><tr><td/><td/><td>0.89</td><td/><td/><td/><td/></tr><tr><td/><td/><td>0.87</td><td/><td/><td/><td/></tr><tr><td/><td/><td>0.85</td><td/><td/><td/><td/></tr><tr><td/><td/><td>0.83</td><td/><td/><td/><td/></tr><tr><td/><td/><td>0.81</td><td/><td/><td/><td/></tr><tr><td/><td/><td>0.79</td><td/><td/><td/><td/></tr><tr><td/><td/><td>0.77</td><td/><td/><td/><td/></tr><tr><td/><td/><td>20%</td><td>40%</td><td>60%</td><td>80%</td><td>100%</td></tr><tr><td/><td/><td/><td>Non-transfer</td><td/><td>FC1</td><td/></tr><tr><td/><td/><td/><td>BiLSTM</td><td/><td>FC2</td><td/></tr><tr><td/><td/><td/><td>CRF</td><td/><td colspan=\"2\">Non-transfer-CRF</td></tr><tr><td/><td/><td colspan=\"5\">Figure 2: Impact of F-score by fine-tuning the</td></tr><tr><td/><td/><td colspan=\"5\">models with the parameters up to each layer</td></tr><tr><td/><td/><td colspan=\"5\">pre-trained on KMU on the varied sizes of the</td></tr><tr><td/><td/><td colspan=\"2\">CMU training set.</td><td/><td/><td/></tr></table>",
"num": null,
"html": null,
"text": "Corpus statistics for the compiled corpora used in this work.",
"type_str": "table"
},
"TABREF4": {
"content": "<table><tr><td/><td/><td>CMUH</td><td/><td/><td>KMUH</td><td/></tr><tr><td>Type</td><td>P</td><td>R</td><td>F</td><td>P</td><td>R</td><td>F</td></tr><tr><td colspan=\"7\">Dictionary-based 0.71 0.51 0.59 0.61 0.48 0.54</td></tr><tr><td>SVM</td><td colspan=\"3\">0.69 0.42 0.53</td><td colspan=\"3\">0.8 0.55 0.65</td></tr><tr><td>CRF</td><td colspan=\"6\">0.950 0.790 0.863 0.967 0.983 0.975</td></tr><tr><td>BiLSTM</td><td colspan=\"6\">0.823 0.638 0.719 0.975 0.975 0.975</td></tr><tr><td>BiLSTM-CRF</td><td colspan=\"6\">0.943 0.908 0.925 0.977 0.975 0.976</td></tr></table>",
"num": null,
"html": null,
"text": "Performance comparison among different approaches.",
"type_str": "table"
},
"TABREF5": {
"content": "<table><tr><td>Type</td><td>P</td><td>CMUH R</td><td>F</td></tr><tr><td>G</td><td>0.843</td><td>0.879</td><td>0.860</td></tr><tr><td>H</td><td>0.810</td><td>0.875</td><td>0.841</td></tr><tr><td>NE</td><td>0.973</td><td>0.968</td><td>0.971</td></tr><tr><td>PN</td><td>0.994</td><td>0.938</td><td>0.965</td></tr><tr><td>SC</td><td>0.946</td><td>0.988</td><td>0.967</td></tr><tr><td>TS</td><td>0.969</td><td>0.812</td><td>0.884</td></tr><tr><td>T</td><td>1.000</td><td>0.992</td><td>0.996</td></tr><tr><td>N</td><td>0.918</td><td>0.918</td><td>0.918</td></tr><tr><td>M</td><td>1.000</td><td>0.944</td><td>0.971</td></tr><tr><td>Type</td><td/><td>KMUH</td><td/></tr><tr><td>G</td><td>0.996</td><td>0.996</td><td>0.996</td></tr><tr><td>H</td><td>0.968</td><td>0.985</td><td>0.976</td></tr><tr><td>NE</td><td>1.000</td><td>0.961</td><td>0.980</td></tr><tr><td>PN</td><td>0.981</td><td>1.000</td><td>0.990</td></tr><tr><td>SC</td><td>0.970</td><td>0.990</td><td>0.980</td></tr><tr><td>TS</td><td>0.991</td><td>0.913</td><td>0.950</td></tr><tr><td>T</td><td>0.921</td><td>0.939</td><td>0.930</td></tr><tr><td>N</td><td>0.952</td><td>0.967</td><td>0.959</td></tr><tr><td>M</td><td/><td/><td/></tr></table>",
"num": null,
"html": null,
"text": "Detail precision, recall and F-score for each cancer registry item of the BiLSTM-CRF model.",
"type_str": "table"
},
"TABREF6": {
"content": "<table><tr><td/><td/><td>CMUH</td><td/><td/><td>KMUH</td><td/></tr><tr><td>Method</td><td>P</td><td>R</td><td>F</td><td>P</td><td>R</td><td>F</td></tr><tr><td>CRF</td><td colspan=\"6\">0.737 0.242 0.364 0.663 0.314 0.426</td></tr><tr><td>BiLSTM-CRF</td><td colspan=\"6\">0.631 0.376 0.472 0.925 0.483 0.634</td></tr><tr><td colspan=\"7\">Transferred BiLSTM-CRF 0.944 0.938 0.941 0.932 0.644 0.762</td></tr></table>",
"num": null,
"html": null,
"text": "Cross-corpus evaluation among different approaches.",
"type_str": "table"
}
}
}
}