|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:27:07.922643Z" |
|
}, |
|
"title": "Extracting Semantic Aspects for Structured Representation of Clinical Trial Eligibility Criteria", |
|
"authors": [ |
|
{ |
|
"first": "Ishani", |
|
"middle": [], |
|
"last": "Mondal", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Research Labs", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Tirthankar", |
|
"middle": [], |
|
"last": "Dasgupta", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "TCS Research and Innovation Labs", |
|
"institution": "", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Abir", |
|
"middle": [], |
|
"last": "Naskar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "TCS Research and Innovation Labs", |
|
"institution": "", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sudeshna", |
|
"middle": [], |
|
"last": "Jana", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "TCS Research and Innovation Labs", |
|
"institution": "", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Lipika", |
|
"middle": [], |
|
"last": "Dey", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "lipika.dey]@tcs.com" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Eligibility criteria in the clinical trials specify the characteristics that a patient must or must not possess in order to be treated according to a standard clinical care guideline. As the process of manual eligibility determination is time-consuming, automatic structuring of the eligibility criteria into various semantic categories or aspects is the need of the hour. Existing methods use hand-crafted rules and feature-based statistical machine learning methods to dynamically induce semantic aspects. However, in order to deal with paucity of aspect-annotated clinical trials data, we propose a novel weakly-supervised co-training based method which can exploit a large pool of unlabeled criteria sentences to augment the limited supervised training data, and consequently enhance the performance. Experiments with 0.2M criteria sentences show that the proposed approach outperforms the competitive supervised baselines by 12% in terms of micro-averaged F1 score for all the aspects. Probing deeper into analysis, we observe domain-specific information boosts up the performance by a significant margin.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Eligibility criteria in the clinical trials specify the characteristics that a patient must or must not possess in order to be treated according to a standard clinical care guideline. As the process of manual eligibility determination is time-consuming, automatic structuring of the eligibility criteria into various semantic categories or aspects is the need of the hour. Existing methods use hand-crafted rules and feature-based statistical machine learning methods to dynamically induce semantic aspects. However, in order to deal with paucity of aspect-annotated clinical trials data, we propose a novel weakly-supervised co-training based method which can exploit a large pool of unlabeled criteria sentences to augment the limited supervised training data, and consequently enhance the performance. Experiments with 0.2M criteria sentences show that the proposed approach outperforms the competitive supervised baselines by 12% in terms of micro-averaged F1 score for all the aspects. Probing deeper into analysis, we observe domain-specific information boosts up the performance by a significant margin.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Clinical trials (CTs) are research studies that are aimed at evaluating a medical, surgical, or behavioral intervention (Embi et al., 2008) , (Shivade et al., 2015) . Through such trials, researchers aim to find out whether a new treatment, like a new drug or diet or medical device is more effective than the existing treatments for a particular ailment. From an organization's perspective, a successful completion of a trial depends on achieving a significant sample size of patients enrolled for the trial within a limited time period. Total bilirubin less than or equal to 1.5 mg/dl, except in patients with history of anaemia. Have had their ileostomy or colostomy for at least 3 months. Subjects must be between the age of 18-65 yr old and must not intake alcohol.", |
|
"cite_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 139, |
|
"text": "(Embi et al., 2008)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 142, |
|
"end": 164, |
|
"text": "(Shivade et al., 2015)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Categories of Semantic Aspects are represented using the colors: Health Status ; Lab Test ; Demography ; Life Style ; Treatment Status However, recruiting enough number of eligible patients to participate in a trial can be a bottleneck. If suitable patients are not found then the trials might get cancelled or delayed significantly. In this case a patient queries the sites like clinicaltrial.gov to retrieve suitable trials. Due to the complexity of the task which involves repeated reading of the patient's Electronic Health Record (EHR) and the trial criteria for multiple trials, this is not only a labor-intensive and time-consuming task but also prone to human errors. In addition to this, the eligibility criteria often uses complex language structures and medical jargons mentioned in either semi-structured or unstructured way.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Previous works (Koopman and Zuccon, 2016) have formulated the problem of retrieving relevant document collection based on patient query. However, we demonstrate an approach in which the primary eligibility aspects are identified initially for further screening of the patients in terms of inclusion or exclusion strategy, which is the first step towards matching patients with the relevant trials.", |
|
"cite_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 41, |
|
"text": "(Koopman and Zuccon, 2016)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose an effective method which automatically identifies and segregates the clinical trial eligibility criteria into five semantic aspects. Also, the criteria texts speak volume about multiple aspects of the patients that includes demographic information, health status, treatment history, laboratory test reports and life-style. However, there has been a dearth of annotated crite-ria. Since, prior methods on neural clinical entity recognition models rely on the presence of a large annotated corpora and due to the high cost associated with manual tagging of semantic aspects and limited availability of labeled datasets (Najafabadi et al., 2015) , it is difficult to train a deep neural network effectively for such a task. We attempt to combat this difficulty by proposing a novel semi-supervised method based on deep co-training (Blum and Mitchell, 1998) which can harness a large pool of unlabeled clinical trial criteria that are more economical to collect. To the best of our knowledge, we are the first to introduce such a co-training-based method and demonstrate its effectiveness in aspect categorization of clinical trials in comparison to stand-alone sequence-labelling in isolation. The end-product of our experiments is a clinical trial-register that contain details of the different aspects across conditions and interventions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 644, |
|
"end": 669, |
|
"text": "(Najafabadi et al., 2015)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 855, |
|
"end": 880, |
|
"text": "(Blum and Mitchell, 1998)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Given an eligibility criteria sentence in the form of a word sequence x = (x 1 ...., x n ) , where n is the maximum length of the sequence, the task is to predict an output sequence y = (y 1 , ...., y n ) in which each y i is encoded using standard sequence labeling encoding scheme. Each y i might take one of the following aspects : ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2" |
|
}, |
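To make the formulation concrete, the following minimal sketch shows one hypothetical criteria sentence encoded token by token; the BIO-style tag set is an assumption, since the text only states that a standard sequence-labeling encoding scheme is used.

```python
# A hypothetical criteria sentence x = (x_1, ..., x_n)
tokens = ["Subjects", "must", "be", "between", "18", "and", "65",
          "years", "old", "and", "must", "not", "consume", "alcohol", "."]

# The corresponding output sequence y = (y_1, ..., y_n): one aspect tag per token,
# here using a BIO scheme over the Demography (Demo) and Life-Style (Life) aspects.
labels = ["O", "O", "O", "B-Demo", "I-Demo", "I-Demo", "I-Demo",
          "I-Demo", "I-Demo", "O", "B-Life", "I-Life", "I-Life", "I-Life", "O"]

assert len(tokens) == len(labels)
for tok, lab in zip(tokens, labels):
    print(f"{tok}\t{lab}")
```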
|
{ |
|
"text": "To induce semantic categorization of aspects in the eligibility criteria, we generate a small pool of annotated data by manually examining some of the most frequently used n-gram patterns such as history of, upper limit of normal, treated by, Allergy to as specified in (Luo et al., 2011) in the initial phase. During pre-processing, we filter out the most frequently occurring n-grams (n=2, n=3, n=4, n=5) present in the criteria of the patients. Secondly, the criteria sentences are also tagged with CliNER Tagger (Boag et al., 2015) for extracting out the diseases and drugs. Further details of data are provided in the supplementary material 1 . After these two steps, finally, the false positives are being removed during manual supervision by four independent domain-expert annotators. These include annotations for each of the different categories. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 270, |
|
"end": 288, |
|
"text": "(Luo et al., 2011)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 516, |
|
"end": 535, |
|
"text": "(Boag et al., 2015)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Annotation", |
|
"sec_num": "3" |
|
}, |
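The frequency-based pre-processing step described above can be sketched as follows; the tokenization, the top-k cut-off, and the function name are illustrative assumptions, and the CliNER tagging and manual filtering steps are not shown.

```python
from collections import Counter

def frequent_ngrams(criteria_sentences, ns=(2, 3, 4, 5), top_k=50):
    """Count the most frequent n-grams (n = 2..5) across criteria sentences.

    This mirrors only the frequency-filtering step described in the text;
    the tokenization and the top_k threshold are assumptions.
    """
    counts = Counter()
    for sent in criteria_sentences:
        tokens = sent.lower().split()
        for n in ns:
            for i in range(len(tokens) - n + 1):
                counts[" ".join(tokens[i:i + n])] += 1
    return counts.most_common(top_k)

# Example usage on a toy pool of criteria sentences.
pool = [
    "History of diabetes mellitus",
    "History of renal failure",
    "Allergy to penicillin",
]
print(frequent_ngrams(pool, top_k=5))
```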
|
{ |
|
"text": "\u03b8 BiLSTM\u2212CRF , \u03b8 BiGRU\u2212CRF T 1 , T 2 \u2190 V 1 , V 2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Annotation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Initialize the model parameters \u03b8 BiLSTM\u2212CRF , \u03b8 BiGRU\u2212CRF randomly. while (stopping criteria is not met) do C 1 \u2190 Train BiLSTM-CRF on T 1 (minimize the Aspect Loss) C 2 \u2190 Train BiGRU-CRF on T 2 (minimize the Aspect Loss)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Annotation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "for i=1 to |U| do if C 1 .score(U i ) \u2265 \u03c4 then T 2 \u2190 T 2 \u222a U i , U = U U i end if if C 2 .score(U i ) \u2265 \u03c4 then T 1 \u2190 T 1 \u222a U i , U = U U i end if end for end while", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Annotation", |
|
"sec_num": "3" |
|
}, |
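A minimal Python sketch of the co-training loop in Algorithm 1 is given below; the classifier objects and their fit/score/predict methods are hypothetical stand-ins for the BiLSTM-CRF and BiGRU-CRF taggers, and the stopping criterion is simplified to a fixed number of rounds.

```python
def co_train(c1, c2, V1, V2, U, tau, max_rounds=10):
    """Co-training sketch. V1/V2 are lists of (sentence, labels) pairs,
    U is the unlabeled pool, tau the co-training threshold."""
    T1, T2 = list(V1), list(V2)          # T_1, T_2 <- V_1, V_2
    for _ in range(max_rounds):          # "while stopping criterion is not met"
        c1.fit(T1)                       # train BiLSTM-CRF on T_1 (aspect loss)
        c2.fit(T2)                       # train BiGRU-CRF on T_2 (aspect loss)
        remaining = []
        for u in U:
            moved = False
            if c1.score(u) >= tau:       # confident C_1 prediction -> weak label for C_2
                T2.append((u, c1.predict(u)))
                moved = True
            if c2.score(u) >= tau:       # confident C_2 prediction -> weak label for C_1
                T1.append((u, c2.predict(u)))
                moved = True
            if not moved:
                remaining.append(u)      # consumed samples are removed: U <- U \ {U_i}
        U = remaining
    return c1, c2
```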
|
{ |
|
"text": "ment. 1500 clinical trial documents from Clinical-Trials.gov 2 are annotated with an average of 16 sentences per document. The manually labelled dataset statistics with the class distributions are specified in the Table 1 . While manually inspecting the co-occurence statistics of different aspects in the same criteria sentence of the manually annotated dataset, we observe that around 30% of the eligibility criteria contains more than one aspect, with 65% containing health, life-style, demography aspects, while the remaining 35% contains demography and treatment. For facilitating further research, we will also provide some sample examples of the annotated corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 214, |
|
"end": 221, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Annotation", |
|
"sec_num": "3" |
|
}, |
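The aspect co-occurrence statistics reported above can be computed along the following lines; the sentence-level aspect sets and helper names are illustrative assumptions rather than the authors' actual analysis code.

```python
from collections import Counter
from itertools import combinations

def aspect_cooccurrence(annotated_sentences):
    """Share of multi-aspect sentences and aspect-pair co-occurrence counts.

    `annotated_sentences` is assumed to be a list of sets of aspect labels,
    one set per criteria sentence (a simplification of the token-level tags).
    """
    multi = sum(1 for aspects in annotated_sentences if len(aspects) > 1)
    pair_counts = Counter()
    for aspects in annotated_sentences:
        for pair in combinations(sorted(aspects), 2):
            pair_counts[pair] += 1
    return multi / max(len(annotated_sentences), 1), pair_counts

# Toy example
sents = [{"Health"}, {"Demo", "Life"}, {"Demo", "Trt"}, {"Lab"}]
share, pairs = aspect_cooccurrence(sents)
print(f"{share:.0%} multi-aspect", pairs.most_common(3))
```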
|
{ |
|
"text": "In this work, we experiment with two different methods of aspect extraction. One of the following being the traditional supervised setup of using BiLSTM-CRF/Bi-GRU CRF with input representation optimized using categorical cross-entropy loss (Zhang and Sabuncu, 2018) . The second one being the Co-Training (Blum and Mitchell, 1998) method to extract the semantic aspects which has been outlined in Algorithm 1. The later method uses two conditionally independent feature views of the same dataset illustrated below:", |
|
"cite_spans": [ |
|
{ |
|
"start": 241, |
|
"end": 266, |
|
"text": "(Zhang and Sabuncu, 2018)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 306, |
|
"end": 331, |
|
"text": "(Blum and Mitchell, 1998)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "1. Domain-independent: The contextual pretrained language models such as, BERT (De-2 https://clinicaltrials.gov/ vlin et al., 2019) (E1) (or word2vec (Mikolov et al., 2013) trained on GoogleNews Corpus 3 (E2)) embeddings followed by a BiLSTM-CRF (C 1 ) (Huang et al., 2015) feature extractor.", |
|
"cite_spans": [ |
|
{ |
|
"start": 150, |
|
"end": 172, |
|
"text": "(Mikolov et al., 2013)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 253, |
|
"end": 273, |
|
"text": "(Huang et al., 2015)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "2. Domain-dependent: Bio-BERT embeddings (Lee et al., 2020) (E3) (or word2vec trained on PubMed 4 ) (E4) followed by BiGRU-CRF (C 2 ) (Lerner et al., 2020) feature extractor.", |
|
"cite_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 155, |
|
"text": "(Lerner et al., 2020)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
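A compact sketch of one co-training view (pre-trained embeddings feeding a bidirectional RNN with a CRF output layer) is shown below. This is not the authors' implementation: it assumes pre-computed, frozen word embeddings passed in as tensors and uses the third-party pytorch-crf package for the CRF layer.

```python
import torch
import torch.nn as nn
from torchcrf import CRF  # pip install pytorch-crf

class BiRNNCRFTagger(nn.Module):
    """One co-training view: embeddings -> BiLSTM or BiGRU -> CRF."""
    def __init__(self, emb_dim, hidden_dim, num_tags, cell="lstm"):
        super().__init__()
        rnn_cls = nn.LSTM if cell == "lstm" else nn.GRU      # view 1 vs. view 2
        self.rnn = rnn_cls(emb_dim, hidden_dim, batch_first=True,
                           bidirectional=True)
        self.emissions = nn.Linear(2 * hidden_dim, num_tags)
        self.crf = CRF(num_tags, batch_first=True)

    def forward(self, embeddings, tags=None, mask=None):
        # embeddings: (batch, seq_len, emb_dim) pre-computed word vectors
        feats, _ = self.rnn(embeddings)
        scores = self.emissions(feats)
        if tags is not None:                       # training: negative log-likelihood
            return -self.crf(scores, tags, mask=mask)
        return self.crf.decode(scores, mask=mask)  # inference: best tag sequence

# e.g. view 1: 768-d BERT embeddings + BiLSTM-CRF; view 2 would pass
# Bio-BERT / PubMed embeddings with cell="gru".
model = BiRNNCRFTagger(emb_dim=768, hidden_dim=300, num_tags=6, cell="lstm")
```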
|
{ |
|
"text": "At each step of co-training, the classifiers C 1 and C 2 are trained on respective views of training sets V 1 and V 2 , thereby minimizing the loss function. Each instance from the unlabeled samples (U) is scored using a scoring function computed as follows. First, the current classifier is used to decode the output label distribution for each word in the unlabeled instances. For each word in the output, we choose the output label which has the maximum probability. We compute the score for the sample as the multiplication of the probabilities of each label type for all labeled words in sequence normalized by the total number of words in the sentence. If this confidence score of the sample is greater than some pre-defined threshold \u03c4 , the sample has been added to the training set of the other classifier along with its output labels as generated by the classifier. This is the process of generation of weak labels for each sequence. Due to interchange of training data, both classifiers can learn from mistakes of each other and work in synergy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
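The confidence score for an unlabeled sentence can be sketched as below; the length normalization is implemented here as a geometric-mean-style score, which is one reading of the normalization described above.

```python
import math

def confidence_score(token_label_probs):
    """token_label_probs: list of per-token probability distributions,
    e.g. [{"B-Demo": 0.8, "O": 0.2}, ...] as decoded by the current tagger."""
    if not token_label_probs:
        return 0.0
    # product of the per-token maximum label probabilities, in log space
    log_prod = sum(math.log(max(dist.values())) for dist in token_label_probs)
    # length-normalized product (a geometric-mean-style confidence score)
    return math.exp(log_prod / len(token_label_probs))

# Samples whose score exceeds the co-training threshold tau (0.5 in the paper)
# are moved, with their predicted labels, to the other classifier's training set.
```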
|
{ |
|
"text": "We implement the model using Pytorch 0.3.0. The two classifiers considered for co-training are C 1 : Bi-LSTM-CRF and C 2 : Bi-GRU-CRF. For both supervised and co-training methods, the training data is divided according to 70-30% trainvalidation split. The two different views of cotraining setup are explained as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Details", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Hyper-parameters for two independent views: We run two experiments based on co-training, one using contextual embeddings (C-CTr) and the other using context-independent embeddings (NC-CTr). The hyper-parameter settings for the two views as required by the co-training method are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Details", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "View 1: For the first view (V 1 ), we use Bi-LSTM-CRF (Huang et al., 2015) with domain-independent word embeddings. We experiment with both a) (NC-CTr) Word2vec embeddings trained on GoogleNews Corpus with dimension 300 b) (C-CTr) pre-trained bert-base (12 layers, 12 attention heads, and 110 million parameters).", |
|
"cite_spans": [ |
|
{ |
|
"start": 54, |
|
"end": 74, |
|
"text": "(Huang et al., 2015)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Details", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "View 2: For the second view (V 2 ), we use Bi-GRU classifier with domain-dependent word embeddings. We experiment with both a) (NC-CTr) Word2vec embeddings trained on PubMed Corpus with dimension 200 b) (C-CTr) contextualized pre-trained Bio-BERT embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Details", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "For both the classifiers, the hidden unit dimensions are set to 300. During training, we use Adam (Kingma and Ba, 2015) optimizer with a learning rate of 0.001 and a batch size of 64. For co-training, \u03c4 has been set to 0.5, epoch size to 200, with earlystopping employed based on the performance of validation set. All the results are reported based on the best hyper-parameter settings after an exhaustive grid search over parameter space. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Details", |
|
"sec_num": "5" |
|
}, |
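A minimal configuration sketch wiring up the reported hyper-parameters is shown below; the stand-in model, the early-stopping patience value, and the embedding dimension are assumptions, since only the settings quoted above are given in the text.

```python
import torch
import torch.nn as nn

config = {
    "hidden_dim": 300,
    "learning_rate": 1e-3,
    "batch_size": 64,
    "co_training_threshold": 0.5,
    "max_epochs": 200,
    "early_stopping_patience": 5,   # assumption: the patience value is not reported
}

# Stand-in bidirectional encoder; the real tagger would be the BiLSTM-CRF /
# BiGRU-CRF described in Section 4.
model = nn.LSTM(input_size=768, hidden_size=config["hidden_dim"],
                batch_first=True, bidirectional=True)
optimizer = torch.optim.Adam(model.parameters(), lr=config["learning_rate"])
```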
|
{ |
|
"text": "In this section, we have provided a detailed analysis of the various results and findings that we have observed during experimentation. There are various criteria on which we have tried to evaluate our semi-supervised approach. Comparison with the baselines:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The results of the baseline methods are enumerated in Table 2 . We report the results based on exact match of each type of the aspects using F1-score. Following (Luo et al., 2011) , we implement the same (Baseline-2) on our dataset with UMLS (Bodenreider, 2004) feature representation and \"bag-of-words\" (BoW) features, and report results for various aspects. Although (Luo et al., 2011) assumes each criteria sentence essentially belongs to a single aspect, we have done an ablation of Baseline-2 without UMLS features (Baseline-1(1)) and without BoW (Baseline-1(2)). We observe that UMLS feature representation boosts up the performance due to inclusion of domain-specific information. We observed that this work finds resonance with (Chalapathy et al., 2016) in which the corpus uses multiple annotations. Due to availability of their working code, we have experimented with their stand-alone Bi-LSTM-CRF approach, used them as Baseline-2 and report results for each of the first three annotated aspects.", |
|
"cite_spans": [ |
|
{ |
|
"start": 161, |
|
"end": 179, |
|
"text": "(Luo et al., 2011)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 242, |
|
"end": 261, |
|
"text": "(Bodenreider, 2004)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 369, |
|
"end": 387, |
|
"text": "(Luo et al., 2011)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 736, |
|
"end": 761, |
|
"text": "(Chalapathy et al., 2016)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 54, |
|
"end": 61, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Analysis", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "For the purpose of fair comparison, we experiment with different ablations of feature extractor and types of input representation (in the supervised setup) and present the results of Macro-averaged F1-score in Table 3 . It has been observed that Bi-LSTM CRF with domain-specific input representation as Bio-BERT outperforms other ablations.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 210, |
|
"end": 217, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Feature ablation on model architecture:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "It is also evident from table 4, when the two independent views consist of contextualized embeddings (C-CTr), the model outperforms the non-contextualized features (NC-CTr) by an average margin of 6% F1-Scores. Also, we compare our best architecture for supervised setup with co-training approach. Given that the co-training model trains each classifier separately on different subsets of the training set, it can be sensitive to the choice of V 1 and V 2 . In order to address this issue, we experiment with repeating the same experimnts with various random sampling of the two training subsets. We observe an average F1-score standard deviation (across multiple sampling) of 0.064 for Health class, 0.091 for Treatment class, 0.116 for Lab-Test Results class, 0.055 for Demography class and 0.008 for Life-style class.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Impact of using co-training:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In figure 1, Macro-F1 score (across all aspects) of the co-trained model has been evaluated based on the values of co-training threshold. The values have been chosen from 0 to 1 at an interval of 0.1, in which the optimum value has been observed as 0.5. The sensitivity of co-training parameters has been shown in figure 2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sensitivity of co-training parameters:", |
|
"sec_num": null |
|
}, |
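The threshold sweep described above can be expressed as a short loop; run_co_training and macro_f1 are hypothetical stand-ins for the training and evaluation routines.

```python
def sweep_threshold(run_co_training, macro_f1, validation_data):
    """Vary tau from 0.0 to 1.0 in steps of 0.1 and record macro-F1 for each."""
    results = {}
    for step in range(0, 11):
        tau = step / 10
        model = run_co_training(tau=tau)
        results[tau] = macro_f1(model, validation_data)
    # best threshold (0.5 in the paper) together with the full sweep
    return max(results, key=results.get), results
```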
|
{ |
|
"text": "Effect of unlabelled data size: Moreover, the results are fairly constant even when the unlabeled data size varies (enumerated in Table 4 ) which demonstrates the robustness of our approach. The contextualized representations when augmented with fair amount of semi-automatically annotated samples outperforms the supervised baseline setup.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 130, |
|
"end": 138, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sensitivity of co-training parameters:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this paper, we have proposed a semi-supervised co-training method to tackle the scarcity of annotated data for the semantic clinical aspect extraction. This method augments a limited pool of annotated data with a large number of unlabeled clinical eligibility criteria outperforming pure supervised approaches. To the best of our knowledge, we are the first to provide an effective semi-supervised approach to detect the semantic aspects from clinical eligibility criteria which is a promising direction for further research on automatic linking of the patient Electronic Health Records (EHR) to clinical eligibility criteria with promising performance. As a future work, we aim to propose an end-to-end automatic matching system for patient-based clinical trial eligibility with low-cost data annotation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "https://github.com/Ishani-Mondal/Clinical-Trials-Aspect-Extraction", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/mmihaltz/word2vec-GoogleNewsvectors 4 http://bio.nlplab.org/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work has been done by the first author, Ishani, during her internship with the TCS Research Labs, India. Besides, the authors would like to thank the anonymous reviewers for their valuable feedback.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Combining labeled and unlabeled data with co-training", |
|
"authors": [ |
|
{ |
|
"first": "Avrim", |
|
"middle": [], |
|
"last": "Blum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the Eleventh Annual Conference on Computational Learning Theory, COLT' 98", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "92--100", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/279943.279962" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Avrim Blum and Tom Mitchell. 1998. Combining labeled and unlabeled data with co-training. In Proceedings of the Eleventh Annual Conference on Computational Learning Theory, COLT' 98, page 92-100, New York, NY, USA. Association for Com- puting Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Cliner : A lightweight tool for clinical named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Boag", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Wacome", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tristan", |
|
"middle": [], |
|
"last": "Naumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Rumshisky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William Boag, Kevin Wacome, Tristan Naumann, and Anna Rumshisky. 2015. Cliner : A lightweight tool for clinical named entity recognition.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "The unified medical language system (umls): integrating biomedical terminology. Nucleic acids research, 32 Database issue", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Bodenreider", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "267--70", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "O. Bodenreider. 2004. The unified medical language system (umls): integrating biomedical terminology. Nucleic acids research, 32 Database issue:D267-70.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Bidirectional LSTM-CRF for clinical concept extraction", |
|
"authors": [ |
|
{ |
|
"first": "Raghavendra", |
|
"middle": [], |
|
"last": "Chalapathy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ehsan Zare", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Massimo", |
|
"middle": [], |
|
"last": "Borzeshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Piccardi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Clinical Natural Language Processing Workshop (ClinicalNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7--12", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Raghavendra Chalapathy, Ehsan Zare Borzeshi, and Massimo Piccardi. 2016. Bidirectional LSTM-CRF for clinical concept extraction. In Proceedings of the Clinical Natural Language Processing Work- shop (ClinicalNLP), pages 7-12, Osaka, Japan. The COLING 2016 Organizing Committee.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Physicians' perceptions of an electronic health record-based clinical trial alert approach to subject recruitment: A survey", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anil", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Embi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C. Martin", |
|
"middle": [], |
|
"last": "Jain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Harris", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "BMC Medical Informatics and Decision Making", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "13--13", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter J. Embi, Anil K. Jain, and C. Martin Harris. 2008. Physicians' perceptions of an electronic health record-based clinical trial alert approach to subject recruitment: A survey. BMC Medical Infor- matics and Decision Making, 8:13 -13.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Bidirectional lstm-crf models for sequence tagging", |
|
"authors": [ |
|
{ |
|
"first": "Zhiheng", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhiheng Huang, Wei Xu, and Kai Yu. 2015. Bidirec- tional lstm-crf models for sequence tagging. ArXiv, abs/1508.01991.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. CoRR, abs/1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A test collection for matching patients to clinical trials", |
|
"authors": [ |
|
{ |
|
"first": "Bevan", |
|
"middle": [], |
|
"last": "Koopman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guido", |
|
"middle": [], |
|
"last": "Zuccon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 39th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '16", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "669--672", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2911451.2914672" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bevan Koopman and Guido Zuccon. 2016. A test col- lection for matching patients to clinical trials. In Proceedings of the 39th International ACM SIGIR Conference on Research and Development in Infor- mation Retrieval, SIGIR '16, page 669-672, New York, NY, USA. Association for Computing Machin- ery.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Biobert: a pre-trained biomedical language representation model for biomedical text mining", |
|
"authors": [ |
|
{ |
|
"first": "Jinhyuk", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wonjin", |
|
"middle": [], |
|
"last": "Yoon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sungdong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunkyu", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chan", |
|
"middle": [], |
|
"last": "Ho So", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaewoo", |
|
"middle": [], |
|
"last": "Kang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, D. Kim, Sunkyu Kim, Chan Ho So, and Jaewoo Kang. 2020. Biobert: a pre-trained biomedical language represen- tation model for biomedical text mining. Bioinfor- matics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Terminologies augmented recurrent neural network model for clinical named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Lerner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Paris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Tannier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Journal of biomedical informatics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ivan Lerner, N. Paris, and Xavier Tannier. 2020. Termi- nologies augmented recurrent neural network model for clinical named entity recognition. Journal of biomedical informatics, page 103356.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Dynamic categorization of clinical research eligibility criteria by hierarchical clustering", |
|
"authors": [ |
|
{ |
|
"first": "Zhihui", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meliha", |
|
"middle": [], |
|
"last": "Yetisgen-Yildiz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chunhua", |
|
"middle": [], |
|
"last": "Weng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "J. of Biomedical Informatics", |
|
"volume": "44", |
|
"issue": "6", |
|
"pages": "927--935", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.jbi.2011.06.001" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhihui Luo, Meliha Yetisgen-Yildiz, and Chunhua Weng. 2011. Dynamic categorization of clinical re- search eligibility criteria by hierarchical clustering. J. of Biomedical Informatics, 44(6):927-935.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Interrater reliability: the kappa statistic", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Mchugh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Biochemia Medica", |
|
"volume": "22", |
|
"issue": "", |
|
"pages": "276--282", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. McHugh. 2012. Interrater reliability: the kappa statistic. Biochemia Medica, 22:276 -282.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Distributed representations of words and phrases and their compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 26th International Conference on Neural Information Processing Systems", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "3111--3119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Cor- rado, and Jeffrey Dean. 2013. Distributed represen- tations of words and phrases and their composition- ality. In Proceedings of the 26th International Con- ference on Neural Information Processing Systems -Volume 2, NIPS'13, page 3111-3119, Red Hook, NY, USA. Curran Associates Inc.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Deep learning applications and challenges in big data analytics", |
|
"authors": [ |
|
{ |
|
"first": "Maryam", |
|
"middle": [], |
|
"last": "Najafabadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Flavio", |
|
"middle": [], |
|
"last": "Villanustre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Taghi", |
|
"middle": [], |
|
"last": "Khoshgoftaar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naeem", |
|
"middle": [], |
|
"last": "Seliya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Randall", |
|
"middle": [], |
|
"last": "Wald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edin", |
|
"middle": [], |
|
"last": "Muharemagic", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Journal of Big Data", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1186/s40537-014-0007-7" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maryam Najafabadi, Flavio Villanustre, Taghi Khosh- goftaar, Naeem Seliya, Randall Wald, and Edin Muharemagic. 2015. Deep learning applications and challenges in big data analytics. Journal of Big Data, 2.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Textual inference for eligibility criteria resolution in clinical trials", |
|
"authors": [ |
|
{ |
|
"first": "Chaitanya", |
|
"middle": [], |
|
"last": "Shivade", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Courtney", |
|
"middle": [], |
|
"last": "Hebert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcelo", |
|
"middle": [], |
|
"last": "Lopetegui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Catherine", |
|
"middle": [], |
|
"last": "De Marneffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Fosler-Lussier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Albert", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Lai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "J. of Biomedical Informatics", |
|
"volume": "58", |
|
"issue": "S", |
|
"pages": "211--218", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chaitanya Shivade, Courtney Hebert, Marcelo Lopetegui, Marie-Catherine de Marneffe, Eric Fosler-Lussier, and Albert M. Lai. 2015. Tex- tual inference for eligibility criteria resolution in clinical trials. J. of Biomedical Informatics, 58(S):S211-S218.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Generalized cross entropy loss for training deep neural networks with noisy labels", |
|
"authors": [ |
|
{ |
|
"first": "Zhilu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mert", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Sabuncu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 32nd International Conference on Neural Information Processing Systems, NIPS'18", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8792--8802", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilu Zhang and Mert R. Sabuncu. 2018. Generalized cross entropy loss for training deep neural networks with noisy labels. In Proceedings of the 32nd Inter- national Conference on Neural Information Process- ing Systems, NIPS'18, page 8792-8802, Red Hook, NY, USA. Curran Associates Inc.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "Health Status (Health): describes the present medical condition like pregnancy status, disease affected, etc. 2. Treatment (Trt): contains information about the intervention, surgery or therapy related information of the patients. 3. Lab-Test (Lab): It deals with the lab-tests or experimental results. 4. Demography(Demo): This class primarily deals with the age, gender related to the patients undergoing clinical trials. 5. Life-Style(Life): This class primarily deals with the information of the patients regarding their daily habits like diet, exercise etc 6. Other: It contains none of the above classes.", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"text": "illustrates the overview of semantic aspect extraction.", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"text": "Working Pipeline of Semantic Aspect Extraction from Eligibility Criteria using Co-Training.", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"text": "Testing the optimum Co-Training Threshold", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td>Algorithm 1</td></tr><tr><td>The mean Cohen's Kappa (McHugh, 2012) was</td></tr><tr><td>0.82, which indicate good inter-annotator agree-</td></tr></table>", |
|
"text": "Aspect Extraction using Co-Training AlgorithmInput U : Large amount of unlabelled criteria sentences, \u03c4 : Co-Training threshold, V 1 , V 2 : Two views of labelled Aspect Annotated Criteria Sentences Output Model Parameters :", |
|
"num": null |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td colspan=\"2\">Methods Health</td><td>Trt</td><td colspan=\"3\">Lab Demo Life</td></tr><tr><td/><td>F1</td><td>F1</td><td>F1</td><td>F1</td><td>F1</td></tr><tr><td>C 1 +E1</td><td>0.72</td><td colspan=\"2\">0.70 0.65</td><td>0.80</td><td>0.70</td></tr><tr><td>C 1 +E2</td><td>0.68</td><td colspan=\"2\">0.61 0.62</td><td>0.75</td><td>0.67</td></tr><tr><td>C 2 +E3</td><td>0.73</td><td colspan=\"2\">0.70 0.66</td><td>0.81</td><td>0.72</td></tr><tr><td>C 2 +E4</td><td>0.70</td><td colspan=\"2\">0.64 0.63</td><td>0.77</td><td>0.67</td></tr></table>", |
|
"text": "Macro-F1 score for all the aspects using prior methods with some additional features", |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "Feature ablations on our supervised setup on the train-validation split of our dataset.", |
|
"num": null |
|
}, |
|
"TABREF7": { |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td>: Results showing various co-training method-</td></tr><tr><td>ology with different size of unlabelled instances.</td></tr><tr><td>Trt=Treatment aspect. The scores are reported in the</td></tr><tr><td>table based on exact match F1-score for all aspects.</td></tr></table>", |
|
"text": "", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |