|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T02:10:48.753559Z" |
|
}, |
|
"title": "An Ensemble Model for Automatic Grading of Evidence", |
|
"authors": [ |
|
{ |
|
"first": "Yuting", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Emory University", |
|
"location": { |
|
"postCode": "30322", |
|
"settlement": "Atlanta", |
|
"region": "GA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yao", |
|
"middle": [], |
|
"last": "Ge", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Emory University", |
|
"location": { |
|
"postCode": "30322", |
|
"settlement": "Atlanta", |
|
"region": "GA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ruqi", |
|
"middle": [], |
|
"last": "Liao", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Abeed", |
|
"middle": [], |
|
"last": "Sarker", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Emory University", |
|
"location": { |
|
"postCode": "30322", |
|
"settlement": "Atlanta", |
|
"region": "GA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper describes our approach for the automatic grading of evidence task from the Australasian Language Technology Association (ALTA) Shared Task 2021. We developed two classification models with SVM and RoBERTa and applied an ensemble technique to combine the grades from different classifiers. Our results showed that the SVM model achieved comparable results to the RoBERTa model, and the ensemble system outperformed the individual models on this task. Our system achieved the first place among five teams and obtained 3.3% higher accuracy than the second place.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper describes our approach for the automatic grading of evidence task from the Australasian Language Technology Association (ALTA) Shared Task 2021. We developed two classification models with SVM and RoBERTa and applied an ensemble technique to combine the grades from different classifiers. Our results showed that the SVM model achieved comparable results to the RoBERTa model, and the ensemble system outperformed the individual models on this task. Our system achieved the first place among five teams and obtained 3.3% higher accuracy than the second place.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Evidence-based medicine (EBM) requires the making of clinical decisions using the current best external evidence rather than solely relying on clinical experience and pathophysiologic rationale (Sackett et al., 1996) . To adhere to EBM best practice, practitioners need to identify the best quality evidence associated with a clinical query. To grade the quality of evidence, Ebell et al. (2004) proposed the Strength of Recommendation Taxonomy (SORT). SORT has a three-levels for rating-A (strong), B (moderate), and C (weak), where A-level is based on high-quality studies with consistent results; Blevel is based on high-quality studies with inconsistent results or some limitations; C-level is based on the studies with severe limitations. It is a straightforward grading system that allows clinical experts to rate individual studies or bodies of evidence based on quantity, quality, and consistency.", |
|
"cite_spans": [ |
|
{ |
|
"start": 194, |
|
"end": 216, |
|
"text": "(Sackett et al., 1996)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 376, |
|
"end": 395, |
|
"text": "Ebell et al. (2004)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To address the challenging problem of automatically grading the quality of evidence, the Australasian Language Technology Association (ALTA) Shared Task 2021 organized a competition. The participants were required to develop a system to predict the grade of evidence given multiple related medical publications. Our team trained several supervised classifiers to address the problem. Our approach included traditional supervised classification models such as support vector machines (SVM) (Cortes and Vapnik, 1995) , neural network models using pretrained models (RoBERTa) (Liu et al., 2019) , and an innovative ensemble system which combines the predictions of multiple classifiers. Our results showed that the SVM model achieved comparable results to the RoBERTa model, and the ensemble system outperformed the individual models on this task. The ensemble model combines the prediction from multiple classifiers in a unique manner: grades (A, B or C) predicted by each classifier is first converted into a continuous number, and then all the numbers are added for each instance. Using the training data, the best separations for the numeric totals are computed. These numeric boundaries are then used to convert continuous scores in the test set to discrete evidence grades. Our system achieved the first place among five teams and obtained 3.3% higher accuracy than the second place.", |
|
"cite_spans": [ |
|
{ |
|
"start": 489, |
|
"end": 514, |
|
"text": "(Cortes and Vapnik, 1995)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 573, |
|
"end": 591, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "ALTA Shared Task 2021 is a re-visit of ALTA Shared Task 2011 (Molla and Sarker, 2011) . Previous studies have developed several SVM-based systems for this task. Molla and Sarker (2011) used a sequential approach to combine multiple individual SVM models trained with the features from the titles, body of the abstracts, and publication types. Gyawali et al. (2012) expanded the feature set proposed by Molla and Sarker (2011) with the Medical Subject Headings (MeSH) terms and developed a stacking-based approach to integrate predictions from multiple SVM models. Byczy\u0144ska et al. (2020) experimented with a larger set of features and applied multiple machine learning techniques such as classical machine learning mod-els, neural networks, game theory, and consensus methods. In our work, we trained SVM models on a feature set similar to Byczy\u0144ska et al. (2020) . We also applied a pre-trained transformer-based model named RoBERTa (Liu et al., 2019) , which has achieved state-of-the-art results in a wide range of natural language processing (NLP) tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 85, |
|
"text": "(Molla and Sarker, 2011)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 161, |
|
"end": 184, |
|
"text": "Molla and Sarker (2011)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 343, |
|
"end": 364, |
|
"text": "Gyawali et al. (2012)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 402, |
|
"end": 425, |
|
"text": "Molla and Sarker (2011)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 564, |
|
"end": 587, |
|
"text": "Byczy\u0144ska et al. (2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 840, |
|
"end": 863, |
|
"text": "Byczy\u0144ska et al. (2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 934, |
|
"end": 952, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The data for this shared task consisted of a set of evidence grades under the SORT criteria and a list of related publications associated with each evidence grade. The publications were obtained from PubMed 1 and were provided in the form of XML files which contained the title, the abstract, and some meta-data (eg., publication types, MeSH terms). Some data statistics are shown in Table 1 . ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 384, |
|
"end": 391, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Train (%) Dev (%) Test (%) A", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Description", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We implemented the SVM models with Python 3.7 and the sklearn tool (Pedregosa et al., 2011) . We trained multiple SVM models using different feature sets for each, which included the number of related publications (npmid), journal titles, and other features, as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 91, |
|
"text": "(Pedregosa et al., 2011)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SVM", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "N-gram Features (n-gram)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SVM", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The n-gram features were generated from the texts of the titles and the bodies of the abstracts. Because one evidence grade can be based on multiple publications, we combined the titles and the abstracts of all publications to create sequences of titles and abstracts per evidence, respectively. Then, we computed the term frequency-inverse document frequency (TF-IDF) features from the n-grams (n = 1, 2, 3, 4) of the combined sequences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SVM", |
|
"sec_num": "4.1" |
|
}, |
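As a concrete illustration of the n-gram features described above, the following sketch builds the combined title and abstract sequences and computes TF-IDF features with scikit-learn's TfidfVectorizer. The toy inputs and variable names are ours, and the paper does not specify tokenization or vocabulary settings, so this is a minimal sketch rather than the authors' exact pipeline.

```python
from sklearn.feature_extraction.text import TfidfVectorizer

# Hypothetical input: each evidence item maps to (title, abstract) pairs,
# one pair per related publication.
evidence_pubs = {
    "ev1": [("Title A", "Abstract A ..."), ("Title B", "Abstract B ...")],
    "ev2": [("Title C", "Abstract C ...")],
}

# Combine the titles and the abstracts of all publications so that each
# evidence item yields one title sequence and one abstract sequence.
title_seqs = [" ".join(t for t, _ in pubs) for pubs in evidence_pubs.values()]
abstract_seqs = [" ".join(a for _, a in pubs) for pubs in evidence_pubs.values()]

# TF-IDF over word n-grams with n = 1..4, fitted separately per field.
title_vec = TfidfVectorizer(ngram_range=(1, 4))
abstract_vec = TfidfVectorizer(ngram_range=(1, 4))
X_title = title_vec.fit_transform(title_seqs)
X_abstract = abstract_vec.fit_transform(abstract_seqs)
```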
|
{ |
|
"text": "Consistency Features (cons) As mentioned in Ebell et al. (2004) , the consistency of experimental results can affect the evidence strength. Inspired by that, we detected the mentions of consistent results in the body of abstracts by keyword matching. For each evidence, if any of the publications matched the word \"consistent\" or \"consistency\" in the abstract, the consistency feature was set as 1; otherwise it was set to 0.", |
|
"cite_spans": [ |
|
{ |
|
"start": 44, |
|
"end": 63, |
|
"text": "Ebell et al. (2004)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SVM", |
|
"sec_num": "4.1" |
|
}, |
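A minimal sketch of the keyword-matching rule above; lowercasing before matching is our assumption, since the paper does not state how case is handled.

```python
def consistency_feature(abstracts):
    """Return 1 if any abstract mentions consistent results, else 0."""
    keywords = ("consistent", "consistency")
    return int(any(kw in abstract.lower()
                   for abstract in abstracts
                   for kw in keywords))

# Example: one evidence item backed by two publications.
print(consistency_feature(["Results were consistent across trials.",
                           "A small cohort study."]))  # -> 1
```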
|
{ |
|
"text": "Publication Types (pubtype) As discussed in Molla and Sarker (2011) and Byczy\u0144ska et al. (2020) , publication types can be a strong indicator of the evidence strength. We extracted the publication type terms tagged as PublicationType in the XML files and assigned a pseudo publication type \"unknown\" to the publications without any Publi-cationType tag. In addition, we used the PubMed tool 2 to retrieve the publication type IDs. We used one-hot encoding to encode the publication type terms and IDs, respectively. Also, we generated a publication type rank according to the level of evidence pyramid in Sarker and Moll\u00e1-Aliod (2010). The rank ranged from 0 to 5, where higher number indicates higher quality.", |
|
"cite_spans": [ |
|
{ |
|
"start": 44, |
|
"end": 67, |
|
"text": "Molla and Sarker (2011)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 72, |
|
"end": 95, |
|
"text": "Byczy\u0144ska et al. (2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SVM", |
|
"sec_num": "4.1" |
|
}, |
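To make the publication type processing concrete, here is a sketch that extracts PublicationType terms from a PubMed XML record, multi-hot encodes them, and assigns a rank. The rank table is illustrative only: the paper follows the level-of-evidence pyramid of Sarker and Mollá-Aliod (2010) but does not list the exact 0-5 mapping, so the values below are assumptions.

```python
import xml.etree.ElementTree as ET
from sklearn.preprocessing import MultiLabelBinarizer

def publication_types(xml_text):
    """Extract PublicationType terms; fall back to the pseudo type 'unknown'."""
    root = ET.fromstring(xml_text)
    terms = [el.text for el in root.iter("PublicationType") if el.text]
    return terms or ["unknown"]

record = ("<PubmedArticle><PublicationTypeList>"
          "<PublicationType>Randomized Controlled Trial</PublicationType>"
          "</PublicationTypeList></PubmedArticle>")

# Multi-hot (one-hot per term) encoding of the publication type terms.
encoder = MultiLabelBinarizer()
X_pubtype = encoder.fit_transform([publication_types(record)])

# Assumed rank values (0 = lowest quality, 5 = highest).
PUBTYPE_RANK = {"unknown": 0, "Case Reports": 1, "Cohort Studies": 2,
                "Clinical Trial": 3, "Randomized Controlled Trial": 4,
                "Meta-Analysis": 5}
rank = max(PUBTYPE_RANK.get(t, 0) for t in publication_types(record))
```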
|
{ |
|
"text": "MeSH MeSH terms provide information regarding the topics covered in a publication. We used the PubMed tool to request MeSH term IDs and represented the MeSH feature by one-hot encoding.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SVM", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Encouraged by the success of the pre-trained transformer-based models in recent years, we developed a classifier using RoBERTa, one of the most popular pre-trained transformer-based models. The classification model architecture was the same as the model in (Liu et al., 2019) . It consists of an encoder, which converted the input text sequence into an embedding vector, and a classification layer with softmax activation, which projected the embedding vector into a class probability vector. The inputs were the abstract texts of the publications associated with each evidence instance. However, if we attached the abstracts into one sequence, the input length often exceeded the maximum sequence length limitation of RoBERTa, which is 512 characters. Therefore, we re-organized the dataset by splitting the evidences involving multiple publications into different instances so that each instance only contained one evidence and one publication, as shown in Figure 1 . During the inference phase, for each evidence, the class probability vectors of multiple publications were averaged, and the class with the highest probability was chosen as the final prediction.", |
|
"cite_spans": [ |
|
{ |
|
"start": 257, |
|
"end": 275, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 959, |
|
"end": 967, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "RoBERTa", |
|
"sec_num": "4.2" |
|
}, |
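The inference-time averaging can be sketched as follows with the Hugging Face transformers library; the checkpoint path is hypothetical and stands in for a RoBERTa model fine-tuned on the re-organized three-class data.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

MODEL_DIR = "path/to/finetuned-roberta"  # hypothetical fine-tuned checkpoint
tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_DIR)
model.eval()

def predict_grade(abstracts):
    """Average per-publication class probabilities; return the argmax grade."""
    probs = []
    for text in abstracts:
        inputs = tokenizer(text, truncation=True, max_length=256,
                           return_tensors="pt")
        with torch.no_grad():
            logits = model(**inputs).logits
        probs.append(torch.softmax(logits, dim=-1))
    mean_probs = torch.cat(probs).mean(dim=0)
    return "ABC"[int(mean_probs.argmax())]
```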
|
{ |
|
"text": "Because the classes A, B and C represent the strength of evidence from strong to weak, we considered the task of grading as a regression problem (rather than a classification problem) and converted the predictions from the classifiers into numbers on a numeric scale. Specifically, we represented the classes A, B, and C as the numbers 0, 1, and 2. For each instance, we computed a numeric score (rather than a discrete category) by adding up the converted predictions from all classifiers. Following this process, we performed grid search to find two thresholds in which the evidences with scores smaller the lower threshold were classified as A, those larger than the higher threshold were classified as C, and those with scores between the lower and upper thresholds were classified as B. Optimal values for the thresholds were based on the training set. In addition, considering the fact that the classifiers with low accuracies may hurt the performance of the ensemble model, we greedily removed the least accurate classifiers to find the classifier set that achieved the best performance on the training/development set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ensemble", |
|
"sec_num": "4.3" |
|
}, |
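A minimal sketch of the ensemble described above, assuming each classifier's predictions are given as grade lists; searching the candidate thresholds over the observed score values is our simplification of the grid search.

```python
import numpy as np

GRADE_TO_NUM = {"A": 0, "B": 1, "C": 2}

def ensemble_scores(per_classifier_preds):
    """Sum the numeric grades predicted by all classifiers per instance."""
    numeric = np.array([[GRADE_TO_NUM[g] for g in preds]
                        for preds in per_classifier_preds])
    return numeric.sum(axis=0)

def fit_thresholds(scores, gold):
    """Grid search a (lower, upper) threshold pair maximising accuracy."""
    best = (0, 0, -1.0)
    candidates = np.unique(scores)
    for lo in candidates:
        for hi in candidates[candidates >= lo]:
            # Below the lower threshold -> A, above the upper -> C, else B.
            pred = np.where(scores < lo, "A",
                            np.where(scores > hi, "C", "B"))
            acc = float((pred == np.asarray(gold)).mean())
            if acc > best[2]:
                best = (lo, hi, acc)
    return best[:2]
```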
|
{ |
|
"text": "SVM We trained the SVM models for all possible combinations of the features and experimented with not using class weights and using the empir-ical class weights W A = 1.2, W B = 1.2, and W C = 1.0. In total, we created 127 feature combinations and obtained 254 classification models. For each model, we performed grid search on the development set to find the best configuration for the regularization parameter C \u2208 {1, 2, 4, 6, 8} and the kernel type K \u2208 {\"linear\", \"rbf \"}.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
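The hyper-parameter search described above maps naturally onto scikit-learn's GridSearchCV with a PredefinedSplit that pins the development set as the single validation fold. The toy data below is ours; refit=True mirrors the re-training on the combined training and development sets described in the next paragraph.

```python
import numpy as np
from sklearn.model_selection import GridSearchCV, PredefinedSplit
from sklearn.svm import SVC

# Toy stand-ins for one feature combination (15 train + 5 dev instances).
X = np.random.rand(20, 5)
y = np.random.choice(list("ABC"), size=20)
test_fold = np.array([-1] * 15 + [0] * 5)  # -1 = train, 0 = dev

param_grid = {"C": [1, 2, 4, 6, 8], "kernel": ["linear", "rbf"]}
class_weight = {"A": 1.2, "B": 1.2, "C": 1.0}  # empirical weights from the paper

search = GridSearchCV(SVC(class_weight=class_weight), param_grid,
                      cv=PredefinedSplit(test_fold), scoring="accuracy",
                      refit=True)  # refit on train + dev with the best config
search.fit(X, y)
print(search.best_params_)
```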
|
{ |
|
"text": "RoBERTa The specific version of RoBERTa we used was RoBERTa-large. According to the preliminary experiments, we set the batch size as 32, the learning rate as 8 \u00d7 10 \u22126 , and the maximum sequence length as 256. The model was trained for 10 epochs with 3 random initialisations. For both SVM and RoBERTa, we tuned the parameters based on the training set and the development set to find the optimal parameters, and we re-trained the model with the optimal parameters on the whole data set (i.e., the combination of the training set and the development set). The reported results of the test set were predicted by the models trained on the whole data set, and those of the development set were predicted by the models trained on the training set. Table 2 shows the results of the best individual SVM model, the RoBERTa model, and the ensemble model on the development set and the test set. For the SVM model, the best feature combination is n-gram+pubtype+npmid. The results show that the performance of the RoBERTa model is comparable to the SVM model, and the ensemble model outperformed the other two models. However, the differences between the three models were not statistically significant according to the 95% confidence intervals. Also, we observed that the performances were considerably lower for the test set compared to the development set. This suggests that the models may overfit on the training/development data because of the small data size.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 745, |
|
"end": 752, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "For further error analysis, we plotted the confusion matrix for our best system (ie., the ensemble model), shown in Figure 2 . As we can see, the majority of errors can be attributed to the misclassification of the classes A and C. Most A-level and C-level evidences were predicted as B. This can be another indicator of overfitting because the majority evidences in the training set were graded as B. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 116, |
|
"end": 124, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "As illustrated in Table 6 , the RoBERTa model did not outperform the SVM model on this task. This finding is somewhat surprising because many recent studies have shown that pre-trained transformer-based models can achieve the stateof-the-art performance on a wide range of natural language processing tasks (Liu et al., 2019; Devlin et al., 2019; Nguyen et al., 2020; Yang et al., 2019) . A possible explanation for this can be that the most important factor for the evidence strength grading is the publication type and the consistency of the experiments (Ebell et al., 2004) . In our experiments, the input for RoBERTa was only the abstracts, which rarely contained the publication type information. In contrast, in the abstracts, the consistency of the experiments are usually implicitly described by comparing the experimental results which involve numbers. It has been suggested that the pre-trained transformer-based models lack in the ability of effectively representing numbers (Wallace et al., 2019) . Therefore, further studies will need to be undertaken to explore how to incorporate the meta-data information into transformerbased models and how to make such models understand/compare numbers. Although we achieved the top place in this com-petition, some systems described in past publications achieved higher accuracies than our best result (Molla and Sarker, 2011; Gyawali et al., 2012; Byczy\u0144ska et al., 2020) . We noted that all of these systems used the publication type features. Moreover, Byczy\u0144ska et al. (2020) showed that using the single publication type feature achieved 70% accuracy on the test set. However, in our experiments, our model with the single publication type feature only achieved 52% accuracy. We speculate that the cause of the performance gap might be due to the fact that we processed the publication type feature differently compared to the abovementioned publication. In our method, we simply used the publication type terms extracted from the XML files, while Byczy\u0144ska et al. (2020) used a rule-based system to identity the publication types from the titles and the abstracts. Further research is needed to explore effective methods for processing the publication type feature.", |
|
"cite_spans": [ |
|
{ |
|
"start": 307, |
|
"end": 325, |
|
"text": "(Liu et al., 2019;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 326, |
|
"end": 346, |
|
"text": "Devlin et al., 2019;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 347, |
|
"end": 367, |
|
"text": "Nguyen et al., 2020;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 368, |
|
"end": 386, |
|
"text": "Yang et al., 2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 556, |
|
"end": 576, |
|
"text": "(Ebell et al., 2004)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 986, |
|
"end": 1008, |
|
"text": "(Wallace et al., 2019)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1355, |
|
"end": 1379, |
|
"text": "(Molla and Sarker, 2011;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1380, |
|
"end": 1401, |
|
"text": "Gyawali et al., 2012;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1402, |
|
"end": 1425, |
|
"text": "Byczy\u0144ska et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1509, |
|
"end": 1532, |
|
"text": "Byczy\u0144ska et al. (2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2006, |
|
"end": 2029, |
|
"text": "Byczy\u0144ska et al. (2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 25, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "https://www.ncbi.nlm.nih.gov/pmc/ tools/openftlist/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.ncbi.nlm.nih.gov/pmc/ tools/get-metadata/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Marcin Paprzycki, and Miko\u0142aj Kutka. 2020. Evidence quality estimation using selected machine learning approaches", |
|
"authors": [ |
|
{ |
|
"first": "Aleksandra", |
|
"middle": [], |
|
"last": "Byczy\u0144ska", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Ganzha", |
|
"suffix": "" |
|
},

{

"first": "Marcin",

"middle": [],

"last": "Paprzycki",

"suffix": ""

},

{

"first": "Miko\u0142aj",

"middle": [],

"last": "Kutka",

"suffix": ""

}
|
], |
|
"year": null, |
|
"venue": "2020 Conference on Information Communications Technology and Society (ICTAS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICTAS47918.2020.244042" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aleksandra Byczy\u0144ska, Maria Ganzha, Marcin Paprzy- cki, and Miko\u0142aj Kutka. 2020. Evidence quality esti- mation using selected machine learning approaches. In 2020 Conference on Information Communications Technology and Society (ICTAS), pages 1-8.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Supportvector networks", |
|
"authors": [ |
|
{ |
|
"first": "Corinna", |
|
"middle": [], |
|
"last": "Cortes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vladimir", |
|
"middle": [], |
|
"last": "Vapnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Machine learning", |
|
"volume": "20", |
|
"issue": "3", |
|
"pages": "273--297", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Corinna Cortes and Vladimir Vapnik. 1995. Support- vector networks. Machine learning, 20(3):273-297.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Un- derstanding. In Proceedings of the 2019 North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Strength of recommendation taxonomy (SORT): a patientcentered approach to grading evidence in the medical literature", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Ebell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Siwek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Woolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Susman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Ewigman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "J Am Board Fam Pract", |
|
"volume": "17", |
|
"issue": "1", |
|
"pages": "59--67", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. H. Ebell, J. Siwek, B. D. Weiss, S. H. Woolf, J. Sus- man, B. Ewigman, and M. Bowman. 2004. Strength of recommendation taxonomy (SORT): a patient- centered approach to grading evidence in the medical literature. J Am Board Fam Pract, 17(1):59-67.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Grading the quality of medical evidence", |
|
"authors": [ |
|
{ |
|
"first": "Binod", |
|
"middle": [], |
|
"last": "Gyawali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thamar", |
|
"middle": [], |
|
"last": "Solorio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yassine", |
|
"middle": [], |
|
"last": "Benajiba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "BioNLP: Proceedings of the 2012 Workshop on Biomedical Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "176--184", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Binod Gyawali, Thamar Solorio, and Yassine Bena- jiba. 2012. Grading the quality of medical evidence. In BioNLP: Proceedings of the 2012 Workshop on Biomedical Natural Language Processing, pages 176- 184, Montr\u00e9al, Canada. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Automatic grading of evidence: the 2011 ALTA shared task", |
|
"authors": [ |
|
{ |
|
"first": "Diego", |
|
"middle": [], |
|
"last": "Molla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abeed", |
|
"middle": [], |
|
"last": "Sarker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the Australasian Language Technology Association Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diego Molla and Abeed Sarker. 2011. Automatic grad- ing of evidence: the 2011 ALTA shared task. In Pro- ceedings of the Australasian Language Technology Association Workshop 2011, pages 4-8, Canberra, Australia.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "BERTweet: A Pre-trained Language Model for English Tweets", |
|
"authors": [ |
|
{ |
|
"first": "Thanh", |
|
"middle": [], |
|
"last": "Dat Quoc Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anh", |
|
"middle": [ |
|
"Tuan" |
|
], |
|
"last": "Vu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9--14", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dat Quoc Nguyen, Thanh Vu, and Anh Tuan Nguyen. 2020. BERTweet: A Pre-trained Language Model for English Tweets. In Proceedings of the 2020 Con- ference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 9-14.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Scikit-learn: Machine learning in Python", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Pedregosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Varoquaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Gramfort", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Michel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Thirion", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Grisel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Blondel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Prettenhofer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Dubourg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Vanderplas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Passos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Cournapeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Brucher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Perrot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Duchesnay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2825--2830", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duch- esnay. 2011. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Evidence based medicine: what it is and what it isn't", |
|
"authors": [ |
|
{

"first": "David",

"middle": [

"L"

],

"last": "Sackett",

"suffix": ""

},

{

"first": "William",

"middle": [

"M",

"C"

],

"last": "Rosenberg",

"suffix": ""

},

{

"first": "J",

"middle": [

"A",

"Muir"

],

"last": "Gray",

"suffix": ""

},

{

"first": "R",

"middle": [

"Brian"

],

"last": "Haynes",

"suffix": ""

},

{

"first": "W",

"middle": [

"Scott"

],

"last": "Richardson",

"suffix": ""

}
|
], |
|
"year": 1996, |
|
"venue": "BMJ", |
|
"volume": "", |
|
"issue": "7023", |
|
"pages": "71--72", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1136/bmj.312.7023.71" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David L Sackett, William M C Rosenberg, J A Muir Gray, R Brian Haynes, and W Scott Richardson. 1996. Evidence based medicine: what it is and what it isn't. BMJ, 312(7023):71-72.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "A rule based approach for automatic identification of publication types of medical papers", |
|
"authors": [ |
|
{ |
|
"first": "Abeed", |
|
"middle": [], |
|
"last": "Sarker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diego", |
|
"middle": [], |
|
"last": "Moll\u00e1-Aliod", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the ADCS Annual Symposium", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "84--88", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abeed Sarker and Diego Moll\u00e1-Aliod. 2010. A rule based approach for automatic identification of publi- cation types of medical papers. In Proceedings of the ADCS Annual Symposium, pages 84-88. Citeseer.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Do NLP models know numbers? probing numeracy in embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Wallace", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yizhong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sujian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5307--5315", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1534" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Wallace, Yizhong Wang, Sujian Li, Sameer Singh, and Matt Gardner. 2019. Do NLP models know num- bers? probing numeracy in embeddings. In Proceed- ings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th Inter- national Joint Conference on Natural Language Pro- cessing (EMNLP-IJCNLP), pages 5307-5315, Hong Kong, China. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "XLNet: Generalized Autoregressive Pretraining for Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{

"first": "Russ",

"middle": [

"R"

],

"last": "Salakhutdinov",

"suffix": ""

},

{

"first": "Quoc",

"middle": [

"V"

],

"last": "Le",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "5753--5763", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Russ R Salakhutdinov, and Quoc V Le. 2019. XLNet: Generalized Autoregressive Pre- training for Language Understanding. In H. Wal- lach, H. Larochelle, A. Beygelzimer, F. d'Alch\u00e9-Buc, E. Fox, and R. Garnett, editors, Advances in Neu- ral Information Processing Systems 32, pages 5753- 5763. Curran Associates, Inc.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "An example of the data re-organization process. The first column contains the evidence IDs, the second column contains the SORT grades, and the third column contains the publication IDs.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"text": "The confusion matrix for the result of the ensemble model on the test set.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "The accuracies and 95% confidence intervals (CIs) on the development and test set." |
|
} |
|
} |
|
} |
|
} |