{
"paper_id": "2020",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T12:27:46.232114Z"
},
"title": "Comparison of Machine Learning Methods for Multi-label Classification of Nursing Education and Licensure Exam Questions",
"authors": [
{
"first": "John",
"middle": [
"T"
],
"last": "Langton",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Wolters Kluwer Health",
"location": {
"addrLine": "230 3rd Avenue",
"postCode": "02451",
"settlement": "Waltham",
"region": "MA"
}
},
"email": "[email protected]"
},
{
"first": "Krishna",
"middle": [],
"last": "Srihasam",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Wolters Kluwer Health",
"location": {
"addrLine": "230 3rd Avenue",
"postCode": "02451",
"settlement": "Waltham",
"region": "MA"
}
},
"email": "[email protected]"
},
{
"first": "Junlin",
"middle": [],
"last": "Jiang",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Wolters Kluwer Health",
"location": {
"addrLine": "230 3rd Avenue",
"postCode": "02451",
"settlement": "Waltham",
"region": "MA"
}
},
"email": "[email protected]"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "In this paper, we evaluate several machine learning methods for multi-label classification of text questions. Every nursing student in the United States must pass the National Council Licensure Examination (NCLEX) to begin professional practice. NCLEX defines a number of competencies on which students are evaluated. By labeling test questions with NCLEX competencies, we can score students according to their performance in each competency. This information helps instructors measure how prepared students are for the NCLEX, as well as which competencies they may need help with. A key challenge is that questions may be related to more than one competency. Labeling questions with NCLEX competencies, therefore, equates to a multilabel, text classification problem where each competency is a label. Here we present an evaluation of several methods to support this use case along with a proposed approach. While our work is grounded in the nursing education domain, the methods described here can be used for any multi-label, text classification use case.",
"pdf_parse": {
"paper_id": "2020",
"_pdf_hash": "",
"abstract": [
{
"text": "In this paper, we evaluate several machine learning methods for multi-label classification of text questions. Every nursing student in the United States must pass the National Council Licensure Examination (NCLEX) to begin professional practice. NCLEX defines a number of competencies on which students are evaluated. By labeling test questions with NCLEX competencies, we can score students according to their performance in each competency. This information helps instructors measure how prepared students are for the NCLEX, as well as which competencies they may need help with. A key challenge is that questions may be related to more than one competency. Labeling questions with NCLEX competencies, therefore, equates to a multilabel, text classification problem where each competency is a label. Here we present an evaluation of several methods to support this use case along with a proposed approach. While our work is grounded in the nursing education domain, the methods described here can be used for any multi-label, text classification use case.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "All nurses within the United States must pass the National Council Licensure Examination (NCLEX \u00ae ) to begin professional practice. A nursing curriculum will typically cover a wide range of topics related to the theory and practice of nursing. However, the NCLEX measures students against a specific set of competencies comprising the activities that entry-level nurses are most commonly expected to perform. These activities are identified by the National Council of State Boards of Nursing (NCSBN) through analysis of nursing practice. Figure 1 shows a subset of NCLEX competencies called \"activity statements\" with descriptions. Activity statements are grouped into primary topics and sub-topics, as shown in the image. Nursing education content may be related to one or more competency; they are not mutually exclusive. Passage of the NCLEX has significance not only for students but also for learning institutions. Nursing school accreditation is partially based on how well their student body performs on the NCLEX. If their performance drops below a certain threshold for too many consecutive years, the school risks losing its accreditation. It is, therefore, paramount for instructors to gauge student preparedness for the NCLEX and course correct where necessary. One way to do this is by repeatedly testing students with simulated exams. This approach may reveal that gaps exist, however, it does not necessarily identify what competencies are deficient or what content may address those deficiencies. By labeling both questions and educational content with the competencies that they relate to, instructors can more precisely identify where students are struggling and what content may help with remediation. This approach enables coursework to be tailored for individual students based on their performance in a manner that maximizes likelihood of passing the NCLEX. For instance, if a student incorrectly answers questions related to \"Provide pulmonary hygiene\" (activity statement shown in Figure 1 ), the instructor may assign the student additional content (e.g., simulations and practice problems) related to that competency. This general approach is called formative testing.",
"cite_spans": [],
"ref_spans": [
{
"start": 538,
"end": 546,
"text": "Figure 1",
"ref_id": "FIGREF0"
},
{
"start": 2005,
"end": 2013,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "PrepU is a Wolters Kluwer product for nursing education that features several types of content including books, simulations, videos, audio, and quizzes. To support formative testing in PrepU, quiz questions are tagged with the NCLEX competency related to them. When students take quizzes, their scores can be aggregated according to NCLEX competency. Figure 2 shows an example of the interface displaying this information. The image shows the student achieves a score of 66.7% for the competency \"Prioritize the delivery of client care\". This is calculated based on the student answering 4 out of 6 questions correctly that were labeled with that competency. There is also a tab that shows class performance so that instructors can see if there is a pattern of multiple students struggling with a particular competency. Instructors can use this information to make changes to the curriculum to address problem areas. Corrective actions may include assigning students additional content or practice materials related to the competencies they are struggling with. To aggregate scores according to NCLEX competencies as shown in Figure 2 , each question needs to be labeled according to which competencies it relates to. Prior to our work, editors would manually label questions using the editorial platform shown in Figure 3 . A drop-down menu shows a selection of NCLEX competencies. The editor must scroll through this list, identify which are appropriate, and select them to add them to the question. This process was costly and time-consuming. One challenge is that each question can belong to more than one competency. Further, different editors may have differing opinions as to which competencies a question relates to. Reconciling these differences and maintaining consistency across editors and content is a huge challenge.",
"cite_spans": [],
"ref_spans": [
{
"start": 351,
"end": 359,
"text": "Figure 2",
"ref_id": "FIGREF1"
},
{
"start": 1126,
"end": 1134,
"text": "Figure 2",
"ref_id": "FIGREF1"
},
{
"start": 1314,
"end": 1322,
"text": "Figure 3",
"ref_id": "FIGREF2"
}
],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "To streamline the labeling of nursing education questions, we integrated a machine learning model for automated tagging into the current workflow. As editors review each question, the model makes suggestions about which NCLEX competencies are related to that question. Rather than scrolling through a long list of options, editors can rapidly click to accept or reject suggestions (though they still have the ability to scroll through all possibilities if they believe none of the suggestions are applicable). This approach has greatly streamlined the process of labeling questions and added additional consistency in the application of labels. The following sections detail the data involved, the modeling techniques evaluated, and the chosen solution.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In this paper, we focus on NCLEX competencies related to what are called \"activity statements\". Activity statements are presented in a hierarchical structure with two levels as shown in Figure 1 . We consider only leaf nodes to simplify the problem. Given this consideration, there are 138 activity statements or labels in total.",
"cite_spans": [],
"ref_spans": [
{
"start": 186,
"end": 195,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "The Data",
"sec_num": "2"
},
{
"text": "41125 questions were manually labeled with one or more of the 138 possible activity statements related to them. This data was used for both training and testing of our machine learning models. The distribution of questions across activity statements was non-uniform and presented a class imbalance challenge. The majority of activity statements were assigned to 5 or fewer questions. However, there was a small set of activity statements that were commonly used, and two that were associated with nearly 3000 questions. The distribution of questions to activity statements is shown in Figure 4 . Each bar corresponds to one of the 138 activity statements and its height represents the number of questions assigned to it.",
"cite_spans": [],
"ref_spans": [
{
"start": 585,
"end": 593,
"text": "Figure 4",
"ref_id": "FIGREF3"
}
],
"eq_spans": [],
"section": "The Data",
"sec_num": "2"
},
{
"text": "Less than 100 questions that were assigned more than one activity statement label. However, there was a desire to accommodate multiple activity statements per question for future labeling efforts. Therefore, we maintained an approach using multilabel classification techniques.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Data",
"sec_num": "2"
},
{
"text": "The task of tagging questions with relevant activity statements can be considered a multi-label document classification task where each question is a document. There are several well-known methods for this type of task. Many of them represent a document as a vector of numbers. We can use similarity and/or distance metrics between document vectors to perform several operations such as clustering and classification. A key set of decisions is how to represent documents as vectors, and what distance metrics to use for comparing them. The following sections describe a number of approaches for document vectorization as well as methods for multi-label classification.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Research",
"sec_num": "3"
},
{
"text": "Bag of words approaches for document vectorization are quite common and have been used with a number of different algorithms (Mccallum and Nigam, 2001 ). These approaches use word frequency to determine vector representations for documents and may employ a number of feature selection and normalization techniques (Xu et al., 2009) . One dominant technique is called Term Frequency -Inverse Document Frequency (TF-IDF).",
"cite_spans": [
{
"start": 125,
"end": 150,
"text": "(Mccallum and Nigam, 2001",
"ref_id": "BIBREF8"
},
{
"start": 314,
"end": 331,
"text": "(Xu et al., 2009)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Text Vectorization and Classification",
"sec_num": "3.1"
},
{
"text": "While bag of words methods have proven quite effective, they suffer a number of weaknesses. When paired with algorithms such as na\u00efve Bayes classifiers, there is no consideration of word order, proximity, or co-occurrence within a document. This can be somewhat mitigated using n-gram techniques (i.e. considering n consecutive words as one element in document vectors). Synonyms can also confound bag of word approaches since two or more words may appear as unique elements in a document vector despite being semantically equivalent. For instance, \"water\" and \"H2O\" may show up as distinct vocabulary terms in a TF-IDF vector. When computing the cosine similarity between the vector for a document that discusses \"water\" and one that discusses \"H2O\", the result would inaccurately indicate they were dissimilar.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Text Vectorization and Classification",
"sec_num": "3.1"
},
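The synonym weakness described above is easy to demonstrate. The following minimal sketch (ours, not code from the paper) builds TF-IDF vectors for two invented sentences that differ only in using "water" versus "H2O" and computes their cosine similarity.

```python
# A minimal sketch (ours, not the paper's code) of the synonym problem:
# "water" and "H2O" are distinct vocabulary terms, so TF-IDF treats them
# as unrelated features even though they are semantically equivalent.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

docs = [
    "The client should drink water before the procedure.",
    "The client should drink H2O before the procedure.",
]

vectors = TfidfVectorizer().fit_transform(docs)

# Shared words still contribute similarity, but the water/H2O mismatch
# lowers the score even though the two sentences mean the same thing.
print(cosine_similarity(vectors[0], vectors[1]))
```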
{
"text": "Word embeddings using neural networks are a more recent and popular method for vectorizing text (Kim, 2014) . Long Short Term Memory (LSTM) and Gated Recurrent Unit (GRU) are recurrent neural network (RNN) models that leverage connections between adjacent nodes in a single layer to better address word order and context. Huang, Xu, and Yu (Huang et al., 2015) compare several ensembles of bidirectional LSTMs and Conditional Random Fields (CRF) for sentence classification. Neural network models have specifically been used for multi-label document classification (Baumel et al., 2017) (Lenc and Kr\u00e1l, 2017) .",
"cite_spans": [
{
"start": 96,
"end": 107,
"text": "(Kim, 2014)",
"ref_id": "BIBREF5"
},
{
"start": 322,
"end": 360,
"text": "Huang, Xu, and Yu (Huang et al., 2015)",
"ref_id": "BIBREF4"
},
{
"start": 565,
"end": 586,
"text": "(Baumel et al., 2017)",
"ref_id": "BIBREF1"
},
{
"start": 587,
"end": 608,
"text": "(Lenc and Kr\u00e1l, 2017)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Text Vectorization and Classification",
"sec_num": "3.1"
},
{
"text": "One of the most recent advances in natural language processing with neural networks is the use of pretrained, deep transformer models such as BERT (Devlin et al., 2018) . BERT has outperformed many competing methods in standard language understanding tasks and has been used specifically for document classification (Adhikari et al., 2019) . There is a great deal of research combining these different approaches for multiple use cases.",
"cite_spans": [
{
"start": 147,
"end": 168,
"text": "(Devlin et al., 2018)",
"ref_id": "BIBREF2"
},
{
"start": 316,
"end": 339,
"text": "(Adhikari et al., 2019)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Text Vectorization and Classification",
"sec_num": "3.1"
},
{
"text": "Multi-label classification refers to a classification problem where each item being classified can belong to more than one class (or label) at the same time. This contrasts with standard classification where each item is assigned to only one class. A trivial example would be classifying geometric shapes where a square could be both a square and a rectangle.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Multi-label Classification",
"sec_num": "3.2"
},
{
"text": "There are a few standard techniques for dealing with multi-label classification. Many transform the problem into a standard classification task. One approach is to train a binary classifier for every label independently. Each classifier is then executed on the same input to predict whether its associated label should be applied (Read et al., 2015) . In this scenario, each classifier only has the knowledge of one label and only makes predictions for membership or non-membership in that label group or class. This strategy is similar to \"one-versusrest\" approaches, however, it often employs techniques more analogous to one class classification or anomaly detection. An extension to this method is to chain multiple binary classifiers together in a sequence. The predictions from one classifier is passed as a feature to the next classifier until a final set of predictions is output. Probabilistic methods can be used to optimize the order of classifiers.",
"cite_spans": [
{
"start": 330,
"end": 349,
"text": "(Read et al., 2015)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Multi-label Classification",
"sec_num": "3.2"
},
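As a concrete illustration of the two strategies above, the hedged sketch below uses scikit-learn's OneVsRestClassifier for binary relevance and ClassifierChain for chained classifiers. X and Y are randomly generated stand-ins for document vectors and label indicators, and logistic regression replaces whatever base learner a real system would use.

```python
# A hedged sketch of binary relevance and classifier chains with scikit-learn.
# X and Y are placeholders, not the paper's data.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multioutput import ClassifierChain

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 50))                 # stand-in question vectors
Y = (rng.random((200, 3)) < 0.2).astype(int)   # stand-in multi-label targets

# Binary relevance: one independent binary classifier per label.
binary_relevance = OneVsRestClassifier(LogisticRegression(max_iter=1000)).fit(X, Y)

# Classifier chain: each classifier also receives the previous predictions.
chain = ClassifierChain(LogisticRegression(max_iter=1000),
                        order="random", random_state=0).fit(X, Y)

print(binary_relevance.predict(X[:2]))
print(chain.predict(X[:2]))
```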
{
"text": "Another common approach for multi-label classification is to take the power set of label permutations and treat each as an independent class. This approach transforms the problem into a standard multi-class classification task. Newton et. al. compare a number of methods for such problem transformations (Spola\u00f4R et al., 2013) . For instance, we can transform a set of 3 labels, (A, B, C), into a power set of classes:",
"cite_spans": [
{
"start": 304,
"end": 326,
"text": "(Spola\u00f4R et al., 2013)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Multi-label Classification",
"sec_num": "3.2"
},
{
"text": "{(A), (B)(C), (A, B), (A, C), (B, C)}.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Multi-label Classification",
"sec_num": "3.2"
},
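A minimal sketch of this transformation (our illustration, not the paper's code) enumerates the non-empty label combinations for three labels and maps each combination to a single class index:

```python
# Label power set transformation: each observed combination of labels
# becomes one class in a standard multi-class problem.
from itertools import combinations

labels = ("A", "B", "C")

# All non-empty label combinations form the power set of classes.
power_set = [c for r in range(1, len(labels) + 1) for c in combinations(labels, r)]
print(power_set)
# [('A',), ('B',), ('C',), ('A', 'B'), ('A', 'C'), ('B', 'C'), ('A', 'B', 'C')]

# In practice, each question's label set is mapped to one of these classes.
class_index = {c: i for i, c in enumerate(power_set)}
print(class_index[("A", "C")])
```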
{
"text": "We began our analysis by evaluating how different vectorization techniques and similarity metrics perform at differentiating questions related to one label (i.e., activity statement) from another. The ability to differentiate questions in this manner directly affects the performance of classification and clustering algorithms. The results helped establish a baseline of how much overlap there was between questions in different label groups. It also informed decisions on which vectorization methods and similarity metrics to use with what algorithms for evaluation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluating Vectorization Methods and Similarity Metrics for Clustering and Classification",
"sec_num": "4"
},
{
"text": "To perform this analysis, we leveraged techniques often used in clustering. The nursing education questions were grouped into clusters based on the activity statements they were associated with. This resulted in 138 clusters, one for each of the activity statements. Questions associated with more than one activity statement were included in the groups for each. We experimented with several vectorization methods (techniques for transforming the questions into numeric vectors) as well as similarity metrics for comparing vectors. We converged on using cosine similarity to compare vectors because of its ability to deal with both sparse and dense vectors when normalized. The vectorizations evaluated included the following:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluating Vectorization Methods and Similarity Metrics for Clustering and Classification",
"sec_num": "4"
},
{
"text": "\u2022 term frequency -inverse document frequency (TF-IDF)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluating Vectorization Methods and Similarity Metrics for Clustering and Classification",
"sec_num": "4"
},
{
"text": "\u2022 word embeddings pretrained on google news [ (Mikolov et al., 2013)] \u2022 word embeddings pretrained on PubMed [ (Pyysalo et al., 2013)] \u2022 word embeddings pretrained on PubMed and updated on text content from Wolters Kluwer nursing education",
"cite_spans": [
{
"start": 46,
"end": 69,
"text": "(Mikolov et al., 2013)]",
"ref_id": "BIBREF9"
},
{
"start": 111,
"end": 134,
"text": "(Pyysalo et al., 2013)]",
"ref_id": "BIBREF11"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluating Vectorization Methods and Similarity Metrics for Clustering and Classification",
"sec_num": "4"
},
{
"text": "For each vectorization method, we computed the silhouette score across our manually constructed clusters. The silhouette score measures the similarity of questions within a cluster (cohesion) versus the dissimilarity of questions in one cluster as compared to those in other clusters (separation). Higher silhouette scores indicate better cohesion within clusters and separation between clusters. In clustering, this measure can help inform the number of clusters to use. For our analysis, we were more interested in what vectorization methods achieved better separation of questions assigned to different NCLEX labels. The vectorizations achieving the best silhouette scores could be expected to perform better in classification tasks. We therefore controlled the number of clusters to the number of activity statements, i.e. 138. Table 1 shows the different vectorization methods evaluated along with their respective silhouette scores. We also include metrics based on the cohesion component of the silhouette score. Specifically, the binary relevance scores measure the distance between question vectors that are all tagged with the same label. The table reports the mean, minimum, maximum, and standard deviation of binary relevance scores across all 138 label clusters. Figure 5 shows the silhouette scores for every pair of activity statement clusters using TF-IDF vectorization of questions. TF-IDF resulted in the highest silhouette score of -.02. However, the scores for all vectorization methods were relatively low. This result indicated two things 1) there is a great degree of similarity between questions assigned to different activity statement labels, and 2) no vectorization method performed much better than the others. This result indicated that algorithms may need further grouping and sampling of questions to better differentiate them during classification.",
"cite_spans": [],
"ref_spans": [
{
"start": 832,
"end": 839,
"text": "Table 1",
"ref_id": null
},
{
"start": 1276,
"end": 1284,
"text": "Figure 5",
"ref_id": "FIGREF4"
}
],
"eq_spans": [],
"section": "Evaluating Vectorization Methods and Similarity Metrics for Clustering and Classification",
"sec_num": "4"
},
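The sketch below shows, on invented data, how such an evaluation could be run with scikit-learn: questions are vectorized with TF-IDF and the silhouette score is computed over their manually assigned label groups using cosine distance. The questions and label assignments are placeholders, not the paper's data.

```python
# A hedged sketch of the evaluation above: score how well a vectorization
# separates questions grouped by their manually assigned labels.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import silhouette_score

questions = [
    "Prioritize the delivery of client care for four clients.",
    "Which client should the nurse see first?",
    "Demonstrate proper technique for pulmonary hygiene.",
    "Which intervention promotes airway clearance?",
]
labels = [0, 0, 1, 1]  # stand-in activity statement assignments

X = TfidfVectorizer().fit_transform(questions)

# Cosine distance matches the similarity metric used in the paper; scores
# near zero or below indicate heavy overlap between label groups.
print(silhouette_score(X, labels, metric="cosine"))
```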
{
"text": "We hypothesized that ignoring the current labels and clustering questions may achieve better separation for classification algorithms. To evaluate this hypothesis, we performed a standard clustering of questions using the various vectorization methods. Normally we would optimize the number of clusters based on the silhouette score or other related metrics. However, in the interest of time, we used a fixed number of 512 clusters. This number was estimated from the number of questions and their distribution across activity statements. The resulting silhouette scores improved by .02 on average but did not reflect a significant change. On average, each cluster contained questions from five different activity statement label groups. This result motivated some of the modeling experiments described in the following section.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluating Vectorization Methods and Similarity Metrics for Clustering and Classification",
"sec_num": "4"
},
{
"text": "We use micro-averaged area under the receiver operating characteristic (AUC-ROC) to compare multiple algorithms for the described use case. This metric is able to address class imbalance and is used canonically for benchmarking models (Harutyunyan et al., 2019) . Note that the metrics reported here only reference the AUC score of first label predicted. In production, the top five labels with the highest confidence values are shown to users and results in an accuracy of 95% in predicting all relevant labels. This is discussed further in Section 5.6. Nonetheless, the initial AUC score of the first prediction was a good benchmark for comparing models. The following sections provide details on each algorithm evaluated.",
"cite_spans": [
{
"start": 235,
"end": 261,
"text": "(Harutyunyan et al., 2019)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Modeling",
"sec_num": "5"
},
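For reference, a minimal sketch of the comparison metric using scikit-learn's roc_auc_score; Y_true and Y_score below are placeholder arrays, not results from the paper.

```python
# Micro-averaged AUC-ROC over a multi-label indicator matrix.
import numpy as np
from sklearn.metrics import roc_auc_score

Y_true = np.array([[1, 0, 0], [0, 1, 1], [0, 0, 1]])
Y_score = np.array([[0.9, 0.2, 0.1], [0.1, 0.8, 0.7], [0.2, 0.3, 0.6]])

# Micro-averaging pools all (question, label) decisions before computing the
# curve, which keeps rare labels from being drowned out by per-label averaging.
print(roc_auc_score(Y_true, Y_score, average="micro"))
```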
{
"text": "The first model employed a variant of TF-IDF vectorization referred to as Term Frequency -Inverse Label Frequency (TF-ILF). The primary difference is in what is regarded as a \"document\". Instead of individual questions being treated as a document, questions are grouped according to their labels and then the group is treated as a document. This document specification results in a slight difference in how document vectors are normalized. The vocabulary for the vectorization included the use of bi-grams and tri-grams (i.e., 2 and 3 word sequences). After eliminating stop words (e.g., \"a\", \"and\", \"the\"), stemming, and performing synonym replacement, the total vocabulary was constrained to 30,000 features. Specifically, we kept only the 30,000 features with the highest TF-ILF values.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "One versus Rest Support Vector Machines (SVM) with TF-IDF Vectorization",
"sec_num": "5.1"
},
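One plausible reading of TF-ILF, sketched below with scikit-learn: questions sharing a label are concatenated into a single "document" before TF-IDF is applied, so document frequency becomes label frequency. The question texts and labels are invented, the preprocessing is simplified (no stemming or synonym replacement), and max_features only approximates the paper's selection of the top 30,000 features by TF-ILF value.

```python
# A hedged sketch of TF-ILF: group questions by label, concatenate each
# group into one "document", then apply TF-IDF over the label documents.
from collections import defaultdict
from sklearn.feature_extraction.text import TfidfVectorizer

# Placeholder (question text, labels) pairs.
questions = [
    ("Prioritize care for four clients.", ["prioritization"]),
    ("Which client should the nurse see first?", ["prioritization"]),
    ("Demonstrate pulmonary hygiene technique.", ["pulmonary_hygiene"]),
]

grouped = defaultdict(list)
for text, labels in questions:
    for label in labels:
        grouped[label].append(text)

label_docs = [" ".join(texts) for texts in grouped.values()]

# Unigrams, bi-grams, and tri-grams, capped at the most frequent features
# (the paper keeps 30,000; a small cap is used here).
vectorizer = TfidfVectorizer(ngram_range=(1, 3), stop_words="english",
                             max_features=1000)
label_matrix = vectorizer.fit_transform(label_docs)
print(label_matrix.shape)  # (n_labels, n_features)
```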
{
"text": "All questions were first vectorized. Each vector was then labeled according to the manually assigned labels using the LabelEncoder python class from the SciKit Learn package (Pedregosa et al., 2011) . To address the multi-label issue (each question could have more than one activity statement label), we employed a one-versus-rest approach using support vector machines. A binary classifier was trained for each activity statement label with SciKit Learn's LinearSVC algorithm. This approach resulted in 138 models. The hyperparameters for the algorithm included an L2 penalty and ran with 1000 maximum iterations. Models were evaluated using cross-validation with the Cal-ibratedClassifierCV python class. Cross-validation provides a more robust evaluation and can reveal variability between multiple executions of the algorithm.",
"cite_spans": [
{
"start": 174,
"end": 198,
"text": "(Pedregosa et al., 2011)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "One versus Rest Support Vector Machines (SVM) with TF-IDF Vectorization",
"sec_num": "5.1"
},
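A hedged sketch of this setup follows: LinearSVC with an L2 penalty and 1000 iterations, calibrated with CalibratedClassifierCV and wrapped in a one-versus-rest scheme. The data is randomly generated and the label count is reduced from 138 to keep the example small.

```python
# A minimal sketch of the one-versus-rest SVM setup described above.
import numpy as np
from sklearn.calibration import CalibratedClassifierCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC

rng = np.random.default_rng(0)
X = rng.normal(size=(300, 40))                 # stand-in TF-ILF vectors
Y = (rng.random((300, 5)) < 0.2).astype(int)   # 5 stand-in labels (paper: 138)

# LinearSVC with an L2 penalty and 1000 iterations, calibrated with
# cross-validation so each binary SVM emits probabilities.
base = LinearSVC(penalty="l2", max_iter=1000)
model = OneVsRestClassifier(CalibratedClassifierCV(base, cv=5)).fit(X, Y)

probs = model.predict_proba(X[:2])  # one probability per label per question
print(probs.shape)
```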
{
"text": "Given a question as input, each classifier would predict whether a single activity statement label should be assigned to the question, without regard to any other labels. We computed confidence thresholds for each classifier as to whether to accept its prediction or not. Thresholds were established using evaluation metrics such as recall and precision. Questions were then fed into each classifier and any label predictions that met the required thresholds were assigned. In this manner, questions could be assigned more than one label provided more than one model prediction met the required threshold.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "One versus Rest Support Vector Machines (SVM) with TF-IDF Vectorization",
"sec_num": "5.1"
},
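The thresholding step can be expressed in a few lines; the probabilities and per-label cutoffs below are invented for illustration.

```python
# Per-label thresholding: a label is assigned only if its classifier's
# confidence meets that label's cutoff, so zero, one, or several labels
# can be assigned per question. Values here are placeholders.
import numpy as np

probs = np.array([0.91, 0.40, 0.75])       # model confidences for 3 labels
thresholds = np.array([0.80, 0.60, 0.70])  # per-label cutoffs (invented)

assigned = np.flatnonzero(probs >= thresholds)
print(assigned)  # labels 0 and 2 meet their thresholds
```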
{
"text": "The micro-averaged AUC-ROC of the SVM model was .968.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "One versus Rest Support Vector Machines (SVM) with TF-IDF Vectorization",
"sec_num": "5.1"
},
{
"text": "A convolutional neural network (CNN) model was trained using a Keras tokenizer and word embeddings pretrained on articles from PubMed [ (Pyysalo et al., 2013) ]. LabelEncoder was used again to encode question labels. The model used a softmax activation function and categorical crossentropy for the loss function. The output of the model was structured as per-label probabilities between 0 and 1. The softmax output enabled more than one label to have a non-zero probability for a given question input therefore addressed the multilabel problem. Details of the network architecture are shown in Figure 6 . The micro-averaged AUC-ROC of the CNN model was .972.",
"cite_spans": [
{
"start": 136,
"end": 158,
"text": "(Pyysalo et al., 2013)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [
{
"start": 595,
"end": 603,
"text": "Figure 6",
"ref_id": "FIGREF5"
}
],
"eq_spans": [],
"section": "Convolutional Neural Network",
"sec_num": "5.2"
},
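Since the paper does not publish code, the following is only a rough sketch of a CNN classifier matching the description above: pretrained embeddings (a zero matrix stands in for the PubMed vectors), a softmax output over 138 labels, and categorical cross-entropy loss. Layer sizes are placeholders; the actual architecture is in Figure 6.

```python
# A rough sketch only: the real architecture is in Figure 6 and the real
# embedding matrix comes from PubMed-pretrained vectors.
import numpy as np
from tensorflow.keras import initializers, layers, models

VOCAB_SIZE, EMBED_DIM, MAX_LEN, N_LABELS = 30000, 200, 300, 138
pretrained = np.zeros((VOCAB_SIZE, EMBED_DIM))  # stand-in for PubMed embeddings

model = models.Sequential([
    layers.Input(shape=(MAX_LEN,)),
    layers.Embedding(VOCAB_SIZE, EMBED_DIM,
                     embeddings_initializer=initializers.Constant(pretrained),
                     trainable=False),
    layers.Conv1D(128, 5, activation="relu"),
    layers.GlobalMaxPooling1D(),
    layers.Dense(128, activation="relu"),
    # Softmax output with categorical cross-entropy, as described above.
    layers.Dense(N_LABELS, activation="softmax"),
])
model.compile(optimizer="adam", loss="categorical_crossentropy")
model.summary()
```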
{
"text": "A bidirectional LSTM was trained using many of the same parameters as the CNN including a softmax activation function, categorical cross-entropy loss function, and word embeddings pretrained on PubMed. The important difference in this neural network is that layer nodes were connected in a sequential manner, both forward and backwards. Attention was also used to bias more important weights in the network architecture. Details of the network architecture are shown in Figure 7 .",
"cite_spans": [],
"ref_spans": [
{
"start": 470,
"end": 478,
"text": "Figure 7",
"ref_id": "FIGREF6"
}
],
"eq_spans": [],
"section": "Bidirectional LSTM",
"sec_num": "5.3"
},
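Again as a rough sketch rather than the paper's code: a bidirectional LSTM with a dot-product attention layer standing in for the unspecified attention mechanism. Sizes are placeholders, and the real architecture is in Figure 7.

```python
# A hedged sketch of the bidirectional LSTM of Section 5.3.
import numpy as np
from tensorflow.keras import initializers, layers, models

VOCAB_SIZE, EMBED_DIM, MAX_LEN, N_LABELS = 30000, 200, 300, 138
pretrained = np.zeros((VOCAB_SIZE, EMBED_DIM))  # stand-in for PubMed embeddings

inputs = layers.Input(shape=(MAX_LEN,))
x = layers.Embedding(VOCAB_SIZE, EMBED_DIM,
                     embeddings_initializer=initializers.Constant(pretrained),
                     trainable=False)(inputs)
x = layers.Bidirectional(layers.LSTM(128, return_sequences=True))(x)
# Self-attention over the sequence, then pooling to a fixed-size vector.
x = layers.Attention()([x, x])
x = layers.GlobalAveragePooling1D()(x)
outputs = layers.Dense(N_LABELS, activation="softmax")(x)

model = models.Model(inputs, outputs)
model.compile(optimizer="adam", loss="categorical_crossentropy")
model.summary()
```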
{
"text": "The micro-averaged AUC-ROC of the bidirectional LSTM model was .940.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Bidirectional LSTM",
"sec_num": "5.3"
},
{
"text": "All models had similar AUC metrics. We hypothesized that different models may perform well on different subsets of labels. If this were true, it would be possible to combine the models in an ensemble to increase overall performance across all labels. We trained an ensemble classifier to evaluate this hypothesis. A random forest model was trained on the outputs of the previously described models to weight the predictions of each and make a final prediction. Questions were first vectorized and input to each component model (i.e., the SVMs, CNN, and LSTM). The output probabilities of each model was then fed into the random forest. Specifically, the inputs of the random forest were 414 values between 0 and 1 consisting of:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Random Forrest Ensemble of SVM, CNN, and LSTM Models",
"sec_num": "5.4"
},
{
"text": "\u2022 a probability from each of 138 binary SVMs",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Random Forrest Ensemble of SVM, CNN, and LSTM Models",
"sec_num": "5.4"
},
{
"text": "\u2022 a probability for each of 138 output nodes of the CNN",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Random Forrest Ensemble of SVM, CNN, and LSTM Models",
"sec_num": "5.4"
},
{
"text": "\u2022 a probability for each of 138 output nodes of the LSTM",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Random Forrest Ensemble of SVM, CNN, and LSTM Models",
"sec_num": "5.4"
},
{
"text": "The output of the random forest was a binary vector of 138 elements. Each element corresponded to an activity statement label. A value of 1 indicated the input question should have that label and a value of 0 indicated that it should not. The micro-averaged AUC-ROC of the random forest ensemble combining the output of the other models was .937. Table 2 shows the micro-averaged AUC-ROC of the models evaluated. The best performing model was the CNN. However, none of the algorithms performed dramatically different from one another. We believe that several confounding factors in the data were equally challenging for the various methods.",
"cite_spans": [],
"ref_spans": [
{
"start": 347,
"end": 354,
"text": "Table 2",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Random Forrest Ensemble of SVM, CNN, and LSTM Models",
"sec_num": "5.4"
},
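A minimal sketch of this stacking arrangement, with random arrays standing in for the component models' outputs:

```python
# Concatenate the 3 x 138 per-label probabilities into one 414-dimensional
# input and train a random forest to emit the final binary label vector.
import numpy as np
from sklearn.ensemble import RandomForestClassifier

n_questions, n_labels = 500, 138
rng = np.random.default_rng(0)

svm_probs = rng.random((n_questions, n_labels))
cnn_probs = rng.random((n_questions, n_labels))
lstm_probs = rng.random((n_questions, n_labels))

X_meta = np.hstack([svm_probs, cnn_probs, lstm_probs])  # shape (500, 414)
Y = (rng.random((n_questions, n_labels)) < 0.05).astype(int)

# RandomForestClassifier natively supports multi-label indicator targets.
forest = RandomForestClassifier(n_estimators=200, random_state=0).fit(X_meta, Y)
print(forest.predict(X_meta[:1]).shape)  # (1, 138) binary label vector
```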
{
"text": "Class imbalance likely complicated classification attempts and may also indicate other issues with the manual labeling process. Editors, pressed for time, may choose labels that are higher in the drop-down list of the editorial platform. They may also choose labels that are less precise but more general and, therefore, likely to be acceptable. These behaviors could explain why a small set of activity statements labels were associated with thousands of questions whereas the rest of the labels were only associated with a few questions each.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Model Comparison and Discussion",
"sec_num": "5.5"
},
{
"text": "We also found that editors sometimes disagree about question labels. To address this issue, there is a manual process for label reconciliation. Senior editors can be consulted to make final decisions where necessary. Editors also pointed out that questions could be assigned far more activity statements than is currently the case. To optimize the adaptive quizzing experience for users, editors limit labeling to one or two labels that best fit the question.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Model Comparison and Discussion",
"sec_num": "5.5"
},
{
"text": "The best performing model was the CNN though there was not a significant difference between the methods evaluated. While we limited the time spent on hyper-parameter tuning of the ensemble approach, it was interesting that it fared the worst in our evaluation. The AUC-ROC score enabled us to compare modeling approaches but does not reflect the performance in production. When de- Tags Shown Accuracy Top 5 labels 0.95 Top 3 labels 0.76 Top 1 labels 0.47 Table 3 : TF-ILF+SVM Model Accuracy ployed, the model shows users the top five label predictions. Users can pick any subset of those labels to apply them to the question being reviewed.",
"cite_spans": [],
"ref_spans": [
{
"start": 382,
"end": 471,
"text": "Tags Shown Accuracy Top 5 labels 0.95 Top 3 labels 0.76 Top 1 labels 0.47 Table 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Final Model Evaluation",
"sec_num": "5.6"
},
{
"text": "To get a sense of accuracy in production, we log how many times we cover all relevant labels in the top N predictions as shown in Table 3 .",
"cite_spans": [],
"ref_spans": [
{
"start": 130,
"end": 137,
"text": "Table 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Number of",
"sec_num": null
},
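The coverage metric can be computed as sketched below (our formulation; the arrays are invented): a question counts as covered when all of its true labels appear among the model's top N suggestions.

```python
# Top-N coverage as in Table 3: a prediction "covers" a question if every
# manually assigned label appears in the model's top N suggestions.
import numpy as np

def top_n_coverage(y_true, y_score, n):
    """Fraction of questions whose true labels all appear in the top-n predictions."""
    top_n = np.argsort(-y_score, axis=1)[:, :n]
    hits = [set(np.flatnonzero(t)) <= set(row) for t, row in zip(y_true, top_n)]
    return float(np.mean(hits))

y_true = np.array([[1, 0, 0, 1, 0], [0, 1, 0, 0, 0]])
y_score = np.array([[0.9, 0.1, 0.2, 0.8, 0.3], [0.2, 0.7, 0.1, 0.4, 0.3]])
print(top_n_coverage(y_true, y_score, n=3))  # 1.0 for this toy example
```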
{
"text": "We are currently logging editor activity and calculating metrics to perform a thorough impact analysis. Initial estimates show that time spent on labeling questions with NCLEX tags went from a few minutes pre-machine learning to less than one minute after our solution was deployed. There are tens of thousands of questions in Wolters Kluwer products like PrepU and CoursePoint and more content being generated every year. This impact is therefore significant, measuring several hours and potentially up to $100,000 or more savings annually.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Impact Analysis",
"sec_num": "6"
},
{
"text": "Editors have responded very positively and regularly use machine learning label suggestions in their current workflow. That said, it will take some time for them to accept a completely automated process. Perhaps more importantly, subject matter experts have assessed that the consistency and quality of labels assigned to questions increase with the model suggestions. Nursing content editors often apply labels based on their personal understanding of content, which is sometimes subjective. There may also be biases in selecting \"convenient\" labels when having to choose from a lengthy list in a complicated workflow. The predictive model provides consistent label suggestions which in turn results in more consistent labels being assigned.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Impact Analysis",
"sec_num": "6"
},
{
"text": "The class imbalance of this task motivates the potential use of active machine learning. Some labels have only been assigned to a handful of questions. For these labels, we may work with editors to find more exemplar questions or create new ones. These new questions can then be merged with training data and the model retrained to ameliorate ef-fects of class imbalance. In active learning, this process is typically repeated in an iterative process to target problem areas for a model. By selectively labeling new questions and down sampling over represented labels, we can fine tune data for retraining models to improve overall accuracy. Active machine learning has specifically been used for multi-label text classification problems (Yang et al., 2009) .",
"cite_spans": [
{
"start": 738,
"end": 757,
"text": "(Yang et al., 2009)",
"ref_id": "BIBREF15"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Future Work",
"sec_num": "7"
},
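A hedged sketch of how one selection step of such a loop might look, using least-confidence sampling (one common active learning heuristic; the paper does not specify one):

```python
# One active learning iteration: pick the unlabeled questions the current
# model is least confident about and route them to editors for labeling.
import numpy as np

def least_confident(probs, k):
    """Indices of the k questions whose best label score is lowest."""
    confidence = probs.max(axis=1)
    return np.argsort(confidence)[:k]

rng = np.random.default_rng(0)
unlabeled_probs = rng.random((1000, 138))  # placeholder model outputs
to_review = least_confident(unlabeled_probs, k=25)
print(to_review[:5])  # question indices sent to editors for labeling
```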
{
"text": "Another area for further study is the evaluation of more recent, deep, transformer models. Because there is a great deal of semantic similarity between questions, these models may not fare better than more traditional vectorization and classification techniques. We intend to evaluate this hypothesis in future work.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Future Work",
"sec_num": "7"
},
{
"text": "There are many different tag sets and taxonomies that can be used to label nursing education content. Tagging both content and questions supports more advanced features such as dynamic remediation and adaptive learning. For instance, when a student answers a question incorrectly, learning software can automatically provide links to learning materials that are related to that topics addressed in that question. We are actively investigating how tagging and organizing content can support various use cases for adaptive learning.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Future Work",
"sec_num": "7"
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "DocBERT: BERT for Document Classification. arXiv e-prints",
"authors": [
{
"first": "Ashutosh",
"middle": [],
"last": "Adhikari",
"suffix": ""
},
{
"first": "Achyudh",
"middle": [],
"last": "Ram",
"suffix": ""
},
{
"first": "Raphael",
"middle": [],
"last": "Tang",
"suffix": ""
},
{
"first": "Jimmy",
"middle": [],
"last": "Lin",
"suffix": ""
}
],
"year": 2019,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1904.08398"
]
},
"num": null,
"urls": [],
"raw_text": "Ashutosh Adhikari, Achyudh Ram, Raphael Tang, and Jimmy Lin. 2019. DocBERT: BERT for Document Classification. arXiv e-prints, page arXiv:1904.08398.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Multi-label classification of patient notes a case study on ICD code assignment",
"authors": [
{
"first": "Tal",
"middle": [],
"last": "Baumel",
"suffix": ""
},
{
"first": "Jumana",
"middle": [],
"last": "Nassour-Kassis",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Elhadad",
"suffix": ""
},
{
"first": "No\u00e9mie",
"middle": [],
"last": "Elhadad",
"suffix": ""
}
],
"year": 2017,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tal Baumel, Jumana Nassour-Kassis, Michael Elhadad, and No\u00e9mie Elhadad. 2017. Multi-label classifica- tion of patient notes a case study on ICD code as- signment. CoRR, abs/1709.09587.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "BERT: pre-training of deep bidirectional transformers for language understanding",
"authors": [
{
"first": "Jacob",
"middle": [],
"last": "Devlin",
"suffix": ""
},
{
"first": "Ming-Wei",
"middle": [],
"last": "Chang",
"suffix": ""
},
{
"first": "Kenton",
"middle": [],
"last": "Lee",
"suffix": ""
},
{
"first": "Kristina",
"middle": [],
"last": "Toutanova",
"suffix": ""
}
],
"year": 2018,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: pre-training of deep bidirectional transformers for language under- standing. CoRR, abs/1810.04805.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Multitask learning and benchmarking with clinical time series data",
"authors": [
{
"first": "Hrayr",
"middle": [],
"last": "Harutyunyan",
"suffix": ""
},
{
"first": "Hrant",
"middle": [],
"last": "Khachatrian",
"suffix": ""
},
{
"first": "David",
"middle": [
"C"
],
"last": "Kale",
"suffix": ""
},
{
"first": "Greg",
"middle": [
"Ver"
],
"last": "Steeg",
"suffix": ""
},
{
"first": "Aram",
"middle": [],
"last": "Galstyan",
"suffix": ""
}
],
"year": 2019,
"venue": "Scientific Data",
"volume": "6",
"issue": "1",
"pages": "",
"other_ids": {
"DOI": [
"10.1038/s41597-019-0103-9"
]
},
"num": null,
"urls": [],
"raw_text": "Hrayr Harutyunyan, Hrant Khachatrian, David C. Kale, Greg Ver Steeg, and Aram Galstyan. 2019. Multi- task learning and benchmarking with clinical time series data. Scientific Data, 6(1).",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Bidirectional LSTM-CRF models for sequence tagging",
"authors": [
{
"first": "Zhiheng",
"middle": [],
"last": "Huang",
"suffix": ""
},
{
"first": "Wei",
"middle": [],
"last": "Xu",
"suffix": ""
},
{
"first": "Kai",
"middle": [],
"last": "Yu",
"suffix": ""
}
],
"year": 2015,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Zhiheng Huang, Wei Xu, and Kai Yu. 2015. Bidi- rectional LSTM-CRF models for sequence tagging. CoRR, abs/1508.01991.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Convolutional neural networks for sentence classification",
"authors": [
{
"first": "Yoon",
"middle": [],
"last": "Kim",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
"volume": "",
"issue": "",
"pages": "1746--1751",
"other_ids": {
"DOI": [
"10.3115/v1/D14-1181"
]
},
"num": null,
"urls": [],
"raw_text": "Yoon Kim. 2014. Convolutional neural networks for sentence classification. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1746-1751,",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Association for Computational Linguistics",
"authors": [
{
"first": "Qatar",
"middle": [],
"last": "Doha",
"suffix": ""
}
],
"year": null,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Doha, Qatar. Association for Computational Lin- guistics.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Word embeddings for multi-label document classification",
"authors": [
{
"first": "Ladislav",
"middle": [],
"last": "Lenc",
"suffix": ""
},
{
"first": "Pavel",
"middle": [],
"last": "Kr\u00e1l",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the International Conference Recent Advances in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "431--437",
"other_ids": {
"DOI": [
"10.26615/978-954-452-049-6_057"
]
},
"num": null,
"urls": [],
"raw_text": "Ladislav Lenc and Pavel Kr\u00e1l. 2017. Word embed- dings for multi-label document classification. In Proceedings of the International Conference Recent Advances in Natural Language Processing, RANLP 2017, pages 431-437, Varna, Bulgaria. INCOMA Ltd.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "A comparison of event models for naive bayes text classification",
"authors": [
{
"first": "Andrew",
"middle": [],
"last": "Mccallum",
"suffix": ""
},
{
"first": "Kamal",
"middle": [],
"last": "Nigam",
"suffix": ""
}
],
"year": 2001,
"venue": "Work Learn Text Categ",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Andrew Mccallum and Kamal Nigam. 2001. A com- parison of event models for naive bayes text classifi- cation. Work Learn Text Categ, 752.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Linguistic regularities in continuous space word representations",
"authors": [
{
"first": "Tomas",
"middle": [],
"last": "Mikolov",
"suffix": ""
},
{
"first": "Geoffrey",
"middle": [],
"last": "Wen Tau Yih",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Zweig",
"suffix": ""
}
],
"year": 2013,
"venue": "HLT-NAACL",
"volume": "",
"issue": "",
"pages": "746--751",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tomas Mikolov, Wen tau Yih, and Geoffrey Zweig. 2013. Linguistic regularities in continuous space word representations. In HLT-NAACL, pages 746- 751. The Association for Computational Linguistics.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Scikit-learn: Machine learning in Python",
"authors": [
{
"first": "F",
"middle": [],
"last": "Pedregosa",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Varoquaux",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Gramfort",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Michel",
"suffix": ""
},
{
"first": "B",
"middle": [],
"last": "Thirion",
"suffix": ""
},
{
"first": "O",
"middle": [],
"last": "Grisel",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Blondel",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Prettenhofer",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Weiss",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Dubourg",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Vanderplas",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Passos",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Cournapeau",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Brucher",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Perrot",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Duchesnay",
"suffix": ""
}
],
"year": 2011,
"venue": "Journal of Machine Learning Research",
"volume": "12",
"issue": "",
"pages": "2825--2830",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duch- esnay. 2011. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Distributional semantics resources for biomedical text processing",
"authors": [
{
"first": "Sampo",
"middle": [],
"last": "Pyysalo",
"suffix": ""
},
{
"first": "Hans",
"middle": [],
"last": "Ginter",
"suffix": ""
},
{
"first": "T",
"middle": [],
"last": "Moen",
"suffix": ""
},
{
"first": "Sophia",
"middle": [],
"last": "Salakoski",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Ananiadou",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of Languages in Biology and Medicine",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sampo Pyysalo, F Ginter, Hans Moen, T Salakoski, and Sophia Ananiadou. 2013. Distributional semantics resources for biomedical text processing. Proceed- ings of Languages in Biology and Medicine.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Scalable multi-output label prediction: From classifier chains to classifier trellises",
"authors": [
{
"first": "Jesse",
"middle": [],
"last": "Read",
"suffix": ""
},
{
"first": "Luca",
"middle": [],
"last": "Martino",
"suffix": ""
},
{
"first": "Pablo",
"middle": [
"M"
],
"last": "Olmos",
"suffix": ""
},
{
"first": "David",
"middle": [],
"last": "Luengo",
"suffix": ""
}
],
"year": 2015,
"venue": "Pattern Recognition",
"volume": "48",
"issue": "6",
"pages": "2096--2109",
"other_ids": {
"DOI": [
"10.1016/j.patcog.2015.01.004"
]
},
"num": null,
"urls": [],
"raw_text": "Jesse Read, Luca Martino, Pablo M. Olmos, and David Luengo. 2015. Scalable multi-output label predic- tion: From classifier chains to classifier trellises. Pattern Recognition, 48(6):2096 -2109.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "A comparison of multi-label feature selection methods using the problem transformation approach",
"authors": [
{
"first": "Newton",
"middle": [],
"last": "Spola\u00f4r",
"suffix": ""
},
{
"first": "Maria",
"middle": [
"Carolina"
],
"last": "Everton Alvares Cherman",
"suffix": ""
},
{
"first": "Huei Diana",
"middle": [],
"last": "Monard",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Lee",
"suffix": ""
}
],
"year": 2013,
"venue": "Electron. Notes Theor. Comput. Sci",
"volume": "292",
"issue": "",
"pages": "135--151",
"other_ids": {
"DOI": [
"10.1016/j.entcs.2013.02.010"
]
},
"num": null,
"urls": [],
"raw_text": "Newton Spola\u00f4R, Everton Alvares Cherman, Maria Carolina Monard, and Huei Diana Lee. 2013. A comparison of multi-label feature selection methods using the problem transformation approach. Electron. Notes Theor. Comput. Sci., 292:135-151.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Research on topic relevancy of sentences based on hownet semantic computation",
"authors": [
{
"first": "Jinzhong",
"middle": [],
"last": "Xu",
"suffix": ""
},
{
"first": "Jie",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Xiaoming",
"middle": [],
"last": "Liu",
"suffix": ""
}
],
"year": 2009,
"venue": "9th International Conference on Hybrid Intelligent Systems (HIS 2009)",
"volume": "",
"issue": "",
"pages": "195--198",
"other_ids": {
"DOI": [
"10.1109/HIS.2009.150"
]
},
"num": null,
"urls": [],
"raw_text": "Jinzhong Xu, Jie Liu, and Xiaoming Liu. 2009. Re- search on topic relevancy of sentences based on hownet semantic computation. In 9th Interna- tional Conference on Hybrid Intelligent Systems (HIS 2009), August 12-14, 2009, Shenyang, China, pages 195-198. IEEE Computer Society.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Effective multi-label active learning for text classification",
"authors": [
{
"first": "Bishan",
"middle": [],
"last": "Yang",
"suffix": ""
},
{
"first": "Jian-Tao",
"middle": [],
"last": "Sun",
"suffix": ""
},
{
"first": "Tengjiao",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Zheng",
"middle": [],
"last": "Chen",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery and Data Mining",
"volume": "",
"issue": "",
"pages": "917--926",
"other_ids": {
"DOI": [
"10.1145/1557019.1557119"
]
},
"num": null,
"urls": [],
"raw_text": "Bishan Yang, Jian-Tao Sun, Tengjiao Wang, and Zheng Chen. 2009. Effective multi-label active learning for text classification. In Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pages 917-926.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"num": null,
"uris": null,
"type_str": "figure",
"text": "Sample of NCLEX competencies or activity statements."
},
"FIGREF1": {
"num": null,
"uris": null,
"type_str": "figure",
"text": "PrepU screenshot showing quiz results broken down according to NCLEX competencies."
},
"FIGREF2": {
"num": null,
"uris": null,
"type_str": "figure",
"text": "Editorial platform where editors manually tag questions with associated NCLEX competencies."
},
"FIGREF3": {
"num": null,
"uris": null,
"type_str": "figure",
"text": "Sample distributions of number of questions per NCLEX activity statement."
},
"FIGREF4": {
"num": null,
"uris": null,
"type_str": "figure",
"text": "Distribution of pairwise silhouette scores across activity statements."
},
"FIGREF5": {
"num": null,
"uris": null,
"type_str": "figure",
"text": "Architecture of convolutional neural network for multi-label text classification."
},
"FIGREF6": {
"num": null,
"uris": null,
"type_str": "figure",
"text": "Architecture of LSTM recurrent neural network for multi-label text classification."
},
"TABREF2": {
"num": null,
"content": "<table/>",
"html": null,
"type_str": "table",
"text": "AUC of different methods"
}
}
}
}