{
"paper_id": "U15-1006",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T03:10:01.052234Z"
},
"title": "Similarity Metrics for Clustering PubMed Abstracts for Evidence Based Medicine",
"authors": [
{
"first": "Hamed",
"middle": [],
"last": "Hassanzadeh",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "The University of Queensland",
"location": {
"settlement": "Brisbane",
"region": "QLD",
"country": "Australia"
}
},
"email": "[email protected]"
},
{
"first": "Diego",
"middle": [],
"last": "Moll\u00e1",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Macquarie University",
"location": {
"settlement": "Sydney",
"region": "NSW",
"country": "Australia"
}
},
"email": ""
},
{
"first": "Tudor",
"middle": [],
"last": "Groza",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Garvan Institute of Medical Research",
"location": {
"settlement": "Darlinghurst",
"region": "NSW",
"country": "Australia"
}
},
"email": "[email protected]"
},
{
"first": "Anthony",
"middle": [],
"last": "Nguyen",
"suffix": "",
"affiliation": {},
"email": "[email protected]"
},
{
"first": "Jane",
"middle": [
"2015"
],
"last": "Hunter",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "The University of Queensland",
"location": {
"settlement": "Brisbane",
"region": "QLD",
"country": "Australia"
}
},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "We present a clustering approach for documents returned by a PubMed search, which enable the organisation of evidence underpinning clinical recommendations for Evidence Based Medicine. Our approach uses a combination of document similarity metrics, which are fed to an agglomerative hierarchical clusterer. These metrics quantify the similarity of published abstracts from syntactic, semantic, and statistical perspectives. Several evaluations have been performed, including: an evaluation that uses ideal documents as selected and clustered by clinical experts; a method that maps the output of PubMed to the ideal clusters annotated by the experts; and an alternative evaluation that uses the manual clustering of abstracts. The results of using our similarity metrics approach shows an improvement over K-means and hierarchical clustering methods using TF-IDF.",
"pdf_parse": {
"paper_id": "U15-1006",
"_pdf_hash": "",
"abstract": [
{
"text": "We present a clustering approach for documents returned by a PubMed search, which enable the organisation of evidence underpinning clinical recommendations for Evidence Based Medicine. Our approach uses a combination of document similarity metrics, which are fed to an agglomerative hierarchical clusterer. These metrics quantify the similarity of published abstracts from syntactic, semantic, and statistical perspectives. Several evaluations have been performed, including: an evaluation that uses ideal documents as selected and clustered by clinical experts; a method that maps the output of PubMed to the ideal clusters annotated by the experts; and an alternative evaluation that uses the manual clustering of abstracts. The results of using our similarity metrics approach shows an improvement over K-means and hierarchical clustering methods using TF-IDF.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Evidence Based Medicine (EBM) is about individual patients care and providing the best treatments using the best available evidence. The motivation of EBM is that clinicians would be able to make more judicious decisions if they had access to up-to-date clinical evidence relevant to the case at hand. This evidence can be found in scholarly publications available in repositories such as PubMed 1 . The volume of available publications is enormous and expanding. PubMed repository, for example, indexes over 24 million abstracts. As a result, methods are required to present relevant recommendations to the clinician in a manner that highlights the clinical evidence and its quality.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The EBMSummariser corpus (Moll\u00e1 and Santiago-martinez, 2011 ) is a collection of evidence-based recommendations published in the Clinical Inquiries column of the Journal of Family Practice 2 , together with the abstracts of publications that provide evidence for the recommendations.",
"cite_spans": [
{
"start": 25,
"end": 59,
"text": "(Moll\u00e1 and Santiago-martinez, 2011",
"ref_id": "BIBREF14"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Visual inspection of the EBMSummariser corpus suggests that a combination of information retrieval, clustering and multi-document summarisation would be useful to present the clinical recommendations and the supporting evidence to the clinician. Figure 1 shows the title (question) and abstract (answer) associated with one recommendation (Mounsey and Henry, 2009) of the EBM-Summariser corpus. The figure shows three main recommendations for treatments to hemorrhoids. Each treatment is briefly presented, and the quality of each recommendation is graded (A, B, C) according to the Strength of Recommendation Taxonomy (SORT) (Ebell et al., 2004) . Following the abstract of the three recommendations (not shown in Figure 1 ), the main text provides the details of the main evidence supporting each treatment, together with the references of relevant publications. A reference may be used for recommending several of the treatments listed in the recommendations. Each recommendation is treated in this study as a cluster of references for evaluation purposes, and the corpus therefore contains overlapping clusters.",
"cite_spans": [
{
"start": 339,
"end": 364,
"text": "(Mounsey and Henry, 2009)",
"ref_id": "BIBREF15"
},
{
"start": 626,
"end": 646,
"text": "(Ebell et al., 2004)",
"ref_id": "BIBREF1"
}
],
"ref_spans": [
{
"start": 246,
"end": 254,
"text": "Figure 1",
"ref_id": "FIGREF0"
},
{
"start": 715,
"end": 723,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "It has been observed that a simple K-means clustering approach provides a very strong base-Which treatments work best for Hemorrhoids? Excision is the most effective treatment for thrombosed external hemorrhoids (strength of recommendation [SOR] : B, retrospective studies). For prolapsed internal hemorrhoids, the best definitive treatment is traditional hemorrhoidectomy (SOR: A, systematic reviews). Of nonoperative techniques, rubber band ligation produces the lowest rate of recurrence (SOR: A, systematic reviews). line for non-overlapping clustering of the EBM-Summariser corpus (Shash and Moll\u00e1, 2013; Ekbal et al., 2013) . Past work was based on the clustering of the documents included in the EBMSummariser corpus. But in a more realistic scenario one would need to cluster the output from a search engine. Such output would be expected to produce much noisier data that might not be easy to cluster.",
"cite_spans": [
{
"start": 586,
"end": 609,
"text": "(Shash and Moll\u00e1, 2013;",
"ref_id": "BIBREF18"
},
{
"start": 610,
"end": 629,
"text": "Ekbal et al., 2013)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In this paper, we cluster documents retrieved from PubMed searches. We propose a hierarchical clustering method that uses custom-defined similarity metrics. We perform a couple of evaluations using the output of PubMed searches and the EBMSummariser corpus. Our results indicate that this method outperforms a K-means baseline for both the EBMSummariser corpus and PubMed's retrieved documents.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The remainder of the paper is structured as follows. Section 2 describes related work. Section 3 provides details of the clustering approach and the evaluation approaches. Section 4 presents the results, and Section 5 concludes this paper.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Document clustering is an unsupervised machine learning task that aims to discover natural groupings of data and has been used for EBM in several studies. Lin and Demner-Fushman (2007) clustered MEDLINE citations based on the occurrence of specific mentions of interventions in the document abstracts. used K-means clustering to group PubMed query search results based on TF-IDF. Ekbal et al. (2013) used genetic algorithms and multi-objective optimisation to cluster the abstracts referred in the EBMSummariser corpus, and in general observed that it was difficult to improve on Shash and Moll\u00e1 (2013)'s K-means baseline, which uses TF-IDF similar to Lin and Demner-Fushman (2007) .",
"cite_spans": [
{
"start": 155,
"end": 184,
"text": "Lin and Demner-Fushman (2007)",
"ref_id": "BIBREF7"
},
{
"start": 380,
"end": 399,
"text": "Ekbal et al. (2013)",
"ref_id": "BIBREF2"
},
{
"start": 652,
"end": 681,
"text": "Lin and Demner-Fushman (2007)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "It can be argued that clustering the abstracts that are cited in the EBMSummariser corpus is easier than clustering those from Pubmed search results, since the documents in the corpus have been curated by experts. As a result, all documents are relevant to the query, and they would probably cluster according to the criteria determined by the expert. However, in a more realistic scenario the documents that need to be clustered are frequently the output of a search engine. Therefore, there might be documents that are not relevant, as well as duplicates and redundant information. An uneven distribution of documents among the clusters may also result.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "There are several approaches to cluster search engine results (Carpineto et al., 2009) . A common approach is to cluster the documents snippets (i.e., the brief summaries appearing in the search results page) instead of the entire documents (Ferragina and Gulli, 2008). Our approach for clustering search engine results is similar to this group of approaches, since we only use the abstract of publications instead of the whole articles. The abstracts of scholarly publications usually contain the key information that is reported in the document. Hence, it can be considered that there is less noise in abstracts compared to the entire document (from a document clustering perspective). A number of clustering approaches can then be employed to generate meaningful clusters of documents from search results (Zamir and Etzioni, 1998; Carpineto et al., 2009) .",
"cite_spans": [
{
"start": 62,
"end": 86,
"text": "(Carpineto et al., 2009)",
"ref_id": "BIBREF0"
},
{
"start": 808,
"end": 833,
"text": "(Zamir and Etzioni, 1998;",
"ref_id": "BIBREF20"
},
{
"start": 834,
"end": 857,
"text": "Carpineto et al., 2009)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "2"
},
{
"text": "In this section we describe an alternative to Kmeans clustering over TF-IDF data. In particular, we devise separate measures of document similarity and apply hierarchical clustering using our custom matrix of similarities.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Materials and Method",
"sec_num": "3"
},
{
"text": "We first introduce the proposed semantic similarity measures for quantifying the similarity of abstracts. We then describe the process of preparing and annotating appropriate data for clustering semantically similar abstracts. Finally, the experimental set up will be explained.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Materials and Method",
"sec_num": "3"
},
{
"text": "Prior to describing the similarity measures, a glossary of the keywords that are used in this section is introduced:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Materials and Method",
"sec_num": "3"
},
{
"text": "Effective words: The words that have noun, verb, and adjective Part of Speech (POS) roles.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Materials and Method",
"sec_num": "3"
},
{
"text": "Effective lemmas: Lemma (canonical form) of effective words of an abstract.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Materials and Method",
"sec_num": "3"
},
{
"text": "Skipped bigrams: The pairs of words which are created by combining two words in an abstract that are located in arbitrary positions.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Materials and Method",
"sec_num": "3"
},
{
"text": "In order to be able to group the abstracts which are related to the same answer (recommendation) for a particular question, the semantic similarity of the abstracts was examined. A number of abstractlevel similarity measures were devised to quantify the semantic similarity of a pair of abstracts. Since formulating the similarity of two natural language pieces of text is a complex task, we performed a comprehensive quantification of textual semantic similarity by comparing two abstracts from different perspectives. Each of the proposed similarity measures represents a different view of the similarity of two abstracts, and therefore the sum of all of them represents a combined view of each of these perspectives. The details of these measures can be found below. Note that all the similarity measures have a normalised value between zero (lowest similarity) and one (highest similarity).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quantifying similarity of PubMed abstracts",
"sec_num": "3.1"
},
{
"text": "Word-level similarity: This measure calculates the number of overlapping words in two abstracts which is then normalised by the size of the longer abstract (in terms of the number of all words). The words are compared in their original forms in the abstracts (even if there were multiple occurrences). Equation 1depicts the calculation of Word-level Similarity (WS).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quantifying similarity of PubMed abstracts",
"sec_num": "3.1"
},
{
"text": "W S(A 1 , A 2 ) = w i \u2208A 1 1 if w i is in A 2 0 Otherwise L",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quantifying similarity of PubMed abstracts",
"sec_num": "3.1"
},
{
"text": "(1) where A 1 and A 2 refer to the bags of all words in two given abstracts (including multiple occurrences of words), and L is the size of the longest abstract in the pair.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quantifying similarity of PubMed abstracts",
"sec_num": "3.1"
},
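{
"text": "As an illustration only (the function and variable names below are ours, not the authors'), a minimal Python sketch of Equation 1, assuming each abstract is already tokenised into a list of words, could look as follows:\n\ndef word_level_similarity(a1, a2):\n    # For each token of a1 (repeated occurrences included), add 1 if the\n    # word also occurs anywhere in a2 (the indicator in Equation 1).\n    vocab2 = set(a2)\n    overlap = sum(1 for w in a1 if w in vocab2)\n    # L: number of words in the longer abstract of the pair.\n    return overlap / max(len(a1), len(a2))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quantifying similarity of PubMed abstracts",
"sec_num": "3.1"
},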
{
"text": "Word's lemma similarity: This measure is calculated similarly to the previous measure, but the lemma of words from a pair of abstracts are compared to each other, instead of their original display forms in the text, using WordNet (Miller, 1995) . For example, for a given pair of words, such as criteria and corpora, their canonical forms (i.e., criterion and corpus, respectively) are looked up in WordNet prior to performing the comparison.",
"cite_spans": [
{
"start": 230,
"end": 244,
"text": "(Miller, 1995)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Quantifying similarity of PubMed abstracts",
"sec_num": "3.1"
},
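{
"text": "A hedged sketch of this measure, using NLTK's WordNet lemmatiser as one possible implementation (the paper only specifies WordNet; the helper names are hypothetical):\n\nfrom nltk.stem import WordNetLemmatizer\n\ndef lemma_similarity(a1, a2):\n    # Map each word to its WordNet lemma, then apply the same overlap\n    # count and normalisation as Equation 1.\n    wnl = WordNetLemmatizer()\n    l1 = [wnl.lemmatize(w) for w in a1]\n    l2 = [wnl.lemmatize(w) for w in a2]\n    vocab2 = set(l2)\n    return sum(1 for w in l1 if w in vocab2) / max(len(l1), len(l2))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quantifying similarity of PubMed abstracts",
"sec_num": "3.1"
},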
{
"text": "Set intersection of effective lemmas: The sets of lemmas of effective words of abstract pairs are compared. The number of overlapping words (or the intersection of two sets) is normalised by the size of the smaller abstract. In contrast to the previous measure, only unique effective lemmas participate in the calculation of this measure. This measure is calculated as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quantifying similarity of PubMed abstracts",
"sec_num": "3.1"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "SEL(A 1 , A 2 ) = |A set 1 \u2229 A set 2 | S",
"eq_num": "(2)"
}
],
"section": "Quantifying similarity of PubMed abstracts",
"sec_num": "3.1"
},
{
"text": "In Equation 2, A set 1 and A set 2 are the sets of effective lemmas of two abstracts, and S is the size of the smallest abstract in a pair.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quantifying similarity of PubMed abstracts",
"sec_num": "3.1"
},
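{
"text": "A minimal sketch of Equation 2 (our illustration; it assumes the effective lemmas have already been extracted, and reads S as the number of words of the smaller abstract):\n\ndef set_intersection_of_effective_lemmas(lemmas1, lemmas2):\n    # Only unique effective lemmas participate in this measure.\n    s1, s2 = set(lemmas1), set(lemmas2)\n    # S: size of the smaller abstract of the pair.\n    return len(s1 & s2) / min(len(lemmas1), len(lemmas2))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Quantifying similarity of PubMed abstracts",
"sec_num": "3.1"
},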
{
"text": "We generate sliding windows of different sizes of words, from a window of two words up to the size of the longest sentence in a pair of abstracts. We compute the number of equal sequences of words of two abstracts (irrespective of length). Also, we keep the size of the longest equal sequence of words that the two abstracts share together. Hence, this results in two similarity measures; (i) the number of shared sequences of different sizes, and (ii) the size of the longest shared sequence. Due to the variety of sizes of sentences / abstracts and therefore varying sizes and number of sequences, we normalise each of these measures to reach a value between zero and one. In addition, following the same rationale, sequence-based measures are calculated by only considering effective words in abstracts, and alternatively, from a grammatical perspective, by only considering POS tags of the constituent words of abstracts. The number of shared sequences (or Shared Sequence Frequency -SSF) for two given abstracts (i.e., A 1 and A 2 ) is calculated as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sequence of words overlap:",
"sec_num": null
},
{
"text": "SSF (A 1 , A 2 ) = M l=2 S l \u2208A 1 1 if S l \u2208 A 2 0 Otherwise N M",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sequence of words overlap:",
"sec_num": null
},
{
"text": "(3) In Equation 3, M is the size of the longest sentence in both abstracts and N is the number of available sequences (i.e., S in formula) with size l.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sequence of words overlap:",
"sec_num": null
},
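{
"text": "The following sketch shows one way Equation 3 could be implemented (our illustration, treating each abstract as a flat token list and taking m, the length of the longest sentence, as a parameter):\n\ndef shared_sequence_frequency(tokens1, tokens2, m):\n    # For each window size l (2..m), count the windows of abstract 1\n    # that also occur in abstract 2, normalise by the number N of\n    # available windows, and average over all sizes (Equation 3).\n    def windows(tokens, l):\n        return [tuple(tokens[i:i + l]) for i in range(len(tokens) - l + 1)]\n    total = 0.0\n    for l in range(2, m + 1):\n        w1 = windows(tokens1, l)\n        w2 = set(windows(tokens2, l))\n        if w1:\n            total += sum(1 for w in w1 if w in w2) / len(w1)\n    return total / m",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sequence of words overlap:",
"sec_num": null
},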
{
"text": "POS tags sequence alignment: For this similarity measure, a sequence of the POS tags of words in an abstract is generated. The Needleman-Wunsch algorithm (Needleman and Wunsch, 1970) was employed for aligning two sequences of POS tags from a pair of abstracts to find their similarity ratio. The Needleman-Wunsch algorithm is an efficient approach for finding the best alignment between two sequences, and has been successfully applied, in particular in bioinformatics, to measure regions of similarity in DNA, RNA or protein sequences.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sequence of words overlap:",
"sec_num": null
},
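{
"text": "For concreteness, a small self-contained Needleman-Wunsch sketch over POS-tag sequences (our illustration; the scoring parameters and the normalisation to a [0, 1] ratio are assumptions, since the paper does not state them):\n\ndef needleman_wunsch_ratio(seq1, seq2, match=1, mismatch=-1, gap=-1):\n    # Global alignment of two POS-tag sequences; the optimal score is\n    # scaled by the best achievable score (all positions matching).\n    if not seq1 or not seq2:\n        return 0.0\n    n, m = len(seq1), len(seq2)\n    f = [[0] * (m + 1) for _ in range(n + 1)]\n    for i in range(1, n + 1):\n        f[i][0] = i * gap\n    for j in range(1, m + 1):\n        f[0][j] = j * gap\n    for i in range(1, n + 1):\n        for j in range(1, m + 1):\n            s = match if seq1[i - 1] == seq2[j - 1] else mismatch\n            f[i][j] = max(f[i - 1][j - 1] + s, f[i - 1][j] + gap, f[i][j - 1] + gap)\n    return max(f[n][m], 0) / max(n, m)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sequence of words overlap:",
"sec_num": null
},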
{
"text": "Jaccard Similarity: An abstract can be considered as a bag of words. To incorporate this perspective, we calculate the Jaccard similarity coefficient of a pair of abstracts. We also calculate the Jaccard similarity of sets of effective lemmas of abstract pairs. The former similarity measure shows a very precise matching of the occurrences of words in exactly the same form (singular / plural, noun / adjective / adverb, and so on), while the latter measure considers the existence of words in their canonical forms.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sequence of words overlap:",
"sec_num": null
},
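{
"text": "A minimal sketch of the two Jaccard variants (our illustration): pass the surface words for the first variant and the effective lemmas for the second.\n\ndef jaccard_similarity(items1, items2):\n    # Jaccard coefficient over the two bags taken as sets.\n    s1, s2 = set(items1), set(items2)\n    union = s1 | s2\n    return len(s1 & s2) / len(union) if union else 0.0",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sequence of words overlap:",
"sec_num": null
},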
{
"text": "Abstract lengths: Comparing two abstracts from a word-level perspective, the relative length of two abstracts in terms of their words (length of smaller abstracts over the longer one) provides a simple measure of similarity. Although this can be considered as a naive attribute of a pair of abstracts, it has been observed that this measure can be useful when combined with other more powerful measures .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sequence of words overlap:",
"sec_num": null
},
{
"text": "Cosine similarity of effective lemmas: In order to calculate the cosine similarity of the effective lemmas of a pair of abstracts, we map the string vector of the sequence of effective lemmas to its corresponding numerical vector. The numerical vector, with the dimension equal to the number of all unique effective lemmas of both abstracts, contains the frequency of occurrences of each lemma in the pair. For example, for the two sequences [A, B, A, C, B] and [C, A, D, B, A] the numerical vectors of the frequencies of the terms A, B, C and D for the sequences are [2, 2, 1, 0] and [2, 1, 1, 1], respectively. Equation (4) depicts the way the cosine similarity is calculated for two given abstracts A 1 and A 2 .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sequence of words overlap:",
"sec_num": null
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "Cosine(A 1 , A 2 ) = V 1 .V 2 ||V 1 ||||V 2 ||",
"eq_num": "(4)"
}
],
"section": "Sequence of words overlap:",
"sec_num": null
},
{
"text": "where V 1 and V 2 are the vector of lemmas of the effective words of two abstracts in a pair, and V 1 .V 2 denotes the dot product of two vectors which is then divided by the product of their norms (i.e. ||V 1 ||||V 2 ||).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sequence of words overlap:",
"sec_num": null
},
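{
"text": "A compact sketch of Equation 4 (our illustration), building the frequency vectors over the union vocabulary of effective lemmas:\n\nimport math\nfrom collections import Counter\n\ndef cosine_similarity(lemmas1, lemmas2):\n    c1, c2 = Counter(lemmas1), Counter(lemmas2)\n    # Dot product over the union vocabulary; absent lemmas count as 0.\n    dot = sum(c1[t] * c2[t] for t in set(c1) | set(c2))\n    norm1 = math.sqrt(sum(v * v for v in c1.values()))\n    norm2 = math.sqrt(sum(v * v for v in c2.values()))\n    return dot / (norm1 * norm2) if norm1 and norm2 else 0.0",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Sequence of words overlap:",
"sec_num": null
},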
{
"text": "The set of the skipped bigrams of two abstracts can be used as a basis for similarity computation. We create the skipped bigrams of the effective words and then calculate the intersection of each set of these bigrams with the corresponding set from the other abstract in a pair.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Skipped bigram similarities:",
"sec_num": null
},
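{
"text": "One way to realise this measure (our illustration; the paper does not state the normalisation, so dividing by the smaller bigram set is an assumption):\n\nfrom itertools import combinations\n\ndef skipped_bigram_similarity(words1, words2):\n    # All order-preserving word pairs with arbitrary gaps (skip-bigrams).\n    b1 = set(combinations(words1, 2))\n    b2 = set(combinations(words2, 2))\n    if not b1 or not b2:\n        return 0.0\n    return len(b1 & b2) / min(len(b1), len(b2))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Skipped bigram similarities:",
"sec_num": null
},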
{
"text": "In order to assign an overall similarity score to any two given abstracts, the (non-weighted) average of all of the metrics listed above is calculated and is considered as the final similarity score. These metrics compare the abstracts from different perspectives, and hence, the combination of all of them results in a comprehensive quantification of the similarity of abstracts. This averaging technique has been shown to provide good estimation of the similarity of sentences when compared to human assessments both in general English and Biomedical domain corpora .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Combining similarities",
"sec_num": "3.2"
},
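{
"text": "Since every metric is already normalised to [0, 1], the combination step reduces to an unweighted mean; a sketch (our illustration):\n\ndef overall_similarity(a1, a2, metrics):\n    # metrics: the similarity functions described above, each mapping a\n    # pair of abstracts to a score in [0, 1].\n    scores = [metric(a1, a2) for metric in metrics]\n    return sum(scores) / len(scores)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Combining similarities",
"sec_num": "3.2"
},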
{
"text": "In order to prepare a realistic testbed, we generated a corpus of PubMed abstracts. The abstracts are retrieved and serialised from the PubMed repository using E-utilities URLs 3 . PubMed is queried by using the 465 medical questions, unmodified, from the EBMSummariser corpus (Moll\u00e1 and Santiago-martinez, 2011) . The maximum number of search results is set to 20,000 (if any) and the results are sorted based on relevance using PubMed's internal relevance criteria. 4 In total, 212,393 abstracts were retrieved and serialised. The distributions of the retrieved abstracts per question were very imbalanced. There are a considerable number of questions with only one or no results from the PubMed search engine (39% of the questions). Figure 2 shows the frequency of the retrieved results and the number of questions with a given number and/or range of search results. Some types of published studies may contain better quality of evidence than others, and some, such as opinion studies, provide very little evidence, if any at all. In addition, it is common to have a large number of search results for a given query. Hence, in order to find EBM-related publications as well as to ensure the quality and higher relevance of the abstracts, the retrieved abstracts were filtered based on their publication types. The types of publications are provided in the metadata returned by the PubMed abstracts. To determine the filters, we performed statistical analysis over available corpora in the EBM domain, in particular, EBMsummariser corpus (includes 2,658 abstracts), NICTA-PIBOSO corpus (includes 1,000 abstracts) (Kim et al., 2011), and our retrieved PubMed documents (includes 212,393 abstracts) -more details about the corpora can be found in Malmasi et al. (2015) . Table 1 shows the frequency of the most frequent publication types in these EBM corpora. There are 72 different types of publications in PubMed 5 , but we limited the retrieved abstracts to the seven more frequently occurring publication types in the EBM domain. Whenever we needed to reduce the number of retrieved abstracts from PubMed search results, we filter the results and only keep the abstracts with the mentioned publication types in Table 1 . Note that each PubMed abstract can have more than one publication type. For example, a \"Clinical Trial\" abstract can also be a \"Case Report\" and so on. Hence, the sum of the percentages in Table 1 may exceed 100%. We assume that all the documents are informative when the number of returned search results is less than 50, and hence, no filtering was applied in these cases.",
"cite_spans": [
{
"start": 277,
"end": 312,
"text": "(Moll\u00e1 and Santiago-martinez, 2011)",
"ref_id": "BIBREF14"
},
{
"start": 468,
"end": 469,
"text": "4",
"ref_id": null
},
{
"start": 1747,
"end": 1768,
"text": "Malmasi et al. (2015)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [
{
"start": 736,
"end": 744,
"text": "Figure 2",
"ref_id": "FIGREF1"
},
{
"start": 1771,
"end": 1778,
"text": "Table 1",
"ref_id": "TABREF0"
},
{
"start": 2215,
"end": 2222,
"text": "Table 1",
"ref_id": "TABREF0"
}
],
"eq_spans": [],
"section": "Data set preparation and evaluation methods",
"sec_num": "3.3"
},
{
"text": "After retrieving the documents, in order to be able to evaluate the automatically-generated clusters of retrieved abstracts we devised two scenarios for generating gold standard clusters: Semantic Similarity Mapping and Manual Clustering.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data set preparation and evaluation methods",
"sec_num": "3.3"
},
{
"text": "We generated the gold standard clusters automatically using the cluster information from the EBMSummariser corpus. The answers for each question is known according to this corpus; each answer forms a cluster and citations associated with that answer are assigned to the respective cluster. In order to extend the gold standard to include all the retrieved PubMed abstracts, each abstract was assigned to one of these clusters. To assign an abstract to a cluster, we compute the similarity between the abstract and each of the cited abstracts for the question. To achieve this, we used our proposed combination of similarity measures. The abstract is assigned to the cluster with the highest average similarity. For example, suppose that for a given question there are three clusters of abstracts from the EBMSummariser corpus. By following this scenario, we assign each of the retrieved documents to one of these three clusters. We first calculate the average similarity of a given retrieved document to the documents in the three clusters. The cluster label (i.e., 1, 2, or 3 in our example) for this given retrieved abstract is then adopted from the cluster with which it has the highest average similarity. This process is iterated to assign cluster labels to all the retrieved abstracts. However, it could occur that some clusters may not have any abstracts assigned to them. For the mentioned example, this will result when the retrieved documents would be assigned only to two of the three clusters. When that happens, the question is ignored to avoid a possible bias due to cluster (Manning et al., 2008) . In this scenario, a subset of the top k retrieved documents is selected for annotation. To select the top k documents we use the above clusters automatically generated by our system. In order to be able to evaluate these automatically generated clusters, for each of them we determine its central document. A document is considered the central document of a cluster if it has the highest average similarity to all other documents in the same cluster. We then select the k documents that are most similar to the central document. The intuition is that if a document is close to the centre of a cluster, it should be a good representation of the cluster and it would less likely be noise. Two annotators (authors of this paper) manually re-clustered the selected top k documents following an annotation guideline. The annotators are not restricted to group the documents to a specific number of clusters (e.g., to the same number of clusters as the EBMSummariser corpus). These manually generated clusters are then used as the gold standard clusters for the Manual Clustering evaluation scenario. The system is then asked to cluster the output of the search engine. Then, the documents from the subset that represents the pool of documents are evaluated against the manually curated clusters. The value of k in our experiment was set to two per cluster. In total, 10 queries (with different numbers of original clusters, from 2 to 5 clusters) were assessed for a total of 62 PubMed abstracts.",
"cite_spans": [
{
"start": 1589,
"end": 1611,
"text": "(Manning et al., 2008)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Semantic Similarity Mapping scenario:",
"sec_num": null
},
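{
"text": "The mapping step can be summarised with the following sketch (our illustration of the procedure described above; names are hypothetical):\n\ndef assign_to_cluster(doc, clusters, similarity):\n    # clusters: lists of cited abstracts, one list per answer in the\n    # EBMSummariser corpus. The retrieved document is mapped to the\n    # cluster whose members have the highest average similarity to it.\n    def avg_sim(cluster):\n        return sum(similarity(doc, d) for d in cluster) / len(cluster)\n    return max(range(len(clusters)), key=lambda i: avg_sim(clusters[i]))",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Semantic Similarity Mapping scenario:",
"sec_num": null
},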
{
"text": "We employed a Hierarchical Clustering (HC) algorithm in order to cluster the retrieved abstracts (Manning et al., 2008) . HC methods construct clusters by recursively partitioning the instances in either a top-down or a bottom-up fashion (Maimon and Rokach, 2005) . A hierarchical algorithm, such as Hierarchical Agglomerative Clustering (HAC), can use as input any similarity matrix, and is therefore suitable for our approach in which we calculate the similarity of documents from different perspectives. As a baseline approach, we use K-means clustering (KM) with the same pre-processing as reported by Shash and Moll\u00e1 (2013) , namely we used the whole XML files output by PubMed and removed punctuation and numerical characters. We then calculated the TF-IDF of the abstracts, normalised each TF-IDF vector by dividing it by its Euclidean norm, and applied K-means clustering over this information. We employed the HC and KM implementations available in the R package (R Core Team, 2015).",
"cite_spans": [
{
"start": 97,
"end": 119,
"text": "(Manning et al., 2008)",
"ref_id": "BIBREF12"
},
{
"start": 238,
"end": 263,
"text": "(Maimon and Rokach, 2005)",
"ref_id": "BIBREF10"
},
{
"start": 606,
"end": 628,
"text": "Shash and Moll\u00e1 (2013)",
"ref_id": "BIBREF18"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental setup",
"sec_num": "3.4"
},
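{
"text": "The experiments used the R implementations; for readers who prefer Python, an equivalent sketch with SciPy is given below (our illustration: the linkage method and the conversion of similarities to distances are assumptions, as the paper does not specify them):\n\nimport numpy as np\nfrom scipy.cluster.hierarchy import fcluster, linkage\nfrom scipy.spatial.distance import squareform\n\ndef hierarchical_clusters(sim_matrix, n_clusters):\n    # Turn the pairwise similarity matrix (values in [0, 1]) into\n    # distances, build an agglomerative dendrogram, and cut it into\n    # the requested number of clusters.\n    dist = 1.0 - np.asarray(sim_matrix, dtype=float)\n    np.fill_diagonal(dist, 0.0)\n    condensed = squareform(dist, checks=False)\n    tree = linkage(condensed, method='average')\n    return fcluster(tree, t=n_clusters, criterion='maxclust')",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental setup",
"sec_num": "3.4"
},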
{
"text": "We use the Rand Index metric to report the performance of the clustering approaches. Rand Index (RI) is a standard measure for comparing clusterings. It measures the percentage of clustering decisions on pairs of documents that are correct (Manning et al., 2008) . Eq. 5 depicts the calculation of RI.",
"cite_spans": [
{
"start": 240,
"end": 262,
"text": "(Manning et al., 2008)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental setup",
"sec_num": "3.4"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "RI = T P + T N T P + F P + F N + T N",
"eq_num": "(5)"
}
],
"section": "Experimental setup",
"sec_num": "3.4"
},
{
"text": "A true positive (TP) refers to assigning two similar documents to the same cluster, while a true negative (TN) is a decision of assigning two dissimilar documents to different clusters. A false positive (FP) occurs when two dissimilar docu- ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental setup",
"sec_num": "3.4"
},
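{
"text": "A direct transcription of Equation 5 (our illustration), counting pairwise agreement between the gold and predicted clusterings:\n\nfrom itertools import combinations\n\ndef rand_index(gold, predicted):\n    # gold / predicted: one cluster label per document. A pair counts as\n    # correct (TP or TN) when both clusterings agree on whether the two\n    # documents belong together.\n    pairs = list(combinations(range(len(gold)), 2))\n    agree = sum(1 for i, j in pairs\n                if (gold[i] == gold[j]) == (predicted[i] == predicted[j]))\n    return agree / len(pairs)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental setup",
"sec_num": "3.4"
},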
{
"text": "In this section, the results from applying our similarity metrics in order to cluster abstracts in the EBM domain are presented. We first introduce our experiments on clustering the abstracts from the EBMSummariser corpus and then we report the results over the retrieved abstracts from PubMed.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental Results",
"sec_num": "4"
},
{
"text": "In order to evaluate our clustering approach using our similarity metrics, we first employ the EBM-Summariser corpus. As previously mentioned, this corpus contains a number of clinical inquiries and their answers. In each of these answers, which are provided by medical experts, one or more citations to published works are provided with their PubMed IDs. We apply our clustering approach to group all the citations mentioned for a question and then compare the system generated clusters with those of the human experts. Table 2 shows the results of using Hierarchical Clustering (HC) and K-means clustering (KM) using the proposed similarity measures and TF-IDF information. In order to have a consistent testbed with our experiments over retrieved documents, the reported results of the corpus are over a subset of the available questions of the EBMSummariser corpus, that is, those 129 questions which were found valid for evaluation in the Semantic similarity mapping scenario in Section 3.3. Note the improvement of the Rand Index against the TF-IDF methods, i.e., 0.0775. This difference between HC using our similarity metrics and the next best approach, namely KM clustering using TF-IDF, is statistically significant (Wilcoxon signed rank test with continuity correction; p-value = 0.01092).",
"cite_spans": [],
"ref_spans": [
{
"start": 521,
"end": 528,
"text": "Table 2",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Results on EBMSummariser corpus",
"sec_num": "4.1"
},
{
"text": "Our implementation of KM used 100 random starts. It should also be noted that KM can not be used over our similarity metrics, because the final representation of these metrics are the quantification of the similarity of a pair of documents and not a representation of a single document (i.e., the appropriate input for KM clustering).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Results on EBMSummariser corpus",
"sec_num": "4.1"
},
{
"text": "As mentioned in Section 3.3, we devised two methods for evaluating the system's generated clusters: the manual scenario, and the semantic similarity mapping scenario. The results of the clustering approach are reported for these two scenarios in Table 3 and Table 4 , respectively. Table 3 shows the results for the manual evaluation. It reports the comparison of the system's results against the manually clustered abstracts from the two annotators. This evaluation scenario shows that, in most cases, the HC approach that employs our similarity metrics produced the best Rand Index. The only exception occurs over the Annotator 1 clusters, where KM using TF-IDF gained better results (i.e., 0.4038 RI). However, for this exception, it is noticed that this difference between the HC approach that uses our similarity metrics and KM using TF-IDF is not statistically significant (p-value=0.5). Table 3 also shows that the results are similar for two of the three approaches on each annotator, which suggests close agreement among annotators. Note, incidentally, that the annotations were of clusters, and not of labels, and therefore standard inter-annotator agreements like Cohen's Kappa cannot be computed. Table 4 shows the results of the methods by using the semantic similarity mapping evaluation approach. It can be observed that, similar to the manual evaluation scenario, HC clustering with the similarity metrics gained the best Rand Index. Finally, although the absolute values of Rand Index are much higher than that from the manual clustering evaluations, the difference between HC on our similarity metrics and the HC and KM methods on TF-IDF information is not statistically significant (p-value=0.1873).",
"cite_spans": [],
"ref_spans": [
{
"start": 246,
"end": 265,
"text": "Table 3 and Table 4",
"ref_id": "TABREF2"
},
{
"start": 282,
"end": 289,
"text": "Table 3",
"ref_id": "TABREF2"
},
{
"start": 894,
"end": 901,
"text": "Table 3",
"ref_id": "TABREF2"
},
{
"start": 1209,
"end": 1216,
"text": "Table 4",
"ref_id": "TABREF3"
}
],
"eq_spans": [],
"section": "Results on PubMed documents",
"sec_num": "4.2"
},
{
"text": "To compare with the results reported in the literature, we computed the weighted mean cluster Entropy for the entire set of 456 questions. Ta- ble 5 shows our results and the results reported by Shash and Moll\u00e1 (2013) . The entropy generated by the HC system using our similarity metrics was a small improvement (lower entropy values are better) on the KM baseline (our replication of K-means using TF-IDF), which is statistically significant (p-value=0.00276). However, we observe that our KM baseline obtains a higher entropy than that reported in Shash and Moll\u00e1 (2013) , even though our replication would have the same settings as their system. Investigation into the reason for the difference is beyond the scope of this paper.",
"cite_spans": [
{
"start": 195,
"end": 217,
"text": "Shash and Moll\u00e1 (2013)",
"ref_id": "BIBREF18"
},
{
"start": 550,
"end": 572,
"text": "Shash and Moll\u00e1 (2013)",
"ref_id": "BIBREF18"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Results on PubMed documents",
"sec_num": "4.2"
},
{
"text": "In this paper we have presented a clustering approach for documents retrieved via a set of PubMed searches. Our approach uses hierarchical clustering with a combination of similarity metrics and it reveals a significant improvement over a Kmeans baseline with TF-IDF reported in the literature (Shash and Moll\u00e1, 2013; Ekbal et al., 2013) . We have also proposed two possible ways to evaluate the clustering of documents retrieved by PubMed. In the semantic similarity mapping evaluation, we automatically mapped each retrieved document to a cluster provided by the corpus. In the manual clustering evaluation, we selected the top k documents and manually clustered them to form the annotated clusters.",
"cite_spans": [
{
"start": 294,
"end": 317,
"text": "(Shash and Moll\u00e1, 2013;",
"ref_id": "BIBREF18"
},
{
"start": 318,
"end": 337,
"text": "Ekbal et al., 2013)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "Our experiments show that using semantic similarity of abstracts can help gain better clusters of related published studies, and hence, can provide an appropriate platform to summarise multiple similar documents. Further research will focus on employing domain-specific concepts in similarity metrics calculation as well as using tailored NLP tools in biomedical domain, such as BioLemmatizer (Liu et al., 2012) . Further investigations can also be performed in order to track the effects and contribution of each of the proposed similarity measures on formulating the abstract similarities, and hence, on their clustering. In addition, in order to have more precise quantification of the similarity of abstracts, their sentences can be firstly classified using EBM related scientific artefact modeling approaches (Hassanzadeh et al., 2014) . Knowing the types of sentences, the similarity measures can then be narrowed to sentence-level metrics by only comparing sentences of the same type. These investigations can be coupled with the exploration of overlapping clustering methods for allowing the inclusion of a document in several clusters.",
"cite_spans": [
{
"start": 393,
"end": 411,
"text": "(Liu et al., 2012)",
"ref_id": "BIBREF9"
},
{
"start": 814,
"end": 840,
"text": "(Hassanzadeh et al., 2014)",
"ref_id": "BIBREF4"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "www.ncbi.nlm.nih.gov/pubmed",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "www.ncbi.nlm.nih.gov/books/NBK25497/",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "www.nlm.nih.gov/pubs/techbull/so13/ so13_pm_relevance.html 5 www.ncbi.nlm.nih.gov/books/NBK3827/",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "This research is funded by the Australian Research Council (ARC) Discovery Early Career Researcher Award (DECRA) -DE120100508. It is also partially supported by CSIRO Postgraduate Studentship and a visit of the first author to Macquarie University.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgments",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "A survey of web clustering engines",
"authors": [
{
"first": "Claudio",
"middle": [],
"last": "Carpineto",
"suffix": ""
},
{
"first": "Stanislaw",
"middle": [],
"last": "Osi\u0144ski",
"suffix": ""
},
{
"first": "Giovanni",
"middle": [],
"last": "Romano",
"suffix": ""
},
{
"first": "Dawid",
"middle": [],
"last": "Weiss",
"suffix": ""
}
],
"year": 2009,
"venue": "ACM Comput. Surv",
"volume": "41",
"issue": "3",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Claudio Carpineto, Stanislaw Osi\u0144ski, Giovanni Ro- mano, and Dawid Weiss. 2009. A survey of web clustering engines. ACM Comput. Surv., 41(3):17:1-17:38.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Strength of Recommendation Taxonomy (SORT): a Patient-Centered Approach to Grading Evidence in the Medical Literature",
"authors": [
{
"first": "Mark",
"middle": [
"H"
],
"last": "Ebell",
"suffix": ""
},
{
"first": "Jay",
"middle": [],
"last": "Siwek",
"suffix": ""
},
{
"first": "Barry",
"middle": [
"D"
],
"last": "Weiss",
"suffix": ""
},
{
"first": "Steven",
"middle": [
"H"
],
"last": "Woolf",
"suffix": ""
},
{
"first": "Jeffrey",
"middle": [],
"last": "Susman",
"suffix": ""
},
{
"first": "Bernard",
"middle": [],
"last": "Ewigman",
"suffix": ""
},
{
"first": "Marjorie",
"middle": [],
"last": "Bowman",
"suffix": ""
}
],
"year": 2004,
"venue": "The Journal of the American Board of Family Practice / American Board of Family Practice",
"volume": "17",
"issue": "1",
"pages": "59--67",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mark H Ebell, Jay Siwek, Barry D Weiss, Steven H Woolf, Jeffrey Susman, Bernard Ewigman, and Mar- jorie Bowman. 2004. Strength of Recommendation Taxonomy (SORT): a Patient-Centered Approach to Grading Evidence in the Medical Literature. The Journal of the American Board of Family Practice / American Board of Family Practice, 17(1):59-67.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Multiobjective Optimization for Clustering of Medical Publications",
"authors": [
{
"first": "Asif",
"middle": [],
"last": "Ekbal",
"suffix": ""
},
{
"first": "Sriparna",
"middle": [],
"last": "Saha",
"suffix": ""
},
{
"first": "Diego",
"middle": [],
"last": "Moll\u00e1",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Ravikumar",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings ALTA",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Asif Ekbal, Sriparna Saha, Diego Moll\u00e1, and K. Ravikumar. 2013. Multiobjective Optimization for Clustering of Medical Publications. In Proceed- ings ALTA 2013.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "A personalized search engine based on web-snippet hierarchical clustering. Software: Practice and Experience",
"authors": [
{
"first": "P",
"middle": [],
"last": "Ferragina",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Gulli",
"suffix": ""
}
],
"year": 2008,
"venue": "",
"volume": "38",
"issue": "",
"pages": "189--225",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "P. Ferragina and A. Gulli. 2008. A personalized search engine based on web-snippet hierarchical clustering. Software: Practice and Experience, 38(2):189-225.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Identifying scientific artefacts in biomedical literature: The evidence based medicine use case",
"authors": [
{
"first": "Hamed",
"middle": [],
"last": "Hassanzadeh",
"suffix": ""
},
{
"first": "Tudor",
"middle": [],
"last": "Groza",
"suffix": ""
},
{
"first": "Jane",
"middle": [],
"last": "Hunter",
"suffix": ""
}
],
"year": 2014,
"venue": "Journal of Biomedical Informatics",
"volume": "49",
"issue": "",
"pages": "159--170",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hamed Hassanzadeh, Tudor Groza, and Jane Hunter. 2014. Identifying scientific artefacts in biomedical literature: The evidence based medicine use case. Journal of Biomedical Informatics, 49:159-170.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "A supervised approach to quantifying sentence similarity: With application to evidence based medicine",
"authors": [
{
"first": "Hamed",
"middle": [],
"last": "Hassanzadeh",
"suffix": ""
},
{
"first": "Tudor",
"middle": [],
"last": "Groza",
"suffix": ""
},
{
"first": "Anthony",
"middle": [],
"last": "Nguyen",
"suffix": ""
},
{
"first": "Jane",
"middle": [],
"last": "Hunter",
"suffix": ""
}
],
"year": 2015,
"venue": "PLoS ONE",
"volume": "10",
"issue": "6",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hamed Hassanzadeh, Tudor Groza, Anthony Nguyen, and Jane Hunter. 2015. A supervised approach to quantifying sentence similarity: With applica- tion to evidence based medicine. PLoS ONE, 10(6):e0129392, 06.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Automatic classification of sentences to support Evidence Based Medicine",
"authors": [
{
"first": "Su",
"middle": [
"Nam"
],
"last": "Kim",
"suffix": ""
},
{
"first": "David",
"middle": [],
"last": "Martinez",
"suffix": ""
},
{
"first": "Lawrence",
"middle": [],
"last": "Cavedon",
"suffix": ""
},
{
"first": "Lars",
"middle": [],
"last": "Yencken",
"suffix": ""
}
],
"year": 2011,
"venue": "BMC Bioinformatics",
"volume": "13",
"issue": "2",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Su Nam Kim, David Martinez, Lawrence Cavedon, and Lars Yencken. 2011. Automatic classification of sentences to support Evidence Based Medicine. BMC Bioinformatics, 13(Suppl 2):S5.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Semantic clustering of answers to clinical questions",
"authors": [
{
"first": "Jimmy",
"middle": [
"J"
],
"last": "Lin",
"suffix": ""
},
{
"first": "Dina",
"middle": [],
"last": "Demner-Fushman",
"suffix": ""
}
],
"year": 2007,
"venue": "AMIA Annual Symposium Proceedings",
"volume": "33",
"issue": "",
"pages": "63--103",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jimmy J. Lin and Dina Demner-Fushman. 2007. Se- mantic clustering of answers to clinical questions. In AMIA Annual Symposium Proceedings, volume 33, pages 63-103.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "A Document Clustering and Ranking System for Exploring {MEDLINE} Citations",
"authors": [
{
"first": "Yongjing",
"middle": [],
"last": "Lin",
"suffix": ""
},
{
"first": "Wenyuan",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Keke",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Ying",
"middle": [],
"last": "Liu",
"suffix": ""
}
],
"year": 2007,
"venue": "Journal of the American Medical Informatics Association",
"volume": "14",
"issue": "5",
"pages": "651--661",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yongjing Lin, Wenyuan Li, Keke Chen, and Ying Liu. 2007. A Document Clustering and Ranking Sys- tem for Exploring {MEDLINE} Citations. Journal of the American Medical Informatics Association, 14(5):651-661.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Biolemmatizer: a lemmatization tool for morphological processing of biomedical text",
"authors": [
{
"first": "Haibin",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Tom",
"middle": [],
"last": "Christiansen",
"suffix": ""
},
{
"first": "William",
"middle": [
"A"
],
"last": "Baumgartner",
"suffix": "Jr."
},
{
"first": "Karin",
"middle": [],
"last": "Verspoor",
"suffix": ""
}
],
"year": 2012,
"venue": "Journal of Biomedical Semantics",
"volume": "3",
"issue": "3",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Haibin Liu, Tom Christiansen, and William A. Baum- gartner Karin Verspoor. 2012. Biolemmatizer: a lemmatization tool for morphological processing of biomedical text. Journal of Biomedical Semantics, 3(3).",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Data Mining and Knowledge Discovery Handbook",
"authors": [
{
"first": "Oded",
"middle": [],
"last": "Maimon",
"suffix": ""
},
{
"first": "Lior",
"middle": [],
"last": "Rokach",
"suffix": ""
}
],
"year": 2005,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Oded Maimon and Lior Rokach. 2005. Data Min- ing and Knowledge Discovery Handbook. Springer- Verlag New York, Inc., Secaucus, NJ, USA.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Clinical Information Extraction Using Word Representations",
"authors": [
{
"first": "Shervin",
"middle": [],
"last": "Malmasi",
"suffix": ""
},
{
"first": "Hamed",
"middle": [],
"last": "Hassanzadeh",
"suffix": ""
},
{
"first": "Mark",
"middle": [],
"last": "Dras",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of the Australasian Language Technology Workshop",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Shervin Malmasi, Hamed Hassanzadeh, and Mark Dras. 2015. Clinical Information Extraction Using Word Representations. In Proceedings of the Aus- tralasian Language Technology Workshop (ALTA).",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Introduction to Information Retrieval",
"authors": [
{
"first": "Christopher",
"middle": [
"D"
],
"last": "Manning",
"suffix": ""
},
{
"first": "Prabhakar",
"middle": [],
"last": "Raghavan",
"suffix": ""
},
{
"first": "Hinrich",
"middle": [],
"last": "Sch\u00fctze",
"suffix": ""
}
],
"year": 2008,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Christopher D. Manning, Prabhakar Raghavan, and Hinrich Sch\u00fctze. 2008. Introduction to Information Retrieval. Cambridge University Press, New York, NY, USA.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Wordnet -a Lexical Database for English",
"authors": [
{
"first": "George",
"middle": [
"A"
],
"last": "Miller",
"suffix": ""
}
],
"year": 1995,
"venue": "Communications of the ACM",
"volume": "38",
"issue": "11",
"pages": "39--41",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "George A. Miller. 1995. Wordnet -a Lexical Database for English. Communications of the ACM, 38(11):39-41.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Development of a corpus for evidence based medicine summarisation",
"authors": [
{
"first": "Diego",
"middle": [],
"last": "Moll\u00e1",
"suffix": ""
},
{
"first": "Maria",
"middle": [
"Elena"
],
"last": "Santiago-Martinez",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the Australasian Language Technology Association Workshop",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Diego Moll\u00e1 and Maria Elena Santiago-martinez. 2011. Development of a corpus for evidence based medicine summarisation. In Proceedings of the Aus- tralasian Language Technology Association Work- shop 2011.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Clinical inquiries. Which treatments work best for hemorrhoids?",
"authors": [
{
"first": "Anne",
"middle": [
"L"
],
"last": "Mounsey",
"suffix": ""
},
{
"first": "Susan",
"middle": [
"L"
],
"last": "Henry",
"suffix": ""
}
],
"year": 2009,
"venue": "The Journal of family practice",
"volume": "58",
"issue": "9",
"pages": "492--495",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Anne L Mounsey and Susan L Henry. 2009. Clini- cal inquiries. Which treatments work best for hemor- rhoids? The Journal of family practice, 58(9):492- 3, September.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "A general method applicable to the search for similarities in the amino acid sequence of two proteins",
"authors": [
{
"first": "Saul",
"middle": [
"B"
],
"last": "Needleman",
"suffix": ""
},
{
"first": "Christian",
"middle": [
"D"
],
"last": "Wunsch",
"suffix": ""
}
],
"year": 1970,
"venue": "Journal of Molecular Biology",
"volume": "48",
"issue": "3",
"pages": "443--453",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Saul B. Needleman and Christian D. Wunsch. 1970. A general method applicable to the search for simi- larities in the amino acid sequence of two proteins. Journal of Molecular Biology, 48(3):443-453.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "R: A Language and Environment for Statistical Computing. R Foundation for Statistical Computing",
"authors": [
{
"first": "",
"middle": [],
"last": "R Core Team",
"suffix": ""
}
],
"year": 2015,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "R Core Team, 2015. R: A Language and Environment for Statistical Computing. R Foundation for Statis- tical Computing, Vienna, Austria.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Clustering of medical publications for evidence based medicine summarisation",
"authors": [
{
"first": "Sarafaisal",
"middle": [],
"last": "Shash",
"suffix": ""
},
{
"first": "Diego",
"middle": [],
"last": "Moll\u00e1",
"suffix": ""
}
],
"year": 2013,
"venue": "Artificial Intelligence in Medicine",
"volume": "7885",
"issue": "",
"pages": "305--309",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "SaraFaisal Shash and Diego Moll\u00e1. 2013. Clus- tering of medical publications for evidence based medicine summarisation. In Niels Peek, Roque Marn Morales, and Mor Peleg, editors, Artificial Intelligence in Medicine, volume 7885 of Lec- ture Notes in Computer Science, pages 305-309.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "Web document clustering: A feasibility demonstration",
"authors": [
{
"first": "Oren",
"middle": [],
"last": "Zamir",
"suffix": ""
},
{
"first": "Oren",
"middle": [],
"last": "Etzioni",
"suffix": ""
}
],
"year": 1998,
"venue": "Proceedings of the 21st Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '98",
"volume": "",
"issue": "",
"pages": "46--54",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Oren Zamir and Oren Etzioni. 1998. Web document clustering: A feasibility demonstration. In Proceed- ings of the 21st Annual International ACM SIGIR Conference on Research and Development in Infor- mation Retrieval, SIGIR '98, pages 46-54. ACM.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"type_str": "figure",
"num": null,
"uris": null,
"text": "Title and abstract of one sample (Mounsey and Henry, 2009) of the Clinical Inquiry section of Journal of Family Practice."
},
"FIGREF1": {
"type_str": "figure",
"num": null,
"uris": null,
"text": "Statistics on the queried questions and their retrieved documents."
},
"TABREF0": {
"html": null,
"type_str": "table",
"num": null,
"content": "<table><tr><td>Publication Type</td><td colspan=\"3\">EBMSummariser NICTA-PIBOSO Retrieved</td></tr><tr><td>Clinical Trial</td><td>834 (31%)</td><td>115 (12%)</td><td>12,437 (6%)</td></tr><tr><td colspan=\"2\">Randomized Controlled Trial 763 (29%)</td><td>79 (8%)</td><td>13,849 (7%)</td></tr><tr><td>Review</td><td>620 (23%)</td><td>220 (22%)</td><td>26,162 (12%)</td></tr><tr><td>Comparative Study</td><td>523 (20%)</td><td>159 (16%)</td><td>19,521 (9%)</td></tr><tr><td>Meta-Analysis</td><td>251 (9%)</td><td>22 (2%)</td><td>2,067 (1%)</td></tr><tr><td>Controlled Clinical Trial</td><td>61 (2%)</td><td>9 (1%)</td><td>1,753 (1%)</td></tr><tr><td>Case Reports</td><td>37 (1%)</td><td>82 (8%)</td><td>8,599 (4%)</td></tr><tr><td colspan=\"2\">incompleteness. Following this scenario, we were</td><td/><td/></tr><tr><td colspan=\"2\">able to create proper clusters for retrieved abstracts</td><td/><td/></tr><tr><td>of 129 questions out of the initial 465.</td><td/><td/><td/></tr><tr><td colspan=\"2\">Manual Clustering scenario: This scenario is</td><td/><td/></tr><tr><td colspan=\"2\">based on the Pooling approach used in the evalua-</td><td/><td/></tr><tr><td>tion of Information Retrieval systems</td><td/><td/><td/></tr></table>",
"text": "Statistics over the more common publication types in EBM domain corpora."
},
"TABREF1": {
"html": null,
"type_str": "table",
"num": null,
"content": "<table><tr><td>Method</td><td>Rand Index</td></tr><tr><td>KM + TF-IDF</td><td>0.5261</td></tr><tr><td>HC + TF-IDF</td><td>0.5242</td></tr><tr><td>HC + Similarity Metrics</td><td>0.6036*</td></tr><tr><td colspan=\"2\">* Statistically significant (p-value&lt; 0.05) when</td></tr><tr><td colspan=\"2\">compared with second best method.</td></tr><tr><td colspan=\"2\">ments are grouped into the same cluster. A false</td></tr><tr><td colspan=\"2\">negative (FN) decision assigns two similar docu-</td></tr><tr><td>ments to different clusters.</td><td/></tr></table>",
"text": "Clustering results over 129 questions of the EBMSummariser corpus."
},
"TABREF2": {
"html": null,
"type_str": "table",
"num": null,
"content": "<table><tr><td>Methods</td><td colspan=\"3\">Annotator 1 clusters Annotator 2 clusters Average</td></tr><tr><td>KM + TF-IDF</td><td>0.4038</td><td>0.3095</td><td>0.3566</td></tr><tr><td>HC + TF-IDF</td><td>0.2877</td><td>0.2898</td><td>0.2887</td></tr><tr><td>HC + Similarity Metrics</td><td>0.3825</td><td>0.3926</td><td>0.3875</td></tr></table>",
"text": "Clustering results over retrieved PubMed documents with Manual Clustering evaluation scenario (Rand Index) for 129 questions from the EBMSummariser corpus."
},
"TABREF3": {
"html": null,
"type_str": "table",
"num": null,
"content": "<table><tr><td colspan=\"2\">: Clustering results over retrieved PubMed</td></tr><tr><td colspan=\"2\">documents with Semantic Similarity Mapping</td></tr><tr><td colspan=\"2\">evaluation scenario for 129 questions from the</td></tr><tr><td>EBMSummariser corpus.</td><td/></tr><tr><td>Method</td><td>Rand Index</td></tr><tr><td>KM + TF-IDF</td><td>0.5481</td></tr><tr><td>HC + TF-IDF</td><td>0.5463</td></tr><tr><td>HC + Similarity Metrics</td><td>0.5912</td></tr></table>",
"text": ""
},
"TABREF4": {
"html": null,
"type_str": "table",
"num": null,
"content": "<table><tr><td>Method</td><td>Entropy</td></tr><tr><td>KM + TF-IDF (as in Shash and Moll\u00e1 (2013))</td><td>0.260</td></tr><tr><td colspan=\"2\">KM + TF-IDF (our replication) 0.3959</td></tr><tr><td>HC + Similarity metrics</td><td>0.3548*</td></tr><tr><td colspan=\"2\">* Statistically significant (p-value&lt; 0.05) when</td></tr><tr><td>compared with preceding method.</td><td/></tr></table>",
"text": "Clustering results over the entire EBM-Summariser corpus."
}
}
}
}