|
{ |
|
"paper_id": "U14-1017", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T03:09:07.396904Z" |
|
}, |
|
"title": "Deep Belief Networks and Biomedical Text Categorisation", |
|
"authors": [ |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Jimeno", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mackinlay \u2666\u2660", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research -Australia", |
|
"location": { |
|
"addrLine": "380 La Trobe Street", |
|
"settlement": "Melbourne", |
|
"region": "VIC", |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Bedo \u2666\u2660", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research -Australia", |
|
"location": { |
|
"addrLine": "380 La Trobe Street", |
|
"settlement": "Melbourne", |
|
"region": "VIC", |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Rahil", |
|
"middle": [], |
|
"last": "Garnavi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research -Australia", |
|
"location": { |
|
"addrLine": "380 La Trobe Street", |
|
"settlement": "Melbourne", |
|
"region": "VIC", |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Qiang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "IBM Research -Australia", |
|
"location": { |
|
"addrLine": "380 La Trobe Street", |
|
"settlement": "Melbourne", |
|
"region": "VIC", |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We evaluate the use of Deep Belief Networks as classifiers in a text categorisation task (assigning category labels to documents) in the biomedical domain. Our preliminary results indicate that compared to Support Vector Machines, Deep Belief Networks are superior when a large set of training examples is available, showing an F-score increase of up to 5%. In addition, the training times for DBNs can be prohibitive. DBNs show promise for certain types of biomedical text categorisation.", |
|
"pdf_parse": { |
|
"paper_id": "U14-1017", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We evaluate the use of Deep Belief Networks as classifiers in a text categorisation task (assigning category labels to documents) in the biomedical domain. Our preliminary results indicate that compared to Support Vector Machines, Deep Belief Networks are superior when a large set of training examples is available, showing an F-score increase of up to 5%. In addition, the training times for DBNs can be prohibitive. DBNs show promise for certain types of biomedical text categorisation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Text categorisation is the task of automatically assigning pre-defined labels to text. In the biomedical domain, research in automatic text categorisation has mostly taken place in the context of indexing MEDLINE R citations with Medical Subject Headings (MeSH R ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "MEDLINE is the largest collection of biomedical abstracts and contains over 23M citations with over 800k new citations every year, making it difficult to keep up-to-date with new discoveries. To help cataloging and searching biomedical documents, the US National Library of Medicine (NLM) R has produced the MeSH controlled vocabulary with over 26k headings. At NLM, each MEDLINE citation is manually assigned a number of relevant medical subject headings enumerating the topics of the paper. Machine learning for text categorisation in this context is appealing due to the large number of citations available to train machine learning algorithms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In text categorisation, the most frequently used feature representation is bag-of-words, where text is converted into a feature vector in which each dimension corresponds to a word or phrase and stores either a binary value indicating its presence in the document or a numerical value indicating its frequency (Apte et al., 1994; Dumais et al., 1998; Sebastiani, 2002) . This relatively simple approach has proven to be robust enough (Jimeno- Yepes et al., 2011 ) that it is difficult to improve on its performance with more sophisticated representations. In this work, we explore the use of Deep Belief Networks (DBN) to automatically generate a new representation in biomedical text categorisation. Since DBNs have a richer internal representation than SVMs, we wished to evaluate whether this would lead to improved classification performance compared to bag-of-words.", |
|
"cite_spans": [ |
|
{ |
|
"start": 310, |
|
"end": 329, |
|
"text": "(Apte et al., 1994;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 330, |
|
"end": 350, |
|
"text": "Dumais et al., 1998;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 351, |
|
"end": 368, |
|
"text": "Sebastiani, 2002)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 443, |
|
"end": 461, |
|
"text": "Yepes et al., 2011", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There are several approaches being used for text categorisation in the biomedical domain trying to reproduce the manual MeSH indexing. NLM has developed the Medical Text Indexer (MTI) (Aronson et al., 2004; , which is used to suggest MeSH headings for new citations to indexers. MTI combines MetaMap (Aronson and Lang, 2010) annotation and recommendations from similar citations recovered using the PubMed Related Citations (Lin and Wilbur, 2007) tool that are post-processed to comply with NLM indexing rules. Results for the most frequent categories, as used in this work, indicate that machine learning methods can produce better results than MTI (Jimeno . Recently, there has been interest in comparing MeSH indexing approaches in the BioASQ challenge. 1 It has been found that bag-of-word representations without feature selection already provide competitive performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 184, |
|
"end": 206, |
|
"text": "(Aronson et al., 2004;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 424, |
|
"end": 446, |
|
"text": "(Lin and Wilbur, 2007)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Recently, several studies have utilised different deep learning methods to build multiple layers of feature representation for documents, such as a Stacked De-noising Autoencoder (SDA) (Vincent et al., 2010; Glorot et al., 2011) and a DBN (Hinton and Salakhutdinov, 2006) for tasks including spam filtering (Tzortzis and Likas, 2007) . In this work, we apply DBN as our deep learning algorithm for biomedical text categorisation, trying to reproduce MeSH indexing for the 10 top most frequent MeSH headings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 207, |
|
"text": "(Vincent et al., 2010;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 208, |
|
"end": 228, |
|
"text": "Glorot et al., 2011)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 235, |
|
"end": 271, |
|
"text": "DBN (Hinton and Salakhutdinov, 2006)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 307, |
|
"end": 333, |
|
"text": "(Tzortzis and Likas, 2007)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Restricted Boltzmann Machines (RBM) A (restricted) Boltzmann Machine (RBM) (Salakhutdinov et al., 2007) is a parameterised generative model representing a joint probability distribution. Given some training data, learning an RBM means adjusting the RBM parameters to maximise the likelihood of the training data under the model. Restricted Boltzmann machines consist of two layers containing visible and hidden neurons.", |
|
"cite_spans": [ |
|
{ |
|
"start": 75, |
|
"end": 103, |
|
"text": "(Salakhutdinov et al., 2007)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deep Belief Networks", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The energy function E(v, h) of an RBM is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deep Belief Networks", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "E(v, h) = \u2212b v \u2212 c h \u2212 h W v;", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Deep Belief Networks", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where W represents the weights connecting hidden and visible units and b, c are the offsets of the visible and hidden layers respectively. The joint probability distribution is then given by the exponential family ,h) , where Z is a normalisation factor. The likelihood of some data X \u2282 R n is thus L (X) := \u220f v\u2208X \u2211 h P(v, h) and b, c, and W are chosen to maximise this likelihood (or equivalently minimise the negative log likelihood):", |
|
"cite_spans": [ |
|
{ |
|
"start": 214, |
|
"end": 217, |
|
"text": ",h)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deep Belief Networks", |
|
"sec_num": "3.1" |
|
}, |
|
|
{ |
|
"text": "arg b,c,W min \u2212 log L (X) = \u2212 \u2211 v\u2208X log \u2211 h P(v, h).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deep Belief Networks", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We used the Contrastive Divergence method (Hinton, 2002) to find an approximate solution.", |
|
"cite_spans": [ |
|
{ |
|
"start": 42, |
|
"end": 56, |
|
"text": "(Hinton, 2002)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deep Belief Networks", |
|
"sec_num": "3.1" |
|
}, |
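
{

"text": "As an illustration of the model and training procedure described above, the following is a minimal NumPy sketch of a single contrastive divergence (CD-1) update for a binary RBM with the energy function given earlier. It is not the Theano implementation used in our experiments; the learning rate, array shapes and sampling details are illustrative assumptions.\n\nimport numpy as np\n\ndef sigmoid(x):\n    return 1.0 / (1.0 + np.exp(-x))\n\ndef cd1_update(v0, W, b, c, lr=0.1):\n    # v0: batch of binary visible vectors (batch_size x n_visible)\n    # W: weights (n_hidden x n_visible); b, c: visible and hidden offsets\n    # Positive phase: hidden probabilities and samples given the data\n    ph0 = sigmoid(c + v0 @ W.T)\n    h0 = (np.random.rand(*ph0.shape) < ph0).astype(v0.dtype)\n    # Negative phase: one Gibbs step back to the visible layer and up again\n    pv1 = sigmoid(b + h0 @ W)\n    ph1 = sigmoid(c + pv1 @ W.T)\n    # CD-1 approximation to the gradient of the log likelihood\n    n = v0.shape[0]\n    W += lr * (ph0.T @ v0 - ph1.T @ pv1) / n\n    b += lr * (v0 - pv1).mean(axis=0)\n    c += lr * (ph0 - ph1).mean(axis=0)\n    return W, b, c",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Deep Belief Networks",

"sec_num": "3.1"

},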
|
{ |
|
"text": "Deep Belief Network A DBN normally is the stack of many layers of RBM model. Hinton and Salakhutdinov (2006) showed that RBMs can be stacked and trained in a greedy manner to form socalled Deep Belief Networks (DBN). DBNs are graphical models which learn to extract a deep hierarchical representation of the training data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 108, |
|
"text": "Salakhutdinov (2006)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deep Belief Networks", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The hidden neurons extract relevant features from the observations, and these features can serve as input to another RBM. By stacking RBMs in this way, we can learn a higher level representation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deep Belief Networks", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Practical training strategies In practice, the DBN training often consists of two steps: greedy layer-wise pretraining and fine tuning. Layer-wise pretraining involves training the model parameters layer by layer via unsupervised training. Fine tuning is achieved by supervised gradient descent of the negative log-likelihood cost function. The DBN implementation used in this work has been obtained from http://www.deeplearning. net/tutorial built on Theano 2 . Text data is very sparse with only a few dimensions having non-zero values. We modified the DBN code to deal with sparse matrices.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Deep Belief Networks", |
|
"sec_num": "3.1" |
|
}, |
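
{

"text": "The sketch below illustrates the greedy layer-wise pretraining stage described above. It is an assumption-laden outline rather than the Theano code used in this work: sigmoid and cd1_update are the helpers from the RBM sketch earlier, the layer sizes and number of epochs are arbitrary, and the supervised fine-tuning stage (gradient descent on the negative log-likelihood with a logistic output layer) is omitted.\n\nimport numpy as np\n\ndef pretrain_dbn(X, layer_sizes=(500, 500, 500), epochs=10, lr=0.1):\n    # Greedy layer-wise pretraining: each RBM is trained on the activations\n    # produced by the layer below it.\n    rbms = []\n    data = X\n    for n_hidden in layer_sizes:\n        n_visible = data.shape[1]\n        W = 0.01 * np.random.randn(n_hidden, n_visible)\n        b = np.zeros(n_visible)\n        c = np.zeros(n_hidden)\n        for _ in range(epochs):\n            # cd1_update is the CD-1 step from the RBM sketch above\n            W, b, c = cd1_update(data, W, b, c, lr)\n        rbms.append((W, b, c))\n        # The hidden probabilities become the input of the next RBM\n        data = sigmoid(c + data @ W.T)\n    return rbms",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Deep Belief Networks",

"sec_num": "3.1"

},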
|
{ |
|
"text": "We used a Support Vector Machine (SVM) with a linear kernel as our baseline method. SVM has shown good performance on text categorisation (Joachims, 1998) as well as in MeSH indexing (Jimeno and within BioASQ. In this work, we have used the implementation from the MTI ML package 3 that follows the work of (Zhang, 2004) and uses Hinge loss with stochastic gradient descent.", |
|
"cite_spans": [ |
|
{ |
|
"start": 138, |
|
"end": 154, |
|
"text": "(Joachims, 1998)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 307, |
|
"end": 320, |
|
"text": "(Zhang, 2004)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Support Vector Machine", |
|
"sec_num": "3.2" |
|
}, |
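
{

"text": "For illustration, a minimal sketch of a linear classifier trained with hinge loss and stochastic gradient descent follows. It is in the spirit of the baseline described above, not the MTI ML implementation itself; the learning rate, regularisation constant and number of epochs are arbitrary assumptions.\n\nimport numpy as np\n\ndef train_hinge_sgd(X, y, epochs=5, lr=0.01, lam=1e-4):\n    # X: (n_samples, n_features) binary bag-of-words matrix; y: labels in {-1, +1}\n    n, d = X.shape\n    w = np.zeros(d)\n    bias = 0.0\n    for _ in range(epochs):\n        for i in np.random.permutation(n):\n            margin = y[i] * (X[i] @ w + bias)\n            # Shrink the weights (L2 regularisation), then apply the hinge subgradient\n            w *= 1.0 - lr * lam\n            if margin < 1.0:\n                w += lr * y[i] * X[i]\n                bias += lr * y[i]\n    return w, bias",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Support Vector Machine",

"sec_num": "3.2"

},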
|
{ |
|
"text": "Training and test sets have been obtained from the MTI ML site. There are 24,727 training citations and 12,363 test citations. From these data sets, we have selected the top 10 most frequent MeSH headings available from Table 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 227, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data set", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We have also used a larger set since we realised in the earlier stages of experimentation that more data was needed to train the DBN. This larger set has been obtained from the NLM Indexing Initiative 4 and is split into 94,942 training citations and 48,911 test citations. Results on both sets are reported for the same categories.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data set", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We processed the citations to extract the text from the title and the abstract. From the text, we extracted tokens using a regular expression looking for white spaces and punctuation marks. Tokens were lowercased and filtered using a standard stopword list. Binary values for the features (present or absent) are considered. Only tokens that appear in at least two citations in the training set were considered, considerably reducing the number of features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data set", |
|
"sec_num": "3.3" |
|
}, |
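
{

"text": "A minimal sketch of this feature extraction pipeline is shown below (regular-expression tokenisation, lowercasing, stopword removal, binary presence/absence features, and a document frequency threshold of two). The stopword list and the exact regular expression are assumptions, not the ones used in the experiments.\n\nimport re\nfrom collections import Counter\n\nSTOPWORDS = {'the', 'of', 'and', 'in', 'to', 'a', 'is', 'for', 'with'}  # illustrative subset\n\ndef tokenise(text):\n    # Split on whitespace and punctuation, lowercase, drop stopwords\n    tokens = re.split(r'\\W+', text.lower())\n    return [t for t in tokens if t and t not in STOPWORDS]\n\ndef build_vocabulary(citations, min_df=2):\n    # Keep only tokens appearing in at least min_df training citations\n    df = Counter()\n    for text in citations:\n        df.update(set(tokenise(text)))\n    return sorted(t for t, n in df.items() if n >= min_df)\n\ndef binary_features(text, vocabulary):\n    # Binary presence/absence vector over the vocabulary\n    present = set(tokenise(text))\n    return [1 if t in present else 0 for t in vocabulary]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data set",

"sec_num": "3.3"

},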
|
{ |
|
"text": "The SVM and the DBN were trained and tested on the data sets. Binary classifiers predicting each individual category were trained for each one of the selected MeSH headings. For DBN, we used 2/3 of the training data for unsupervised pretraining and 1/3 for fine tuning the model due to DBN training cost, while for SVM we used all the training data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
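
{

"text": "A sketch of this protocol is given below. The train_svm and train_dbn helpers are hypothetical placeholders standing in for the two learners described above; only the per-heading binary set-up and the 2/3 pretraining / 1/3 fine-tuning split of the DBN training data follow the description in the text.\n\ndef run_experiments(X_train, labels_train, X_test, headings):\n    # labels_train: dict mapping each MeSH heading to a binary label vector\n    n = X_train.shape[0]\n    cut = (2 * n) // 3  # 2/3 for unsupervised pretraining, 1/3 for fine tuning\n    predictions = {}\n    for heading in headings:\n        y = labels_train[heading]\n        svm = train_svm(X_train, y)  # hypothetical helper; SVM uses all training data\n        dbn = train_dbn(pretrain=X_train[:cut],  # hypothetical helper; unsupervised stage\n                        finetune=(X_train[cut:], y[cut:]))  # supervised stage\n        predictions[heading] = {'svm': svm.predict(X_test), 'dbn': dbn.predict(X_test)}\n    return predictions",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Results",

"sec_num": "4"

},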
|
{ |
|
"text": "Configuring the DBN requires specifying the number of hidden layers and the number of units per layer. We used one hidden layer to give three layers in total. We used two different configuration of training units, set empirically (and semiarbitrarily) from data samples -DBN 250 with 250 units in each of the three layers and DBN 500, with 500 units per layer. Tables 1 and 2 show results for the small set with 16000 for DBN pretraining and 8727 for fine tuning and the large set with 63294 for DBN pretraining and 31647 for fine tuning.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 361, |
|
"end": 375, |
|
"text": "Tables 1 and 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "As shown in Table 1 , with the smaller datasets, SVM performance is superior to DBN, however DBN substantially outperforms SVM on the six most frequent categories. DBN results are much lower when the categories are less frequent and for Adolescent, DBN simply classified all citations as negative. DBN 500 performs better than DBN 250 in the top six most frequent categories. Figure 2 shows learning curves obtained by training the three methods on increasingly large subsets of the small training set. SVM outperforms DBN when there is limited training data, but as the amount of training data is increased, for certain categories DBN, especially DBN 500, surpasses SVM (as expected from Table 1) .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 19, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 376, |
|
"end": 384, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 689, |
|
"end": 697, |
|
"text": "Table 1)", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Results were obtained using the same subset and it could be interesting to see the behavior if different subsets of the training data are used. DBN results depend as well on the partition of the training data, using all the data for pretraining and fine tuning the perfomance of DBN on the small set improves (avg. F1: 0.7282). Table 2 shows that when there is more training data available, the performance penalty for the DBN methods versus SVM over the sparser categories disappears. In addition, there is also less of a difference between results of 250 and 500 units per layer. Overall all three methods are more similar to one another over this larger data set, with better results for DBN on average. Absolute results between Tables 1 and 2 are not comparable since two different collections are used, e.g. some categories have significantly different performance.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 328, |
|
"end": 335, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In our experiments, DBN overall performance is comparable to SVM with a linear kernel being better in some of the categories when a large set of training data is used. We also evaluated SVM with Radial Basis Function kernel (not reported) but the results were comparable to a linear kernel.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Compared to image processing, text categorisation has a larger dimensionality that varies with the size of the data set since there is the chance of finding new unique words, even though data is sparse and few of the citation features have a value. On the small set, with a batch size of 200 citations, the number of unique features is 2,531 and with a batch size of 8,000 it is 26,491, while in the larger set, 53,784 unique features were found.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "DBN shows competitive performance compared to SVM. We have tried a limited set of configurations with only one hidden layer. Deeper configurations with a more varied number of units can be explored but the pretraining phase is expensive. We would like to explore different pretraining and supervised tuning ratios to reduce training time. In addition, identifying the best DBN configuration can be expensive. (Rahimi and Recht, 2009) suggest approaches to avoid an explosion of possibilities which could be useful here.", |
|
"cite_spans": [ |
|
{ |
|
"start": 409, |
|
"end": 433, |
|
"text": "(Rahimi and Recht, 2009)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Deep learning requires a significant amount of time to train, e.g. SVM was trained in several minutes while the DBN pretraining in the large set took five days. To alleviate this, we could consider methods to reduce dimensionality (Weinberger et al., 2009; Bingham and Mannila, 2001) . Nonetheless, we believe that this work shows that DBNs show promise for text categorisation, as they are able to provide superior performance to SVM-based techniques traditionally for such tasks. Table 2 : Text categorisation results for the 10 selected categories with the large set and a batch size of 31647 citations. Results are reported in precision (Pre), recall (Rec) and F-measure (F1). Average results are shown at the bottom of the table. DBN 250 means using three layers with 250 units each. DBN 500 means using three layers with 500 units each.", |
|
"cite_spans": [ |
|
{ |
|
"start": 231, |
|
"end": 256, |
|
"text": "(Weinberger et al., 2009;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 257, |
|
"end": 283, |
|
"text": "Bingham and Mannila, 2001)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 482, |
|
"end": 489, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "http://www.bioasq.org/workshop/schedule", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://deeplearning.net/software/theano 3 http://ii.nlm.nih.gov/MTI ML 4 http://ii.nlm.nih.gov/DataSets/index.shtml#2013 MTI ML", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Automated learning of decision rules for text categorization", |
|
"authors": [ |
|
{ |
|
"first": "Chidanand", |
|
"middle": [], |
|
"last": "Apte", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fred", |
|
"middle": [], |
|
"last": "Damerau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sholom", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Weiss", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "ACM Transactions on Information Systems", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "233--251", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chidanand Apte, Fred Damerau, and Sholom M Weiss. 1994. Automated learning of decision rules for text categorization. ACM Transactions on Information Systems, 12:233-251.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "An overview of MetaMap: historical perspective and recent advances", |
|
"authors": [ |
|
{ |
|
"first": "Fran\u00e7ois-Michel", |
|
"middle": [], |
|
"last": "Alan R Aronson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Journal of the American Medical Informatics Association", |
|
"volume": "17", |
|
"issue": "3", |
|
"pages": "229--236", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alan R Aronson and Fran\u00e7ois-Michel Lang. 2010. An overview of MetaMap: historical perspective and re- cent advances. Journal of the American Medical In- formatics Association, 17(3):229-236.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "The NLM indexing initiative's medical text indexer", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Alan R Aronson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clifford", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Mork", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Susanne", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Gay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Willie J", |
|
"middle": [], |
|
"last": "Humphrey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rogers", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Medinfo", |
|
"volume": "", |
|
"issue": "11", |
|
"pages": "268--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alan R Aronson, James G Mork, Clifford W Gay, Su- sanne M Humphrey, and Willie J Rogers. 2004. The NLM indexing initiative's medical text indexer. Medinfo, 11(Pt 1):268-72.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Random projection in dimensionality reduction: applications to image and text data", |
|
"authors": [ |
|
{ |
|
"first": "Ella", |
|
"middle": [], |
|
"last": "Bingham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heikki", |
|
"middle": [], |
|
"last": "Mannila", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the seventh ACM SIGKDD international conference on Knowledge discovery and data mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "245--250", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ella Bingham and Heikki Mannila. 2001. Random pro- jection in dimensionality reduction: applications to image and text data. In Proceedings of the seventh ACM SIGKDD international conference on Knowl- edge discovery and data mining, pages 245-250. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Inductive learning algorithms and representations for text categorization", |
|
"authors": [ |
|
{ |
|
"first": "Susan", |
|
"middle": [], |
|
"last": "Dumais", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Platt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Heckerman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mehran", |
|
"middle": [], |
|
"last": "Sahami", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the seventh international conference on Information and knowledge management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "148--155", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Susan Dumais, John Platt, David Heckerman, and Mehran Sahami. 1998. Inductive learning algo- rithms and representations for text categorization. In Proceedings of the seventh international conference on Information and knowledge management, pages 148-155. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Domain adaptation for large-scale sentiment classification: A deep learning approach", |
|
"authors": [ |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Glorot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 28th International Conference on Machine Learning (ICML-11)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "513--520", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xavier Glorot, Antoine Bordes, and Yoshua Bengio. 2011. Domain adaptation for large-scale sentiment classification: A deep learning approach. In Pro- ceedings of the 28th International Conference on Machine Learning (ICML-11), pages 513-520.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Reducing the dimensionality of data with neural networks", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Geoffrey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan R", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Science", |
|
"volume": "313", |
|
"issue": "5786", |
|
"pages": "504--507", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Geoffrey E Hinton and Ruslan R Salakhutdinov. 2006. Reducing the dimensionality of data with neural net- works. Science, 313(5786):504-507.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Training products of experts by minimizing contrastive divergence", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ge Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Neural computation", |
|
"volume": "1800", |
|
"issue": "", |
|
"pages": "1771--1800", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "GE Hinton. 2002. Training products of experts by min- imizing contrastive divergence. Neural computation, 1800:1771-1800.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "A bottom-up approach to MEDLINE indexing recommendations", |
|
"authors": [ |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "James G Mork", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dina", |
|
"middle": [], |
|
"last": "Van Lenten", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Demner Fushman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Aronson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "AMIA Annual Symposium Proceedings", |
|
"volume": "2011", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James G Mork, Elizabeth Van Lenten, Dina Demner Fushman, and Alan R Aronson. 2011. A bottom-up approach to MEDLINE indexing recommendations. In AMIA Annual Symposium Proceedings, volume 2011, page 1583. American Medical Informatics Association.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Comparison and combination of several MeSH indexing approaches", |
|
"authors": [ |
|
{ |
|
"first": "Antonio Jose Jimeno", |
|
"middle": [], |
|
"last": "Yepes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Mork", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dina", |
|
"middle": [], |
|
"last": "Demner-Fushman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Aronson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "AMIA Annual Symposium Proceedings", |
|
"volume": "2013", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antonio Jose Jimeno Yepes, James G Mork, Dina Demner-Fushman, and Alan R Aronson. 2013. Comparison and combination of several MeSH in- dexing approaches. In AMIA Annual Symposium Proceedings, volume 2013, page 709. American Medical Informatics Association.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Text categorization with suport vector machines: Learning with many relevant features", |
|
"authors": [ |
|
{ |
|
"first": "Thorsten", |
|
"middle": [], |
|
"last": "Joachims", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the 10th European Conference on Machine Learning, ECML '98", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "137--142", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thorsten Joachims. 1998. Text categorization with suport vector machines: Learning with many rele- vant features. In Proceedings of the 10th European Conference on Machine Learning, ECML '98, pages 137-142, London, UK, UK. Springer-Verlag.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "PubMed related articles: a probabilistic topic-based model for content similarity", |
|
"authors": [ |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W John", |
|
"middle": [], |
|
"last": "Wilbur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "BMC bioinformatics", |
|
"volume": "8", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jimmy Lin and W John Wilbur. 2007. PubMed related articles: a probabilistic topic-based model for con- tent similarity. BMC bioinformatics, 8(1):423.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "The NLM Medical Text Indexer system for indexing biomedical literature", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "James", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Mork", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Jimeno-Yepes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Aronson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James G Mork, Antonio Jimeno-Yepes, and Alan R Aronson. 2013. The NLM Medical Text In- dexer system for indexing biomedical literature. In BioASQ@ CLEF.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Weighted sums of random kitchen sinks: Replacing minimization with randomization in learning", |
|
"authors": [ |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Rahimi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Recht", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1313--1320", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ali Rahimi and Benjamin Recht. 2009. Weighted sums of random kitchen sinks: Replacing minimiza- tion with randomization in learning. In Advances in neural information processing systems, pages 1313- 1320.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Restricted Boltzmann machines for collaborative filtering", |
|
"authors": [ |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 24th international conference on Machine learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "791--798", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruslan Salakhutdinov, Andriy Mnih, and Geoffrey Hin- ton. 2007. Restricted Boltzmann machines for col- laborative filtering. In Proceedings of the 24th in- ternational conference on Machine learning, pages 791-798. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Machine learning in automated text categorization", |
|
"authors": [ |
|
{ |
|
"first": "Fabrizio", |
|
"middle": [], |
|
"last": "Sebastiani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "ACM computing surveys (CSUR)", |
|
"volume": "34", |
|
"issue": "1", |
|
"pages": "1--47", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fabrizio Sebastiani. 2002. Machine learning in auto- mated text categorization. ACM computing surveys (CSUR), 34(1):1-47.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Deep belief networks for spam filtering", |
|
"authors": [ |
|
{ |
|
"first": "Grigorios", |
|
"middle": [], |
|
"last": "Tzortzis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aristidis", |
|
"middle": [], |
|
"last": "Likas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Tools with Artificial Intelligence, 2007. ICTAI 2007. 19th IEEE International Conference on", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "306--309", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Grigorios Tzortzis and Aristidis Likas. 2007. Deep belief networks for spam filtering. In Tools with Ar- tificial Intelligence, 2007. ICTAI 2007. 19th IEEE International Conference on, volume 2, pages 306- 309. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Stacked denoising autoencoders: Learning useful representations in a deep network with a local denoising criterion", |
|
"authors": [ |
|
{ |
|
"first": "Pascal", |
|
"middle": [], |
|
"last": "Vincent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hugo", |
|
"middle": [], |
|
"last": "Larochelle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabelle", |
|
"middle": [], |
|
"last": "Lajoie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierre-Antoine", |
|
"middle": [], |
|
"last": "Manzagol", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "The Journal of Machine Learning Research", |
|
"volume": "11", |
|
"issue": "", |
|
"pages": "3371--3408", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pascal Vincent, Hugo Larochelle, Isabelle Lajoie, Yoshua Bengio, and Pierre-Antoine Manzagol. 2010. Stacked denoising autoencoders: Learning useful representations in a deep network with a local denoising criterion. The Journal of Machine Learn- ing Research, 11:3371-3408.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Feature hashing for large scale multitask learning", |
|
"authors": [ |
|
{ |
|
"first": "Kilian", |
|
"middle": [], |
|
"last": "Weinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anirban", |
|
"middle": [], |
|
"last": "Dasgupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Langford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Smola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josh", |
|
"middle": [], |
|
"last": "Attenberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 26th Annual International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1113--1120", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kilian Weinberger, Anirban Dasgupta, John Langford, Alex Smola, and Josh Attenberg. 2009. Fea- ture hashing for large scale multitask learning. In Proceedings of the 26th Annual International Con- ference on Machine Learning, pages 1113-1120. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Solving large scale linear prediction problems using stochastic gradient descent algorithms", |
|
"authors": [ |
|
{ |
|
"first": "Tong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the Twenty-first International Conference on Machine Learning, ICML '04", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tong Zhang. 2004. Solving large scale linear predic- tion problems using stochastic gradient descent algo- rithms. In Proceedings of the Twenty-first Interna- tional Conference on Machine Learning, ICML '04, pages 116-, New York, NY, USA. ACM.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Deep Neural Network (left) andRBM (right)", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Training size vs F1 on the small set. There is one plot per category. Three methods are shown: SVM (slashed blue line, square shaped point), DBN with three layers with 250 units each (continuous red line, round shaped point) and DBN with three layers with 500 units each (dotted green line, triangle shaped point).", |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"text": "Text categorisation results for the 10 selected categories with the small set and a batch size of 8000 citations. Results are reported in precision (Pre), recall (Rec) and F-measure (F1). Average results are shown at the bottom of the table. DBN 250 means using three layers with 250 units each. DBN 500 means using three layers with 500 units each.", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>Methods</td><td colspan=\"2\">SVM (linear)</td><td/><td>DBN</td><td>250</td><td/><td>DBN</td><td>500</td><td/></tr><tr><td>Category</td><td>Positives</td><td>Pre</td><td>Rec</td><td>F1</td><td>Pre</td><td>Rec</td><td>F1</td><td>Pre</td><td>Rec</td><td>F1</td></tr><tr><td>Humans</td><td colspan=\"2\">35967 0.9052</td><td colspan=\"8\">0.9354 0.9201 0.9209 0.9436 0.9321 0.9204 0.9445 0.9323</td></tr><tr><td>Female</td><td colspan=\"2\">16483 0.7464</td><td colspan=\"8\">0.7176 0.7317 0.8305 0.6964 0.7576 0.8216 0.7160 0.7652</td></tr><tr><td>Male</td><td colspan=\"2\">15530 0.7267</td><td colspan=\"8\">0.6889 0.7073 0.7917 0.7025 0.7444 0.7878 0.7213 0.7531</td></tr><tr><td>Animals</td><td colspan=\"2\">11259 0.8431</td><td colspan=\"8\">0.7613 0.8001 0.8895 0.6879 0.7758 0.9407 0.6337 0.7573</td></tr><tr><td>Adult</td><td colspan=\"2\">8792 0.5824</td><td colspan=\"8\">0.5296 0.5547 0.6915 0.4480 0.5438 0.6696 0.3592 0.4676</td></tr><tr><td>Middle Aged</td><td colspan=\"2\">8392 0.6323</td><td colspan=\"8\">0.5728 0.6011 0.7239 0.5654 0.6349 0.7375 0.5853 0.6527</td></tr><tr><td>Aged</td><td colspan=\"2\">6151 0.5616</td><td colspan=\"8\">0.5079 0.5334 0.7147 0.4076 0.5191 0.6937 0.4303 0.5312</td></tr><tr><td>Adolescent</td><td colspan=\"2\">3824 0.4641</td><td colspan=\"8\">0.3690 0.4111 0.5735 0.2529 0.3510 0.6583 0.2202 0.3300</td></tr><tr><td>Mice</td><td colspan=\"2\">3723 0.8386</td><td colspan=\"8\">0.7284 0.7796 0.8746 0.7268 0.7939 0.8984 0.7295 0.8052</td></tr><tr><td>Rats</td><td colspan=\"2\">1613 0.8461</td><td colspan=\"8\">0.7601 0.8008 0.9150 0.7204 0.8061 0.9123 0.7421 0.8185</td></tr><tr><td>Average</td><td colspan=\"2\">11173 0.7146</td><td colspan=\"8\">0.6571 0.6847 0.7926 0.6152 0.6927 0.8040 0.6082 0.6926</td></tr></table>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |