|
{ |
|
"paper_id": "W02-0301", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T05:10:50.661201Z" |
|
}, |
|
"title": "Tuning Support Vector Machines for Biomedical Named Entity Recognition", |
|
"authors": [ |
|
{ |
|
"first": "Jun", |
|
"middle": [ |
|
"'" |
|
], |
|
"last": "Ichi Kazama", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Takaki", |
|
"middle": [], |
|
"last": "Makino", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Tokyo", |
|
"location": { |
|
"addrLine": "Bunkyo-ku", |
|
"postCode": "113-0033", |
|
"settlement": "Tokyo", |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yoshihiro", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Central Research Laboratory", |
|
"institution": "Hitachi, Ltd", |
|
"location": { |
|
"postCode": "185-8601", |
|
"settlement": "Kokubunji", |
|
"region": "Tokyo", |
|
"country": "Japan" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [ |
|
"'" |
|
], |
|
"last": "Ichi Tsujii", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "JST (Japan Science and Technology Corporation", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We explore the use of Support Vector Machines (SVMs) for biomedical named entity recognition. To make the SVM training with the available largest corpus-the GENIA corpus-tractable, we propose to split the nonentity class into sub-classes, using part-of-speech information. In addition, we explore new features such as word cache and the states of an HMM trained by unsupervised learning. Experiments on the GENIA corpus show that our class splitting technique not only enables the training with the GENIA corpus but also improves the accuracy. The proposed new features also contribute to improve the accuracy. We compare our SVMbased recognition system with a system using Maximum Entropy tagging method.", |
|
"pdf_parse": { |
|
"paper_id": "W02-0301", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We explore the use of Support Vector Machines (SVMs) for biomedical named entity recognition. To make the SVM training with the available largest corpus-the GENIA corpus-tractable, we propose to split the nonentity class into sub-classes, using part-of-speech information. In addition, we explore new features such as word cache and the states of an HMM trained by unsupervised learning. Experiments on the GENIA corpus show that our class splitting technique not only enables the training with the GENIA corpus but also improves the accuracy. The proposed new features also contribute to improve the accuracy. We compare our SVMbased recognition system with a system using Maximum Entropy tagging method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Application of natural language processing (NLP) is now a key research topic in bioinformatics. Since it is practically impossible for a researcher to grasp all of the huge amount of knowledge provided in the form of natural language, e.g., journal papers, there is a strong demand for biomedical information extraction (IE), which extracts knowledge automatically from biomedical papers using NLP techniques (Ohta et al., 1997; Proux et al., 2000; Yakushiji et al., 2001) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 409, |
|
"end": 428, |
|
"text": "(Ohta et al., 1997;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 429, |
|
"end": 448, |
|
"text": "Proux et al., 2000;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 449, |
|
"end": 472, |
|
"text": "Yakushiji et al., 2001)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The process called named entity recognition, which finds entities that fill the information slots, e.g., proteins, DNAs, RNAs, cells etc., in the biomedical context, is an important building block in such biomedical IE systems. Conceptually, named entity recognition consists of two tasks: identification, which finds the region of a named entity in a text, and classification, which determines the se-mantic class of that named entity. The following illustrates biomedical named entity recognition.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\"Thus, CIITA PROTEIN not only activates the expression of class II genes DNA but recruits another B cell-specific coactivator to increase transcriptional activity of class II promoters DNA in B cells CELLTYPE .\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Machine learning approach has been applied to biomedical named entity recognition (Nobata et al., 1999; Yamada et al., 2000; Shimpuku, 2002) . However, no work has achieved sufficient recognition accuracy. One reason is the lack of annotated corpora for training as is often the case of a new domain. Nobata et al. (1999) and trained their model with only 100 annotated paper abstracts from the MEDLINE database (National Library of Medicine, 1999) , and Yamada et al. (2000) used only 77 annotated paper abstracts. In addition, it is difficult to compare the techniques used in each study because they used a closed and different corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 82, |
|
"end": 103, |
|
"text": "(Nobata et al., 1999;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 104, |
|
"end": 124, |
|
"text": "Yamada et al., 2000;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 125, |
|
"end": 140, |
|
"text": "Shimpuku, 2002)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 301, |
|
"end": 321, |
|
"text": "Nobata et al. (1999)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 412, |
|
"end": 448, |
|
"text": "(National Library of Medicine, 1999)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 455, |
|
"end": 475, |
|
"text": "Yamada et al. (2000)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To overcome such a situation, the GENIA corpus (Ohta et al., 2002) has been developed, and at this time it is the largest biomedical annotated corpus available to public, containing 670 annotated abstracts of the MEDLINE database.", |
|
"cite_spans": [ |
|
{ |
|
"start": 47, |
|
"end": 66, |
|
"text": "(Ohta et al., 2002)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Another reason for low accuracies is that biomedical named entities are essentially hard to recognize using standard feature sets compared with the named entities in newswire articles . Thus, we need to employ powerful machine learning techniques which can incorporate various and complex features in a consistent way.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Support Vector Machines (SVMs) (Vapnik, 1995) and Maximum Entropy (ME) method (Berger et al., 1996) are powerful learning methods that satisfy such requirements, and are applied successfully to other NLP tasks Nakagawa et al., 2001; Ratnaparkhi, 1996) . In this paper, we apply Support Vector Machines to biomedical named entity recognition and train them with the GENIA corpus. We formulate the named entity recognition as the classification of each word with context to one of the classes that represent region and named entity's semantic class. Although there is a previous work that applied SVMs to biomedical named entity task in this formulation (Yamada et al., 2000) , their method to construct a classifier using SVMs, one-vs-rest, fails to train a classifier with entire GENIA corpus, since the cost of SVM training is super-linear to the size of training samples. Even with a more feasible method, pairwise (Kre\u00dfel, 1998) , which is employed in , we cannot train a classifier in a reasonable time, because we have a large number of samples that belong to the non-entity class in this formulation. To solve this problem, we propose to split the non-entity class to several sub-classes, using part-ofspeech information. We show that this technique not only enables the training feasible but also improves the accuracy.", |
|
"cite_spans": [ |
|
{ |
|
"start": 31, |
|
"end": 45, |
|
"text": "(Vapnik, 1995)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 78, |
|
"end": 99, |
|
"text": "(Berger et al., 1996)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 210, |
|
"end": 232, |
|
"text": "Nakagawa et al., 2001;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 233, |
|
"end": 251, |
|
"text": "Ratnaparkhi, 1996)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 652, |
|
"end": 673, |
|
"text": "(Yamada et al., 2000)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 917, |
|
"end": 931, |
|
"text": "(Kre\u00dfel, 1998)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In addition, we explore new features such as word cache and the states of an unsupervised HMM for named entity recognition using SVMs. In the experiments, we show the effect of using these features and compare the overall performance of our SVMbased recognition system with a system using the Maximum Entropy method, which is an alternative to the SVM method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The GENIA corpus is an annotated corpus of paper abstracts taken from the MEDLINE database. Currently, 670 abstracts are annotated with named entity tags by biomedical experts and made available to public (Ver. 1.1). 1 These 670 abstracts are a subset of more than 5,000 abstracts obtained by the query \"human AND blood cell AND transcription factor\" to the MEDLINE database. Table 1 shows basic statistics of the GENIA corpus. Since the GE-NIA corpus is intended to be extensive, there exist 24 distinct named entity classes in the corpus. 2 Our task is to find a named entity region in a paper abstract and correctly select its class out of these 24 classes. This number of classes is relatively large compared with other corpora used in previous studies, and compared with the named entity task for newswire articles. This indicates that the task with the GENIA corpus is hard, apart from the difficulty of the biomedical domain itself.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 376, |
|
"end": 383, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The GENIA Corpus", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "1 Available via http://www-tsujii.is.s.u-tokyo.ac.jp/GENIA/ 2 The GENIA corpus also has annotations for conjunctive/disjunctive named entity expressions such as \"human B-or T-cell lines\" (Kim et al., 2001) . In this paper we ignore such expressions and consider that constituents in such expressions are annotated as a dummy class \"temp\". 3 Named Entity Recognition Using SVMs", |
|
"cite_spans": [ |
|
{ |
|
"start": 187, |
|
"end": 205, |
|
"text": "(Kim et al., 2001)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The GENIA Corpus", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We formulate the named entity task as the classification of each word with context to one of the classes that represent region information and named entity's semantic class. Several representations to encode region information are proposed and examined (Ramshaw and Marcus, 1995; Uchimoto et al., 2000; Kudo and Matsumoto, 2001) . In this paper, we employ the simplest BIO representation, which is also used in (Yamada et al., 2000) . We modify this representation in Section 5.1 in order to accelerate the SVM training.", |
|
"cite_spans": [ |
|
{ |
|
"start": 253, |
|
"end": 279, |
|
"text": "(Ramshaw and Marcus, 1995;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 280, |
|
"end": 302, |
|
"text": "Uchimoto et al., 2000;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 328, |
|
"text": "Kudo and Matsumoto, 2001)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 411, |
|
"end": 432, |
|
"text": "(Yamada et al., 2000)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Recognition as Classification", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In the BIO representation, the region information is represented as the class prefixes \"B-\" and \"I-\", and a class \"O\". B-means that the current word is at the beginning of a named entity, I-means that the current word is in a named entity (but not at the beginning), and O means the word is not in a named entity. For each named entity class C, class B-C and I-C are produced. Therefore, if we have N named entity classes, the BIO representation yields 2N + 1 classes, which will be the targets of a classifier. For instance, the following corresponds to the annotation \"Number of glucocorticoid receptors ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Recognition as Classification", |
|
"sec_num": "3.1" |
|
}, |
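{
"text": "To make the encoding concrete, the following minimal Python sketch (our illustration, not code from the paper; the token span for the entity is hypothetical) converts a tokenized sentence with labeled entity spans into BIO tags:\n\ndef to_bio(tokens, entities):\n    # entities: list of (start, end, cls) token spans, end exclusive\n    tags = ['O'] * len(tokens)\n    for start, end, cls in entities:\n        tags[start] = 'B-' + cls  # first word of the entity\n        for i in range(start + 1, end):\n            tags[i] = 'I-' + cls  # inside, but not at the beginning\n    return tags\n\ntokens = ['Number', 'of', 'glucocorticoid', 'receptors']\nprint(to_bio(tokens, [(2, 4, 'PROTEIN')]))\n# -> ['O', 'O', 'B-PROTEIN', 'I-PROTEIN']",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Named Entity Recognition as Classification",
"sec_num": "3.1"
},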
|
{ |
|
"text": "Support Vector Machines (SVMs) (Cortes and Vapnik, 1995) are powerful methods for learning a classifier, which have been applied successfully to many NLP tasks such as base phrase chunking and part-of-speech tagging (Nakagawa et al., 2001 ). The SVM constructs a binary classifier that outputs +1 or \u22121 given a sample vector x \u2208 R n . The decision is based on the separating hyperplane as follows.", |
|
"cite_spans": [ |
|
{ |
|
"start": 31, |
|
"end": 56, |
|
"text": "(Cortes and Vapnik, 1995)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 216, |
|
"end": 238, |
|
"text": "(Nakagawa et al., 2001", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Support Vector Machines", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "c(x) = \uf8f1 \uf8f4 \uf8f2 \uf8f4 \uf8f3 +1 if w \u2022 x + b > 0, w \u2208 R n , b \u2208 R, \u22121 otherwise", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Support Vector Machines", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The class for an input x, c(x), is determined by seeing which side of the space separated by the hyperplane, w \u2022 x + b = 0, the input lies on.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Support Vector Machines", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Given a set of labeled training samples", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Support Vector Machines", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "{(y 1 , x 1 ), \u2022 \u2022 \u2022 , (y L , x L )}, x i \u2208 R n , y i \u2208 {+1, \u22121},", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Support Vector Machines", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "the SVM training tries to find the optimal hyperplane, i.e., the hyperplane with the maximum margin. Margin is defined as the distance between the hyperplane and the training samples nearest to the hyperplane. Maximizing the margin insists that these nearest samples (support vectors) exist on both sides of the separating hyperplane and the hyperplane lies exactly at the midpoint of these support vectors. This margin maximization tightly relates to the fine generalization power of SVMs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Support Vector Machines", |
|
"sec_num": "3.2" |
|
}, |
|
{
"text": "Assuming that |w \u2022 x_i + b| = 1 at the support vectors without loss of generality, the SVM training can be formulated as the following optimization problem: 3 minimize (1/2)||w||^2 subject to y_i(w \u2022 x_i + b) \u2265 1, i = 1, \u2022\u2022\u2022 , L.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Support Vector Machines",
"sec_num": "3.2"
},
|
{ |
|
"text": "The solution of this problem is known to be written as follows, using only support vectors and weights for them.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Support Vector Machines", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "f (x) = w \u2022 x + b= i\u2208S V s y i \u03b1 i x \u2022 x i + b", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Support Vector Machines", |
|
"sec_num": "3.2" |
|
}, |
|
{
"text": "In the SVM learning, we can use a function k(x_i, x_j), called a kernel function, instead of the inner product in the above equation. Introducing a kernel function means mapping an original input x, using \u03a6(x) s.t. \u03a6(x_i) \u2022 \u03a6(x_j) = k(x_i, x_j), to another, usually higher dimensional, feature space, in which we construct the optimal hyperplane. By using kernel functions, we can construct a non-linear separating surface in the original feature space. Fortunately, such non-linear training does not increase the computational cost if the calculation of the kernel function is as cheap as the inner product. A polynomial function defined as (s x_i \u2022 x_j + r)^d is popular in applications of SVMs to NLP (Yamada et al., 2000; Kudo and Matsumoto, 2001), because it has an intuitively sound interpretation: each dimension of the mapped space is a (weighted) conjunction of d features in the original sample.",
"cite_spans": [
{
"start": 704,
"end": 725,
"text": "(Yamada et al., 2000;",
"ref_id": "BIBREF24"
},
{
"start": 726,
"end": 751,
"text": "Kudo and Matsumoto, 2001)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Support Vector Machines",
"sec_num": "3.2"
},
{
"text": "3 For many real-world problems where the samples may be inseparable, we allow the constraints to be broken with some penalty. In the experiments, we use the so-called 1-norm soft margin formulation: minimize (1/2)||w||^2 + C \u03a3_{i=1}^{L} \u03be_i subject to y_i(w \u2022 x_i + b) \u2265 1 \u2212 \u03be_i, \u03be_i \u2265 0, i = 1, \u2022\u2022\u2022 , L.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Support Vector Machines",
"sec_num": "3.2"
},
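{
"text": "As an illustration of Equation (1) with a kernel substituted for the inner product, the following sketch (our own, not the authors' implementation; the support vectors, weights, labels, and bias are assumed to come from a trained SVM) computes the decision function with a polynomial kernel:\n\nimport numpy as np\n\ndef poly_kernel(xi, xj, s=0.01, r=1.0, d=2):\n    # polynomial kernel (s * xi . xj + r)^d\n    return (s * np.dot(xi, xj) + r) ** d\n\ndef svm_decision(x, svs, ys, alphas, b, kernel=poly_kernel):\n    # f(x) = sum over support vectors of y_i * alpha_i * k(x, x_i), plus b\n    return sum(y * a * kernel(x, xi) for xi, y, a in zip(svs, ys, alphas)) + b\n\ndef classify(x, svs, ys, alphas, b):\n    return +1 if svm_decision(x, svs, ys, alphas, b) > 0 else -1\n\nThe defaults s = 0.01 and r = 1.0 follow the polynomial kernel settings mentioned in the paper's back matter.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Support Vector Machines",
"sec_num": "3.2"
},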
|
{ |
|
"text": "As described above, the standard SVM learning constructs a binary classifier. To make a named entity recognition system based on the BIO representation, we require a multi-class classifier. Among several methods for constructing a multi-class SVM (Hsu and Lin, 2002) , we use a pairwise method proposed by Kre\u00dfel (1998) instead of the one-vs-rest method used in (Yamada et al., 2000) , and extend the BIO representation to enable the training with the entire GENIA corpus. Here we describe the one-vs-rest method and the pairwise method to show the necessity of our extension. Both one-vs-rest and pairwise methods construct a multi-class classifier by combining many binary SVMs. In the following explanation, K denotes the number of the target classes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 247, |
|
"end": 266, |
|
"text": "(Hsu and Lin, 2002)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 306, |
|
"end": 319, |
|
"text": "Kre\u00dfel (1998)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 362, |
|
"end": 383, |
|
"text": "(Yamada et al., 2000)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Class SVMs", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "one-vs-rest Construct K binary SVMs, each of which determines whether the sample should be classified as class i or as the other classes. The output is the class with the maximum f (x) in Equation 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Class SVMs", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "pairwise Construct K(K \u2212 1)/2 binary SVMs, each of which determines whether the sample should be classified as class i or as class j. Each binary SVM has one vote, and the output is the class with the maximum votes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Class SVMs", |
|
"sec_num": "3.3" |
|
}, |
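{
"text": "The difference between the two schemes can be seen in how their binary training sets are formed; the sketch below (our illustration) builds them from labeled samples. One-vs-rest gives every classifier all L samples, while each pairwise classifier sees only the samples of its two classes:\n\nfrom itertools import combinations\n\ndef one_vs_rest_tasks(samples):\n    # samples: list of (label, x); K binary tasks, each over ALL samples\n    classes = sorted({lab for lab, _ in samples})\n    return {c: [(+1 if lab == c else -1, x) for lab, x in samples]\n            for c in classes}\n\ndef pairwise_tasks(samples):\n    # K(K-1)/2 binary tasks, each over the two classes' samples only\n    classes = sorted({lab for lab, _ in samples})\n    return {(ci, cj): [(+1 if lab == ci else -1, x)\n                       for lab, x in samples if lab in (ci, cj)]\n            for ci, cj in combinations(classes, 2)}",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Multi-Class SVMs",
"sec_num": "3.3"
},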
|
{ |
|
"text": "Because the SVM training is a quadratic optimization program, its cost is super-linear to the size of the training samples even with the tailored techniques such as SMO (Platt, 1998) and kernel evaluation caching (Joachims, 1998) . Let L be the number of the training samples, then the one-vs-rest method takes time in K \u00d7 O S V M (L). The BIO formulation produces one training sample per word, and the training with the GENIA corpus involves over 100,000 training samples as can be seen from Table 1. Therefore, it is apparent that the one-vsrest method is impractical with the GENIA corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 169, |
|
"end": 182, |
|
"text": "(Platt, 1998)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 213, |
|
"end": 229, |
|
"text": "(Joachims, 1998)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Class SVMs", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "On the other hand, if target classes are equally distributed, the pairwise method will take time in", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Class SVMs", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "K(K\u2212 1)/2 \u00d7 O S V M (2L/K).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Class SVMs", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "This method is worthwhile because each training is much faster, though it requires the training of (K \u2212 1)/2 times more classifiers. It is also reported that the pairwise method achieves higher accuracy than other methods in some benchmarks (Kre\u00dfel, 1998; Hsu and Lin, 2002) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 241, |
|
"end": 255, |
|
"text": "(Kre\u00dfel, 1998;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 256, |
|
"end": 274, |
|
"text": "Hsu and Lin, 2002)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-Class SVMs", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "An input x to an SVM classifier is a feature representation of the word to be classified and its context. We use a bit-vector representation, each dimension of which indicates whether the input matches with a certain feature. The following illustrates the wellused features for the named entity recognition task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Features", |
|
"sec_num": "3.4" |
|
}, |
|
{
"text": "w_{k,i} = 1 if the word at k, W_k, is the ith word in the vocabulary V; 0 otherwise (word feature)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Input Features",
"sec_num": "3.4"
},
{
"text": "pos_{k,i} = 1 if W_k is assigned the ith part-of-speech tag; 0 otherwise (part-of-speech feature)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Input Features",
"sec_num": "3.4"
},
{
"text": "pc_{k,i} = 1 if W_k (k < 0) was assigned the ith class; 0 otherwise (preceding class feature)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Input Features",
"sec_num": "3.4"
},
|
{ |
|
"text": "In the above definitions, k is a relative word position from the word to be classified. A negative value represents a preceding word's position, and a positive value represents a following word's position. Note that we assume that the classification proceeds left to right as can be seen in the definition of the preceding class feature. For the SVM classification, we does not use a dynamic argmax-type classification such as the Viterbi algorithm, since it is difficult to define a good comparable value for the confidence of a prediction such as probability. The consequences of this limitation will be discussed with the experimental results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Features", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "Features usually form a group with some variables such as the position unspecified. In this paper, we instantiate all features, i.e., instantiate for all i, for a group and a position. Then, it is convenient to denote a set of features for a group g and a position k as g k (e.g., w k and pos k ). Using this notation, we write a feature set as {w \u22121 , w 0 , pre \u22121 , pre 0 , pc \u22121 }. 4 This feature description derives the following input vector. 5", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Features", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "x = {w \u22121,1 , w \u22121,2 , \u2022 \u2022 \u2022 , w \u22121,|V| , w 0,1 , \u2022 \u2022 \u2022 , w 0,|V| , pre \u22121,1 , \u2022 \u2022 \u2022 , pre 0,|P| , pc \u22121,1 , \u2022 \u2022 \u2022 , pc \u22121,K }", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Input Features", |
|
"sec_num": "3.4" |
|
}, |
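{
"text": "As a sketch of how such a sparse bit vector can be built in practice (hypothetical helper code; the 3-character prefix is a toy stand-in for the paper's prefix list, and positions are assumed to stay within the sentence), each (group, position, value) triple is mapped to one dimension, and only the dimensions with value one are stored:\n\ndef active_features(words, prev_classes, k):\n    # active features at position k for the set {w_-1, w_0, pre_-1, pre_0, pc_-1}\n    feats = []\n    for off in (-1, 0):\n        feats.append(('w', off, words[k + off]))\n        feats.append(('pre', off, words[k + off][:3]))\n    feats.append(('pc', -1, prev_classes[k - 1]))\n    return feats\n\ndef vectorize(feats, index):\n    # index: dict mapping feature triples to dimensions, grown on the fly;\n    # the returned list holds the dimensions whose value is one\n    for f in feats:\n        index.setdefault(f, len(index))\n    return sorted(index[f] for f in feats)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Input Features",
"sec_num": "3.4"
},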
|
{ |
|
"text": "The Maximum Entropy method, with which we compare our SVM-based method, defines the probability that the class is c given an input vector x as follows.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Recognition Using ME Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "P(c|x) = 1 Z(x) i \u03b1 f i (c,x) i ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Recognition Using ME Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "where Z(x) is a normalization constant, and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Recognition Using ME Model", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "f i (c, x)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Recognition Using ME Model", |
|
"sec_num": "4" |
|
}, |
|
{
"text": "is a feature function. A feature function is defined in the same way as the features in the SVM learning, except that it includes c, e.g., f(c, x) = (c is the jth class) \u2227 w_{k,i}(x). If x contains previously assigned classes, then the most probable class sequence, \u0109_1^T = argmax_{c_1, \u2022\u2022\u2022 , c_T} \u03a0_{t=1}^{T} P(c_t | x_t),",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Named Entity Recognition Using ME Model",
"sec_num": "4"
},
|
{ |
|
"text": "is searched by using the Viterbi-type algorithm. We use the maximum entropy tagging method described in (Kazama et al., 2001) for the experiments, which is a variant of (Ratnaparkhi, 1996) modified to use HMM state features.", |
|
"cite_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 125, |
|
"text": "(Kazama et al., 2001)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 169, |
|
"end": 188, |
|
"text": "(Ratnaparkhi, 1996)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named Entity Recognition Using ME Model", |
|
"sec_num": "4" |
|
}, |
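{
"text": "For illustration, the conditional probability above can be computed as follows (a sketch under our reading of the formula; the weights alpha_i are assumed to come from ME training, which is not shown):\n\ndef me_prob(c, x, classes, feature_fns, alphas):\n    # P(c|x) = (1/Z(x)) * prod_i alpha_i^{f_i(c, x)}\n    def unnorm(cls):\n        s = 1.0\n        for f, a in zip(feature_fns, alphas):\n            s *= a ** f(cls, x)  # each feature function returns 0 or 1\n        return s\n    z = sum(unnorm(cls) for cls in classes)  # normalization constant Z(x)\n    return unnorm(c) / z",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Named Entity Recognition Using ME Model",
"sec_num": "4"
},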
|
{ |
|
"text": "In Section 3.3, we described that if target classes are equally distributed, the pairwise method will reduce the training cost. In our case, however, we have a very unbalanced class distribution with a large number of samples belonging to the class \"O\" (see Table 1 ). This leads to the same situation with the one-vsrest method, i.e., if L O is the number of the samples belonging to the class \"O\", then the most dominant part of the training takes time in", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 258, |
|
"end": 266, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Class Splitting Technique", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "K \u00d7 O S V M (L O ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Class Splitting Technique", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "One solution to this unbalanced class distribution problem is to split the class \"O\" into several subclasses effectively. This will reduce the training cost for the same reason that the pairwise method works.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Class Splitting Technique", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose to split the non-entity class according to part-of-speech (POS) information of the word. That is, given a part-of-speech tag set POS, we produce new |POS| classes, \"Op\" p \u2208 POS. Since we use a POS tagger that outputs 45 Penn Treebank's POS tags in this paper, we have new 45 sub-classes which correspond to nonentity regions such as \"O-NNS\" (plural nouns), \"O-JJ\" (adjectives), and \"O-DT\" (determiners).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Class Splitting Technique", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Splitting by POS information seems useful for improving the system accuracy as well, because in the named entity recognition we must discriminate between nouns in named entities and nouns in ordinal noun phrases. In the experiments, we show this class splitting technique not only enables the feasible training but also improves the accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Class Splitting Technique", |
|
"sec_num": "5.1" |
|
}, |
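{
"text": "The splitting itself is a simple relabeling, as the following sketch (our illustration with hypothetical tags) shows; entity tags are untouched, and each \"O\" becomes a POS-specific sub-class:\n\ndef split_o_class(bio_tags, pos_tags):\n    # rewrite each 'O' to 'O-' plus the word's POS tag, e.g. 'O-NNS', 'O-DT'\n    return [('O-' + pos) if tag == 'O' else tag\n            for tag, pos in zip(bio_tags, pos_tags)]\n\ntags = ['O', 'B-PROTEIN', 'I-PROTEIN', 'O']\npos = ['DT', 'NN', 'NNS', 'IN']\nprint(split_o_class(tags, pos))  # ['O-DT', 'B-PROTEIN', 'I-PROTEIN', 'O-IN']",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Class Splitting Technique",
"sec_num": "5.1"
},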
|
{ |
|
"text": "In addition to the standard features, we explore word cache feature and HMM state feature, mainly to solve the data sparseness problem.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Cache and HMM Features", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Although the GENIA corpus is the largest annotated corpus for the biomedical domain, it is still small compared with other linguistic annotated corpora such as the Penn Treebank. Thus, the data sparseness problem is severe, and must be treated carefully. Usually, the data sparseness is prevented by using more general features that apply to a broader set of instances (e.g., disjunctions). While polynomial kernels in the SVM learning can effectively generate feature conjunctions, kernel functions that can effectively generate feature disjunctions are not known. Thus, we should explicitly add dimensions for such general features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Cache and HMM Features", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The word cache feature is defined as the disjunction of several word features as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Cache and HMM Features", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "wc k{k 1 ,\u2022\u2022\u2022 ,k n },i \u2261 \u2228 k\u2208k w k,i", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Cache and HMM Features", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We intend that the word cache feature captures the similarities of the patterns with a common key word such as follows.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Cache and HMM Features", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "(a) \"human W \u22122 W \u22121 W 0 \" and \"human W \u22121 W 0 \" (b) \"W 0 gene\" and \"W 0 W 1 gene\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Cache and HMM Features", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We use a left word cache defined as lwc k,i \u2261 wc {\u2212k,\u2022\u2022\u2022 ,0},i , and a right word cache defined as rwc k,i \u2261 wc {1,\u2022\u2022\u2022 ,k},i for patterns like (a) and (b) in the above example respectively. Kazama et al. (2001) proposed to use as features the Viterbi state sequence of a hidden Markov model (HMM) to prevent the data sparseness problem in the maximum entropy tagging model. An HMM is trained with a large number of unannotated texts by using an unsupervised learning method. Because the number of states of the HMM is usually made smaller than |V|, the Viterbi states give smoothed but maximally informative representations of word patterns tuned for the domain, from which the raw texts are taken.", |
|
"cite_spans": [ |
|
{ |
|
"start": 190, |
|
"end": 210, |
|
"text": "Kazama et al. (2001)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Cache and HMM Features", |
|
"sec_num": "5.2" |
|
}, |
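{
"text": "Under our reading of the definitions above, the caches can be computed as follows (a sketch; the window width and vocabulary are parameters): a cache feature for key word i fires if any word feature w_{k,i} in the window fires:\n\ndef left_word_cache(words, k, width, vocab):\n    # lwc: vocabulary words seen at offsets -width..0 from position k\n    return {w for w in words[max(0, k - width):k + 1] if w in vocab}\n\ndef right_word_cache(words, k, width, vocab):\n    # rwc: vocabulary words seen at offsets 1..width from position k\n    return {w for w in words[k + 1:k + 1 + width] if w in vocab}\n\n# e.g. both 'human W-2 W-1 W0' and 'human W-1 W0' put 'human' into the left\n# cache, so patterns sharing the key word receive a common active dimension.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Word Cache and HMM Features",
"sec_num": "5.2"
},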
|
{ |
|
"text": "The HMM feature is defined in the same way as the word feature as follows.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Cache and HMM Features", |
|
"sec_num": "5.2" |
|
}, |
|
{
"text": "hmm_{k,i} = 1 if the Viterbi state for W_k is the ith state in the HMM's state set H; 0 otherwise (HMM feature)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Word Cache and HMM Features",
"sec_num": "5.2"
},
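{
"text": "For illustration, the sketch below (our own; the HMM parameters are assumed to come from unsupervised training, e.g. with Baum-Welch, which the paper does not detail) computes the Viterbi state sequence whose states then serve as the feature values:\n\nimport numpy as np\n\ndef viterbi_states(obs, log_init, log_trans, log_emit):\n    # obs: word ids; returns the most likely hidden state for each word\n    T, H = len(obs), len(log_init)\n    delta = log_init + log_emit[:, obs[0]]\n    back = np.zeros((T, H), dtype=int)\n    for t in range(1, T):\n        scores = delta[:, None] + log_trans  # scores[i, j]: from state i to j\n        back[t] = scores.argmax(axis=0)\n        delta = scores.max(axis=0) + log_emit[:, obs[t]]\n    states = [int(delta.argmax())]\n    for t in range(T - 1, 0, -1):\n        states.append(int(back[t][states[-1]]))\n    return states[::-1]  # hmm_{k,i} = 1 iff states[k] == i",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Word Cache and HMM Features",
"sec_num": "5.2"
},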
|
{ |
|
"text": "In the experiments, we train an HMM using raw MEDLINE abstracts in the GENIA corpus, and show that the HMM state feature can improve the accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Cache and HMM Features", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Towards practical named entity recognition using SVMs, we have tackled the following implementation issues. It would be impossible to carry out the experiments in a reasonable time without such efforts. Parallel Training: The training of pairwise SVMs has trivial parallelism, i.e., each SVM can be trained separately. Since computers with two or more CPUs are not expensive these days, parallelization is very practical solution to accelerate the training of pairwise SVMs. Fast Winner Finding: Although the pairwise method reduces the cost of training, it greatly increases the number of classifications needed to determine the class of one sample. For example, for our experiments using the GENIA corpus, the BIO representation with class splitting yields more than 4,000 classification pairs. Fortunately, we can stop classifications when a class gets K \u2212 1 votes and this stopping greatly saves classification time (Kre\u00dfel, 1998) . Moreover, we can stop classifications when the current votes of a class is greater than the others' possible votes. Support Vector Caching: In the pairwise method, though we have a large number of classifiers, each classifier shares some support vectors with other classifiers. By storing the bodies of all support vectors together and letting each classifier have only the weights, we can greatly reduce the size of the classifier. The sharing of support vectors also can be exploited to accelerate the classification by caching the value of the kernel function between a support vector and a classifiee sample.", |
|
"cite_spans": [ |
|
{ |
|
"start": 920, |
|
"end": 934, |
|
"text": "(Kre\u00dfel, 1998)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation Issues", |
|
"sec_num": "5.3" |
|
}, |
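{
"text": "The fast winner-finding strategy can be rendered as follows (a sketch of our reading of the description above; predict is assumed to wrap a trained pairwise SVM): voting stops once some class has K - 1 votes, the maximum possible, or once no other class can catch up with the current leader in the remaining pairs:\n\ndef vote_with_early_stop(pairs, predict, K):\n    # pairs: list of (ci, cj); predict(ci, cj) returns the winning class\n    votes = {}\n    remaining = len(pairs)\n    for ci, cj in pairs:\n        winner = predict(ci, cj)\n        votes[winner] = votes.get(winner, 0) + 1\n        remaining -= 1\n        if votes[winner] == K - 1:  # K - 1 is the maximum votes per class\n            return winner\n        best = max(votes, key=votes.get)\n        others = max((v for c, v in votes.items() if c != best), default=0)\n        if votes[best] > others + remaining:  # nobody else can catch up\n            return best\n    return max(votes, key=votes.get)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Implementation Issues",
"sec_num": "5.3"
},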
|
{ |
|
"text": "To conduct experiments, we divided 670 abstracts of the GENIA corpus (Ver. 1.1) into the training part (590 abstracts; 4,487 sentences; 133,915 words) and the test part (80 abstracts; 622 sentences; 18,211 words). 6 Texts are tokenized by using Penn Treebank's tokenizer. An HMM for the HMM state features was trained with raw abstracts of the GENIA corpus (39,116 sentences). 7 The number of states is 160. The vocabulary for the word feature is constructed by taking the most frequent 10,000 words from the above raw abstracts, the prefix/suffix/prefix list by taking the most frequent 10,000 prefixes/suffixes/substrings. 8 The performance is measured by precision, recall, and F-score, which are the standard measures for the named entity recognition. Systems based on the BIO representation may produce an inconsistent class sequence such as \"O B-DNA I-RNA O\". We interpret such outputs as follows: once a named entity starts with \"B-C\" then we interpret that the named entity with class \"C\" ends only when we see another \"B-\" or \"O-\" tag.", |
|
"cite_spans": [ |
|
{ |
|
"start": 377, |
|
"end": 378, |
|
"text": "7", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 625, |
|
"end": 626, |
|
"text": "8", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "6" |
|
}, |
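{
"text": "The interpretation rule for inconsistent sequences can be written as follows (a sketch of our reading): an entity opened by \"B-C\" absorbs the following tokens, whatever their \"I-\" class, until another \"B-\" tag or a (possibly POS-split) \"O\" tag appears:\n\ndef decode_entities(tags):\n    # returns (start, end, cls) spans with end exclusive\n    spans, start, cls = [], None, None\n    for i, tag in enumerate(tags + ['O']):  # sentinel closes a trailing entity\n        if tag.startswith('B-') or tag == 'O' or tag.startswith('O-'):\n            if start is not None:\n                spans.append((start, i, cls))\n                start, cls = None, None\n            if tag.startswith('B-'):\n                start, cls = i, tag[2:]\n        # bare 'I-...' tokens are absorbed into the open entity, if any\n    return spans\n\nprint(decode_entities(['O', 'B-DNA', 'I-RNA', 'O']))  # [(1, 3, 'DNA')]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experiments",
"sec_num": "6"
},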
|
{ |
|
"text": "We have implemented SMO algorithm (Platt, 1998) and techniques described in (Joachims, 1998) for soft margin SVMs in C++ programming language, and implemented support codes for pairwise classification and parallel training in Java programming language. To obtain POS information required for features and class splitting, we used an English POS tagger described in (Kazama et al., 2001 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 34, |
|
"end": 47, |
|
"text": "(Platt, 1998)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 76, |
|
"end": 92, |
|
"text": "(Joachims, 1998)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 365, |
|
"end": 385, |
|
"text": "(Kazama et al., 2001", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "First, we show the effect of the class splitting described in Section 5.1. Varying the size of training data, we compared the change in the training time and the accuracy with and without the class splitting. We used a feature set { w, pre, suf , sub, pos [\u22122,\u2022\u2022\u2022 ,2] , pc [\u22122,\u22121] } and the inner product kernel. 9 The training time was measured on a machine with four 700MHz PentiumIIIs and 16GB RAM. Table 2 shows the results of the experiments. Figure 1 shows the results graphically. We can see that without splitting we soon suffer from super-linearity of the SVM training, while with splitting we can handle the training with over 100,000 samples in a reasonable time. It is very important that the splitting technique does not sacrifice the accuracy for speed, rather improves the accuracy.", |
|
"cite_spans": [ |
|
{ |
|
"start": 256, |
|
"end": 267, |
|
"text": "[\u22122,\u2022\u2022\u2022 ,2]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 402, |
|
"end": 409, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 448, |
|
"end": 456, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Class Splitting Technique", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "In this experiment, we see the effect of the word cache feature and the HMM state feature described in Section 3.4. The effect is assessed by the accuracy gain observed by adding each feature set to a base feature set and the accuracy degradation observed by subtracting it from a (com- w, pre, suf , sub, pos, hmm [\u2212k,\u2022\u2022\u2022 ,k] , lwc k , rwc k , pc [\u22122,\u22121] } with k = 2 and k = 3 respectively. The kernel function is the inner product. We can see that word cache and HMM state features surely improve the recognition accuracy. In the table, we also included the accuracy change for other standard features. Preceeding classes and suffixes are definitely helpful. On the other hand, the substring feature is not effective in our setting. Although the effects of part-of-speech tags and prefixes are not so definite, it can be said that they are practically effective since they show positive effects in the case of the maximum performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 287, |
|
"end": 326, |
|
"text": "w, pre, suf , sub, pos, hmm [\u2212k,\u2022\u2022\u2022 ,k]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Cache and HMM State Features", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "In this set of experiments, we compare our SVM-based system with a named entity recognition system based on the Maximum Entropy method. For the SVM system, we used the feature set { w, pre, suf , pos, hmm [\u22123,\u2022\u2022\u2022 ,3] , lwc 3 , rwc 3 , pc [\u22122,\u22121] }, which is shown to be the best in the previous experiment. The compared system is a maximum entropy tagging model described in (Kazama et al., 2001) . Though it supports several character type features such as number and hyphen and some conjunctive features such as word n-gram, we do not use these features to compare the performance under as close a condition as possible. The feature set used in the maximum entropy system is expressed as { w, pre, suf , pos, hmm [\u22122,\u2022\u2022\u2022 ,2] , pc [\u22122,\u22121] }. 10 Both systems use the BIO representation with splitting. Table 4 shows the accuracies of both systems. For the SVM system, we show the results with the inner product kernel and several polynomial kernels. The row \"All (id)\" shows the accuracy from the view- point of the identification task, which only finds the named entity regions. The accuracies for several major entity classes are also shown. The SVM system with the 2-dimensional polynomial kernel achieves the highest accuracy. This comparison may be unfair since a polynomial kernel has the effect of using conjunctive features, while the ME system does not use such conjunctive features. Nevertheless, the facts: we can introduce the polynomial kernel very easily; there are very few parameters to be tuned; 11 we could achieve the higher accuracy; show an advantage of the SVM system.", |
|
"cite_spans": [ |
|
{ |
|
"start": 205, |
|
"end": 216, |
|
"text": "[\u22123,\u2022\u2022\u2022 ,3]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 375, |
|
"end": 396, |
|
"text": "(Kazama et al., 2001)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 715, |
|
"end": 726, |
|
"text": "[\u22122,\u2022\u2022\u2022 ,2]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 802, |
|
"end": 809, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison with the ME Method", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "It will be interesting to discuss why the SVM systems with the inner product kernel (and the polynomial kernel with d = 1) are outperformed by the ME system. We here discuss two possible reasons. The first is that the SVM system does not use a dynamic decision such as the Viterbi algorithm, while the ME system uses it. To see this, we degrade the ME system so that it predicts the classes deterministically without using the Viterbi algorithm. We found that this system only marks 51.54 in F-score. Thus, it can be said that a dynamic decision is important for this named entity task. However, although a method to convert the outputs of a binary SVM to probabilistic values is proposed (Platt, 1999) , the way to obtain meaningful probabilistic values needed in Viterbitype algorithms from the outputs of a multi-class SVM is unknown. Solving this problem is certainly a part of the future work. The second possible reason is that the SVM system in this paper does not use any cut-off or feature truncation method to remove data noise, while the ME system uses a simple feature cut-off method. 12 We observed that the ME system without the cut-off only marks 49.11 in F-score. Thus, such a noise reduction method is also important. However, the cut-off method for the ME method cannot be applied without modification since, as described in Section 3.4, the definition of the features are different in the two approaches. It can be said the features in the ME method is \"finer\" than those in SVMs. In this sense, the ME method allows us more flexible feature selection. This is an advantage of the ME method.", |
|
"cite_spans": [ |
|
{ |
|
"start": 689, |
|
"end": 702, |
|
"text": "(Platt, 1999)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1097, |
|
"end": 1099, |
|
"text": "12", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison with the ME Method", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "The accuracies achieved by both systems can be said high compared with those of the previous methods if we consider that we have 24 named entity classes. However, the accuracies are not sufficient for a practical use. Though higher accuracy will be achieved with a larger annotated corpus, we should also explore more effective features and find effective feature combination methods to exploit such a large corpus maximally.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison with the ME Method", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "We have described the use of Support Vector Machines for the biomedical named entity recognition task. To make the training of SVMs with the GE-NIA corpus practical, we proposed to split the nonentity class by using POS information. In addition, we explored the new types of features, word cache and HMM states, to avoid the data sparseness problem. In the experiments, we have shown that the class splitting technique not only makes training feasible but also improves the accuracy. We have also shown that the proposed new features also improve the accuracy and the SVM system with the polynomial kernel function outperforms the ME-based system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We will further compress this as { w, pre[\u22121,0] , pc \u22121 }. 5 Although a huge number of features are instantiated, only a few features have value one for a given g and k pair.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Randomly selected set used in(Shimpuku, 2002). We do not use paper titles, while he used.7 These do not include the sentences in the test part. 8 These are constructed using the training part to make the comparison with the ME method fair.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Soft margin constant C is 1.0 throughout the experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "When the width becomes [\u22123, \u2022 \u2022 \u2022 , 3], the accuracy degrades (53.72 to 51.73 in F-score).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "C, s, r, and d 12 Features that occur less than 10 times are removed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank Dr. Jin-Dong Kim for providing us easy-to-use preprocessed training data. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "polynomial (s = 0.01, r = 1.0)) type(2,782) 50.7 / 49.8 / 50.2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ME inner product", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "A maximum entropy approach to natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Berger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Della Pietra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Della Pietra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Computational Linguistics", |
|
"volume": "22", |
|
"issue": "1", |
|
"pages": "39--71", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. L. Berger, S. A. Della Pietra, and V. J. Della Pietra. 1996. A maximum entropy approach to natural language processing. Computational Linguistics, 22(1):39-71.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Extracting the names of genes and gene products with a hidden Markov model", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Collier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Nobata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proc. of COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "201--207", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "N. Collier, C. Nobata, and J. Tsujii. 2000. Extracting the names of genes and gene products with a hidden Markov model. In Proc. of COLING 2000, pages 201-207.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Support vector networks. Machine Learning", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Cortes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Vapnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "", |
|
"volume": "20", |
|
"issue": "", |
|
"pages": "273--297", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Cortes and V. Vapnik. 1995. Support vector networks. Ma- chine Learning, 20:273-297.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A comparison of methods for multiclass Support Vector Machines", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Hsu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "IEEE Transactions on Neural Networks", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Hsu and C. Lin. 2002. A comparison of methods for multi- class Support Vector Machines. In IEEE Transactions on Neural Networks. to appear.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Making large-scale support vector machine learning practical", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Joachims", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Advances in Kernel Methods", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "169--184", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Joachims. 1998. Making large-scale support vector machine learning practical. In Advances in Kernel Methods, pages 169-184. The MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A maximum entropy tagger with unsupervised hidden markov models", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Kazama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Miyao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proc. of the 6th NLPRS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "333--340", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Kazama, Y. Miyao, and J. Tsujii. 2001. A maximum entropy tagger with unsupervised hidden markov models. In Proc. of the 6th NLPRS, pages 333-340.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "XMLbased linguistic annotation of corpus", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Tateisi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Mima", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proc. of the First NLP and XML Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Kim, T. Ohta, Y. Tateisi, H. Mima, and J. Tsujii. 2001. XML- based linguistic annotation of corpus. In Proc. of the First NLP and XML Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Pairwise classification and support vector machines", |
|
"authors": [ |
|
{ |
|
"first": "U", |
|
"middle": [], |
|
"last": "Kre\u00dfel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Advances in Kernel Methods", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "255--268", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "U. Kre\u00dfel. 1998. Pairwise classification and support vector machines. In Advances in Kernel Methods, pages 255-268. The MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Use of support vector learning for chunk identification", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Matsumoto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proc. of CoNLL-2000 and LLL-2000", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Kudo and Y. Matsumoto. 2000. Use of support vector learn- ing for chunk identification. In Proc. of CoNLL-2000 and LLL-2000.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Chunking with Support Vector Machines", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Matsumoto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proc. of NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "192--199", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Kudo and Y. Matsumoto. 2001. Chunking with Support Vector Machines. In Proc. of NAACL 2001, pages 192-199.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Unknown word guessing and part-of-speech tagging using support vector machines", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Nakagawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Kudoh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Matsumoto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proc. of the 6th NLPRS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "325--331", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Nakagawa, T. Kudoh, and Y. Matsumoto. 2001. Unknown word guessing and part-of-speech tagging using support vec- tor machines. In Proc. of the 6th NLPRS, pages 325-331.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Automatic term identification and classification in biology texts", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Nobata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Collier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proc. of the 5th NLPRS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "369--374", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Nobata, N. Collier, and J. Tsujii. 1999. Automatic term identification and classification in biology texts. In Proc. of the 5th NLPRS, pages 369-374.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Comparison between tagged corpora for the named entity task", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Nobata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Collier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proc. of the Workshop on Comparing Corpora (at ACL'2000)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "20--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Nobata, N. Collier, and J. Tsujii. 2000. Comparison between tagged corpora for the named entity task. In Proc. of the Workshop on Comparing Corpora (at ACL'2000), pages 20- 27.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Automatic construction of knowledge base from biological papers", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Yamamoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Okazaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Uchiyama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Takagi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proc. of the 5th ISMB", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "218--225", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y. Ohta, Y. Yamamoto, T. Okazaki, I. Uchiyama, and T. Tak- agi. 1997. Automatic construction of knowledge base from biological papers. In Proc. of the 5th ISMB, pages 218-225.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "The GENIA corpus: An annotated research abstract corpus in molecular biology domain", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Ohta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Tateisi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Mima", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tsujii", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proc. of HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Ohta, Y. Tateisi, J. Kim, H. Mima, and Tsujii J. 2002. The GENIA corpus: An annotated research abstract corpus in molecular biology domain. In Proc. of HLT 2002.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Fast training of support vector machines using sequential minimal optimization", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Platt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Advances in Kernel Methods", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "185--208", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. C. Platt. 1998. Fast training of support vector machines us- ing sequential minimal optimization. In Advances in Kernel Methods, pages 185-208. The MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Probabilistic outputs for support vector machines and comparisons to regularized likelihood methods", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Platt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Advances in Large Margin Classifiers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. C. Platt. 1999. Probabilistic outputs for support vector ma- chines and comparisons to regularized likelihood methods. Advances in Large Margin Classifiers.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "A pragmatic information extraction strategy for gathering data on genetic interactions", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Proux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Prechenmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Julliard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proc. of the 8th ISMB", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "279--285", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Proux, F. Prechenmann, and L. Julliard. 2000. A pragmatic information extraction strategy for gathering data on genetic interactions. In Proc. of the 8th ISMB, pages 279-285.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Text chunking using transformation-based learning", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Ramshaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Marcus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Proc. of the 3rd ACL Workshop on Very Large Corpora", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "L. A. Ramshaw and M. P. Marcus. 1995. Text chunking us- ing transformation-based learning. In Proc. of the 3rd ACL Workshop on Very Large Corpora.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A maximum entropy model for partof-speech tagging", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Ratnaparkhi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Proc. of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "133--142", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Ratnaparkhi. 1996. A maximum entropy model for part- of-speech tagging. In Proc. of the Conference on Empirical Methods in Natural Language Processing, pages 133-142.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A medical/biological term recognizer with a term hidden Markov model incorporating multiple information sources. A master thesis", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Shimpuku", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Shimpuku. 2002. A medical/biological term recognizer with a term hidden Markov model incorporating multiple infor- mation sources. A master thesis. University of Tokyo.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Named entity extraction based on a maximum entropy model and transformation rules", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Uchimoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Murata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Ozaku", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Isahara", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proc. of the 38th ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "326--335", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "K. Uchimoto, M. Murata, Q. Ma, H. Ozaku, and H. Isahara. 2000. Named entity extraction based on a maximum entropy model and transformation rules. In Proc. of the 38th ACL, pages 326-335.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "The Nature of Statistical Learning Theory", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Vapnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "V. Vapnik. 1995. The Nature of Statistical Learning Theory. Springer Verlag.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Event extraction from biomedical papers using a full parser", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Yakushiji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Tateisi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Miyao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proc. of PSB", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "408--419", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Yakushiji, Y. Tateisi, Y. Miyao, and J. Tsujii. 2001. Event extraction from biomedical papers using a full parser. In Proc. of PSB 2001, pages 408-419.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Using substrings for technical term extraction and classification", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Yamada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Matsumoto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "IPSJ SIGNotes", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "77--84", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. Yamada, T. Kudo, and Y. Matsumoto. 2000. Using sub- strings for technical term extraction and classification. IPSJ SIGNotes, (NL-140):77-84. (in Japanese).", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Effect of the class splitting technique." |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td># of sentences</td><td>5,109</td></tr><tr><td># of words</td><td>152,216</td></tr><tr><td># of named entities</td><td>23,793</td></tr><tr><td># of words in NEs</td><td>50,229</td></tr><tr><td># of words not in NEs</td><td>101,987</td></tr><tr><td colspan=\"2\">Av. length of NEs (\u03c3) 2.11 (1.40)</td></tr></table>", |
|
"text": "Basic statistics of the GENIA corpus" |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td/><td/><td>no splitting</td><td colspan=\"2\">splitting</td></tr><tr><td colspan=\"2\">training time</td><td>acc.</td><td>time</td><td>acc.</td></tr><tr><td colspan=\"2\">samples (sec.)</td><td>(F-score)</td><td>(sec.)</td><td>(F-</td></tr><tr><td/><td/><td/><td/><td>score)</td></tr><tr><td>16,000</td><td colspan=\"2\">2,809 37.04</td><td colspan=\"2\">5,581 36.82</td></tr><tr><td colspan=\"3\">32,000 13,614 40.65</td><td colspan=\"2\">9,175 41.36</td></tr><tr><td colspan=\"3\">48,000 21,174 42.44</td><td colspan=\"2\">9,709 42.49</td></tr><tr><td colspan=\"3\">64,000 40,869 42.52</td><td colspan=\"2\">12,502 44.34</td></tr><tr><td>96,000 -</td><td/><td>-</td><td colspan=\"2\">21,922 44.93</td></tr><tr><td>128,000 -</td><td/><td>-</td><td colspan=\"2\">36,846 45.99</td></tr></table>", |
|
"text": "Training time and accuracy with/without the class splitting technique. The number of training samples includes SOS and EOS (special words for the start/end of a sentence)." |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"content": "<table><tr><td>feature set</td><td>(A) adding</td><td colspan=\"2\">(B) sub. (k=2) (C) sub. (k=3)</td></tr><tr><td>Base</td><td>42.86</td><td>47.82</td><td>49.27</td></tr><tr><td>Left cache</td><td colspan=\"2\">43.25 (+0.39) 47.77 (-0.05)</td><td>49.02 (-0.25)</td></tr><tr><td colspan=\"2\">Right cache 42.34 (-0.52)</td><td>47.81 (-0.01)</td><td>49.07 (-0.20)</td></tr><tr><td colspan=\"3\">HMM state 44.70 (+1.84) 47.25 (-0.57)</td><td>48.03 (-1.24)</td></tr><tr><td>POS</td><td colspan=\"3\">44.82 (+1.96) 48.29 (+0.47) 48.75 (-0.52)</td></tr><tr><td>Prec. class</td><td colspan=\"2\">44.58 (+1.72) 43.32 (-4.50)</td><td>43.84 (-5.43)</td></tr><tr><td>Prefix</td><td>42.77 (-0.09)</td><td colspan=\"2\">48.11 (+0.29) 48.73 (-0.54)</td></tr><tr><td>Suffix</td><td colspan=\"2\">45.88 (+3.02) 47.07 (-0.75)</td><td>48.48 (-0.79)</td></tr><tr><td>Substring</td><td>42.16 (-0.70)</td><td colspan=\"2\">48.38 (+0.56) 50.23 (+0.96)</td></tr><tr><td colspan=\"4\">plete) base set. The first column (A) in Ta-</td></tr><tr><td colspan=\"4\">ble 3 shows an adding case where the base fea-ture set is {w [\u22122,\u2022\u2022\u2022 ,2] }. The columns (B) and</td></tr><tr><td colspan=\"4\">(C) show subtracting cases where the base feature set is {</td></tr></table>", |
|
"text": "Effect of each feature set assessed by adding/subtracting (F-score). Changes in bold face means positive effect." |
|
} |
|
} |
|
} |
|
} |