|
{ |
|
"paper_id": "W06-0124", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T04:00:39.822625Z" |
|
}, |
|
"title": "Boosting for Chinese Named Entity Recognition", |
|
"authors": [ |
|
{ |
|
"first": "Xiaofeng", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Science and Technology Clear Water Bay", |
|
"location": { |
|
"settlement": "Hong Kong" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Marine", |
|
"middle": [], |
|
"last": "Carpuat", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Science and Technology Clear Water Bay", |
|
"location": { |
|
"settlement": "Hong Kong" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Dekai", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Science and Technology Clear Water Bay", |
|
"location": { |
|
"settlement": "Hong Kong" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We report an experiment in which a highperformance boosting based NER model originally designed for multiple European languages is instead applied to the Chinese named entity recognition task of the third SIGHAN Chinese language processing bakeoff. Using a simple characterbased model along with a set of features that are easily obtained from the Chinese input strings, the system described employs boosting, a promising and theoretically well-founded machine learning method to combine a set of weak classifiers together into a final system. Even though we did no other Chinese-specific tuning, and used only one-third of the MSRA and CityU corpora to train the system, reasonable results are obtained. Our evaluation results show that 75.07 and 80.51 overall F-measures were obtained on MSRA and CityU test sets respectively.", |
|
"pdf_parse": { |
|
"paper_id": "W06-0124", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We report an experiment in which a highperformance boosting based NER model originally designed for multiple European languages is instead applied to the Chinese named entity recognition task of the third SIGHAN Chinese language processing bakeoff. Using a simple characterbased model along with a set of features that are easily obtained from the Chinese input strings, the system described employs boosting, a promising and theoretically well-founded machine learning method to combine a set of weak classifiers together into a final system. Even though we did no other Chinese-specific tuning, and used only one-third of the MSRA and CityU corpora to train the system, reasonable results are obtained. Our evaluation results show that 75.07 and 80.51 overall F-measures were obtained on MSRA and CityU test sets respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Named entity recognition (NER), which includes the identification and classification of certain proper nouns, such as person names, organizations, locations, temporal, numerical and monetary phrases, plays an important part in many natural language processing applications, such as machine translation, information retrieval, information extraction and question answering. Much of the NER research was pioneered in the MUC/DUC and Multilingual Entity Task (MET) evaluations, as a result of which significant progress has been made and many NER systems of fairly high accuracy have been constructed. In addition, the shared tasks of CoNLL-2002 and CoNLL-2003 helped spur the development toward more language-independent NER systems, by evaluating four types of entities (people, locations, organizations and names of miscellaneous entities) in English, German, Dutch and Spanish.", |
|
"cite_spans": [ |
|
{ |
|
"start": 632, |
|
"end": 646, |
|
"text": "CoNLL-2002 and", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 647, |
|
"end": 657, |
|
"text": "CoNLL-2003", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "However, these are all European languages, and Chinese NER appears to be significantly more challenging in a number of important respects. We believe some of the main reasons to be as follows: (1) Unlike European languages, Chinese lacks capitalization information which plays a very important role in identifying named entities. (2) There is no space between words in Chinese, so ambiguous segmentation interacts with NER decisions. Consequently, segmentation errors will affect the NER performance, and vice versa. (3) Unlike European languages, Chinese allows an open vocabulary for proper names of persons, eliminating another major source of explicit clues used by European language NER models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper presents a system that introduces boosting to Chinese named entity identification and classification. Our primary aim was to conduct a controlled experiment to test how well the boosting based models we designed for European languages would fare on Chinese, without major modeling alterations to accommodate Chinese. We evaluated the system using data from the third SIGHAN Chinese language processing bakeoff, the goal of which was to perform NER on three types of named entities: PERSON, LO-CATION and ORGANIZATION. 1 Three training corpora from MSRA, CityU and LDC were given. The MSRA and LDC corpora were simplified Chinese texts while the CityU corpus was traditional Chinese. In addition, the competition also specified open and closed tests. In the open test, the participants may use any other material including material from other training corpora, proprietary dictionaries, and material from the Web besides the given training corpora. In the closed test, the participants can only use the three training corpora. No other material or knowledge is allowed, including part-of-speech (POS) information, externally generated word-frequency counts, Arabic and Chinese numbers, feature characters for place names, common Chinese surnames, and so on.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The approach we used is based on selecting a number of features, which are used to train several weak classifiers. Using boosting, which has been shown to perform well on other NLP problems and is a theoretically well-founded method, the weak classifiers are then combined to perform a strong classifier.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The main idea behind the boosting algorithm is that a set of many simple and moderately accurate weak classifiers (also called weak hypotheses) can be effectively combined to yield a single strong classifier (also called the final hypothesis). The algorithm works by training weak classifiers sequentially whose classification accuracy is slightly better than random guessing and finally combining them into a highly accurate classifier. Each weak classifier searches for the hypothesis in the hypotheses space that can best classify the current set of training examples. Based on the evaluation of each iteration, the algorithm reweights the training examples, forcing the newly generated weak classifier to give higher weights to the examples that are misclassified in the previous iteration. The boosting algorithm was originally created to deal with binary classification in supervised learning. The boosting algorithm is simple to implement, does feature selection resulting in a relatively simple classifier, and has fairly good generalization.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Boosting", |
|
"sec_num": "2" |
|
}, |
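
{

"text": "To make this loop concrete, here is a minimal sketch of binary AdaBoost around an abstract weak learner (our own illustration for exposition, not the system described in this paper; weak_learner is a hypothetical function that fits a classifier to a weighted sample):\n\nimport numpy as np\n\ndef adaboost(X, y, weak_learner, rounds=50):\n    # y holds labels in {-1, +1}; D is one weight per training example\n    y = np.asarray(y)\n    D = np.full(len(y), 1.0 / len(y))\n    ensemble = []\n    for s in range(rounds):\n        h = weak_learner(X, y, D)        # fit on the current weighting\n        pred = np.asarray(h(X))          # predictions in {-1, +1}\n        err = float(D[pred != y].sum())  # weighted training error\n        if err >= 0.5:                   # no better than random guessing\n            break\n        alpha = 0.5 * np.log((1.0 - err) / max(err, 1e-12))\n        ensemble.append((alpha, h))\n        # raise the weight of misclassified examples, lower the rest\n        D = D * np.exp(-alpha * y * pred)\n        D = D / D.sum()\n    return lambda Xq: np.sign(sum(a * h(Xq) for a, h in ensemble))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Boosting",

"sec_num": "2"

},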
|
{ |
|
"text": "Based on the boosting framework, our system uses the AdaBoost.MH algorithm (Schapire and Singer, 1999) as shown in Figure 1 , an n-ary classification variant of the original well-known binary AdaBoost algorithm (Freund and Schapire, 1997) . The original AdaBoost algorithm was designed for the binary classification problem but did not fulfill the requirements of the Chinese NER Input: A training set Tr = {< d1, C1 >, . . . , < dg, Cg >} where Cj \u2286 C = {c1, ..., c m } for all j = 1, . . . , g.", |
|
"cite_spans": [ |
|
{ |
|
"start": 75, |
|
"end": 102, |
|
"text": "(Schapire and Singer, 1999)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 238, |
|
"text": "(Freund and Schapire, 1997)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 115, |
|
"end": 123, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Boosting", |
|
"sec_num": "2" |
|
}, |
|
{

"text": "Figure 1 (the AdaBoost.MH algorithm). Input: a training set $Tr = \\{\\langle d_1, C_1 \\rangle, \\ldots, \\langle d_g, C_g \\rangle\\}$ where $C_j \\subseteq C = \\{c_1, \\ldots, c_m\\}$ for all $j = 1, \\ldots, g$. Output: a final hypothesis $\\Phi(d, c) = \\sum_{s=1}^{S} \\alpha_s \\Phi_s(d, c)$. Algorithm: let $D_1(d_j, c_i) = \\frac{1}{mg}$ for all $j = 1, \\ldots, g$ and all $i = 1, \\ldots, m$. For $s = 1, \\ldots, S$: (1) pass the distribution $D_s(d_j, c_i)$ to the weak classifier; (2) derive the weak hypothesis $\\Phi_s$ from the weak classifier; (3) choose $\\alpha_s \\in \\mathbb{R}$; (4) set $D_{s+1}(d_j, c_i) = D_s(d_j, c_i) \\exp(-\\alpha_s C_j[c_i] \\Phi_s(d_j, c_i)) / Z_s$, where $Z_s = \\sum_{i=1}^{m} \\sum_{j=1}^{g} D_s(d_j, c_i) \\exp(-\\alpha_s C_j[c_i] \\Phi_s(d_j, c_i))$ is a normalization factor chosen so that $\\sum_{i=1}^{m} \\sum_{j=1}^{g} D_{s+1}(d_j, c_i) = 1$.",

"cite_spans": [],

"ref_spans": [

{

"start": 0,

"end": 8,

"text": "Figure 1",

"ref_id": "FIGREF0"

}

],

"eq_spans": [],

"section": "Boosting",

"sec_num": "2"

},
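
{

"text": "Written directly from the update rule in Figure 1, one round of the AdaBoost.MH distribution update looks as follows (a sketch under our notation: C[j, i] is +1 if label c_i applies to example d_j and -1 otherwise, and scores holds the confidence-rated weak-hypothesis outputs \u03a6_s(d_j, c_i)):\n\nimport numpy as np\n\ndef mh_update(D, C, scores, alpha):\n    # D, C, scores: (g, m) arrays over (example, label) pairs\n    D_next = D * np.exp(-alpha * C * scores)\n    Z = D_next.sum()      # normalization factor Z_s\n    return D_next / Z     # now sums to 1 over all (j, i) pairs\n\n# The initial distribution D_1(d_j, c_i) = 1 / (mg), e.g.:\n# g, m = 4, 3; D = np.full((g, m), 1.0 / (g * m))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Boosting",

"sec_num": "2"

},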
|
{ |
|
"text": "\u2022 set Ds+1(dj, ci) = task. AdaBoost.MH has shown its usefulness on standard machine learning tasks through extensive theoretical and empirical studies, where different standard machine learning methods have been used as the weak classifier (e.g., Bauer and Kohavi (1999) , Opitz and Maclin (1999) , Schapire (2002) ). It also performs well on a number of natural language processing problems, including text categorization (e.g., Schapire and Singer (2000) , Sebastiani et al. (2000) ) and word sense disambiguation (e.g., Escudero et al. (2000) ). In particular, it has also been demonstrated that boosting can be used to build language-independent NER models that perform exceptionally well (Wu et al. (2002) , Wu et al. (2004) , Carreras et al. (2002) ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 247, |
|
"end": 270, |
|
"text": "Bauer and Kohavi (1999)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 273, |
|
"end": 296, |
|
"text": "Opitz and Maclin (1999)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 299, |
|
"end": 314, |
|
"text": "Schapire (2002)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 430, |
|
"end": 456, |
|
"text": "Schapire and Singer (2000)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 459, |
|
"end": 483, |
|
"text": "Sebastiani et al. (2000)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 523, |
|
"end": 545, |
|
"text": "Escudero et al. (2000)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 693, |
|
"end": 710, |
|
"text": "(Wu et al. (2002)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 713, |
|
"end": 729, |
|
"text": "Wu et al. (2004)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 732, |
|
"end": 754, |
|
"text": "Carreras et al. (2002)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Boosting", |
|
"sec_num": "2" |
|
}, |
|
|
{ |
|
"text": "The weak classifiers used in the boosting algorithm come from a wide range of machine learning methods. We have chosen to use a simple classifier called a decision stump in the algorithm. A decision stump is basically a one-level decision tree where the split at the root level is based on a specific attribute/value pair. For example, a possible attribute/value pair could be W 2 = \u2122/.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Boosting", |
|
"sec_num": "2" |
|
}, |
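
{

"text": "A weighted-error search for such a stump can be sketched as follows (our own simplification: BoosTexter's real stumps output confidence-rated predictions rather than hard {-1, +1} votes, and the encoding of examples as attribute/value dictionaries is an assumption):\n\nimport numpy as np\n\ndef fit_stump(X, y, D):\n    # X: one feature dict per example, e.g. {'W-2': 'x', 'P0': 'n'}\n    # y: labels in {-1, +1}; D: example weights summing to 1\n    y = np.asarray(y)\n    best = None  # (weighted error, attribute, value, sign)\n    pairs = {(a, v) for x in X for a, v in x.items()}\n    for a, v in pairs:\n        fires = np.array([x.get(a) == v for x in X])\n        pred = np.where(fires, 1, -1)\n        err = float(D[pred != y].sum())\n        # the sign-flipped stump is also a candidate\n        for sign, e in ((1, err), (-1, 1.0 - err)):\n            if best is None or e < best[0]:\n                best = (e, a, v, sign)\n    return best",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Boosting",

"sec_num": "2"

},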
|
{ |
|
"text": "In order to implement the boosting/decision stumps, we used the publicly available software AT&T BoosTexter (Schapire and Singer, 2000) , which implements boosting on top of decision stumps. For preprocessing we used an off-theshelf Chinese lexical analysis system, the open source ICTCLAS (Zhang et al., 2003) , to segment and POS tag the training and test corpora.", |
|
"cite_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 135, |
|
"text": "(Schapire and Singer, 2000)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 290, |
|
"end": 310, |
|
"text": "(Zhang et al., 2003)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiment Details", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The training corpora provided by the SIGHAN bakeoff organizers were in the CoNLL two column format, with one Chinese character per line and hand-annotated named entity chunks in the second column.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Preprocessing", |
|
"sec_num": "3.1" |
|
}, |
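
{

"text": "A minimal reader for this format might look as follows (a sketch assuming one character and its chunk tag per line, separated by whitespace, with blank lines between sentences):\n\ndef read_conll(path):\n    sents, chars, tags = [], [], []\n    with open(path, encoding='utf-8') as fh:\n        for line in fh:\n            line = line.strip()\n            if not line:                # blank line ends a sentence\n                if chars:\n                    sents.append((chars, tags))\n                    chars, tags = [], []\n            else:\n                ch, tag = line.split()  # character, NE chunk tag\n                chars.append(ch)\n                tags.append(tag)\n    if chars:\n        sents.append((chars, tags))\n    return sents",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data Preprocessing",

"sec_num": "3.1"

},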
|
{ |
|
"text": "In order to provide basic features for training the decision stumps, the training corpora were segmented and POS tagged by ICTCLAS, which labels Chinese words using a set of 39 tags. This module employs a hierarchical hidden Markov model (HHMM) and provides word segmentation, POS tagging and unknown word recognition. It performs reasonably well, with segmentation precision recently evaluated at 97.58%. 2 The recall rate of unknown words using role tagging was over 90%.", |
|
"cite_spans": [ |
|
{ |
|
"start": 406, |
|
"end": 407, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Preprocessing", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We note that about 200 words in each training corpora remained untagged. For these words we simply assigned the most frequently occurring tags in each training corpora.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Preprocessing", |
|
"sec_num": "3.1" |
|
}, |
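
{

"text": "One reading of this fallback, sketched under our own assumptions about the data layout (a single corpus-wide most-frequent tag):\n\nfrom collections import Counter\n\ndef fill_untagged(tokens):\n    # tokens: (word, pos_tag_or_None) pairs from one training corpus\n    counts = Counter(t for _, t in tokens if t is not None)\n    default = counts.most_common(1)[0][0]  # most frequent tag overall\n    return [(w, t if t is not None else default) for w, t in tokens]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data Preprocessing",

"sec_num": "3.1"

},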
|
{ |
|
"text": "The boosting/decision stumps were able to accommodate a large number of features. The primitive features we used were:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Set", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 The current character and its POS tag.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Set", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 The characters within a window of 2 characters before and after the current character.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Set", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 The POS tags within a window of 2 characters before and after the current character.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Set", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 The chunk tags (gold standard named entity label during the training) of the previous two characters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Set", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The chunk tag is the BIO representation, which was employed in the CoNLL-2002 and CoNLL-2003 evaluations. In this representation, each character is tagged as either the beginning of a named entity (B tag), a character inside a named entity (I tag), or a character outside a named entity (O tag).", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 81, |
|
"text": "CoNLL-2002 and", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 82, |
|
"end": 105, |
|
"text": "CoNLL-2003 evaluations.", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Set", |
|
"sec_num": "3.2" |
|
}, |
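
{

"text": "For example, a two-character person name at the start of a five-character sentence receives the tags B-PER, I-PER, O, O, O. A small helper that derives per-character BIO tags from entity spans (our own illustration, not part of the bakeoff tooling):\n\ndef to_bio(n_chars, entities):\n    # entities: (start, end, type) character spans, end exclusive\n    tags = ['O'] * n_chars\n    for start, end, etype in entities:\n        tags[start] = 'B-' + etype\n        for k in range(start + 1, end):\n            tags[k] = 'I-' + etype\n    return tags\n\n# to_bio(5, [(0, 2, 'PER')]) == ['B-PER', 'I-PER', 'O', 'O', 'O']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Feature Set",

"sec_num": "3.2"

},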
|
{ |
|
"text": "When we used conjunction features, we found that they helped the NER performance significantly. The conjunction features used are basically conjunctions of 2 consecutive characters and 2 consecutive POS tags. We also found that a larger context window (3 characters instead of 2 before and after the current character) to be quite helpful to performance. Apart from the training and test corpora, we considered the gazetteers from LDC which contain about 540K persons, 242K locations and 98K organization names. Named entities in the training corpora which appeared in the gazetteers were identified lexically or by using a maximum forward match algorithm. Once named entities have been identified, each character can then be annotated with an NE chunk tag. The boosting learner can view the NE chunk tag as an additional feature. Here we used binary gazetteer features. If the character was annotated with an NE chunk tag, its gazetteer feature was set to 1; otherwise it was set to 0. However we found that adding binary gazetteer features does not significantly help the performance when conjunction features were used. In fact, it actually hurt the performance slightly.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Set", |
|
"sec_num": "3.2" |
|
}, |
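
{

"text": "The following sketch shows one way to realize the gazetteer matching (maximum forward match) and the per-character feature extraction described above: window features, conjunctions of consecutive characters and of consecutive POS tags, and the binary gazetteer flag. The function and feature-key names are our own assumptions, not the paper's.\n\ndef forward_match(chars, gazetteer, max_len=10):\n    # greedy longest-match-first scan against a set of entity strings\n    flags = [0] * len(chars)\n    i = 0\n    while i < len(chars):\n        for L in range(min(max_len, len(chars) - i), 0, -1):\n            if ''.join(chars[i:i + L]) in gazetteer:\n                flags[i:i + L] = [1] * L\n                i += L\n                break\n        else:\n            i += 1\n    return flags\n\ndef char_features(chars, pos, gaz, t, window=3):\n    f = {'C0': chars[t], 'P0': pos[t], 'GAZ': gaz[t]}\n    for off in range(-window, window + 1):\n        if off != 0:\n            j = t + off\n            ok = 0 <= j < len(chars)\n            f['C%+d' % off] = chars[j] if ok else '<pad>'\n            f['P%+d' % off] = pos[j] if ok else '<pad>'\n    # conjunctions of 2 consecutive characters and 2 consecutive POS tags\n    for off in range(-window, window):\n        a, b = t + off, t + off + 1\n        if 0 <= a and b < len(chars):\n            f['CC%+d' % off] = chars[a] + chars[b]\n            f['PP%+d' % off] = pos[a] + '|' + pos[b]\n    return f",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Feature Set",

"sec_num": "3.2"

},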
|
{ |
|
"text": "The features used in the final experiments were:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Set", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 The current character and its POS tag.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Set", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 The characters within a window of 3 characters before and after the current character.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Set", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 The POS tags within a window of 3 characters before and after the current character.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Set", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 A small set of conjunctions of POS tags and characters within a window of 3 characters of the current character.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Set", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 The BIO chunk tags of the previous 3 characters. Table 1 presents the results obtained on the MSRA and CityU development test set. Table 2 presents the results obtained on the MSRA, CityU and LDC test sets. These numbers greatly underrepresent what could be expected from the boosting model, since we only used one-third of MSRA and CityU training corpora due to limitations of the boosting software. Another problem for the LDC corpus was training/testing mismatch: we did not train any models at all with the LDC training corpus, which was the only training set annontated with geopolitical entities (GPE). Instead, for the LDC test set, we simply used the system trained on the MSRA corpus. Thus, when we consider the geopolitical entity (GPE), our low overall Fmeasure on the LDC test set cannot be interpreted meaningfully. 3 Even so, using only one-third of the training data, the results on the MSRA and CityU test sets are reasonable: 75.07 and 80.51 overall F-measures were obtained on the MSRA and CityU test sets, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 58, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 133, |
|
"end": 140, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Feature Set", |
|
"sec_num": "3.2" |
|
}, |
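
{

"text": "For reference, the overall F-measures reported here are the usual harmonic mean of chunk-level precision and recall. The sketch below mirrors the standard CoNLL-style computation; it is not the official SIGHAN scorer.\n\ndef prf(gold, pred):\n    # gold, pred: sets of (sentence_id, start, end, type) entity chunks\n    tp = len(gold & pred)\n    p = tp / len(pred) if pred else 0.0\n    r = tp / len(gold) if gold else 0.0\n    f = 2 * p * r / (p + r) if p + r else 0.0\n    return p, r, f",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Feature Set",

"sec_num": "3.2"

},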
|
{ |
|
"text": "We have described an experiment applying a boosting based NER model originally designed 3 Our LDC test result was scored twice by the organizer.", |
|
"cite_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 89, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "for multiple European languages instead to the Chinese named entity recognition task. Even though we only used one-third of the MSRA and CityU corpora to train the system, the model produced reasonable results, obtaining 75.07 and 80.51 overall F-measures on MSRA and CityU test sets respectively. Having established this baseline for comparison against our multilingual European language boosting based NER models, our next step will be to incorporate Chinese-specific attributes into the model to compare with.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Except in the LDC corpus, which contains four types of entities: PERSON, LOCATION, ORGANIZATION and GEOPOLITICAL.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Results from the recent official evaluation in the national 973 project.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
,

{

"text": "Our LDC test result was scored twice by the organizer.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "",

"sec_num": null

}
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "An empirical comparison of voting classification algorithms: Bagging, boosting, and variants", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Bauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ron", |
|
"middle": [], |
|
"last": "Kohavi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Machine Learning", |
|
"volume": "36", |
|
"issue": "", |
|
"pages": "105--142", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Bauer and Ron Kohavi. An empirical comparison of voting classification algorithms: Bagging, boosting, and variants. Machine Learning, 36:105-142, 1999.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Named entity extraction using AdaBoost", |
|
"authors": [ |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Carreras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llu\u00eds", |
|
"middle": [], |
|
"last": "M\u00e0rquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llu\u00eds", |
|
"middle": [], |
|
"last": "Padr\u00f3", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Computational Natural Language Learning (CoNLL-2002)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "171--174", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xavier Carreras, Llu\u00eds M\u00e0rquez, and Llu\u00eds Padr\u00f3. Named en- tity extraction using AdaBoost. In Computational Natural Language Learning (CoNLL-2002), at COLING-2002, pages 171-174, Taipei, Sep 2002.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Boosting applied to word sense disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Gerard", |
|
"middle": [], |
|
"last": "Escudero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llu\u00eds", |
|
"middle": [], |
|
"last": "M\u00e0rquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "German", |
|
"middle": [], |
|
"last": "Rigau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "11th European Conference on Machine Learning (ECML-00)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "129--141", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gerard Escudero, Llu\u00eds M\u00e0rquez, and German Rigau. Boost- ing applied to word sense disambiguation. In 11th Euro- pean Conference on Machine Learning (ECML-00), pages 129-141, 2000.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A decision-theoretic generalization of on-line learning and an application to boosting", |
|
"authors": [ |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Freund", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Schapire", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Computer and System Sciences", |
|
"volume": "55", |
|
"issue": "1", |
|
"pages": "119--139", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoav Freund and Robert E. Schapire. A decision-theoretic generalization of on-line learning and an application to boosting. Computer and System Sciences, 55(1):119-139, 1997.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Popular ensemble methods: An empirical study", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Opitz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Maclin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Journal of Artificial Intelligence Research", |
|
"volume": "11", |
|
"issue": "", |
|
"pages": "169--198", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Opitz and Richard Maclin. Popular ensemble meth- ods: An empirical study. Journal of Artificial Intelligence Research, 11:169-198, 1999.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Improved boosting algorithms using confidence-rated predictions", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Schapire", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoram", |
|
"middle": [], |
|
"last": "Singer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Machine Learning", |
|
"volume": "37", |
|
"issue": "", |
|
"pages": "297--336", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert E. Schapire and Yoram Singer. Improved boosting algorithms using confidence-rated predictions. Machine Learning, 37(3):297-336, 1999.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Boostexter: A boosting-based system for text categorization", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Schapire", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoram", |
|
"middle": [], |
|
"last": "Singer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Machine Learning", |
|
"volume": "39", |
|
"issue": "", |
|
"pages": "135--168", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert E. Schapire and Yoram Singer. Boostexter: A boosting-based system for text categorization. Machine Learning, 39(2-3):135-168, 2000.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "The boosting approach to machine learning: An overview", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Schapire", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "MSRI workshop on Nonlinear Estimation and Classification", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert E. Schapire. The boosting approach to machine learn- ing: An overview. In MSRI workshop on Nonlinear Esti- mation and Classification, 2002.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "An improved boosting algorithm and its application to automated text categorization", |
|
"authors": [ |
|
{ |
|
"first": "Fabrizio", |
|
"middle": [], |
|
"last": "Sebastiani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Sperduti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Valdambrini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of 9th ACM International Conference on Information and Knowledge Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "78--85", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fabrizio Sebastiani, Alessandro Sperduti, and Nicola Val- dambrini. An improved boosting algorithm and its appli- cation to automated text categorization. In Proceedings of 9th ACM International Conference on Information and Knowledge Management, pages 78-85, 2000.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Boosting for named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Dekai", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Grace", |
|
"middle": [], |
|
"last": "Ngai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marine", |
|
"middle": [], |
|
"last": "Carpuat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeppe", |
|
"middle": [], |
|
"last": "Larsen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yongsheng", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Computational Natural Language Learning (CoNLL-2002)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "195--198", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dekai Wu, Grace Ngai, Marine Carpuat, Jeppe Larsen, and Yongsheng Yang. Boosting for named entity recognition. In Computational Natural Language Learning (CoNLL- 2002), at COLING-2002, pages 195-198, Taipei, Sep 2002.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Why nitpicking works: Evidence for Occam's razor in error correctors", |
|
"authors": [ |
|
{ |
|
"first": "Dekai", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Grace", |
|
"middle": [], |
|
"last": "Ngai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marine", |
|
"middle": [], |
|
"last": "Carpuat", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "20th International Conference on Computational Linguistics (COLING-2004)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dekai Wu, Grace Ngai, and Marine Carpuat. Why nitpicking works: Evidence for Occam's razor in error correctors. In 20th International Conference on Computational Linguis- tics (COLING-2004), Geneva, 2004.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Chinese lexical analysis using Hierarchical Hidden Markov Model", |
|
"authors": [ |
|
{

"first": "Hua Ping",

"middle": [],

"last": "Zhang",

"suffix": ""

},

{

"first": "Qun",

"middle": [],

"last": "Liu",

"suffix": ""

},

{

"first": "Xue-Qi",

"middle": [],

"last": "Cheng",

"suffix": ""

},

{

"first": "Hao",

"middle": [],

"last": "Zhang",

"suffix": ""

},

{

"first": "Hong Kui",

"middle": [],

"last": "Yu",

"suffix": ""

}
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the second SIGHAN workshop on Chinese language processing", |
|
"volume": "17", |
|
"issue": "", |
|
"pages": "63--70", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hua Ping Zhang, Qun Liu, Xue-Qi Cheng, Hao Zhang, and Hong Kui Yu. Chinese lexical analysis using Hierarchi- cal Hidden Markov Model. In Proceedings of the second SIGHAN workshop on Chinese language processing, vol- ume 17, pages 63-70, 2003.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "The AdaBoost.MH algorithm.", |
|
"uris": null |
|
}, |
|
"TABREF0": { |
|
"text": "Dev set results on MSRA and CityU.", |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table><tr><td/><td colspan=\"2\">Precision Recall</td><td>F \u03b2=1</td></tr><tr><td>MSRA</td><td/><td/></tr><tr><td>LOC</td><td>82.00%</td><td colspan=\"2\">85.93% 83.92</td></tr><tr><td>ORG</td><td>76.99%</td><td colspan=\"2\">61.44% 68.34</td></tr><tr><td>PER</td><td>89.33%</td><td colspan=\"2\">74.47% 81.22</td></tr><tr><td colspan=\"2\">Overall 82.62%</td><td colspan=\"2\">76.45% 79.41</td></tr><tr><td>CityU</td><td/><td/></tr><tr><td>LOC</td><td>88.62%</td><td colspan=\"2\">81.69% 85.02</td></tr><tr><td>ORG</td><td>82.50%</td><td colspan=\"2\">66.44% 73.61</td></tr><tr><td>PER</td><td>84.05%</td><td colspan=\"2\">84.58% 84.31</td></tr><tr><td colspan=\"2\">Overall 86.46%</td><td colspan=\"2\">79.26% 82.71</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF1": { |
|
"text": "Test set results on MSRA, CityU, LDC.", |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table><tr><td/><td colspan=\"2\">Precision Recall</td><td>F \u03b2=1</td></tr><tr><td>MSRA</td><td/><td/><td/></tr><tr><td>LOC</td><td>84.98%</td><td colspan=\"2\">80.94% 82.91</td></tr><tr><td>ORG</td><td>72.82%</td><td colspan=\"2\">57.78% 64.43</td></tr><tr><td>PER</td><td>82.89%</td><td colspan=\"2\">59.91% 69.55</td></tr><tr><td>Overall</td><td>81.95%</td><td colspan=\"2\">69.26% 75.07</td></tr><tr><td>CityU</td><td/><td/><td/></tr><tr><td>LOC</td><td>88.65%</td><td colspan=\"2\">83.58% 86.04</td></tr><tr><td>ORG</td><td>83.75%</td><td colspan=\"2\">57.25% 68.01</td></tr><tr><td>PER</td><td>86.11%</td><td colspan=\"2\">76.42% 80.98</td></tr><tr><td>Overall</td><td>86.92%</td><td colspan=\"2\">74.98% 80.51</td></tr><tr><td>LDC</td><td/><td/><td/></tr><tr><td>LOC</td><td>65.84%</td><td colspan=\"2\">76.51% 70.78</td></tr><tr><td>ORG</td><td>53.69%</td><td colspan=\"2\">39.52% 45.53</td></tr><tr><td>PER</td><td>80.29%</td><td colspan=\"2\">68.97% 74.20</td></tr><tr><td>Overall</td><td>67.20%</td><td colspan=\"2\">65.54% 66.36</td></tr><tr><td>LDC (w/GPE)</td><td/><td/><td/></tr><tr><td>GPE</td><td>0.00%</td><td>0.00%</td><td>0.00</td></tr><tr><td>LOC</td><td>1.94%</td><td colspan=\"2\">37.74% 3.70</td></tr><tr><td>ORG</td><td>53.69%</td><td colspan=\"2\">39.52% 45.53</td></tr><tr><td>PER</td><td>80.29%</td><td colspan=\"2\">68.97% 74.20</td></tr><tr><td>Overall</td><td>30.58%</td><td colspan=\"2\">29.82% 30.19</td></tr></table>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |