|
{ |
|
"paper_id": "C08-1016", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:25:29.480188Z" |
|
}, |
|
"title": "Other-Anaphora Resolution in Biomedical Texts with Automatically Mined Patterns", |
|
"authors": [ |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Bin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National University of Singapore $^I nstitute for Infocomm Research", |
|
"location": { |
|
"region": "A-STAR", |
|
"country": "Singapore" |
|
} |
|
}, |
|
"email": "#[email protected]" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Xiaofeng $", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National University of Singapore $^I nstitute for Infocomm Research", |
|
"location": { |
|
"region": "A-STAR", |
|
"country": "Singapore" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Su", |
|
"middle": [], |
|
"last": "Jian^", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National University of Singapore $^I nstitute for Infocomm Research", |
|
"location": { |
|
"region": "A-STAR", |
|
"country": "Singapore" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Tan", |
|
"middle": [ |
|
"Chew" |
|
], |
|
"last": "Lim", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National University of Singapore $^I nstitute for Infocomm Research", |
|
"location": { |
|
"region": "A-STAR", |
|
"country": "Singapore" |
|
} |
|
}, |
|
"email": "*[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper proposes an other-anaphora resolution approach in bio-medical texts. It utilizes automatically mined patterns to discover the semantic relation between an anaphor and a candidate antecedent. The knowledge from lexical patterns is incorporated in a machine learning framework to perform anaphora resolution. The experiments show that machine learning approach combined with the auto-mined knowledge is effective for otheranaphora resolution in the biomedical domain. Our system with auto-mined patterns gives an accuracy of 56.5%., yielding 16.2% improvement against the baseline system without pattern features, and 9% improvement against the system using manually designed patterns.", |
|
"pdf_parse": { |
|
"paper_id": "C08-1016", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper proposes an other-anaphora resolution approach in bio-medical texts. It utilizes automatically mined patterns to discover the semantic relation between an anaphor and a candidate antecedent. The knowledge from lexical patterns is incorporated in a machine learning framework to perform anaphora resolution. The experiments show that machine learning approach combined with the auto-mined knowledge is effective for otheranaphora resolution in the biomedical domain. Our system with auto-mined patterns gives an accuracy of 56.5%., yielding 16.2% improvement against the baseline system without pattern features, and 9% improvement against the system using manually designed patterns.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The last decade has seen an explosive growth in the amount of textual information in biomedicine. There is a need for an effective and efficient text-mining system to gather and utilize the knowledge encoded in the biomedical literature.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "For a correct discourse analysis, a text-mining system should have the capability of understanding the reference relations among different expressions in texts. Hence, anaphor resolution, the task of resolving a given text expression to its referred expression in prior texts, is important for an intelligent text processing system. \u00a9 2008. Licensed under the Creative Commons Attribution-Noncommercial-Share Alike 3.0 Unported license (http://creativecommons.org/licenses/by-ncsa/3.0/). Some rights reserved.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In linguistics, an expression that points back to a previously mentioned expression is called an anaphor, and the expression being referred to by the anaphor is called its antecedent. Most previous work on anaphora resolution aims at identity-anaphora in which both an anaphor and its antecedent are mentions of the same entity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we focus on a special type of anaphora resolution, namely, other-anaphora resolution, in which an anaphor to be resolved has a prefix modifier \"other\" or \"another\". The antecedent of an other-anaphor is a complement expression to the anaphor in a super set. In other words, an other-anaphor is a set of elements excluding the element(s) specified by the antecedent. If the modifier \"other\" or \"another\" is removed, an anaphor becomes the super set including the antecedent. Thus, other-anaphora in fact represents a \"part-whole\" relation. Consider the following text \"IL-10 inhibits nuclear stimulation of nuclear factor kappa B (NF kappa B) . Several other transcription factors including NF- GR, CREB, are not affected by Here, the expression \"other transcription factors\" is an other-anaphor, while the \"NF kappa B\" is its antecedent. The anaphor refers to any transcription factors except the antecedent. By removing the lexical modifier \"other\", we can get a supper set \"transcription factors\" that includes the antecedent. The anaphor and antecedent thus have a \"part-whole\" relation 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 644, |
|
"end": 656, |
|
"text": "(NF kappa B)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 709, |
|
"end": 712, |
|
"text": "GR,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 713, |
|
"end": 718, |
|
"text": "CREB,", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Other-anaphora resolution is an important sub-task in information extraction for biomedical domain. It also contributes to biomedical ontology building as it targeted at a \"part-whole\" relation which is in the same hierarchical orders as in ontology. Furthermore, other-anaphora resolution is a first-step exploration in the resolution of bridging anaphora. Furthermore, other-anaphora resolution is a first-step exploration in the resolution of bridging, a special anaphora phenomenon in which the semantic relation between an anaphor and its antecedent is more complex (e.g. part-whole) than co-reference.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Previous work on other-anaphora resolution relies on knowledge resources, for example, ontology like WordNet to determine the \"partwhole\" relation. However, in the biomedical domain, a document is full of technical terms which are usually missing in a general-purpose ontology. To deal with this problem, pattern-based approaches have been widely employed, in which a pattern that represents the \"part-whole\" relation is designed. Two expressions are connected with the specific pattern and form a query. The query is searched in a large corpus for the occurrence frequency which would indicate how likely the two given expressions have the part-whole relation. The solution can avoid the efforts of constructing the ontology knowledge for the \"partwhole\" relation. However, the pattern is designed in an ad-hoc method, usually from linguistic intuition and its effectiveness for other-anaphora resolution is not guaranteed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose a method to automatically mine effective patterns for otheranaphora resolution in biomedical texts. Our method runs on a small collection of seed word pairs. It searches a large corpus (e.g., PubMed abstracts as in our system) for the texts where the seed pairs co-occur, and collects the surrounding words as the surface patterns. The automatically found patterns will be used in a machine learning framework for other-anaphora resolution. To our knowledge, our work is the first effort of applying the pattern-base technique to other-anaphora resolution in biomedical texts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of this paper is organized as follows. Section 2 introduces previous related work. Section 3 describes the machine learning framework for other-anaphora resolution. Section 4 presents in detail our method for automatically pattern mining. Section 5 gives experiment results and has some discussions. Finally, Section 6 concludes the paper and shows some future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Previous work on other-anaphora resolution commonly depends on human engineered knowledge and/or deep semantic knowledge for the \"part-whole\" relation, and mostly works only in the news domain. Markert et al., (2003) presented a patternbased algorithm for other-anaphor resolution. They used a manually designed pattern \"ANTE-CEDENT and/or other ANAPHOR \". Given two expression to be resolved, a query is formed by instantiating the pattern with the two given expressions. The query is searched in the Web. The higher the hit number returned, the more likely that the anaphor and the antecedent candidate have the \"part-whole\" relation. The anaphor is resolved to the candidate with the highest hit number. Their work was tested on 120 otheranaphora cases extracted from Wall Street Journal. The final accuracy was 52.5%. Modjeska et al., (2003) also presented a similar pattern-based method for other-anaphora resolution, using the same pattern \"ANTECEDENT and/or other ANAPHOR\". The hit number returned from the Web is used as a feature for a Na\u00ef ve Bayesian Classifier to resolve otheranaphors. Other features include surface words, substring matching, distance, gender/number agreement, and semantic tag of the NP. They evaluated their method with 500 other-anaphor cases extracted from Wall Street Journal, and reported a result of 60.8% precision and 53.4% recall. Markert and Nissim (2005) compared three systems for other-anaphora resolution, using the same data set as in (Modjeska et al., 2003) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 194, |
|
"end": 216, |
|
"text": "Markert et al., (2003)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 822, |
|
"end": 845, |
|
"text": "Modjeska et al., (2003)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1371, |
|
"end": 1396, |
|
"text": "Markert and Nissim (2005)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1481, |
|
"end": 1504, |
|
"text": "(Modjeska et al., 2003)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The first system consults WordNet for the part-whole relation. The WordNet provides information on meronym/holonym (part-of relation) and hypernym/ hyponym (type-of relation). Their system achieves a performance of 56.8% for precision and 37.0% for recall.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The second and third systems employ the pattern based approach, employing the same manual pattern \"ANTECEDENT and/or other ANA-PHOR\". The second system did search in British Nation Corpus, giving 62.6% precision and 26.2% recall. The third system did search in the Web as in (Markert et al., 2003) , giving 53.8% precision and 51.7% recall.", |
|
"cite_spans": [ |
|
{ |
|
"start": 275, |
|
"end": 297, |
|
"text": "(Markert et al., 2003)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In our study, we used the GENIA corpus 2 for our other-anaphora resolution in biomedical texts. The corpus consists of 2000 MEDLINE abstracts (around 440,000 words). From the GENIA corpus, we extracted 598 other-anaphora cases. The 598 cases do not contain compound prepositions or idiomatic uses of \"other\", like \"on the other hand\" and \"other than\". And all these anaphors have their antecedents found in the current and previous two sentences of the other-anaphor. On average, there are 15.33 candidate antecedents for each anaphor to be resolved.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "To conduct other-anaphora resolution, an input document is preprocessed through a pipeline of NLP components, including tokenization, sentence boundary detection, part-of-speech (POS) tagging, noun phrase (NP) chunking, and namedentity recognition (NER). These preprocessing modules are aimed to determine the boundaries of each NP in a text, and to provide necessary information of an NP for subsequent processing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In our system, we employed the tool-kits built by our group for these components. The POS tagger was trained and tested on the GENIA corpus (version 2.1) and achieved an accuracy of 97.4%. The NP-chunking module, evaluated on UPEN WSJ TreeBank, produced 94% F-measure. The NER module, trained on GENIA corpus (version 3.0), achieved 71.2% F-measure covering 22 entity types (e.g., Virus, Protein, Cell, DNA, etc).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpus", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Our other-anaphora resolution system adopts the common learning-based model for identityanaphora resolution, as employed by (Soon et al., 2001) and (Ng and Cardie, 2002) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 143, |
|
"text": "(Soon et al., 2001)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 148, |
|
"end": 169, |
|
"text": "(Ng and Cardie, 2002)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Framework", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In the learning framework, a training or testing instance has the form of , where is the th candidates of the antecedent of anaphor . An instance is labelled as positive if is the antecedent of , or negative if is not the antecedent of . An instance is associated with a feature vector which records different properties and relations between and . The features used in our system will be discussed later in the paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Framework", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "During training, for each other-anaphor, we consider as the candidate antecedents the preceding NPs in its current and previous two sentences. 2 http://www-tsujii.is.s.u-tokyo.ac.jp/~genia/topics/Corpus/ A positive instance is formed by pairing the anaphor and the correct antecedent. And a set of negative instances is formed by pairing the anaphor and each of the other candidates.", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 144, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Framework", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Based on these generated training instances, we can train a binary classifier using any discriminative learning algorithm. In our work, we employed support vector machine (SVM) due to its good performance in high dimensional feature vector spaces.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Framework", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "During the resolution process, for each otheranaphor encountered, all of the preceding NPs in a three-sentence window are considered. A test instance is created for each of the candidate antecedents. The feature vector is presented to the trained classifier to determine the otheranaphoric relation. The candidate with highest SVM outcome value is selected as the antecedent.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Framework", |
|
"sec_num": "3.2" |
|
}, |
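
{

"text": "As an illustrative sketch of this resolution step (in Python; the candidate, feature-extraction and scoring helpers are hypothetical names rather than part of our implementation):\n\ndef resolve_other_anaphor(anaphor, sentences, extract_features, svm_score):\n    # candidates are all NPs preceding the anaphor in a three-sentence window\n    # (the anaphor's sentence and the previous two), as described above\n    window = sentences[max(0, anaphor.sent_idx - 2): anaphor.sent_idx + 1]\n    candidates = [np for sent in window for np in sent.noun_phrases\n                  if np.end_offset <= anaphor.start_offset]\n    if not candidates:\n        return None\n    # the candidate with the highest SVM output value is selected\n    scored = [(svm_score(extract_features(anaphor, cand)), cand) for cand in candidates]\n    return max(scored, key=lambda pair: pair[0])[1]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Learning Framework",

"sec_num": "3.2"

},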
|
{ |
|
"text": "Knowledge is usually represented as features for machine learning. In our system, we used the following groups of features for other-anaphora resolution \uf0b7 Word Distance Indicator This feature measures the word distance between an anaphor and a candidate antecedent, with the assumption that the candidate closer to the anaphor has a higher preference to be the antecedent.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline Features", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "This feature is either 0 or 1 indicating whether an anaphor and a candidate antecedent are in the same sentence. Here, the assumption is that the candidate in the same sentence as the anaphor is preferred for the antecedent.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\uf0b7 Same Sentence Indicator", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "A named-entity can be classified to a semantic category such as \"DNA\", \"RNA\", \"Protein\" and so on 3 . Thus we use a set of features to record the category pair of an anaphor and a candidate antecedent. For example, \"DNA-DNA\" is generated for the case when both anaphor and candidate are DNAs. And \"DNA-Protein\" is generated if an anaphor is a DNA and a candidate is a protein.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\uf0b7 Semantic Group Indicators", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "These features indicate whether a semantic group can refer to another.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\uf0b7 Semantic Group Indicators", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Note that an anaphor and its antecedent may possibly belong to different semantic categories. For example, in the GENIA corpus we found that in some cases an expression of a protein name actually denotes the gene that encodes the protein. Thus for a given anaphor and a candidate under consideration, it is necessary to record the pair-wise semantic groups, instead of using a single feature indicating whether two expressions are of the same group.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\uf0b7 Semantic Group Indicators", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The semantic group for a named entity is given by our preprocessing NER. For the common NPs produced from the NP chunker, we classify the semantic group by looking for the words inside NPs. For example, an NP ending with \"cells\" is classified to \"Cell\" group while an NP ending with \"gene\" or \"allele\" is classified to \"DNA\" group.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\uf0b7 Semantic Group Indicators", |
|
"sec_num": null |
|
}, |
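
{

"text": "A minimal sketch (in Python) of the head-word heuristic for common NPs and the pair-wise category feature described above; the suffix lists are illustrative rather than the complete set used in our system:\n\ndef semantic_group(np_text):\n    # classify a common NP by its last word, e.g. '... cells' -> Cell and\n    # '... gene' or '... allele' -> DNA; named entities keep their NER label\n    head = np_text.lower().split()[-1]\n    if head in ('cell', 'cells'):\n        return 'Cell'\n    if head in ('gene', 'genes', 'allele', 'alleles'):\n        return 'DNA'\n    return 'Other'\n\ndef semantic_group_pair(anaphor_np, candidate_np):\n    # pair-wise feature value such as 'DNA-Protein' (anaphor group first)\n    return semantic_group(anaphor_np) + '-' + semantic_group(candidate_np)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Semantic Group Indicators",

"sec_num": null

},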
|
{ |
|
"text": "In some cases, the surrounding words of an anaphor and a candidate antecedent strongly indicate the \"part-whole\" relation. For example, in \"...asthma and other hypereosinophilic diseases\", the reference between \"other hypereosinophilic diseases\" and \"asthma\" is clear if the inbetween words \"and other\" are taken into consideration. Another example of such a hint pattern is \"one\u2026 the other \u2026\" The feature is 1 if the specific patterns are present for the current anaphor and candidate pair. A candidate with such a feature is preferred to be the antecedent.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\uf0b7 Lexical Pattern Indicators", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This feature indicates whether an antecedent candidate is a substring of an anaphor or vice versa. This feature is used to capture cases like \"Jun\" and \"JunB\" (\"Jun\" is a family of protein while \"JunB\" is a member of this family). In many cases, an expression that is a super set comes with certain postfix words, for example, \"family members\" in \"Fludarabine caused a specific depletion of STAT1 protein (and mRNA) but not of other STAT family members.\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\uf0b7 Hierarchical Name Indicator", |
|
"sec_num": null |
|
}, |
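
{

"text": "A minimal sketch (in Python) of the substring check behind this indicator; the function name is ours, for illustration only:\n\ndef hierarchical_name_indicator(candidate_text, anaphor_text):\n    # 1 if one expression is a substring of the other, e.g. 'Jun' and 'JunB'\n    a, b = candidate_text.lower(), anaphor_text.lower()\n    return 1 if (a in b or b in a) else 0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Hierarchical Name Indicator",

"sec_num": null

},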
|
{ |
|
"text": "This kind of phenomenon is more common in bio-medical texts than in news articles.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\uf0b7 Hierarchical Name Indicator", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In our system, we utilized the open-source software SVM-Light 4 for the classifier training and testing. SVM is a robust statistical model which has been applied to many NLP tasks. SVM tries to learn a separating line to separate the positive instances from negative instances. Kernel transformations are applied for non-linear separable 4 http://svmlight.joachims.org/ cases (Vapnik, 1995) . In our study, we just used the default learning parameters provided by SVM-Light with the linear kernel. A more sophisticated kernel may further improve the performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 376, |
|
"end": 390, |
|
"text": "(Vapnik, 1995)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SVM Training and Classification", |
|
"sec_num": "3.4" |
|
}, |
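
{

"text": "As a rough sketch of the same training and ranking scheme with a different toolkit (scikit-learn, which is an assumption for illustration and not the SVM-Light setup used in our experiments):\n\nfrom sklearn.svm import LinearSVC\n\ndef train_classifier(feature_vectors, labels):\n    # labels: +1 for (anaphor, true antecedent) instances, -1 for the others\n    clf = LinearSVC()\n    clf.fit(feature_vectors, labels)\n    return clf\n\ndef score_candidates(clf, candidate_feature_vectors):\n    # the decision value plays the role of the SVM output used to rank\n    # candidate antecedents at resolution time\n    return clf.decision_function(candidate_feature_vectors)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "SVM Training and Classification",

"sec_num": "3.4"

},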
|
{ |
|
"text": "The baseline features listed in Section 3.3 only rely on shallow lexical, position and semantic information about an anaphor and a candidate antecedent. It could not, nevertheless, disclose the \"part-whole\" relation between two given expressions. In section 2, we have shown some existing pattern-based solutions that mine the \"partwhole\" relation in a large corpus with some patterns that can represent the relation. However, these manually designed patterns are usually selected by heuristics, which may not necessarily lead to a high coverage with a good accuracy in different domains. To overcome this shortcoming, we would like to use an automatic method to mine effective patterns from a large data set. First, we create a set of seed pairs of the \"partwhole\" relation. And then, we use the seed pairs to discover the patterns that encode the \"partwhole\" relation from a large data set (PubMed as in our system). Such a solution is supposed to improve the coverage of lexical patterns, while still retain the desired \"part-whole\" relation for other-anaphora resolution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Using Auto-mined Pattern Features", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The overview of our system with the automatic mined patterns is illustrated in figure 1. Figure 1 : System Overview There are three major parts in our system, namely, seed-pairs generation, pattern mining and SVM learning and classification. In the subsequent subsections, we will discuss each of the three parts in details.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 89, |
|
"end": 97, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Using Auto-mined Pattern Features", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "A seed pair is a pair of phrases/words following \"part-whole\" order, for example, \"integrin alpha\" -\"adhesion molecules\" where \"integrin alpha\" is a kind of \"adhesion molecules\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Seed Pairs Preparation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We extracted the seed pairs automatically from the GENIA corpus. The auto-extracting procedure makes uses of some lexical clues like \"A, such as B, C and D\", \"A (e.g. B and C)\", \"A including B\" and etc. The capital letter A, B, C and D refer to a noun phrase such as \"integrin alpha\" and \"adhesion molecules\". For each occurrence of \"A such as B, C and D\", the program will generate seed pairs \"B-A\", \"C-A\" and \"D-A\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Seed Pairs Preparation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Consider the following example, \"Mouse thymoma line EL-4 cells produce cytokines such as interleukin (IL) -2, IL-3, IL-4, IL-10, and granulocyte-macrophage colonystimulating factor in response to phorbol 12myristate 13-acetate (PMA).\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Seed Pairs Preparation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We can extract the following seed pairs, \"interleukin (IL) -2\" -\"cytokines\" \"IL -3\" -\"cytokines\" \"IL -4\" -\"cytokines\" \"IL -10\" -\"cytokines\" \"granulocyte-macrophage colony-stimulating factor\" -\"cytokines\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Seed Pairs Preparation", |
|
"sec_num": "4.1" |
|
}, |
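
{

"text": "A minimal sketch (in Python) of extracting seed pairs from the \"A such as B, C and D\" clue; real NP boundaries come from the NP chunker, so the naive head and comma/'and' splitting below is only illustrative:\n\ndef seed_pairs_from_such_as(sentence):\n    # split on the lexical clue 'such as' and pair each listed item with the\n    # head word of the left-hand phrase\n    if ' such as ' not in sentence:\n        return []\n    left, right = sentence.split(' such as ', 1)\n    whole = left.strip().split()[-1]\n    parts = right.rstrip('. ').replace(' and ', ', ').split(', ')\n    return [(p.strip(), whole) for p in parts if p.strip()]\n\n# e.g. seed_pairs_from_such_as('EL-4 cells produce cytokines such as IL-2, IL-3 and IL-4.')\n# returns [('IL-2', 'cytokines'), ('IL-3', 'cytokines'), ('IL-4', 'cytokines')]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Seed Pairs Preparation",

"sec_num": "4.1"

},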
|
{ |
|
"text": "A similar action is taken for other lexical clues. Totally, we got 909 distinct seed pairs extracted from the GENIA corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Seed Pairs Preparation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "After the seed pairs have been extracted, an automatic verification of the seed pairs is performed. The first purpose of the verification is to correct chunking errors. For example, \"HLA Class II Gene\" may likely be wrongly split into \"HLA Class\" and \"II Gene\". This kind of errors is repaired by several simple syntactic rules. The second purpose of the verification is to remove the inappropriate seed pairs. In our system, we abandoned the seed pairs containing pronouns like \"those\", \"they\", or nouns like \"element\", \"member\" and \"agent\". Such seed pairs may either find no patterns, or lead to meaningless patterns because \"those\" or \"elements\" have no specific semantics and could refer to anything.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Seed Pairs Preparation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Having obtained the set of seed pairs, we will use them to mine patterns for the \"part-whole\" relation. For each seed pair \"antecedent -anaphor\" (anaphor represents the NP for the \"whole\", while antecedent represents the NP for the \"part\"), our system will search in a large data set for two queries: \"antecedent * anaphor\" and \"anaphor * antecedent\" where the \"*\" denotes any sequence of words or symbols. For a returned search results, the text in between \"antecedent\" and \"anaphora\" is extracted as a pattern.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern Mining", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In our study, we used PubMed 2007 data set for the pattern mining. The data set contains about 52,000 abstracts with around 9,400,000 words, and is an ideal large-scale resource for pattern mining.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern Mining", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Consider, as an example, a seed pair \"NK kappa B \" -\"transcription factor\". Suppose that a returned sentence for the query \"NK kappa B * transcription factor\" is \"...NK kappa B family transcription factors...\" And a returned sentence for the query \"transcription factor * NK kappa B\" is \"...transcription factors, including NF kappa B...\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern Mining", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We can extract a pattern, \"ANTECEDENT family ANAPHOR\" from the first sentence and a pattern \"ANAPHOR, including ANTECEDENT\" from the second sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern Mining", |
|
"sec_num": "4.2" |
|
}, |
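
{

"text": "A minimal sketch (in Python) of extracting the surface pattern between the two members of a seed pair when they co-occur in one returned sentence; the function name and the plain string matching are illustrative only:\n\ndef patterns_from_sentence(sentence, part, whole):\n    patterns = []\n    for first, second, slot1, slot2 in ((part, whole, 'ANTECEDENT', 'ANAPHOR'),\n                                        (whole, part, 'ANAPHOR', 'ANTECEDENT')):\n        i = sentence.find(first)\n        if i < 0:\n            continue\n        j = sentence.find(second, i + len(first))\n        if j < 0:\n            continue\n        middle = sentence[i + len(first): j]\n        if '.' in middle:  # discard patterns that would span two sentences\n            continue\n        patterns.append(slot1 + middle + slot2)\n    return patterns\n\n# e.g. patterns_from_sentence('... transcription factors, including NF kappa B ...',\n#                             'NF kappa B', 'transcription factors')\n# returns ['ANAPHOR, including ANTECEDENT']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pattern Mining",

"sec_num": "4.2"

},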
|
{ |
|
"text": "We restrict the patterns so that no pattern span across two or more sentences. In other words, the pattern shall not contain the symbol \".\". The violated patterns will be removed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern Mining", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The count that a pattern occurs in the PubMed for a seed pair is recorded. As a pattern could be reduced by different seed pairs, we define the occurrence frequency of a pattern as the sum of the counts of the pattern for all the seed pairs, using following formula:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern Mining", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "= ( , ) \u2208 (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern Mining", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where is the frequency of pattern ; is a seed pair;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern Mining", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "is the set of all seed pairs. ( , ) is the count of the pattern for .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern Mining", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "All the mined patterns are sorted according to its frequency as defined in (1).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern Mining", |
|
"sec_num": "4.2" |
|
}, |
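
{

"text": "A minimal sketch (in Python) of aggregating pattern counts over all seed pairs as in formula (1) and ranking the patterns by frequency; the data layout is an assumption for illustration:\n\nfrom collections import Counter\n\ndef rank_patterns(counts_per_seed_pair):\n    # counts_per_seed_pair: one Counter per seed pair s, mapping a pattern p\n    # to count(p, s); freq(p) is the sum of these counts over all seed pairs\n    freq = Counter()\n    for counts in counts_per_seed_pair:\n        freq.update(counts)\n    return [p for p, _ in freq.most_common()]  # patterns sorted by frequency",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pattern Mining",

"sec_num": "4.2"

},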
|
{ |
|
"text": "For classifier training and testing, the patterns with high frequency are used as features. In our system, we used the top 40 patterns, while we also examined the influence the number of the patterns on the performance. (See Section 5.2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern Application", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Given an instance ( , ) and a pattern feature , a query is constructed by in-stantiating with and . For example, for an instance (\" \", \" -\") and a pattern feature \"ANA-PHOR, including ANTECEDENT\", we can get a query \"transcription factors, including NF kappa B\". The query is searched in the PubMed data set. The count of the query is recorded. The value of the pattern feature of a candidate is calculated by normalizing the occurrence frequency among all the candidates of the anaphor.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern Application", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "For demonstration, suppose we have an anaphor \"other transcription factors\" with two antecedent candidates \"IL-10\" and \"NF kappa B\". Given a pattern feature \"ANAPHOR, including ANTECEDENT\", the count of the query \"transcription factors, including IL-10\" is 100 while that for \"transcription factors, including NF-Kappa B\" is 300. Then the values of the pattern feature for \"IL-10\" and \"NF kappa B\" are 0.25 ( 100 100+300) and 0.75 ( 300 100+300), respectively. The value of a pattern feature can be interpreted as a degree of belief that an anaphor and a candidate antecedent have the \"part-whole\" relation, with regard to the specific pattern. Since the value of a pattern feature is normalized among all the candidates, it could indicate the preference of a candidate against other competing candidates.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern Application", |
|
"sec_num": "4.3" |
|
}, |
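
{

"text": "A minimal sketch (in Python) of the normalization described above, reproducing the 0.25/0.75 example; the function name is ours:\n\ndef normalized_pattern_values(query_counts):\n    # query_counts maps each candidate antecedent to the hit count of the\n    # query built from it and the current pattern feature\n    total = sum(query_counts.values())\n    if total == 0:\n        return {cand: 0.0 for cand in query_counts}\n    return {cand: count / total for cand, count in query_counts.items()}\n\n# e.g. normalized_pattern_values({'IL-10': 100, 'NF kappa B': 300})\n# returns {'IL-10': 0.25, 'NF kappa B': 0.75}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pattern Application",

"sec_num": "4.3"

},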
|
{ |
|
"text": "In our experiments, we conducted a 3-fold cross validation to evaluate the performances. The total 598 other-anaphora cases were divided into 3 sets of size 200, 199 and 199 respectively. For each experiment, two sets were used for training while the other set was used for testing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments Setup", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "For evaluation, we used the accuracy as the performance metric, which is defined as the correctly resolved other-anaphors divided by all the testing other-anaphors, that is, = # of correctly resolved anaphors # of total anaphors Table 1 shows the performance of different other-anaphora resolution systems. The first line is for the baseline system with only the normal features as described in Section 3.3. From the table, we can find that the baseline system only achieves around 40% accuracy. A performance is lower than a similar system in news domain by Modjeska et al., (2003) where they reported 51.6 % precision with 40.6% recall. This difference is probably because they utilized more semantic knowledge such as hypernymy and meronymy acquired from WordNet. Such knowledge, nevertheless, is not easily available in the biomedical domain. In our experiments, we tested the system with manually designed pattern features. We tried 10 patterns that can represent the \"part-whole\" relation. Table 2 summaries the patterns used in the system. Among them, the pattern \"Anaphor such as Antecedent\" and \"Antecedent and other Anaphor\" are commonly used in previous pattern based approaches (Markert et al., 2003; Modjeska et al., 2003) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 559, |
|
"end": 582, |
|
"text": "Modjeska et al., (2003)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1190, |
|
"end": 1212, |
|
"text": "(Markert et al., 2003;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1213, |
|
"end": 1235, |
|
"text": "Modjeska et al., 2003)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 229, |
|
"end": 236, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 996, |
|
"end": 1003, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments Setup", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "ANTECEDENT is a kind of ANAPHOR ANTECEDENT is a type of ANAPHOR ANTECEDENT is a member of ANAPHOR ANTECEDENT is a part of ANAPHOR ANAPHOR such as ANTECEDENT ANTECEDENT and other ANAPHOR ANTECEDENT within ANAPHOR ANTECEDENT is a component of ANAPHOR ANTECEDENT is a sort of ANAPHOR ANTECEDENT belongs to ANAPHOR Table 2 : Manually Selected Patterns The second line of Table 1 shows the results of the system with the manual pattern features. We can find that adding these pattern features produces an overall accuracy of 47%, yielding an increase of 7% accuracy against the baseline system without the pattern features.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 311, |
|
"end": 318, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 367, |
|
"end": 374, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pattern", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The improvement in accuracy is consistent with previous work using the pattern-based approaches in the news domain (Modjeska et al., 2003) . However, we found the performance in the biomedical domain is worse than that in the news domain. For example, Modjeska et al. (2003) reported a precision around 53%. This difference of performance suggests that the ma-nually designed patterns may not necessarily work equally well in different domains.", |
|
"cite_spans": [ |
|
{ |
|
"start": 115, |
|
"end": 138, |
|
"text": "(Modjeska et al., 2003)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 252, |
|
"end": 274, |
|
"text": "Modjeska et al. (2003)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The last system we examined in the experiment is the one with the automatically mined pattern features. Table 3 summarizes the top mined patterns ranked based on their occurrence frequency. Some of the patterns are intuitively good representation of the \"part-whole\" relation. For example, \"ANAPHOR, including ANTE-CEDENT\". \"ANAPHOR, such as ANTECE-DENT\" and \"ANAPHOR and other ANTECE-DENT\" which are in the manually designed pattern list, are generated.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 111, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pattern", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The last line of Table 1 lists the result of the system with automatically mined pattern features. It outperforms the baseline system (up to 16% accuracy), and the system with manually selected patterns (9% accuracy). These results prove that our pattern features are effective for the other-anaphora resolution. Table 3 : Auto-Mined Patterns To further compare the manually designed patterns and the automatically discovered patterns. We examined the coverage rate of the two pattern sets. The coverage rate measures the capability that a set of patterns could lead to positive anaphor-antecedent pairs. An other-anaphor is said to be covered by a pattern set, if the anaphor and its antecedent could be hit (i.e., the corresponding query has a non-zero hit number) by at least one pattern in the list. Thus the coverage rate could be defined as ( ) = #anaphors covered by the pattern set P # total anaphors", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 17, |
|
"end": 24, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 313, |
|
"end": 320, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pattern", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The coverage rates of the two pattern sets are tabulated in table 4. It is apparent that the auto-mined patterns have a significantly higher coverage (more than twice) than the manually designed patterns.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Coverage Rate Manually Designed 36.0 % Auto-Mined 92.1 % Table 4 : Coverage Comparison In our experiments we were also concerned about the usefulness of each individual pattern. For this purpose, we examined the loss of the accuracy when withdrawing a pattern feature from the feature list. The top 10 patterns with the largest accuracy loss are summarized in table 5.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 64, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Patterns", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Acc Loss ANAPHOR, including ANTECEDENT 4.18% ANAPHOR including ANTECEDENT 3.18% ANAPHOR such as ANTECEDENT 2.84% ANTECEDENT transcription ANAPHOR 2.17%", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "ANTECEDENT and other ANAPHOR 2.01% ANAPHOR, such as ANTECEDENT 1.84% ANTECEDENT family ANAPHOR 1.84% ANAPHOR (e.g., ANTECEDENT", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "1.51% ANTECEDENT-like ANAPHOR 1.17% ANTECEDENT/rel ANAPHOR 1.17% Table 5 : Usefulness of Each Pattern The process of automatic pattern mining would generate numerous surface patterns. It is not reasonable to use all the patterns as features. As mentioned in section 4.3, we rank the pattern based on their occurrence frequency and select the top ones as the features. It would be interesting to see how the number of patterns influences the performance of anaphora resolution. In figure 2, we plot the accuracy under different number top pattern features. We can find by using more patterns, the coverage keeps increasing. The accuracy also increases, but it reaches the peak with around 40 patterns. With more patterns, the accuracy remains at the same level. This is because the low frequency patterns usually are not that indicative of the \"part-whole\" relation. Including these pattern features would bring noises but not help the performance. The flat curve after the peak point suggests that the machine learning algorithm can effectively identify the importance of the pattern features for the resolution decision, and therefore including non-indicative patterns would not damage the performance.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 72, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pattern", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In our experiment, we also interested to compare the utility of PubMed with other general data sets. Thus, we tested pattern mining by us-ing the Google-5-grams corpus 5 which lists the hit number of all the queries of five words or less in the Web. Unfortunately, we found that the performance is worse than using PubMed. The patterns mined from the Web corpus only gives an accuracy of around 41%, almost the same as the baseline system without using any pattern features. The bad performance is due to the fact that most of bio-medical names are quite long (2~4 words) and occur infrequently in the nontechnique data set. Consequently, a query formed by a biomedical seed pair usually cannot be found in the Web corpus (We found the coverage of the auto-mined patterns mined from the corpus is only about 20%). ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pattern", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this paper, we have presented how to automatically mined pattern features for learning-based other-anaphora resolution in bio-medical texts. The patterns that represent the \"part-whole\" relations are automatically mined from a large data set. They are used as features for a SVM-based classifier learning and testing. The results of our experiments show a reasonably good performance with 56.5% accuracy). It outperforms (16% in accuracy) the baseline system without the pattern features, and also beats (9%) the system with manually designed pattern features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion & Future Works", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "There are several directions for future work. We would like to employ a pattern pruning process to remove those less indicative patterns such as \"ANAPHOR, ANTECEDENT\". And we also plan to perform pattern normalization which integrates two similar or literally identical pat-5 http://www.ldc.upenn.edu/Catalog/CatalogEntry.jsp? catalogId=LDC2006T13", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion & Future Works", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "terns into a single one. By doing so, the useful patterns may come to the top of the pattern list. Also we would like to explore ontology resources like MESH and Genes Ontology, which can provide enriched hierarchies of bio-medical terms and thus would benefit other-anaphora resolution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion & Future Works", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Other-anaphora could be also held between expressions that have subset-set or member-collection relations. In this paper, we treat them in a uniform way by using the patterned-based method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In our study, we followed the semantic categories defined in the annotation scheme of the GENIA corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This study on co-reference resolution is partially supported by a Specific Targeted Research Project (STREP) of the European Union's 6th Framework Programme within IST call 4, Bootstrapping of Ontologies and Terminologies STrategic REsearch Project (BOOTStrep).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Anaphora Resolution in Biomedical Literature", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Castano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Pustejovsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1977, |
|
"venue": "Thinking. Readings in Cognitive Science. Johnson-Laird and Wason edition. Cambridge", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "411--420", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Castano J, Zhang J and Pustejovsky J. Anaphora Resolution in Biomedical Literature. Submitted to International Sym- posium on Reference Resolution 2002, Alicante, Spain Clark H. Bridging. In Thinking. Readings in Cognitive Science. Johnson-Laird and Wason edition. Cambridge. Cambridge University Press; 1977.411-420", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Using Word Similarity Lists for Resolving Indirect Anaphora", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Gasperin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Vieira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of ACL Workshop on Reference Resolution and Its Application", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "40--46", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gasperin C and Vieira R. Using Word Similarity Lists for Resolving Indirect Anaphora. In Proceedings of ACL Workshop on Reference Resolution and Its Application. 30 June 2004; Barcelona. 2004.40-46", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Automatic Discovery of Part-Whole Relations", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Girju", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Badulescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Moldovan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Computational Linguistics", |
|
"volume": "32", |
|
"issue": "2", |
|
"pages": "83--135", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Girju R, Badulescu A and Moldovan D. Automatic Discov- ery of Part-Whole Relations. Computational Linguistics, 2006, 32(2):83-135", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Analysis of Part-Whole Relation and Subsumption in Medical Domain", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Bernauer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Data Knowledge Enginnering", |
|
"volume": "20", |
|
"issue": "", |
|
"pages": "405--415", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bernauer J.. Analysis of Part-Whole Relation and Subsump- tion in Medical Domain. Data Knowledge Enginnering 1996, 20:405-415", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Comparing Knowledge Sources for Nominal Anaphora Resolution", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Markert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Nissim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Computational Linguistics", |
|
"volume": "31", |
|
"issue": "3", |
|
"pages": "367--402", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Markert K. and Nissim M. Comparing Knowledge Sources for Nominal Anaphora Resolution. Computational Lin- guistics, 2005, 31(3):367-402", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Using the Web for Nominal Anaphora Resolution", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Markert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Modjeska", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Nissim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of EACL Workshop on the Computational Treatment of Anaphora", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "39--46", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Markert K, Modjeska N and Nissim M. Using the Web for Nominal Anaphora Resolution. In Proceedings of EACL Workshop on the Computational Treatment of Anaphora. 14 April 2003; Budapest. 2003.39-46", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Anaphor Resolution. The State of The Art", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Mitokov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mitokov R. Anaphor Resolution. The State of The Art. Working Paper, University of Wolverhampton, UK, 1999", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Using the Web in Machine Learning for Other-anaphor Resolution", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Modjeska", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Markert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Nissim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the 2003 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "176--183", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Modjeska N, Markert K and Nissim M. Using the Web in Machine Learning for Other-anaphor Resolution. In Pro- ceedings of the 2003 Conference on Empirical Methods in Natural Language Processing. July2003,Sapporo.176-183", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A Machine Learning Approach to Coreference Resolution of Noun Phrases", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Soon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Lim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Computational Linguistics", |
|
"volume": "", |
|
"issue": "4", |
|
"pages": "521--544", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Soon WM, Ng HT and Lim CY. A Machine Learning Ap- proach to Coreference Resolution of Noun Phrases. Com- putational Linguistics, 2001, 27(4).521-544", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Chapter 5 Methods of Pattern Recognition", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Vapnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "The Nature of Statistical Learning Theory", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "123--167", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vapnik, V. Chapter 5 Methods of Pattern Recognition. In The Nature of Statistical Learning Theory. New York. Springer-Verlag, 1995.123-167", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Wholes, and Part-whole Relation. The Prospects of the Mereotopology. Data & Knowledge Engineering", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Varzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Parts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "", |
|
"volume": "20", |
|
"issue": "", |
|
"pages": "259--286", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Varzi C. Parts, Wholes, and Part-whole Relation. The Pros- pects of the Mereotopology. Data & Knowledge Engi- neering, 1996, 20.259-286", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Semantic Tagging for Resolution of Indirect Anaphora", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Vieira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Bick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Coelho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Muller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Collovini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Souza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Rino", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of 7 th SIGdial Workshop on Discourse and Dialogue", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "76--79", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vieira R, Bick E, Coelho J, Muller V, Collovini S, Souza J and Rino L. Semantic Tagging for Resolution of Indirect Anaphora. In Proceedings of 7 th SIGdial Workshop on Discourse and Dialogue. July 2006; Sydney.76-79", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "A Tutorial on Supporting Vector Machines for Pattern Recognition. Data Mining and Knowledge Discovery", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Burges", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "121--167", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Burges C. A Tutorial on Supporting Vector Machines for Pattern Recognition. Data Mining and Knowledge Dis- covery 1998, 2:121-167", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Improving machine learning approaches to coreference resolution", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Cardie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of Annual Conference for Association of Computational Linguistics 2002", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "104--111", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ng V. and Cardie C. Improving machine learning ap- proaches to coreference resolution. In Proceedings of An- nual Conference for Association of Computational Lin- guistics 2002, Philadelphia.104-111", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Performance of Various No. of Patterns", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Performance Comparisons", |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |