|
{ |
|
"paper_id": "N07-1026", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:48:00.808202Z" |
|
}, |
|
"title": "Data-Driven Graph Construction for Semi-Supervised Graph-Based Learning in NLP", |
|
"authors": [ |
|
{ |
|
"first": "Andrei", |
|
"middle": [], |
|
"last": "Alexandrescu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Washington Seattle", |
|
"location": { |
|
"postCode": "98195", |
|
"region": "WA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Katrin", |
|
"middle": [], |
|
"last": "Kirchhoff", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Washington Seattle", |
|
"location": { |
|
"postCode": "98195", |
|
"region": "WA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Graph-based semi-supervised learning has recently emerged as a promising approach to data-sparse learning problems in natural language processing. All graph-based algorithms rely on a graph that jointly represents labeled and unlabeled data points. The problem of how to best construct this graph remains largely unsolved. In this paper we introduce a data-driven method that optimizes the representation of the initial feature space for graph construction by means of a supervised classifier. We apply this technique in the framework of label propagation and evaluate it on two different classification tasks, a multi-class lexicon acquisition task and a word sense disambiguation task. Significant improvements are demonstrated over both label propagation using conventional graph construction and state-of-the-art supervised classifiers.", |
|
"pdf_parse": { |
|
"paper_id": "N07-1026", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Graph-based semi-supervised learning has recently emerged as a promising approach to data-sparse learning problems in natural language processing. All graph-based algorithms rely on a graph that jointly represents labeled and unlabeled data points. The problem of how to best construct this graph remains largely unsolved. In this paper we introduce a data-driven method that optimizes the representation of the initial feature space for graph construction by means of a supervised classifier. We apply this technique in the framework of label propagation and evaluate it on two different classification tasks, a multi-class lexicon acquisition task and a word sense disambiguation task. Significant improvements are demonstrated over both label propagation using conventional graph construction and state-of-the-art supervised classifiers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Natural Language Processing (NLP) applications benefit from the availability of large amounts of annotated data. However, such data is often scarce, particularly for non-mainstream languages. Semisupervised learning addresses this problem by combining large amounts of unlabeled data with a small set of labeled data in order to learn a classification function. One class of semi-supervised learning algorithms that has recently attracted increased interest is graph-based learning. Graph-based techniques represent labeled and unlabeled data points as nodes in a graph with weighted edges encoding the similarity of pairs of samples. Various techniques are then available for transferring class labels from the labeled to the unlabeled data points. These approaches have shown good performance in cases where the data is characterized by an underlying manifold structure and samples are judged to be similar by local similarity measures. However, the question of how to best construct the graph forming the basis of the learning procedure is still an underinvestigated research problem. NLP learning tasks present additional problems since they often rely on discrete or heterogeneous feature spaces for which standard similarity measures (such as Euclidean or cosine distance) are suboptimal.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We propose a two-pass data-driven technique for graph construction in the framework of label propagation (Zhu, 2005) . First, we use a supervised classifier trained on the labeled subset to transform the initial feature space (consisting of e.g. lexical, contextual, or syntactic features) into a continuous representation in the form of soft label predictions. This representation is then used as a basis for measuring similarity among samples that determines the structure of the graph used for the second, semisupervised learning step. It is important to note that, rather than simply cascading the supervised and the semi-supervised learner, we optimize the combination with respect to the properties required of the graph. We present several techniques for such optimization, including regularization of the first-pass classifier, biasing by class priors, and linear combi-nation of classifier predictions with known features.", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 116, |
|
"text": "(Zhu, 2005)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The proposed approach is evaluated on a lexicon learning task using the Wall Street Journal (WSJ) corpus, and on the SENSEVAL-3 word sense disambiguation task. In both cases our technique significantly outperforms our baseline systems (label propagation using standard graph construction and discriminatively trained supervised classifiers).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Several graph-based learning techniques have recently been developed and applied to NLP problems: minimum cuts (Pang and Lee, 2004) , random walks (Mihalcea, 2005; Otterbacher et al., 2005) , graph matching (Haghighi et al., 2005) , and label propagation (Niu et al., 2005 ). Here we focus on label propagation as a learning technique.", |
|
"cite_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 131, |
|
"text": "(Pang and Lee, 2004)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 147, |
|
"end": 163, |
|
"text": "(Mihalcea, 2005;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 164, |
|
"end": 189, |
|
"text": "Otterbacher et al., 2005)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 207, |
|
"end": 230, |
|
"text": "(Haghighi et al., 2005)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 255, |
|
"end": 272, |
|
"text": "(Niu et al., 2005", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The basic label propagation (LP) algorithm (Zhu and Ghahramani, 2002; Zhu, 2005) has as inputs:", |
|
"cite_spans": [ |
|
{ |
|
"start": 43, |
|
"end": 69, |
|
"text": "(Zhu and Ghahramani, 2002;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 70, |
|
"end": 80, |
|
"text": "Zhu, 2005)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label propagation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "\u2022 a labeled set", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label propagation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "{(x 1 , y 1 ), (x 2 , y 2 ), . . . , (x n , y n )},", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label propagation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where x i are samples (feature vectors) and y i \u2208 {1, 2, . . . , C} are their corresponding labels;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label propagation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "\u2022 an unlabeled set {x n+1 , . . . , x N }; \u2022 a distance measure d(i, j) i, j \u2208 {1, . . . N } de-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label propagation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "fined on the feature space. The goal is to infer the labels {y n+1 , . . . , y N } for the unlabeled set. The algorithm represents all N data points as vertices in an undirected graph with weighted edges. Initially, only the known data vertices are labeled. The edge linking vertices i and j has weight:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label propagation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "w ij = exp \u2212 d(i, j) 2 \u03b1 2 (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label propagation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where \u03b1 is a hyperparameter that needs to be empirically chosen or learned separately. w ij indicates the label affinity of vertices: the larger w ij is, the more likely it is that i and j have the same label. The LP algorithm constructs a row-normalized N \u00d7 N transition probability matrix P as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label propagation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P ij = P (i \u2192 j) = w ij N k=1 w ik", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Label propagation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "The algorithm probabilistically pushes labels from the labeled nodes to the unlabeled nodes. To do so, it defines the n\u00d7C hard labels matrix Y and the N \u00d7C soft labels matrix f , whose first n rows are identical to Y . The hard labels matrix Y is invariant through the algorithm and is initialized with probability 1 for the known label and 0 for all other labels:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label propagation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Y ic = \u03b4(y i , C)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label propagation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "(3) where \u03b4 is Kronecker's delta function. The algorithm iterates as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label propagation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "1. f \u2190 P \u00d7 f 2. f [rows 1 to n] \u2190 Y 3. If f \u223c = f , stop 4. f \u2190 f 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label propagation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Repeat from step 1 In each iteration, step 2 fixes the known labels, which might otherwise be overriden by propagated labels. The resulting labels for each feature x i , where i \u2208 {n + 1, . . . , N }, are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label propagation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "l i = arg max j=1,...,C f ij (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label propagation", |
|
"sec_num": "2.1" |
|
}, |
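The update loop above is compact enough to sketch directly. The following NumPy sketch is our own minimal illustration of Eqs. (1)-(4), not the authors' implementation; the function name, the dense distance computation, and the absolute convergence tolerance `tol` are assumptions made for brevity.

```python
import numpy as np

def label_propagation(X, y_labeled, n_labeled, alpha=1.0, tol=1e-2, max_iter=1000):
    """Minimal sketch of the LP algorithm of Zhu & Ghahramani (2002).

    X         : (N, d) feature matrix, labeled rows first
    y_labeled : (n,) integer labels in {0, ..., C-1} for the first n rows
    alpha     : kernel width hyperparameter (Eq. 1)
    Returns hard labels for the N - n unlabeled rows (Eq. 4).
    """
    N = X.shape[0]
    C = int(y_labeled.max()) + 1

    # Edge weights w_ij = exp(-d(i, j)^2 / alpha^2)  (Eq. 1)
    d2 = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)
    W = np.exp(-d2 / alpha**2)

    # Row-normalized transition matrix P  (Eq. 2)
    P = W / W.sum(axis=1, keepdims=True)

    # Hard labels Y (one-hot) and soft labels f  (Eq. 3)
    Y = np.eye(C)[y_labeled]
    f = np.zeros((N, C))
    f[:n_labeled] = Y

    for _ in range(max_iter):
        f_new = P @ f                 # step 1: propagate
        f_new[:n_labeled] = Y         # step 2: clamp the known labels
        if np.abs(f_new - f).max() < tol:
            f = f_new                 # step 3: converged
            break
        f = f_new                     # step 4: iterate again

    return f[n_labeled:].argmax(axis=1)   # Eq. 4
```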
|
{ |
|
"text": "It is important that the distance measure is locally accurate, i.e. nodes connected by an edge with a high weight should have the same label. The global distance is less relevant since label information will be propagated from labeled points through the entire space. This is why LP works well with a local distance measure that might be unsuitable as a global distance measure. Applications of LP include handwriting recognition (Zhu and Ghahramani, 2002) , image classification (Balcan et al., 2005) and retrieval (Qin et al., 2005) , and protein classification (Weston et al., 2003) . In NLP, label propagation has been used for word sense disambiguation (Niu et al., 2005) , document classification (Zhu, 2005) , sentiment analysis (Goldberg and Zhu, 2006) , and relation extraction (Chen et al., 2006) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 430, |
|
"end": 456, |
|
"text": "(Zhu and Ghahramani, 2002)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 480, |
|
"end": 501, |
|
"text": "(Balcan et al., 2005)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 516, |
|
"end": 534, |
|
"text": "(Qin et al., 2005)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 564, |
|
"end": 585, |
|
"text": "(Weston et al., 2003)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 658, |
|
"end": 676, |
|
"text": "(Niu et al., 2005)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 703, |
|
"end": 714, |
|
"text": "(Zhu, 2005)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 736, |
|
"end": 760, |
|
"text": "(Goldberg and Zhu, 2006)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 787, |
|
"end": 806, |
|
"text": "(Chen et al., 2006)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label propagation", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "One of the main problems in LP, as well as other graph-based learning techniques, is how to best construct the graph. Currently, graph construction \"is more of an art than science\" (Zhu, 2005) . Typically, edge weights are derived from a simple Euclidean or cosine distance measure, regardless of the nature of the underlying features. Edges are then established either by connecting all nodes, by applying a single global threshold to the edge weights, or by connecting each node to its k nearest neighbors according to the edge weights. This procedure is often suboptimal: Euclidean distance relies on a model of normally distributed i.i.d. random variables; cosine distance likewise assumes that the different feature vector dimensions are uncorrelated. However, many applications, particularly in NLP, rely on feature spaces with correlated dimensions. Moreover, features may have different ranges and different types (e.g. continuous, binary, multi-valued), which entails the need for normalization, binning, or scaling. Finally, common distance measures do not take advantage of domain knowledge that might be available.", |
|
"cite_spans": [ |
|
{ |
|
"start": 181, |
|
"end": 192, |
|
"text": "(Zhu, 2005)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph construction", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Some attempts have been made at improving the standard method of graph construction. For instance, in a face identification task (Balcan et al., 2005) , domain knowledge was used to identify three different edge sets based on time, color and face features, associating a different hyperparameter with each. The resulting graph was then created by superposing edge sets. Zhu (Zhu, 2005 , Ch. 7) describes graph construction using separate \u03b1 hyperparameters for each feature dimension, and presents a datadriven way (evidence maximization) for learning the values of the parameters.", |
|
"cite_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 150, |
|
"text": "(Balcan et al., 2005)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 374, |
|
"end": 384, |
|
"text": "(Zhu, 2005", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Graph construction", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Unlike previous work, we propose to optimize the feature representation used for graph construction by learning it with a first-pass supervised classifier. Under this approach, similarity of samples is defined as similarity of the output values produced by a classifier applied to the original feature representation of the samples. This idea bears similarity to classifier cascading (Alpaydin and Kaynak, 1998) , where classifiers are trained around a ruleexceptions paradigm; however, in our case, the classifiers work together, the first acting as a jointly optimized feature mapping function for the second.", |
|
"cite_spans": [ |
|
{ |
|
"start": 384, |
|
"end": 411, |
|
"text": "(Alpaydin and Kaynak, 1998)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data-driven graph construction", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "1. Train a first-pass supervised classifier that outputs soft label predictions Z i for all samples i \u2208 {1, . . . N }, e.g. a posterior probability distribution over target labels:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data-driven graph construction", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Z i = p i1 , p i2 , .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data-driven graph construction", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": ". . , p iC ; 2. Apply postprocessing to Z i if needed. 3. Use vectors Z i and an appropriately chosen distance measure to construct a graph for LP. 4. Perform label propagation over the constructed graph to find the labeling of the test samples. The advantages of this procedure are:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data-driven graph construction", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Uniform range and type of features: The out-put from a first-pass classifier can produce welldefined features, e.g. posterior probability distributions. This eliminates the problem of input features of different ranges and types (e.g. binary vs. multivalued, continuous vs. categorical attributes) which are often used in combination.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data-driven graph construction", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Feature postprocessing: The transformation of features into a different space also opens up possibilities for postprocessing (e.g. probability distribution warping) depending on the requirements of the second-pass learner. In addition, different distance functions (e.g. those defined on probability spaces) can be used, which avoids violating assumptions made by metrics such as Euclidean and cosine distance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data-driven graph construction", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Optimizing class separation: The learned representation of labeled training samples might reveal better clusters in the data than the original representation: a discriminatively-trained first pass classifier will attempt to maximize the separation of samples belonging to different classes. Moreover, the firstpass classifier may learn a feature transformation that suppresses noise in the original input space.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data-driven graph construction", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Difficulties with the proposed approach might arise when the first-pass classifier yields confident but wrong predictions, especially for outlier samples in the original space. For this reason, the first-pass classifier and the graph-based learner should not simply be concatenated without modification, but the first classifier should be optimized with respect to the requirements of the second. In our case, the choice of first-pass classifier and joint optimization techniques are determined by the particular learning task and are detailed below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data-driven graph construction", |
|
"sec_num": "3" |
|
}, |
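To make steps 1-3 concrete, here is a minimal sketch of the two-pass construction. It uses scikit-learn's LogisticRegression purely as a stand-in first-pass classifier (the paper uses an MLP and SVMs, described later); the function names, the choice of cosine distance over the posteriors, and the k-nearest-neighbor edge limit are illustrative assumptions.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

def soft_label_features(X_labeled, y_labeled, X_unlabeled):
    """Step 1: a first-pass classifier producing Z_i = (p_i1, ..., p_iC)."""
    clf = LogisticRegression(max_iter=1000)   # stand-in first-pass classifier
    clf.fit(X_labeled, y_labeled)
    X_all = np.vstack([X_labeled, X_unlabeled])
    return clf.predict_proba(X_all)           # (N, C) posterior features

def knn_graph(Z, k=10, alpha=1.0):
    """Step 3: build edge weights over the soft-label representation."""
    # cosine distance between posterior vectors (one of several possible choices)
    Zn = Z / np.linalg.norm(Z, axis=1, keepdims=True)
    dist = 1.0 - Zn @ Zn.T
    W = np.exp(-dist**2 / alpha**2)
    np.fill_diagonal(W, 0.0)
    # keep only the k strongest edges per node, then symmetrize
    keep = np.argsort(-W, axis=1)[:, :k]
    mask = np.zeros_like(W, dtype=bool)
    rows = np.arange(W.shape[0])[:, None]
    mask[rows, keep] = True
    return np.where(mask | mask.T, W, 0.0)
```

The resulting weight matrix can then be row-normalized into $P$ and passed to the propagation loop sketched in Section 2.1 (step 4).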
|
{ |
|
"text": "Our first task is a part-of-speech (POS) lexicon acquisition task, i.e. the labels to be predicted are the sets of POS tags associated with each word in a lexicon. Note that this is not a tagging task: we are not attempting to identify the correct POS of each word in running text. Rather, for each word in the vocabulary, we attempt to infer the set of possible POS tags. Our choice of this task is motivated by our long-term goal of applying this technique to lexicon acquisition for resource-poor languages: POS lexi-cons are one of the most basic language resources, which enable subsequent training of taggers, chunkers, etc. We assume that a small set of words can be reliably annotated, and that POS-sets for the remaining words can be inferred by semi-supervised learning. Rather than choosing a genuinely resource-poor language for this task, we use the English Wall Street Journal (WSJ) corpus and artificially limit the size of the labeled set. This is because the WSJ corpus is widely obtainable and allows easy replication of our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexicon acquisition task", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We use sections 0-18 of the Wall Street Journal corpus (N = 44, 492) . Words have between 1 and 4 POS tags, with an average of 1.1 per word. The number of POS tags is 36, and we treat every POS combination as a unique class, resulting in C = 158 distinct labels. We use three different randomly selected training sets of various sizes: 5000, 10000, and 15000 words, representing about 11%, 22%, and 34% of the entire data set respectively; the rest of the data was used for testing. In order to avoid experimental bias, we run all experiments on five different randomly chosen labeled subsets and report averages and standard deviations. Due to the random sampling of the data it is possible that some labels never occur in the training set or only occur once. We train our classifiers only on those labels that occur at least twice, which results in 60-63 classes. Labels not present in the training set will therefore not be hypothesized and are guaranteed to be errors. We delete samples with unknown labels from our unlabeled set since their percentage is less than 0.5% on average.", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 68, |
|
"text": "(N = 44, 492)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexicon acquisition task", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We use the following features to represent samples:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexicon acquisition task", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Integer: the three-letter suffix of the word;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexicon acquisition task", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Integer: The four-letter suffix of the word;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexicon acquisition task", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Integer \u00d7 4: The indices of the four most frequent words that immediately precede the word in the WSJ text; \u2022 Boolean: word contains capital letters;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexicon acquisition task", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Boolean: word consists only of capital letters;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexicon acquisition task", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Boolean: word contains digits;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexicon acquisition task", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Boolean: word contains a hyphen;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexicon acquisition task", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Boolean: word contains other special characters (e.g. \"&\"). We have also experimented with shorter suffixes and with prefixes but those features tended to degrade performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexicon acquisition task", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The second task is word sense disambiguation using the SENSEVAL-3 corpus (Mihalcea et al., 2004) , to enable a comparison of our method with previously published results. The goal is to disambiguate the different senses of each of 57 words given the sentences within which they occur. There are 7860 samples for training and 3944 for testing. In line with existing work (Lee and Ng, 2002; Niu et al., 2005) , we use the following features:", |
|
"cite_spans": [ |
|
{ |
|
"start": 73, |
|
"end": 96, |
|
"text": "(Mihalcea et al., 2004)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 370, |
|
"end": 388, |
|
"text": "(Lee and Ng, 2002;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 389, |
|
"end": 406, |
|
"text": "Niu et al., 2005)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SENSEVAL-3 word sense disambiguation task", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Integer \u00d7 7: seven features consisting of the POS of the previous three words, the POS of the next three words, and the POS of the word itself. We used the MXPOST tagger (Ratnaparkhi, 1996) for POS annotation. \u2022 Integer \u00d7 variable length : a bag of all words in the surrounding context. \u2022 Integer \u00d7 15: Local collocations C ij (i, j are the bounds of the collocation window)-word combinations from the context of the word to disambiguate. In addition to the 11 collocations used in similar work (Lee and Ng, 2002) , we also used C \u22123,1 , C \u22123,2 , C \u22122,3 , C \u22121,3 . Note that syntactic features, which have been used in some previous studies on this dataset (Mohammad and Pedersen, 2004) , were not included. We apply a simple feature selection method: a feature X is selected if the conditional entropy H(Y |X) is above a fixed threshold (1 bit) in the training set, and if X also occurs in the test set (note that no label information from the test data is used for this purpose).", |
|
"cite_spans": [ |
|
{ |
|
"start": 497, |
|
"end": 515, |
|
"text": "(Lee and Ng, 2002)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 659, |
|
"end": 688, |
|
"text": "(Mohammad and Pedersen, 2004)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SENSEVAL-3 word sense disambiguation task", |
|
"sec_num": "4.2" |
|
}, |
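A possible reading of this entropy-based filter is sketched below; the 1-bit threshold follows the text, while the data layout (a dict mapping each candidate feature to its per-sample values) and the helper names are our own assumptions.

```python
import numpy as np
from collections import Counter

def entropy_bits(counts):
    """Entropy in bits of a distribution given as raw counts."""
    p = np.array(list(counts), dtype=float)
    p = p[p > 0] / p.sum()
    return float(-(p * np.log2(p)).sum())

def conditional_entropy(y, x):
    """H(Y|X) in bits, with x the discrete values of one candidate feature."""
    n = len(y)
    h = 0.0
    for v, n_v in Counter(x).items():
        sub = [yi for yi, xi in zip(y, x) if xi == v]
        h += (n_v / n) * entropy_bits(Counter(sub).values())
    return h

def select_features(candidates, y_train, x_train, x_test, threshold=1.0):
    """Keep features with H(Y|X) above the threshold that also occur in the test set.

    x_train / x_test map each feature name to its list of values per sample;
    no test-set labels are consulted.
    """
    return [f for f in candidates
            if f in x_test and conditional_entropy(y_train, x_train[f]) > threshold]
```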
|
{ |
|
"text": "For both tasks we compare the performance of a supervised classifier, label propagation using the standard input features and either Euclidean or cosine distance, and LP using the output from a first-pass supervised classifier.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "For this task, the first-pass classifier is a multilayer perceptron (MLP) with the topology shown in Fig. 1 . The input features are mapped to con- . tinuous values by a discrete-to-continuous mapping layer M , which is itself learned during the MLP training process. This layer connects to the hidden layer h, which in turn is connected to the output layer o. The entire network is trained via backpropagation. The training criterion maximizes the regularized log-likelihood of the training data:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 107, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "First-pass classifier", |
|
"sec_num": "5.1.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L = 1 n n t=1 log P (y t |x t , \u03b8) + R(\u03b8)", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "First-pass classifier", |
|
"sec_num": "5.1.1" |
|
}, |
|
{ |
|
"text": "The use of an additional continuous mapping layer is similar to the use of hidden continuous word representations in neural language modeling (Bengio et al., 2000) and yields better results than a standard 3-layer MLP topology. Problems caused by data scarcity arise when some of the input features of the unlabeled words have never been seen in the training set, resulting in untrained, randomly-initialized values for those feature vector components. We address this problem by creating an approximation layer A that finds the known input feature vector x that is most similar to x (by measuring the cosine similarity between the vectors). Then x k is replaced with x k , resulting in vectorx = x 1 , . . . , x k\u22121 , x k , x k+1 , . . . , x f that has no unseen features and is closest to the original vector.", |
|
"cite_spans": [ |
|
{ |
|
"start": 142, |
|
"end": 163, |
|
"text": "(Bengio et al., 2000)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "First-pass classifier", |
|
"sec_num": "5.1.1" |
|
}, |
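The approximation step can be pictured in a few lines of NumPy. This is only our reading of the description above (substitute the unseen components of a vector with those of its most cosine-similar known vector), not the authors' code; `known_X`, `seen_values`, and breaking ties via `argmax` are assumptions.

```python
import numpy as np

def approximate_unseen(x, known_X, seen_values):
    """Replace unseen components of x with those of the nearest known vector.

    x           : (f,) feature vector, possibly containing unseen values
    known_X     : (m, f) matrix of feature vectors observed in training
    seen_values : list of f sets; seen_values[k] = values of feature k seen in training
    """
    unseen = [k for k in range(len(x)) if x[k] not in seen_values[k]]
    if not unseen:
        return x
    # nearest known vector by cosine similarity
    sims = (known_X @ x) / (np.linalg.norm(known_X, axis=1) * np.linalg.norm(x) + 1e-12)
    nearest = known_X[int(np.argmax(sims))]
    x_hat = x.copy()
    x_hat[unseen] = nearest[unseen]   # substitute only the unseen components
    return x_hat
```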
|
{ |
|
"text": "We use a dense graph approach. The WSJ set has a total of 44,492 words, therefore the P matrix that the algorithm requires would have 44, 492\u00d7 44, 492 \u223c = 2 \u00d7 10 9 elements. Due to the matrix size, we avoid the analytical solution of the LP problem, which requires inverting the P matrix, and choose the iterative approach described above (Sec. 2.1) instead. Convergence is stopped when the maximum relative difference between each cell of f and the corresponding cell of f is less than 1%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "LP Setup", |
|
"sec_num": "5.1.2" |
|
}, |
|
{ |
|
"text": "Also for data size reasons, we apply LP in chunks. While the training set stays in memory, the test data is loaded in fixed-size chunks, labeled, and discarded. This approach has yielded similar results for various chunk sizes, suggesting that chunking is a good approximation of whole-set label propagation. 1 LP in chunks is also amenable to parallelization: Our system labels different chunks in parallel.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "LP Setup", |
|
"sec_num": "5.1.2" |
|
}, |
|
{ |
|
"text": "We trained the \u03b1 hyperparameter by three-fold cross-validation on the training data, using a geometric progression with limits 0.1 and 10 and ratio 2. We set fixed upper limits of edges between an unlabeled node and its labeled neighbors to 15, and between an unlabeled node and its unlabeled neighbors to 5. The approach of setting different limits among different kinds of nodes is also used in related work (Goldberg and Zhu, 2006) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 410, |
|
"end": 434, |
|
"text": "(Goldberg and Zhu, 2006)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "LP Setup", |
|
"sec_num": "5.1.2" |
|
}, |
|
{ |
|
"text": "For graph construction we tested: (a) the original discrete input representation with cosine distance; (b) the classifier output features (probability distributions) with the Jeffries-Matusita distance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "LP Setup", |
|
"sec_num": "5.1.2" |
|
}, |
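For reference, the form of the Jeffries-Matusita distance we assume here for two discrete label distributions p and q (e.g. two classifier posteriors Z_i and Z_j) is the Hellinger-style expression below; it is bounded by sqrt(2), which keeps the resulting edge weights well behaved.

```python
import numpy as np

def jeffries_matusita(p, q):
    """Jeffries-Matusita distance between two discrete distributions p and q.

    Equivalent to sqrt(2 * (1 - Bhattacharyya coefficient)); bounded by sqrt(2).
    """
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
    return float(np.sqrt(np.sum((np.sqrt(p) - np.sqrt(q)) ** 2)))
```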
|
{ |
|
"text": "The static parameters of the MLP (learning rate, regularization rate, and number of hidden units) were optimized for the LP step by 5-fold cross-validation on the training data. This process is important because overspecialization is detrimental to the combined system: an overspecialized first-pass classifier may output very confident but wrong predictions for unseen patterns, thus placing such samples at large distances from all correctly labeled samples. A strongly regularized neural network, by contrast, will output smoother probability distributions for unseen patterns. Such outputs also result in a smoother graph, which in turn helps the LP process. Thus, we found that a network with only 12 hidden units and relatively high R(\u03b8) in Eq. 5 (10% of the weight value) performed best in combination with LP (at an insignificant cost in accuracy when used as an isolated classifier).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combination optimization", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We first conducted an experiment to measure the smoothness of the underlying graph, S(G), in the two LP experiments according to the following formula:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "S(G) = y i =y j ,(i>n\u2228j>n) w ij (6)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "where y i is the label of sample i. (Lower values are better as they reflect less affinity between nodes of different labels.) The value of S(G) was in all cases significantly better on graphs constructed with our proposed technique than on graphs constructed in the standard way (see Table 1 ). Table 1 also shows the performance comparison between LP over the discrete representation and cosine distance (\"LP\"), the neural network itself (\"NN\"), and LP over the continuous representation (\"NN+LP\"), on all different subsets and for different training sizes. For scarce labeled data (5000 samples) the neural network, which uses a strictly supervised training procedure, is at a clear disadvantage. However, for a larger training set the network is able to perform more accurately than the LP learner that uses the discrete features directly. The third, combined technique outperforms the first two significantly. 2 The differences are more pronounced for smaller training set sizes. Interestingly, the LP is able to extract information from largely erroneous (noisy) distributions learned by the neural network.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 285, |
|
"end": 292, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 296, |
|
"end": 303, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.2.1" |
|
}, |
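Eq. (6) is simple to compute once the weight matrix and the reference labels are available. The helper below is an illustrative sketch rather than the authors' evaluation code; note that for a symmetric weight matrix each unordered pair is counted twice, so divide by two if single counting is desired.

```python
import numpy as np

def graph_smoothness(W, labels, n_labeled):
    """S(G): total weight of edges linking nodes with different labels,
    restricted to edges touching at least one unlabeled node (Eq. 6)."""
    N = W.shape[0]
    labels = np.asarray(labels)
    different = labels[:, None] != labels[None, :]          # y_i != y_j
    idx = np.arange(N)
    touches_unlabeled = (idx[:, None] >= n_labeled) | (idx[None, :] >= n_labeled)
    return float(W[different & touches_unlabeled].sum())
```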
|
{ |
|
"text": "We compare the performance of an SVM classifier, an LP learner using the same input features as the SVM, and an LP learner using the SVM outputs as input features. To analyze the influence of training set size on accuracy, we randomly sample subsets of the training data (25%, 50%, and 75%) and use the remaining training data plus the test data as unlabeled data, similarly to the procedure followed in related work (Niu et al., 2005) . The results are averaged over five different random samplings. The samplings were chosen such that there was at least one sample for each label in the training set. SENSEVAL-3 sports multi-labeled samples and samples with the \"unknown\" label. We eliminate all samples labeled as unknown and retain only the first label for the multi-labeled instances.", |
|
"cite_spans": [ |
|
{ |
|
"start": 417, |
|
"end": 435, |
|
"text": "(Niu et al., 2005)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Sense Disambiguation", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "The use of SVM vs. MLP in this case was justified by the very small training data set. An MLP has many parameters and needs a considerable amount of data for effective training, so for this task with only on the order of 10 2 training samples per classifier, an SVM was deemed more appropriate. We use the SVM light package to build a set of binary classifiers in a one-versus-all formulation of the multiclass classification problem. The features input to each SVM consist of the discrete features described above (Sec. 4.2) after feature selection. After training SVMs for each target label against the union of all others, we evaluate the SVM approach against the test set by using the winner-takes-all strategy: the predicted label corresponds to the SVM that outputs the largest value.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SVM setup", |
|
"sec_num": "5.3.1" |
|
}, |
|
{ |
|
"text": "Again we set up two LP systems: one using the original feature space (after feature selection, which benefited all of the tested systems) and one using the SVM outputs. Both use a cosine distance measure. The \u03b1 parameter (see Eq. 1) is optimized through 3-fold cross-validation on the training set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "LP setup", |
|
"sec_num": "5.3.2" |
|
}, |
|
{ |
|
"text": "Unlike MLPs, SVMs do not compute a smooth output distribution but base the classification decision on the sign of the output values. In order to smooth output values with a view towards graph construction we applied the following techniques:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combination optimization", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "1. Combining SVM predictions and perfect feature vectors: After training, the SVM actually outputs wrong label predictions for a small number (\u2248 5%) of training samples. These outputs could simply be replaced with the perfect SVM predictions (1 for the true class, -1 elsewhere) since the labels are known. However, the second-pass learner might actually benefit from the information contained in the misclassifications. We therefore linearly combine the SVM predictions with the \"perfect\" feature Table 1 : Accuracy results of neural classification (NN), LP with discrete features (LP), and combined (NN+LP), over 5 random samplings of 5000, 10000, and 15000 labeled words in the WSJ lexicon acquisition task. S(G) is the smoothness of the graph vectors v that contain 1 at the correct label position and -1 elsewhere:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 498, |
|
"end": 505, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Combination optimization", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "s i = \u03b3s i + (1 \u2212 \u03b3)v i (7)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combination optimization", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "where s i , s i are the i'th input and output feature vectors and \u03b3 a parameter fixed at 0.5. 2. Biasing uninformative distributions: For some training samples, although the predicted class label was correct, the outputs of the SVM were relatively close to one another, i.e. the decision was borderline. We decided to bias these SVM outputs in the right direction by using the same formula as in equation 7. 3. Weighting by class priors: For each training sample, a corresponding sample with the perfect output features was added, thus doubling the total number of labeled nodes in the graph. These synthesized nodes are akin to the \"dongle\" nodes (Goldberg and Zhu, 2006) . The difference is that, while dongle nodes are only linked to one node, our artificial nodes are treated like any other node and as such can connect to several other nodes. The role of the artificial nodes is to serve as authorities during the LP process and to emphasize class priors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 648, |
|
"end": 672, |
|
"text": "(Goldberg and Zhu, 2006)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combination optimization", |
|
"sec_num": "5.4" |
|
}, |
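The interpolation of Eq. (7) and the class-prior nodes of item 3 are straightforward to reproduce; the sketch below assumes that `svm_scores` holds one row of raw one-vs-all SVM outputs per labeled training sample and that labels are 0-indexed, both of which are our own conventions rather than details given in the paper.

```python
import numpy as np

def smooth_svm_outputs(svm_scores, y_train, n_classes, gamma=0.5):
    """Blend raw SVM outputs with 'perfect' +1/-1 target vectors (Eq. 7)."""
    V = -np.ones((len(y_train), n_classes))
    V[np.arange(len(y_train)), y_train] = 1.0          # perfect vectors v_i
    return gamma * svm_scores + (1.0 - gamma) * V       # smoothed outputs s'_i

def add_class_prior_nodes(features, labels, y_train, n_classes):
    """Item 3: add one perfect-output node per training sample to emphasize class priors."""
    V = -np.ones((len(y_train), n_classes))
    V[np.arange(len(y_train)), y_train] = 1.0
    return np.vstack([features, V]), np.concatenate([labels, y_train])
```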
|
{ |
|
"text": "As before, we measured the smoothness of the graphs in the two label propagation setups and found that in all cases the smoothness of the graph produced with our method was better when compared to the graphs produced using the standard approach, as shown in Table 3 , which also shows accuracy results for the SVM (\"SVM\" label), LP over the standard graph (\"LP\"), and label propagation over SVM outputs (\"SVM+LP\"). The latter system consistently performs best in all cases, although the most marked gains occur in the upper range of labeled samples percentage. The gain of the best data-driven LP over the knowledge-based LP is significant in the 100% and 75% cases.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 258, |
|
"end": 265, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.4.1" |
|
}, |
|
{ |
|
"text": "Acc. (%) 1 htsa3 (Grozea, 2004) 72.9 2 IRST-kernels (Strapparava et al., 2004) 72.6 3 nusels 72.4 4 SENSEVAL-3 contest baseline 55.2 5 Niu et al. (Niu et al., 2005 ) LP/J-S 70.3 6 Niu et al. LP/cosine 68.4 7 Niu et al. SVM 69.7 Table 2 : Accuracy results of other published systems on SENSEVAL-3. 1-3 use syntactic features; 5-7 are directly comparably to our system. Table 2 shows results of other published systems against the SENSEVAL corpus. The \"htsa3\", \"IRST-kernels\", and \"nusels\" systems were the winners of the SENSEVAL-3 contest and used extra input features (syntactic relations). The Niu et al. work (Niu et al., 2005) is most comparable to ours. We attribute the slightly higher performance of our SVM due to our feature selection process. The LP/cosine system is a system similar to our LP system using the discrete features, and the LP/Jensen-Shannon system is also similar but uses a distance measure derived from Jensen-Shannon divergence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 17, |
|
"end": 31, |
|
"text": "(Grozea, 2004)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 52, |
|
"end": 78, |
|
"text": "(Strapparava et al., 2004)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 146, |
|
"end": 163, |
|
"text": "(Niu et al., 2005", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 612, |
|
"end": 630, |
|
"text": "(Niu et al., 2005)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 228, |
|
"end": 235, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 368, |
|
"end": 375, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "# System", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We have presented a data-driven graph construction technique for label propagation that utilizes a first- Table 3 : Accuracy results of support vector machine (SVM), label propagation over discrete features (LP), and label propagation over SVM outputs (SVM+LP), each trained with 25%, 50%, 75% (5 random samplings each), and 100% of the train set. The improvements of SVM+LP are significant over LP in the 75% and 100% cases. S(G) is the graph smoothness pass supervised classifier. The outputs from this classifier (especially when optimized for the secondpass learner) were shown to serve as a better representation for graph-based semi-supervised learning. Classification results on two learning tasks showed significantly better performance compared to LP using standard graph construction and the supervised classifier alone.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 113, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In fact, experiments have shown that performance tends to degrade for larger chunk sizes, suggesting that whole-set LP might be affected by \"artifact\" clusters that are not related to the labels.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Significance was tested using a difference of proportions significance test; the significance level is 0.01 or smaller in all cases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Acknowledgments This work was funded by NSF under grant no. IIS-0326276. Any opinions, findings and conclusions, or recommendations expressed herein are those of the authors and do not necessarily reflect the views of this agency.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "acknowledgement", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Person identification in webcam images", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Balcan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "ICML Workshop on Learning with Partially Classified Training Data", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Balcan et al. 2005. Person identification in webcam images. In ICML Workshop on Learning with Partially Classified Train- ing Data.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "A neural probabilistic language model", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Ducharme", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Vincent", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y. Bengio, R. Ducharme, and P. Vincent. 2000. A neural prob- abilistic language model. In NIPS.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Relation Extraction Using Label Propagation Based Semi-supervised Learning", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Niu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "129--136", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Chen, D. Ji, C.L. Tan, and Z. Niu. 2006. Relation Extraction Using Label Propagation Based Semi-supervised Learning. In Proceedings of ACL, pages 129-136.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Seeing stars when there aren't many stars: Graph-based semi-supervised learning for sentiment categorization", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "HLT-NAACL Workshop on Graphbased Algorithms for Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Goldberg and J. Zhu. 2006. Seeing stars when there aren't many stars: Graph-based semi-supervised learning for sen- timent categorization. In HLT-NAACL Workshop on Graph- based Algorithms for Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Finding optimal parameter settings for high performance word sense disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Grozea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of Senseval-3 Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Grozea. 2004. Finding optimal parameter settings for high performance word sense disambiguation. Proceedings of Senseval-3 Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Robust textual inference via graph matching", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Haghighi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Haghighi, A. Ng, and C.D. Manning. 2005. Robust textual inference via graph matching. Proceedings of EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "An empirical evaluation of knowledge sources and learning algorithms for word sense disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--48", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "K. Lee and H.T. Ng. 2002. An empirical evaluation of knowl- edge sources and learning algorithms for word sense disam- biguation. In Proceedings of EMNLP, pages 41-48.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Supervised Word Sense Disambiguation with Support Vector Machines and Multiple Knowledge Sources", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Chia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y.K. Lee, H.T. Ng, and T.K. Chia. 2004. Supervised Word Sense Disambiguation with Support Vector Machines and Multiple Knowledge Sources. SENSEVAL-3.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "The Senseval-3 English Lexical Sample Task", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Chklovski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Killgariff", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of ACL/SIGLEX Senseval-3", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Mihalcea, T. Chklovski, and A. Killgariff. 2004. The Senseval-3 English Lexical Sample Task. In Proceedings of ACL/SIGLEX Senseval-3.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Unsupervised large-vocabulary word sense disambiguation with graph-based algorithms for sequence data labeling", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of HLT/EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "411--418", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Mihalcea. 2005. Unsupervised large-vocabulary word sense disambiguation with graph-based algorithms for sequence data labeling. In Proceedings of HLT/EMNLP, pages 411- 418.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Complementarity of Lexical and Simple Syntactic Features: The SyntaLex Approach to Senseval-3. Proceedings of the SENSEVAL-3", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Pedersen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Mohammad and T. Pedersen. 2004. Complementarity of Lexical and Simple Syntactic Features: The SyntaLex Ap- proach to Senseval-3. Proceedings of the SENSEVAL-3.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Word sense disambiguation using label propagation based semisupervised learning", |
|
"authors": [ |
|
{ |
|
"first": "Zheng-Yu", |
|
"middle": [], |
|
"last": "Niu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dong-Hong", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chew Lim", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "ACL '05", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zheng-Yu Niu, Dong-Hong Ji, and Chew Lim Tan. 2005. Word sense disambiguation using label propagation based semi- supervised learning. In ACL '05.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Using Random Walks for Question-focused Sentence Retrieval. Proceedings of HLT/EMNLP", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Otterbacher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Erkan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Radev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "915--922", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Otterbacher, G. Erkan, and D.R. Radev. 2005. Using Ran- dom Walks for Question-focused Sentence Retrieval. Pro- ceedings of HLT/EMNLP, pages 915-922.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "A sentimental education: Sentiment analysis using subjectivity summarization based on minimum cuts", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Pang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "271--278", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "B. Pang and L. Lee. 2004. A sentimental education: Sen- timent analysis using subjectivity summarization based on minimum cuts. In Proceedings of ACL, pages 271-278.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Subspace clustering and label propagation for active feedback in image retrieval", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T.-Y", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X.-D", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W.-Y", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H.-J", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "MMM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "172--179", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Qin, T.-Y. Liu, X.-D. Zhang, W.-Y. Ma, and H.-J. Zhang. 2005. Subspace clustering and label propagation for active feedback in image retrieval. In MMM, pages 172-179.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "A maximum entropy model for part-ofspeech tagging", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Ratnaparkhi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "133--142", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Ratnaparkhi. 1996. A maximum entropy model for part-of- speech tagging. In Proceedings of EMNLP, pages 133-142.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Pattern abstraction and term similarity for word sense disambiguation: IRST at SENSEVAL-3", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Strapparava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Gliozzo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Giuliano", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. of SENSEVAL-3", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "229--234", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Strapparava, A. Gliozzo, and C. Giuliano. 2004. Pattern abstraction and term similarity for word sense disambigua- tion: IRST at SENSEVAL-3. Proc. of SENSEVAL-3, pages 229-234.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Semi-supervised protein classification using cluster kernels", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Leslie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Elisseeff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Noble", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Weston, C. Leslie, D. Zhou, A. Elisseeff, and W. Noble. 2003. Semi-supervised protein classification using cluster kernels.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Learning from labeled and unlabeled data with label propagation", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Ghahramani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "X. Zhu and Z. Ghahramani. 2002. Learning from labeled and unlabeled data with label propagation. Technical report, CMU-CALD-02.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Semi-Supervised Learning with Graphs", |
|
"authors": [ |
|
{ |
|
"first": "Xiaojin", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaojin Zhu. 2005. Semi-Supervised Learning with Graphs. Ph.D. thesis, Carnegie Mellon University. CMU-LTI-05- 192.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Architecture of first-pass supervised classifier (MLP) for lexicon acquisition", |
|
"uris": null |
|
} |
|
} |
|
} |
|
} |