|
{ |
|
"paper_id": "Q13-1034", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:08:44.850370Z" |
|
}, |
|
"title": "Joint Morphological and Syntactic Analysis for Richly Inflected Languages", |
|
"authors": [ |
|
{ |
|
"first": "Bernd", |
|
"middle": [], |
|
"last": "Bohnet", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Charles University", |
|
"location": { |
|
"settlement": "Prague" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Charles University", |
|
"location": { |
|
"settlement": "Prague" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Igor", |
|
"middle": [], |
|
"last": "Boguslavsky", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Charles University", |
|
"location": { |
|
"settlement": "Prague" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Rich\u00e1rd", |
|
"middle": [], |
|
"last": "Farkas", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Charles University", |
|
"location": { |
|
"settlement": "Prague" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Filip", |
|
"middle": [], |
|
"last": "Ginter", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Charles University", |
|
"location": { |
|
"settlement": "Prague" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Haji\u010d", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Charles University", |
|
"location": { |
|
"settlement": "Prague" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Joint morphological and syntactic analysis has been proposed as a way of improving parsing accuracy for richly inflected languages. Starting from a transition-based model for joint part-of-speech tagging and dependency parsing, we explore different ways of integrating morphological features into the model. We also investigate the use of rule-based morphological analyzers to provide hard or soft lexical constraints and the use of word clusters to tackle the sparsity of lexical features. Evaluation on five morphologically rich languages (Czech, Finnish, German, Hungarian, and Russian) shows consistent improvements in both morphological and syntactic accuracy for joint prediction over a pipeline model, with further improvements thanks to lexical constraints and word clusters. The final results improve the state of the art in dependency parsing for all languages.", |
|
"pdf_parse": { |
|
"paper_id": "Q13-1034", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Joint morphological and syntactic analysis has been proposed as a way of improving parsing accuracy for richly inflected languages. Starting from a transition-based model for joint part-of-speech tagging and dependency parsing, we explore different ways of integrating morphological features into the model. We also investigate the use of rule-based morphological analyzers to provide hard or soft lexical constraints and the use of word clusters to tackle the sparsity of lexical features. Evaluation on five morphologically rich languages (Czech, Finnish, German, Hungarian, and Russian) shows consistent improvements in both morphological and syntactic accuracy for joint prediction over a pipeline model, with further improvements thanks to lexical constraints and word clusters. The final results improve the state of the art in dependency parsing for all languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Syntactic parsing of natural language has witnessed a tremendous development during the last twenty years, especially through the use of statistical models for robust and accurate broad-coverage parsing. However, as statistical parsing techniques have been applied to more and more languages, it has also been observed that typological differences between languages lead to new challenges. In particular, it has been found over and over again that languages exhibiting rich morphological structure, often together with a relatively free word order, usually obtain lower parsing accuracy, especially in compar-ison to English. One striking demonstration of this tendency can be found in the CoNLL shared tasks on multilingual dependency parsing, organized in 2006 and 2007, where richly inflected languages clustered at the lower end of the scale with respect to parsing accuracy (Buchholz and Marsi, 2006; Nivre et al., 2007) . These and similar observations have led to an increased interest in the special challenges posed by parsing morphologically rich languages, as evidenced most clearly by a new series of workshops devoted to this topic (Tsarfaty et al., 2010) , as well as a special issue in Computational Linguistics (Tsarfaty et al., 2013) and a shared task on parsing morphologically rich languages. 1 One hypothesized explanation for the lower parsing accuracy observed for richly inflected languages is the strict separation of morphological and syntactic analysis assumed in many parsing frameworks (Tsarfaty et al., 2010; Tsarfaty et al., 2013) . This is true in particular for data-driven dependency parsers, which tend to assume that all morphological disambiguation has been performed before syntactic analysis begins. However, as argued by Lee et al. (2011) , in morphologically rich languages there is often considerable interaction between morphology and syntax, such that neither can be disambiguated without the other. Lee et al. (2011) go on to show that a discriminative model for joint morphological disambiguation and dependency parsing gives consistent improvements in morphological and syntactic accuracy, compared to a pipeline model, for Ancient Greek, Czech, Hungarian and Latin. Similarly, Bohnet and Nivre (2012) propose a model for joint part-of-speech tagging and dependency parsing and report improved accuracy for Czech and German (but also for Chinese and English), although in this case the joint model is limited to basic part-ofspeech tags and does not involve the full complex of morphological features. An integrated approach to morphological and syntactic analysis can also be found in grammar-based dependency parsers, such as the ETAP-3 linguistic processor (Apresian et al., 2003) , where morphological disambiguation is mostly carried out together with syntactic analysis. Finally, it is worth noting that joint models of morphology and syntax have been more popular in constituency-based statistical parsing (Cowan and Collins, 2005; Tsarfaty, 2006; Cohen and Smith, 2007; Goldberg and Tsarfaty, 2008) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 879, |
|
"end": 905, |
|
"text": "(Buchholz and Marsi, 2006;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 906, |
|
"end": 925, |
|
"text": "Nivre et al., 2007)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 1145, |
|
"end": 1168, |
|
"text": "(Tsarfaty et al., 2010)", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 1227, |
|
"end": 1250, |
|
"text": "(Tsarfaty et al., 2013)", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 1312, |
|
"end": 1313, |
|
"text": "1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1514, |
|
"end": 1537, |
|
"text": "(Tsarfaty et al., 2010;", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 1538, |
|
"end": 1560, |
|
"text": "Tsarfaty et al., 2013)", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 1760, |
|
"end": 1777, |
|
"text": "Lee et al. (2011)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 1943, |
|
"end": 1960, |
|
"text": "Lee et al. (2011)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 2224, |
|
"end": 2247, |
|
"text": "Bohnet and Nivre (2012)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 2706, |
|
"end": 2729, |
|
"text": "(Apresian et al., 2003)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 2959, |
|
"end": 2984, |
|
"text": "(Cowan and Collins, 2005;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 2985, |
|
"end": 3000, |
|
"text": "Tsarfaty, 2006;", |
|
"ref_id": "BIBREF51" |
|
}, |
|
{ |
|
"start": 3001, |
|
"end": 3023, |
|
"text": "Cohen and Smith, 2007;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 3024, |
|
"end": 3052, |
|
"text": "Goldberg and Tsarfaty, 2008)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Another hypothesis from the literature is that the high type-token ratio resulting from large morphological paradigms leads to data sparseness when estimating the parameters of a statistical parsing model (Tsarfaty et al., 2010; Tsarfaty et al., 2013) . In particular, for many words in the language, only a subset of its morphological forms will be observed at training time. This suggests that using rule-based morphological analyzers or other lexical resources may be a viable strategy to improve coverage and performance. Thus, Goldberg and Elhadad (2013) show that integrating an external wide-coverage lexicon with a treebank-trained PCFG parser improves parsing accuracy for Modern Hebrew, which is in line with earlier studies of part-of-speech tagging for morphologically rich languages (Haji\u010d, 2000) . The sparsity of lexical features can also be tackled by the use of distributional word clusters as pioneered by Koo et al. (2008) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 205, |
|
"end": 228, |
|
"text": "(Tsarfaty et al., 2010;", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 229, |
|
"end": 251, |
|
"text": "Tsarfaty et al., 2013)", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 532, |
|
"end": 559, |
|
"text": "Goldberg and Elhadad (2013)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 796, |
|
"end": 809, |
|
"text": "(Haji\u010d, 2000)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 924, |
|
"end": 941, |
|
"text": "Koo et al. (2008)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we present a transition-based model that jointly predicts complex morphological representations and dependency relations, generalizing the approach of Bohnet and Nivre (2012) to include the full range of morphological information. We start by investigating different ways of integrating morphological features into the model, go on to examine the effect of using rule-based morphological analyzers to derive hard or soft constraints on the morphological analysis, and finally add word cluster features to combat lexical sparsity. We evaluate our methods on data from Czech, Finnish, German, Hungarian, and Russian, five morphologically rich languages representing three different language groups. The experiments show that joint prediction of morphology and syntax, rule-based morphological analyzers, and word clusters all contribute to improved parsing accuracy, leading to new state-ofthe-art results for all languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 166, |
|
"end": 189, |
|
"text": "Bohnet and Nivre (2012)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this section, we define target representations and evaluation metrics (2.1), and describe our transitionbased parsing framework, consisting of an abstract transition system (2.2), a feature-based scoring function (2.3), and algorithms for decoding (2.4) and learning (2.5).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We take an unlabeled dependency tree for a sentence x = w 1 , . . . , w n to be a directed tree T = (", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representations and Metrics", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "V x , A), where V x = {0, 1, ..., n}, A \u2286 V x \u00d7 V +", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representations and Metrics", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "x , and 0 is the root of the tree (K\u00fcbler et al., 2009) . The set V x of nodes is the set of positive integers up to and including n, each corresponding to the linear position of a word in the sentence, plus an extra artificial root node 0. We use V +", |
|
"cite_spans": [ |
|
{ |
|
"start": 34, |
|
"end": 55, |
|
"text": "(K\u00fcbler et al., 2009)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representations and Metrics", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "x to denote V x \u2212{0}. The set A of arcs is a set of pairs (i, j), where i is the head node and j is the dependent node.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representations and Metrics", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "To this basic representation of syntactic structure we add four labeling functions for part-of-speech tags, morphological features, lemmas, and dependency relations. The function \u03c0 : V +", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representations and Metrics", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "x \u2192 P maps each node in V +", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representations and Metrics", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "x to a part-of-speech tag in the set P ; the function \u00b5 : V +", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representations and Metrics", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "x \u2192 M maps each node to a morphological description in the set M ; the function \u03bb :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representations and Metrics", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "V + x \u2192 Z * maps each node in V +", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representations and Metrics", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "x to a lemma (a string over some character set Z); and the function \u03b4 : A \u2192 D maps each arc to a dependency label in the set D. The exact nature of P , M and D depends on the data sets used, but normally P and D only contain atomic labels while the members of M are sets of atomic features encoding properties like number, case, tense, etc. For lemmas, we do not assume that there is a fixed lexicon but allow any character string as a legal value.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representations and Metrics", |
|
"sec_num": "2.1" |
|
}, |
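To make the node-labeling functions defined above concrete, here is a minimal Python sketch (not from the paper); the MSParse class name, its fields, and the toy example are invented for illustration, and morphological descriptions are modelled as frozensets of attribute=value strings, one plausible encoding of the set-valued members of M.

```python
from dataclasses import dataclass, field
from typing import Dict, FrozenSet, Set, Tuple

# Hypothetical container for a morphosyntactic analysis over nodes V_x = {0, 1, ..., n},
# where node 0 is the artificial root and is itself never labelled.
@dataclass
class MSParse:
    n: int                                                            # number of words
    arcs: Set[Tuple[int, int]] = field(default_factory=set)           # A: (head, dependent) pairs
    pos: Dict[int, str] = field(default_factory=dict)                 # pi: node -> part-of-speech tag
    morph: Dict[int, FrozenSet[str]] = field(default_factory=dict)    # mu: node -> set of atomic features
    lemma: Dict[int, str] = field(default_factory=dict)               # lambda: node -> lemma string
    deprel: Dict[Tuple[int, int], str] = field(default_factory=dict)  # delta: arc -> dependency label

    def head_of(self, j: int) -> int:
        """Return the head of node j, or -1 if j has no incoming arc yet."""
        for (i, k) in self.arcs:
            if k == j:
                return i
        return -1

# Toy two-word sentence with arcs 2 -> 1 and 0 -> 2 (values are illustrative only).
parse = MSParse(n=2)
parse.arcs = {(2, 1), (0, 2)}
parse.deprel = {(2, 1): "nsubj", (0, 2): "root"}
parse.pos = {1: "NOUN", 2: "VERB"}
parse.morph = {1: frozenset({"Case=Nom", "Number=Sing"}), 2: frozenset({"Tense=Pres", "Person=3"})}
parse.lemma = {1: "Maria", 2: "laulaa"}
print(parse.head_of(1))  # -> 2
```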
|
{ |
|
"text": "We define our target representation for a sentence x = w 1 , . . . , w n as a quintuple \u0393 = (A, \u03c0, \u00b5, \u03bb, \u03b4) such that (V x , A) is an unlabeled dependency tree; \u03c0, \u00b5 and \u03bb label the nodes with part-of-speech tags, Figure 1 : Transitions for joint morphological and syntactic analysis. The stack \u03a3 is represented as a list with its head to the right (and tail \u03c3) and the buffer B as a list with its head to the left (and tail \u03b2). The notation \u0393[q 1 , . . . , q m ] is used to denote an MS-parse that is exactly like \u0393 except that q 1 , . . . , q m hold true. morphological features and lemmas; and \u03b4 labels the arcs with dependency relations. For convenience, we refer to this type of structure as a morphosyntactic parse (or MS-parse, for short). The following evaluation metrics are used to score an MS-parse with respect to a gold standard:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 214, |
|
"end": 222, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Representations and Metrics", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Transition Condition LEFT-ARC d ([\u03c3|i, j], B, \u0393) \u21d2 ([\u03c3|j], B, \u0393[(j, i) \u2208 A, \u03b4(j, i) = d]) i = 0 RIGHT-ARC d ([\u03c3|i, j], B, \u0393) \u21d2 ([\u03c3|i], B, \u0393[(i, j) \u2208 A, \u03b4(i, j) = d]) SHIFT p,m,l (\u03c3, [i|\u03b2], \u0393) \u21d2 ([\u03c3|i], \u03b2, \u0393[\u03c0(i) = p, \u00b5(i) = m, \u03bb(i) = l]) SWAP ([\u03c3|i, j], \u03b2, \u0393) \u21d2 ([\u03c3|j], [i|\u03b2], \u0393) 0 < i < j", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representations and Metrics", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "1. POS: The percentage of nodes in V +", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representations and Metrics", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "x that have the correct morphological description; if the description is set-valued, all members of the set must match exactly.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "MOR: The percentage of nodes in V +", |
|
"sec_num": "2." |
|
}, |
|
{ |
|
"text": "x that have the correct lemma.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "LEM: The percentage of nodes in V +", |
|
"sec_num": "3." |
|
}, |
|
{ |
|
"text": "x that have the correct incoming arc.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "UAS: The percentage of nodes in V +", |
|
"sec_num": "4." |
|
}, |
|
{ |
|
"text": "x that have the correct incoming arc with the correct label.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "LAS: The percentage of nodes in V +", |
|
"sec_num": "5." |
|
}, |
|
{ |
|
"text": "x that have the correct part-of-speech tag and the correct morphological description.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "PM: The percentage of nodes in V +", |
|
"sec_num": "6." |
|
}, |
|
{ |
|
"text": "x that have the correct part-of-speech tag, the correct morphological description, and the correct incoming arc with the correct label.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "PMD: The percentage of nodes in V +", |
|
"sec_num": "7." |
|
}, |
|
{ |
|
"text": "The POS, UAS and LAS metrics are standard in the dependency parsing literature; the additional metrics will provide us with a more fine-grained picture of the (joint) morphological and syntactic accuracy. All evaluation scores are computed over all tokens, including punctuation. We test statistical significance primarily for the PMD metric, using a twotailed paired t-test.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "PMD: The percentage of nodes in V +", |
|
"sec_num": "7." |
|
}, |
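The seven metrics amount to exact token-level comparison against a gold MS-parse. The function below is an illustrative sketch only; it assumes objects shaped like the hypothetical MSParse container sketched in Section 2.1 above and scores all nodes in V + x, punctuation included.

```python
def evaluate(pred, gold):
    """Token-level metrics as defined in Section 2.1 (illustrative sketch).

    `pred` and `gold` are assumed to expose pos/morph/lemma dicts,
    a head_of(j) lookup, and a deprel dict keyed by (head, dependent).
    """
    n = gold.n
    counts = {m: 0 for m in ("POS", "MOR", "LEM", "UAS", "LAS", "PM", "PMD")}
    for j in range(1, n + 1):                               # the nodes in V + x
        pos_ok = pred.pos.get(j) == gold.pos.get(j)
        mor_ok = pred.morph.get(j) == gold.morph.get(j)      # set-valued: exact match required
        lem_ok = pred.lemma.get(j) == gold.lemma.get(j)
        head_ok = pred.head_of(j) == gold.head_of(j)
        las_ok = head_ok and pred.deprel.get((pred.head_of(j), j)) == gold.deprel.get((gold.head_of(j), j))
        counts["POS"] += pos_ok
        counts["MOR"] += mor_ok
        counts["LEM"] += lem_ok
        counts["UAS"] += head_ok
        counts["LAS"] += las_ok
        counts["PM"] += pos_ok and mor_ok
        counts["PMD"] += pos_ok and mor_ok and las_ok
    return {m: 100.0 * c / max(n, 1) for m, c in counts.items()}
```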
|
{ |
|
"text": "A transition system for dependency parsing is a quadruple S = (C, T, c s , C t ), where C is a set of configurations, T is a set of transitions, each of which is a (partial) function t : C \u2192 C, c s is an initialization function, mapping a sentence x to a configuration c \u2208 C, and C t \u2286 C is a set of terminal configurations. A transition sequence for a sentence x in S is a sequence of configuration-transition pairs", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transition System", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "C 0,m = [(c 0 , t 0 ), (c 1 , t 1 ), . . . , (c m , t m )] where c 0 = c s (x), t m (c m ) \u2208 C t , and t i (c i ) = c i+1 (0 \u2264 i < m).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transition System", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In our model for joint prediction of part-of-speech tags, morphological features and dependency trees, the set C of configurations consists of all triples c = (\u03a3, B, \u0393) such that \u03a3 (the stack) and B (the buffer) are disjoint sublists of the nodes V x of some sentence x, and \u0393 = (A, \u03c0, \u00b5, \u03bb, \u03b4) is an MS-parse for x. We take the initial configuration for a sentence", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transition System", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "x = w 1 , . . . , w n to be c s (x) = ([0], [1, . . . , n], (\u2205, \u22a5, \u22a5, \u22a5, \u22a5)),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transition System", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where \u22a5 is the function that is undefined for all arguments, and we take the set C t of terminal configurations to be the set of all configurations of the form c = ([0], [ ], \u0393) (for any \u0393). The MS-parse defined for x by c = (\u03a3, B, (A, \u03c0, \u00b5, \u03bb, \u03b4)) is \u0393 c = (A, \u03c0, \u00b5, \u03bb, \u03b4), and the MS-parse defined for x by a complete transition sequence C 0,m is \u0393 tm(cm) .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Transition System", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The set T of transitions is shown in Figure 1 . It is based on the system of Nivre (2009) , where a dependency tree is built by repeated applications of the LEFT-ARC d and RIGHT-ARC d transitions, which add an arc (with some label d \u2208 D) between the two topmost nodes on the stack (with the leftmost or rightmost node as the dependent, respectively). The SHIFT transition is used to move nodes from the buffer to the stack, and the SWAP transition is used to permute nodes in order to allow non-projective dependencies. Bohnet and Nivre (2012) modified this system by replacing the simple SHIFT transition by SHIFT p , which not only moves a node from the buffer to the stack but also assigns it a part-of-speech tag p, turning it into a system for joint part-of-speech tagging and dependency parsing. 2 Here we add two additional parameters m and l to the SHIFT transition, so that a node moved from the buffer to the stack is assigned not only a tag p but also a morphological description m and a lemma l. In this way, we get a joint model for the prediction of part-ofspeech tags, morphological features, lemmas, and dependency trees.", |
|
"cite_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 89, |
|
"text": "Nivre (2009)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 45, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Transition System", |
|
"sec_num": "2.2" |
|
}, |
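A minimal sketch of how the four transitions act on a configuration (Σ, B, Γ) may be helpful; it is not the authors' implementation, and the Config class and its attribute names are invented for illustration.

```python
from copy import deepcopy

class Config:
    """A configuration (stack, buffer, partial MS-parse); names are illustrative."""
    def __init__(self, n):
        self.stack = [0]                     # Sigma, topmost element to the right
        self.buffer = list(range(1, n + 1))  # B, first element to the left
        self.arcs, self.deprel = set(), {}
        self.pos, self.morph, self.lemma = {}, {}, {}

def left_arc(c, d):
    # ([sigma|i, j], B, Gamma) => ([sigma|j], B, Gamma[(j, i) in A, delta(j, i) = d]), i != 0
    c = deepcopy(c)
    i, j = c.stack[-2], c.stack[-1]
    assert i != 0, "the artificial root cannot become a dependent"
    c.stack[-2:] = [j]
    c.arcs.add((j, i)); c.deprel[(j, i)] = d
    return c

def right_arc(c, d):
    # ([sigma|i, j], B, Gamma) => ([sigma|i], B, Gamma[(i, j) in A, delta(i, j) = d])
    c = deepcopy(c)
    i, j = c.stack[-2], c.stack[-1]
    c.stack.pop()
    c.arcs.add((i, j)); c.deprel[(i, j)] = d
    return c

def shift(c, p, m, l):
    # SHIFT_{p,m,l}: move the next buffer node to the stack and label it jointly
    c = deepcopy(c)
    i = c.buffer.pop(0)
    c.stack.append(i)
    c.pos[i], c.morph[i], c.lemma[i] = p, m, l
    return c

def swap(c):
    # SWAP: move the second-topmost stack node back to the front of the buffer (0 < i < j)
    c = deepcopy(c)
    i, j = c.stack[-2], c.stack[-1]
    assert 0 < i < j
    c.stack[-2:] = [j]
    c.buffer.insert(0, i)
    return c
```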
|
{ |
|
"text": "In transition-based parsing, we score parses in an indirect fashion by scoring transition sequences. In general, we assume that the score function s factors by configuration-transition pairs:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Scoring", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "s(x, C 0,m ) = m i=0 s(x, c i , t i )", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Scoring", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Moreover, when using structured learning, as first proposed for transition-based parsing by Zhang and Clark (2008) , we assume that the score is given by a linear model whose feature representations decompose in the same way:", |
|
"cite_spans": [ |
|
{ |
|
"start": 92, |
|
"end": 114, |
|
"text": "Zhang and Clark (2008)", |
|
"ref_id": "BIBREF54" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Scoring", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "s(x, C 0,m ) = f (x, C 0,m ) \u2022 w = m i=0 f (x, c i , t i ) \u2022 w (2) Here, f (x, c, t) is a high-dimensional feature vec- tor, where each component f i (x, c, t)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Scoring", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "is a nonnegative numerical feature (usually binary), and w is a weight vector of the same dimensionality, where each component w i is the real-valued weight of the feature f i (x, c, t). The choice of features to include in f (x, c, t) is discussed separately for each instantiation of the model in Sections 4-6. to 0.0, make N iterations over the training data and update the weight vector for every sentence x where the transition sequence C 0,m corresponding to the gold parse is different from the highest scoring transition sequence C * 0,m . 4 More precisely, we use the passive-aggressive update of Crammer et al. (2006) . We also use the early update strategy found beneficial for parsing in several previous studies (Collins and Roark, 2004; Zhang and Clark, 2008; Huang and Sagae, 2010) . This means that, at learning time, we terminate the beam search as soon as the hypothesis corresponding to the gold parse is pruned from the beam and then update with respect to the partial transition sequences constructed up to that point. Finally, we use the standard technique of averaging over all weight vectors seen in training, as originally proposed by Collins (2002) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 606, |
|
"end": 627, |
|
"text": "Crammer et al. (2006)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 725, |
|
"end": 750, |
|
"text": "(Collins and Roark, 2004;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 751, |
|
"end": 773, |
|
"text": "Zhang and Clark, 2008;", |
|
"ref_id": "BIBREF54" |
|
}, |
|
{ |
|
"start": 774, |
|
"end": 796, |
|
"text": "Huang and Sagae, 2010)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 1160, |
|
"end": 1174, |
|
"text": "Collins (2002)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Scoring", |
|
"sec_num": "2.3" |
|
}, |
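The learning procedure described above can be sketched as a simple online loop. This is an illustrative simplification, not the MATE-tools implementation: decode, gold_sequence and features are hypothetical hooks, the passive-aggressive step size is a simplified variant of Crammer et al. (2006), early update is assumed to happen inside decode, and averaging is approximated by summing the weight vector after each update.

```python
import numpy as np

def train(sentences, decode, gold_sequence, features, dim, n_iter=10):
    """Illustrative online training loop (not the authors' code).

    Hypothetical hooks: decode(x, w) returns the beam as a list of
    (transition_sequence, summed_feature_vector, score) triples, stopping
    early when the gold prefix falls out of the beam (early update);
    gold_sequence(x) returns the gold sequence truncated to the same length;
    features(x, seq) returns its summed feature vector as a numpy array.
    """
    w = np.zeros(dim)
    w_sum = np.zeros(dim)            # sum of weight vectors after each update
    updates = 0
    for _ in range(n_iter):
        for x in sentences:
            pred_seq, pred_f, pred_s = decode(x, w)[0]   # highest-scoring hypothesis
            gold_seq = gold_sequence(x)
            if pred_seq == gold_seq:
                continue
            gold_f = features(x, gold_seq)
            delta = gold_f - pred_f
            # Passive-aggressive style step: just large enough to score the
            # gold sequence above the prediction with a margin of 1
            # (a simplified version of Crammer et al., 2006).
            loss = pred_s - gold_f.dot(w) + 1.0
            tau = max(0.0, loss) / max(delta.dot(delta), 1e-12)
            w = w + tau * delta
            w_sum += w
            updates += 1
    return w_sum / max(updates, 1)   # simple approximation of weight averaging
```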
|
{ |
|
"text": "PARSE(x, w) 1 h 0 .c \u2190 c s (x) 2 h 0 .s \u2190 0.0 3 h 0 .f \u2190 {0.0} dim(w) 4 BEAM \u2190 [h 0 ] 5 while \u2203h \u2208 BEAM : h.c \u2208 C t 6 TMP \u2190 [ ] 7 foreach h \u2208 BEAM 8 foreach t \u2208 T : PERMISSIBLE(h.c, t) 9 h.f \u2190 h.f + f(x, h.c, t) 10 h.s \u2190 h.s + f(x, h.c, t) \u2022 w 11 h.c \u2190 t(h.c) 12 TMP \u2190 INSERT(h, TMP) 13 BEAM \u2190 PRUNE(TMP) 14 h * \u2190 TOP(BEAM) 15 return \u0393 h * c", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Scoring", |
|
"sec_num": "2.3" |
|
}, |
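The PARSE procedure above translates almost line by line into the following sketch. It is illustrative only: Hypothesis, initial, is_terminal, permissible, apply_t and feats are hypothetical stand-ins for the real system, the weight vector is modelled as a sparse dict, and finished hypotheses are simply carried along until the whole beam is terminal.

```python
import heapq

class Hypothesis:
    """Beam hypothesis: a configuration, its score, and accumulated features."""
    def __init__(self, config, score=0.0, feats=None):
        self.config, self.score, self.feats = config, score, dict(feats or {})

def parse(x, w, initial, is_terminal, permissible, apply_t, feats, beam_size=40):
    """Beam-search decoding following the PARSE pseudocode (illustrative only).

    Hypothetical hooks: initial(x) builds c_s(x); is_terminal(c) tests c in C_t;
    permissible(c) yields legal transitions; apply_t(c, t) returns t(c);
    feats(x, c, t) returns a sparse {feature: value} dict; w is a sparse
    {feature: weight} dict.
    """
    beam = [Hypothesis(initial(x))]
    while any(not is_terminal(h.config) for h in beam):
        tmp = []
        for h in beam:
            if is_terminal(h.config):
                tmp.append(h)                      # carry finished hypotheses along
                continue
            for t in permissible(h.config):
                f = feats(x, h.config, t)
                new = Hypothesis(apply_t(h.config, t), h.score, h.feats)
                for k, v in f.items():             # accumulate features and score
                    new.feats[k] = new.feats.get(k, 0.0) + v
                    new.score += w.get(k, 0.0) * v
                tmp.append(new)
        beam = heapq.nlargest(beam_size, tmp, key=lambda h: h.score)   # PRUNE
    best = max(beam, key=lambda h: h.score)        # TOP(BEAM)
    return best.config                             # the MS-parse is read off this configuration
```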
|
{ |
|
"text": "Throughout the paper, we experiment with data from five languages: Czech, Finnish, German, Hungarian, and Russian. For each language, we use a morphologically and syntactically annotated corpus (treebank), divided into a training set, a development set and a test set. In addition, we use a lexicon generated by a rule-based morphological analyzer, and distributional word clusters derived from a large unlabeled corpus. Below we describe the specific resources used for each language. Table 1 provides descriptive statistics about the resources.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 486, |
|
"end": 493, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Sets and Resources", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Czech For training and test we use the Prague Dependency Treebank (Haji\u010d et al., 2001; B\u00f6hmov\u00e1 et al., 2003) , Version 2.5, converted to the format used in the CoNLL 2009 shared task (Haji\u010d et al., 2009) . The morphological lexicon comes from Haji\u010d and Hladk\u00e1 (1998) , 5 and word clusters are derived from a large web corpus (Spoustov\u00e1 and Spousta, 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 86, |
|
"text": "(Haji\u010d et al., 2001;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 87, |
|
"end": 108, |
|
"text": "B\u00f6hmov\u00e1 et al., 2003)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 183, |
|
"end": 203, |
|
"text": "(Haji\u010d et al., 2009)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 243, |
|
"end": 266, |
|
"text": "Haji\u010d and Hladk\u00e1 (1998)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 269, |
|
"end": 270, |
|
"text": "5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 325, |
|
"end": 354, |
|
"text": "(Spoustov\u00e1 and Spousta, 2012)", |
|
"ref_id": "BIBREF46" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Sets and Resources", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Finnish The training set is from the Turku Dependency Treebank (Haverinen et al., 2013) , and the test set is the hidden test set maintained by the treebank developers. It is worth noting that, while the entire treebank has manually validated syntactic annotation, the morphological annotation is automatic except for a subset of 1204 tokens in the test set, which will be used to estimate the POS, MOR, LEM, PM and PMD scores. The estimated accuracy of the automatic annotation is 97.3% POS and 94.8% PM (Haverinen et al., 2013) . Also, because of the limited amount of data, we do not use a development set for Finnish but instead use cross-validation on the training set when tuning parameters. We use the open-source morphological analyzer OMorFi (Pirinen, 2011) and word clusters derived from the entire Finnish Wikipedia. 6 German Training and test sets are from the Tiger Treebank (Brants et al., 2002) in the improved dependency conversion by Seeker et al. (2010) . We use the SMOR morphological analyzer (Schmid et al., 2004) , but because the tags and morphological features in the lexicon are not the same as in the Hungarian For training and test we use the Szeged Dependency Treebank (Farkas et al., 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 63, |
|
"end": 87, |
|
"text": "(Haverinen et al., 2013)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 505, |
|
"end": 529, |
|
"text": "(Haverinen et al., 2013)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 828, |
|
"end": 829, |
|
"text": "6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 888, |
|
"end": 909, |
|
"text": "(Brants et al., 2002)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 951, |
|
"end": 971, |
|
"text": "Seeker et al. (2010)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 1013, |
|
"end": 1034, |
|
"text": "(Schmid et al., 2004)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 1197, |
|
"end": 1218, |
|
"text": "(Farkas et al., 2012)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Sets and Resources", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We use a finite-state morphological analyzer constructed from the morphdb.hu lexical resource (Tr\u00f3n et al., 2006) , and word clusters come from the Hungarian National Corpus (V\u00e1radi, 2002) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 113, |
|
"text": "(Tr\u00f3n et al., 2006)", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 174, |
|
"end": 188, |
|
"text": "(V\u00e1radi, 2002)", |
|
"ref_id": "BIBREF52" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Sets and Resources", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Russian Parsers are trained and tested on data from the SynTagRus Treebank (Boguslavsky et al., 2000; Boguslavsky et al., 2002) . The morphological analyzer is a module of the ETAP-3 linguistic processor (Apresian et al., 2003) with a dictionary comprising more than 130,000 lexemes (Iomdin and Sizov, 2008) . Word clusters have been produced on the basis of an unlabeled corpus of Russian compiled by the Russian Language Institute of the Russian Academy of Sciences and tokenized by the ETAP-3 analyzer.", |
|
"cite_spans": [ |
|
{ |
|
"start": 75, |
|
"end": 101, |
|
"text": "(Boguslavsky et al., 2000;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 102, |
|
"end": 127, |
|
"text": "Boguslavsky et al., 2002)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 283, |
|
"end": 307, |
|
"text": "(Iomdin and Sizov, 2008)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Sets and Resources", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We start by exploring different ways of integrating morphology and syntax in a data-driven setting, that is, where our only knowledge source is the annotated training corpus. At both learning and parsing time, we preprocess sentences using a tagger that assigns (up to) k p part-of-speech tags and k m morphological descriptions and a lemmatizer that assigns a single best lemma to each word. Complex morphological descriptions consisting of several atomic features are predicted as a whole, both in preprocessing and in parsing. Although it would be pos-sible to predict each atomic morphological feature separately, we believe this would increase the risk of creating inconsistent morphological descriptions. As preprocessors, we use the tagger and lemmatizer included in the MATE tools 8 trained on the same annotated training set, using 10-fold jack-knifing to get predictions for the training set itself. The tagger is a greedy left-to-right tagger trained with the same passive-aggressive online learning as the parsing system, which is run twice over the input to make more effective use of contextual features. The tagger scores are not properly normalized but tend to be in the [0,1] range for both part-of-speech tags and morphological descriptions. In this setting, we consider four different models for deriving a full MS-parse:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Morphology and Syntax", |
|
"sec_num": "4" |
|
}, |
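The 10-fold jack-knifing used to obtain realistic tagger predictions on the training set can be sketched as follows; train_tagger and tag are hypothetical hooks, not the actual MATE-tools interface.

```python
def jackknife(train_sentences, train_tagger, tag, n_folds=10):
    """Produce automatic tags for the training set itself (illustrative).

    Each fold is tagged by a model trained on the other nine folds, so the
    parser sees tagger errors of roughly the same kind at training time
    as it will at parsing time.
    """
    predictions = [None] * len(train_sentences)
    for k in range(n_folds):
        held_out = [i for i in range(len(train_sentences)) if i % n_folds == k]
        rest = [s for i, s in enumerate(train_sentences) if i % n_folds != k]
        model = train_tagger(rest)                              # hypothetical training hook
        for i in held_out:
            predictions[i] = tag(model, train_sentences[i])     # k-best tags and morphology
    return predictions
```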
|
{ |
|
"text": "1. In the PIPELINE model, we set k p = k m = 1, which means that the SHIFT transition always selects the 1-best tag, morphological description and lemma for each word. We use a beam size of 40 and prune by simply keeping the 40 highest scoring hypotheses at each step. As the name suggests, this is equivalent to a standard pipeline with no joint prediction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Morphology and Syntax", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "2. The SIMPLETAG model replicates the model of Bohnet and Nivre (2012) with k p = 2, k m = 1, and a score threshold for tags of 0.25, meaning that the second best tag is only considered if its score is less than 0.25 below that of the best tag. We use two-step beam pruning, where we first extract the 40 highest scoring hypotheses with distinct dependency trees and then add the 8 highest scoring remaining hypotheses (normally morphological variants of hypotheses already included) for a total beam size of 48. This model performs joint tagging and parsing but relies on 1-best morphological features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Morphology and Syntax", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "3. The COMPLEXTAG model is like SIMPLETAG except that we let tags represent the concatenation of ordinary tags and morphological descriptions (and retrain the preprocessing tagger on this representation). This model performs joint morphological and syntactic analysis as joint tagging and parsing with a fine-grained tag set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Morphology and Syntax", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "4. The JOINT model has k p = k m = 2, meaning that the tag and the morphological description can be selected independently by the parser.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Morphology and Syntax", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For morphological descriptions, we use a score threshold of 0.1. For beam pruning, we generalize the previous method by first extracting the 40 highest-scoring hypotheses with distinct dependency trees. For each of these, we then find the highest-scoring hypothesis with the same dependency tree but different tags or morphological features, storing these in two temporary lists TMP p , for hypotheses that differ with respect to tags, and TMP m , for hypotheses that differ only with respect to morphological features. Finally, we extract the 8 highest-scoring hypotheses from each of TMP p and TMP m and add them to the beam for a total beam size of 56. This model performs joint prediction of part-of-speech tags, morphological descriptions and dependency relations (but still relies on 1-best lemmas, like all the other models.)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Morphology and Syntax", |
|
"sec_num": "4" |
|
}, |
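The score thresholds and the two-step beam pruning described for the SIMPLETAG and JOINT models can be illustrated with the following sketch. It is an approximation of the procedure described above, not the authors' code; hypothesis attributes such as .tree, .tags and .morph are invented for the example.

```python
def kbest_with_threshold(scored, k=2, threshold=0.25):
    """Keep the best analysis plus alternatives whose score is within `threshold`
    of the best one (the 0.25 tag / 0.1 morphology thresholds described above)."""
    ranked = sorted(scored, key=lambda pair: pair[1], reverse=True)[:k]
    best_score = ranked[0][1]
    return [pair for pair in ranked if best_score - pair[1] < threshold]

def prune_joint(hypotheses, n_trees=40, n_tag_variants=8, n_morph_variants=8):
    """Two-step beam pruning for the JOINT model (illustrative sketch)."""
    ranked = sorted(hypotheses, key=lambda h: h.score, reverse=True)

    # Step 1: the 40 highest-scoring hypotheses with distinct dependency trees.
    beam, seen_trees = [], set()
    for h in ranked:
        if h.tree not in seen_trees:
            beam.append(h)
            seen_trees.add(h.tree)
        if len(beam) == n_trees:
            break

    # Step 2: add the best remaining hypotheses that share a tree with the beam
    # but differ in tags (TMP_p) or only in morphological features (TMP_m).
    kept = set(id(h) for h in beam)
    tmp_p, tmp_m = [], []
    for h in ranked:
        if id(h) in kept or h.tree not in seen_trees:
            continue
        anchor = next(b for b in beam if b.tree == h.tree)
        if h.tags != anchor.tags:
            tmp_p.append(h)
        elif h.morph != anchor.morph:
            tmp_m.append(h)
    return beam + tmp_p[:n_tag_variants] + tmp_m[:n_morph_variants]   # up to 56 hypotheses
```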
|
{ |
|
"text": "The procedures for beam pruning may appear both complex and ad hoc, especially for the JOINT model, but are motivated by the need to achieve a balance between morphological and syntactic ambiguity in the set of hypotheses maintained. As explained by Bohnet and Nivre (2012) , just maintaining a single beam does not give enough variety in the beam. The method used for the JOINT model is one way of generalizing this technique to a fully joint model, but other strategies are certainly conceivable. Another point that may be surprising is the choice to keep k p and k m as low as 2, which is fairly close to a pipeline model. Bohnet and Nivre (2012) experimented with higher values for the tag threshold but found no improvement in accuracy, and our own pre-liminary experiments confirmed this trend for morphological descriptions. In Section 7, we present an empirical analysis that gives further support for this choice, at least for the languages considered in this paper. Note also that the choice is not motivated by efficiency concerns, since increasing the values of k p and k m has only a marginal effect on running time, as explained in Section 2.4. Finally, the choice not to consider k-best lemmas is dictated by the fact that our lemmatizer only provides a 1-best analysis.", |
|
"cite_spans": [ |
|
{ |
|
"start": 250, |
|
"end": 273, |
|
"text": "Bohnet and Nivre (2012)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Morphology and Syntax", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For the first three models, we use the same feature representations as Bohnet and Nivre (2012), 9 consisting of their adaptation of the features used by Zhang and Nivre (2011) , the graph completion features of Bohnet and Kuhn (2012) , and the special features over k-best tags introduced specifically for joint tagging and parsing by Bohnet and Nivre (2012) . For the JOINT model, we simply add features over the k-best morphological descriptions analogous to the features over k-best tags. 10 Experimental results for these four models can be found in Table 2 . From the PIPELINE results, we see that the 1-best accuracy of the preprocessing tagger ranges from 95.0 (Finnish) to 99.2 (Czech) for POS, and from 89.4 (Finnish) to 96.5 (Hungarian) for MOR. The lemmatizer does a good job for four of the languages (93.9-97.9) but has really poor performance on Finnish (73.7). With respect to syntactic accuracy, the PIPELINE system achieves LAS ranging from 79.9 (Finnish) to 91.8 (German) and UAS ranging from 84.4 to 93.7. It is interesting to note that the highest PMD score, which requires both morphology and syntax to be completely correct, is observed for Hungarian (86.2).", |
|
"cite_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 175, |
|
"text": "Zhang and Nivre (2011)", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 233, |
|
"text": "Bohnet and Kuhn (2012)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 335, |
|
"end": 358, |
|
"text": "Bohnet and Nivre (2012)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 492, |
|
"end": 494, |
|
"text": "10", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 554, |
|
"end": 561, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Joint Morphology and Syntax", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Turning to the results for SIMPLETAG, we note that our results are consistent with those reported by Bohnet and Nivre (2012) , with small but consistent improvements in POS and UAS/LAS (and in the compound metrics PM and PMD) for most languages. However, the improvement in the PMD score is statistically significant only for Hungarian and Russian (p < 0.01). By contrast, the results for COMPLEXTAG confirm our hypothesis that merging tags and morphological descriptions into a single tag is not an effective way to do joint morphological and syntactic analysis. Here, we see a significant drop in most scores for all languages, but in particular in the accuracy of morphological descriptions (MOR), where the score drops by 5.6 percentage points for Hungarian, 4.5 for Finnish, 2.6 for Russian, and 2.4 for German. The only exception is Czech, where MOR and PM actually improve slightly, but this comes at the expense of a substantial drop in dependency accuracy. In any case, the decrease in PMD is highly significant for all languages (p < 0.01).", |
|
"cite_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 124, |
|
"text": "Bohnet and Nivre (2012)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Morphology and Syntax", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Finally, we see that the JOINT model, where tags and morphological descriptions are predicted separately during the parsing process, gives significant improvements in MOR accuracy compared to the PIPELINE and SIMPLETAG models for German (+0.8), Czech (+0.5), and Russian (+0.4), with marginal improvements also in the syntactic UAS and LAS scores. For Finnish and Hungarian, on the other hand, there is actually a small drop in accuracy (and for Finnish also a drop in POS accuracy compared to SIMPLETAG). Interestingly, however, for both these languages there is nevertheless a small improvement in the joint PM score, indicating that the JOINT model in general does a better job at selecting a valid complete morphological description than the SIMPLETAG model. Since Finnish and Hungarian are the most morphologically complex languages, it is likely that the lack of a strong positive effect is due in part to sparse data, especially for Finnish where the training set is small. As we shall see in the next section, this problem can be partly overcome through the use of external lexical resources. Still, the improvement in the PMD score over the other three models is highly significant for all languages except Finnish (p < 0.01).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Morphology and Syntax", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our starting point in this section is the JOINT model, which gave the best overall accuracy score (PMD) for all languages except Finnish. To this model we now add constraints derived from a morphological lexicon that maps each word form to a set of possible tags, morphological descriptions and lemmas. We explore two different ways of integrating these constraints:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexical Constraints", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "1. In the LEXHARD model, we use the lexicon to derive hard constraints and filter out tags and morphological descriptions that are not in the lexicon. More precisely, for word forms that are covered by the lexicon, we let the preprocessing tagger select the k p best tags and k m best morphological descriptions that are in the lexicon. We do this both during training and parsing, and we use exactly the same features and beam handling as for the JOINT model in the previous section.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexical Constraints", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "2. In the LEXSOFT model, we instead use soft lexical constraints by adding features that encode whether a tag or morphological description is in the lexicon or not. Again, we add these features both to the preprocessing tagger and to the joint parser, which otherwise remain exactly as before.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexical Constraints", |
|
"sec_num": "5" |
|
}, |
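The difference between hard and soft lexical constraints can be illustrated as follows. The sketch is not the authors' implementation: the lexicon is assumed to map a word form to a set of (tag, morphological description) pairs, and the candidate and feature layouts are invented for illustration.

```python
def constrain_candidates(word, candidates, lexicon, mode="soft"):
    """Apply a morphological lexicon as hard or soft constraints (sketch).

    `candidates` is a list of (tag, morph, score) triples from the k-best
    tagger; `lexicon` maps a word form to a set of (tag, morph) pairs.
    """
    entries = lexicon.get(word)
    if entries is None:                      # word not covered: leave candidates untouched
        return candidates, []

    if mode == "hard":
        # LEXHARD: discard analyses that the lexicon does not license.
        filtered = [c for c in candidates if (c[0], c[1]) in entries]
        return (filtered or candidates), []  # fallback is a choice made for this sketch

    # LEXSOFT: keep all candidates but emit features telling the model
    # whether each analysis is licensed by the lexicon.
    features = [("in_lexicon" if (tag, morph) in entries else "not_in_lexicon", tag)
                for (tag, morph, _) in candidates]
    return candidates, features
```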
|
{ |
|
"text": "One additional modification that we make for both the LEXHARD and the LEXSOFT model is to completely rely on the external lexicon for the prediction of lemmas. After the parser has selected a tag and morphological description for a word, we simply predict the corresponding lemma from the lexicon, breaking ties arbitrarily in the very few cases where the word form, tag and morphological description do not determine a unique lemma, and leaving the lemma empty for word forms that are not contained in the lexicon. This means that, in contrast to the purely data-driven models, the lexiconenriched models predict the complete morphological analysis jointly with parsing (with the lemma being derived deterministically from the tag and the morphological description). We make an exception only for German, where the lexicon provides lemmas that would require further disambiguation and where we therefore continue to use the data-driven lemmatizer. As can be seen in Table 2 , the results for the LEX-HARD model are somewhat mixed. For Finnish, we see a dramatic improvement of the LEM score (from 73.7 to 93.4), indicating that the rule-based morphological analyzer is vastly superior to the data-driven lemmatizer for Finnish. There is also a very nice boost to the MOR score (+2.2) and a smaller improvement on POS (+0.4). These improvements also lead to higher syntactic accuracy, with LAS increasing from 80.6 to 82.5 and UAS from 84.8 to 86.1. For Hungarian, we have nice improvements of the LEM score (+5.3), the MOR score (+0.9) and the POS score (+0.7), but only small improvements in LAS/UAS. For Russian, we observe improvements in POS and MOR, a small drop in LEM, and again minor improvements in UAS/LAS. For Czech and German, finally, we see a drop in MOR (and in LEM for Czech and POS for German), while UAS/LAS is largely unaffected. For German, this result can probably be explained largely by the fact that the morphological descriptions in the lexicon are not fully compatible with those in the treebank, as explained in Section 3. Similarly, for Czech, we think the drop in the LEM score is due to discrepancies caused by updates in the dictionary version released in 2013, deviating from the previously published treebank.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 967, |
|
"end": 974, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Lexical Constraints", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In general, the LEXSOFT model performs considerably better, achieving the best results so far for most languages and metrics. The only clear exception is Finnish, where it performs slightly worse than LEXHARD (but better than all the other models). In addition, there is a marginal drop in POS and LAS/UAS for Russian and in UAS for Hungarian (but again only compared to LEXHARD). The results are particularly striking for German, where the soft lexical constraints are clearly beneficial (especially for the MOR score) despite not being quite compatible with the morphological descriptions in the training set. In terms of statistical signifance, LEXSOFT outperforms the JOINT model with respect to the PMD score for all languages (p < 0.01). It is also significantly better than LEXHARD for all languages except Finnish (p < 0.01).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Lexical Constraints", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Finally, we add word cluster features to the best model for each language (LEXHARD for Finnish, LEXSOFT for the others). 11 We use Brown clusters (Brown et al., 1992) , with 800 clusters for all languages, and we use the same feature representation as Bohnet and Nivre (2012) . The results in Table 2 show small but consistent improvements in almost all metrics for all languages, confirming the benefit of cluster features for morphologically rich languages. It is worth noting that we see the biggest improvement for Finnish, the language with the smallest training set and therefore most likely to suffer from sparse data, where the syntactic accuracy improves substantially (LAS +0.6, UAS +0.5) and lemmatization even more (LEM +1.0). We also see a nice improvement in morphological accuracy for German (MOR +0.6, PM +0.5), which may be related to the lack of a compatible morphological analyzer for this language or simply to the fact that the clusters are derived from a much larger corpus for German than for the other languages. The PMD improvement is statistically significant for all languages except Finnish (p < 0.01).", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 123, |
|
"text": "11", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 146, |
|
"end": 166, |
|
"text": "(Brown et al., 1992)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 252, |
|
"end": 275, |
|
"text": "Bohnet and Nivre (2012)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 293, |
|
"end": 300, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Word Clusters", |
|
"sec_num": "6" |
|
}, |
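For concreteness, a common way of turning Brown clusters into features is to use the full bit string of a word's cluster together with short prefixes of it. The sketch below is illustrative only; the file format and prefix lengths are assumptions, and the actual feature templates follow Bohnet and Nivre (2012) rather than this code.

```python
def load_brown_clusters(path):
    """Read a word -> bit-string mapping, assuming the common one-line-per-word
    format `bitstring<TAB>word<TAB>count` (an assumption, not the paper's format)."""
    clusters = {}
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            parts = line.rstrip("\n").split("\t")
            if len(parts) >= 2:
                clusters[parts[1]] = parts[0]
    return clusters

def cluster_features(word, clusters, prefixes=(4, 6)):
    """Full cluster ID plus short prefixes, so rare words share features
    with frequent words in the same or a nearby cluster."""
    bits = clusters.get(word)
    if bits is None:
        return ["cluster=UNK"]
    feats = ["cluster=" + bits]
    feats += ["cluster%d=%s" % (p, bits[:p]) for p in prefixes]
    return feats
```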
|
{ |
|
"text": "The experimental results generally support the conclusion that joint prediction of morphology and syntax, where morphology includes rich morphological features as well as basic part-of-speech tags, improves both morphological and syntactic accuracy. The effect is especially clear on the joint evaluation metrics PM and PMD, which indicates that the joint model produces more internally consistent representations. However, we also see evidence that the joint model may suffer from data sparsity, as in the case of Finnish, where a model that only predicts part-of-speech tags jointly with dependency relations achieve better accuracy on some metrics. However, even in this case, the joint model has the best results on the joint evaluation metrics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The second conclusion that can be drawn from the experiments is that the use of an external lexicon is an effective way of mitigating the sparse data problem and thereby improving accuracy. In general, however, it is more effective to add the lexical constraints in the form of features, or soft constraints, than to apply them as hard constraints and discard all analyses that are not licensed by the lexicon. In particular, this is a useful strategy when the lexical resource is not completely compatible with the annotation in the training set, as seen in the case of German and (to a lesser extent) Czech. The only exception to this generalization is again Finnish, where the hard constraint model works marginally better (except for the MOR and PM metrics), which may again indicate that the training set is too small to make optimal use of the additional features. Still, the soft constraint model improves substantially over the models without lexical resources also for Finnish.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Finally, our experiments confirm that features based on distributional word clusters have a positive impact on syntactic accuracy, but little or no impact on morphological accuracy. This is consistent with previous findings in the literature, mainly from English (Koo et al., 2008; Sagae and Gordon, 2009) , and it is interesting to see that it holds also for richly inflected languages and when added on top of features derived from external lexical resources.", |
|
"cite_spans": [ |
|
{ |
|
"start": 263, |
|
"end": 281, |
|
"text": "(Koo et al., 2008;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 282, |
|
"end": 305, |
|
"text": "Sagae and Gordon, 2009)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "One issue worth discussing is the choice to allow the joint model to consider at most 2 tags and 2 morphological descriptions per word, which may seem overly restrictive and very close to a pipeline model. As already mentioned, this was motivated by the results of Bohnet and Nivre (2012) , which explored higher values without seeing any improvements, as well as by our own preliminary experiments. In an attempt to shed further light on this issue, we computed oracle scores for the LEXSOFT model, which uses soft lexical constraints but no cluster features. The oracle scores for POS and MOR tell us how often the correct analysis is actually included in the input to the joint model, while the oracle scores for UAS and LAS reports the score of the best dependency tree present in the beam at termination. The results, reported in Table 2 , show that the oracle scores are very high, especially for part-of-speech tags (98.0-99.9) but also for morphological descriptions (94.8-99.3). Hence, very few correct analyses are pruned away when setting the k p and k m parameters to 2, and increasing the search space further is therefore unlikely to improve accuracy.", |
|
"cite_spans": [ |
|
{ |
|
"start": 265, |
|
"end": 288, |
|
"text": "Bohnet and Nivre (2012)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 835, |
|
"end": 842, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "For further analysis, Table 2 reports the UAS/LAS scores of the PIPELINE system when given gold standard tags, morphological descriptions and lemmas as input. 12 Viewing this as an upper bound on improvements in parsing accuracy for the joint models, and comparing with the LEXSOFT model, which like PIPELINE does not use cluster features, we see that joint prediction with (soft) lexical constraints gives an average error reduction of about 40% for UAS and about 32% for LAS, which is substantial especially given that the error reduction in the PM score (compared to the perfect morphology underlyling the GOLD scores) is only about 27.5%. It is also worth pointing out that these improvements come at a very modest cost in computational efficiency, as the run times for the LEXSOFT model are on average only 15% higher than for the PIPELINE model, despite having a 40% larger beam size. 13 Interestingly, however, for all languages the LAS/UAS scores are actually higher for ORACLE than for GOLD, indicating that the LEXSOFT model has in its final beam dependency trees that are better than the 1-best trees predicted with perfect morphological input and suggesting that there is room for further improvement of the scoring model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 29, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The final results obtained with joint prediction of morphology and syntax, external lexical constraints, and cluster features represent a new state of the art for syntactic dependency parsing for all five languages. For Czech, the best previous UAS on the standard train-test split of the PDT is 87.32, reported by Koo et al. (2010) with a parser using non-projective head automata and dual decomposition, while the best LAS is 78.82 LAS from Nilsson et al. (2006) , using a greedy arc-eager transitionbased system with pseudo-projective parsing. Our best results are 1.7 percentage points better for UAS (89.0) and almost 5 percentage points better for LAS (83.7). 14 For Finnish, the only previous results are from Haverinen et al. (2013) , who achieve 81.01 LAS and 84.97 UAS with the graph-based parser of Bohnet (2010) . We get substantial improvements with 83.1 LAS and 86.6 UAS. We also improve slightly over their best POS score, obtained with the HunPos tagger (Hal\u00e1csy et al., 2007) together with the OMorFi analyzer (95.7 vs. 95.4). For German, the best previous results on the same train-test split are from Seeker and Kuhn (2012) , using the graphbased parser of Bohnet (2010) in a pipeline architecture. With the same evaluation setup as in this paper, they achieve 91.50 LAS and 93.48 UAS -13 LEXSOFT averages 0.132 ms per sentence on an Intel i7-3930K processor with 6 cores, against 0.112 ms for PIPELINE.", |
|
"cite_spans": [ |
|
{ |
|
"start": 315, |
|
"end": 332, |
|
"text": "Koo et al. (2010)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 443, |
|
"end": 464, |
|
"text": "Nilsson et al. (2006)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 717, |
|
"end": 740, |
|
"text": "Haverinen et al. (2013)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 810, |
|
"end": 823, |
|
"text": "Bohnet (2010)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 970, |
|
"end": 992, |
|
"text": "(Hal\u00e1csy et al., 2007)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 1120, |
|
"end": 1142, |
|
"text": "Seeker and Kuhn (2012)", |
|
"ref_id": "BIBREF44" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "14 It is worth noting that there are a number of more recent parsing results for Czech, but they all use a different test set (and often a different training set), usually from one of the CoNLL shared tasks in 2006 (Buchholz and Marsi, 2006 (Nivre et al., 2007 and 2009 (Haji\u010d et al., 2009) . For the 2009 data set, the best results are 83.73 LAS and 88.82 UAS from Bohnet and Nivre (2012) , who use the SIMPLETAG model but with a beam size of 80. In our setup, we outperform this model by 0.5 points in both LAS and UAS. in the original paper, they only report results without punctuation -to be compared with 92.4 LAS and 94.1 UAS for our best model. 15 In addition, our POS score of 98.4 is the highest reported for a tagger trained only on the Tiger Treebank, outperforming the previous best from Bohnet and Nivre (2012) by 0.3 percentage points. The only previous results on Hungarian using the same version of the treebank are from Farkas et al. (2012) , who report 87.2 LAS and 90.1 UAS for the graph-based parser of Bohnet (2010) . Our best results improve labeled accuracy by 2.1 points (89.3 LAS) and unlabeled accuracy by 1.6 points (91.7 UAS), which is again quite substantial. For Russian, Boguslavsky et al. (2011) report 86.0 LAS and 90.0 UAS using the rule-based ETAP-3 parser with an added statistical model and joint morphological and syntactic disambiguation. The scores are not strictly comparable, because we use a more recent version of the Syn-TagRus treebank (May 2013 vs. April 2011), but our results nevertheless show substantial improvements, in particular for UAS (93.0) but also for LAS (88.0).", |
|
"cite_spans": [ |
|
{ |
|
"start": 215, |
|
"end": 240, |
|
"text": "(Buchholz and Marsi, 2006", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 241, |
|
"end": 260, |
|
"text": "(Nivre et al., 2007", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 270, |
|
"end": 290, |
|
"text": "(Haji\u010d et al., 2009)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 366, |
|
"end": 389, |
|
"text": "Bohnet and Nivre (2012)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 653, |
|
"end": 655, |
|
"text": "15", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 801, |
|
"end": 824, |
|
"text": "Bohnet and Nivre (2012)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 938, |
|
"end": 958, |
|
"text": "Farkas et al. (2012)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 1024, |
|
"end": 1037, |
|
"text": "Bohnet (2010)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 1203, |
|
"end": 1228, |
|
"text": "Boguslavsky et al. (2011)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We have presented the first system that performs full morphological disambiguation and labeled nonprojective dependency parsing in a joint model, and we have demonstrated its usefulness for parsing richly inflected languages. A thorough empirical investigation of joint prediction models, rule-based lexical constraints, and distributional word clusters has shown substantial improvements in accuracy for five languages. In the future, we hope to conduct a detailed error analysis for all languages, which may give us more insight about the benefits of different components and hopefully pave the way for further improvements.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Concluding Remarks", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "See https://sites.google.com/site/spmrl2013/home/sharedtask.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Hatori et al. (2011) previously made the same modification to the arc-standard system(Nivre, 2004), without the SWAP transition. Similarly,Titov and Henderson (2007) added a word parameter to the SHIFT transition to get a joint model of word strings and dependency trees. A similar model was considered but finally not used byGesmundo et al. (2009).2.4 DecodingExact decoding for transition-based parsing is hard in general. 3 Early transition-based parsers mostly relied on greedy, deterministic decoding, which makes for very efficient parsing(Yamada and Matsumoto, 2003;Nivre, 2003), but research has shown that accuracy can be improved by using beam search instead(Zhang and Clark, 2008;Zhang and Nivre, 2012). While still not exact, beam search decoders explore a larger part of the search space than greedy parsers, which is likely to be especially important for joint models, where the search space is larger than for plain dependency parsing without morphology (even more so with the SWAP transition for nonprojectivity).Figure 2outlines the beam search algorithm used for decoding with our model. Different instantiations of the model will require slightly different implementations of the permissibility condition invoked in line 8, which can be used to filter out labels that are improbable or incompatible with an external lexicon, and the pruning step performed in line 13, where there may be a need to balance the amount of morphological and syntactic variation in the beam. Both these aspects will be discussed in depth in Sections 4-6.Although the worst-case running time with constant beam size is quadratic in sentence length, the observed running time is linear for natural language data sets, due to the sparsity of non-projective dependencies(Nivre, 2009). The running time is also linear in |D| + |P \u00d7 M |, which means that joint prediction only gives a linear increase in running time, often quite marginal because |D| > |P \u00d7 M |. This assumes that the lemma is predicted deterministically given a tag and a morphological description, an assumption that is enforced in all our experiments.2.5 LearningIn order to learn a weight vector w from a training set of sentences with gold parses, we use a variant of the structured perceptron, introduced byCollins (2002) and first used for transition-based parsing byZhang and Clark (2008). We initialize all weights 3 While there exist exact dynamic programming algorithms for projective transition systems(Huang and Sagae, 2010;Kuhlmann et al., 2011) and even for restricted non-projective systems(Cohen et al., 2011), parsing is intractable for systems like ours that permit arbitrary non-projective trees.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
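To make the decoding procedure sketched in the note above easier to follow, here is a minimal generic beam-search decoder with a permissibility filter and a pruning hook. It is an illustration only, not a reproduction of the algorithm in Figure 2; all callables and state methods are hypothetical placeholders:

```python
import heapq

def beam_search(initial_state, beam_size, score, expand, is_permissible, prune):
    """Generic beam-search decoding loop.

    `expand` enumerates candidate transitions for a state, `is_permissible`
    filters out transitions whose labels are improbable or incompatible with
    an external lexicon, and `prune` can rebalance morphological vs.
    syntactic variation in the beam (all hypothetical placeholders)."""
    beam = [initial_state]
    while not all(state.is_terminal() for state in beam):
        candidates = []
        for state in beam:
            if state.is_terminal():
                candidates.append(state)  # carry finished derivations along
                continue
            for transition in expand(state):
                if not is_permissible(state, transition):
                    continue
                candidates.append(state.apply(transition))
        # keep the highest-scoring states, then apply task-specific pruning
        beam = prune(heapq.nlargest(beam_size, candidates, key=score))
    return max(beam, key=score)
```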
|
{ |
|
"text": "Note that there may be more than one transition sequence corresponding to the gold parse, in which case we pick the canonical transition sequence that processes all left-dependents before right-dependents and applies the lazy swapping strategy of.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Downloaded from the http://lindat.cz repository as resource PID http://hdl.handle.net/11858/00-097C-0000-0015-A780-9.6 Downloaded in March 2012.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "See http://www.ims.uni-stuttgart.de/forschung/ressourcen/ korpora/hgc.html.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Available at https://code.google.com/p/mate-tools/.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "See http://stp.lingfil.uu.se/\u223cnivre/exp/emnlp12.html. 10 A complete description of our feature representations is available at http://stp.lingfil.uu.se/\u223cnivre/exp/tacl13.html.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The best model was selected according to results on the dev set (cross-validation on the training set for Finnish).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Finnish had to be excluded because gold standard morphological annotation exists only for a small subset of the treebank.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As in the case of Czech, there are many recent results for German based on the CoNLL 2009 data sets, but the previous best is with the SIMPLETAG model of Bohnet and Nivre (2012), which we outperform by 0.5/0.3 points in LAS/UAS.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Work partly funded by the projects LM2010013 and LH12093 of the MEYS of the Czech Republic and the National Excellence Program of the State of Hungary (T\u00c1MOP 4.2.4. A/2-11-1-2012-0001).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "ETAP-3 linguistic processor: A full-fledged NLP implementation of the MTT", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ju", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Apresian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Boguslavsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Iomdin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Lazursky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Sannikov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Sizov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Tsinman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the First International Conference on Meaning-Text Theory", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "279--288", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ju. Apresian, I. Boguslavsky, L. Iomdin, A. Lazursky, V. Sannikov, V. Sizov, and L. Tsinman. 2003. ETAP-3 linguistic processor: A full-fledged NLP implementa- tion of the MTT. In Proceedings of the First Inter- national Conference on Meaning-Text Theory, pages 279-288.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Dependency treebank for Russian: Concept, tools, types of information", |
|
"authors": [ |
|
{ |
|
"first": "Igor", |
|
"middle": [], |
|
"last": "Boguslavsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Grigorieva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikolai", |
|
"middle": [], |
|
"last": "Grigoriev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leonid", |
|
"middle": [], |
|
"last": "Kreidlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nadezhda", |
|
"middle": [], |
|
"last": "Frid", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of the 18th International Conference on Computational Linguistics (COLING)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "987--991", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Igor Boguslavsky, Svetlana Grigorieva, Nikolai Grig- oriev, Leonid Kreidlin, and Nadezhda Frid. 2000. Dependency treebank for Russian: Concept, tools, types of information. In Proceedings of the 18th In- ternational Conference on Computational Linguistics (COLING), pages 987-991.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Development of a dependency treebank for Russian and its possible applications in NLP", |
|
"authors": [ |
|
{ |
|
"first": "Igor", |
|
"middle": [], |
|
"last": "Boguslavsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Chardin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Grigorieva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikolai", |
|
"middle": [], |
|
"last": "Grigoriev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leonid", |
|
"middle": [], |
|
"last": "Iomdin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leonid", |
|
"middle": [], |
|
"last": "Kreidlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nadezhda", |
|
"middle": [], |
|
"last": "Frid", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 3rd International Conference on Language Resources and Evaluation (LREC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "852--856", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Igor Boguslavsky, Ivan Chardin, Svetlana Grigorieva, Nikolai Grigoriev, Leonid Iomdin, Leonid Kreidlin, and Nadezhda Frid. 2002. Development of a depen- dency treebank for Russian and its possible applica- tions in NLP. In Proceedings of the 3rd International Conference on Language Resources and Evaluation (LREC), pages 852-856.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Rule-based dependency parser refined by empirical and corpus statistics", |
|
"authors": [ |
|
{ |
|
"first": "Igor", |
|
"middle": [], |
|
"last": "Boguslavsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leonid", |
|
"middle": [], |
|
"last": "Iomdin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sizov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leonid", |
|
"middle": [], |
|
"last": "Tsinman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vadim", |
|
"middle": [], |
|
"last": "Petrochenkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the International Conference on Dependency Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "318--327", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Igor Boguslavsky, Leonid Iomdin, Victor Sizov, Leonid Tsinman, and Vadim Petrochenkov. 2011. Rule-based dependency parser refined by empirical and corpus statistics. In Proceedings of the International Confer- ence on Dependency Linguistics, pages 318-327.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "The Prague Dependency Treebank: A three-level annotation scenario", |
|
"authors": [ |
|
{ |
|
"first": "Alena", |
|
"middle": [], |
|
"last": "B\u00f6hmov\u00e1", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Haji\u010d", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eva", |
|
"middle": [], |
|
"last": "Haji\u010dov\u00e1", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barbora", |
|
"middle": [], |
|
"last": "Hladk\u00e1", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Treebanks: Building and Using Parsed Corpora", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "103--127", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alena B\u00f6hmov\u00e1, Jan Haji\u010d, Eva Haji\u010dov\u00e1, and Barbora Hladk\u00e1. 2003. The Prague Dependency Treebank: A three-level annotation scenario. In Anne Abeill\u00e9, ed- itor, Treebanks: Building and Using Parsed Corpora, pages 103-127. Kluwer.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "The best of both worlds -a graph-based completion model for transition-based parsers", |
|
"authors": [ |
|
{ |
|
"first": "Bernd", |
|
"middle": [], |
|
"last": "Bohnet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonas", |
|
"middle": [], |
|
"last": "Kuhn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 13th Conference of the European Chpater of the Association for Computational Linguistics (EACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "77--87", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bernd Bohnet and Jonas Kuhn. 2012. The best of both worlds -a graph-based completion model for transition-based parsers. In Proceedings of the 13th Conference of the European Chpater of the Associa- tion for Computational Linguistics (EACL), pages 77- 87.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A transitionbased system for joint part-of-speech tagging and labeled non-projective dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Bernd", |
|
"middle": [], |
|
"last": "Bohnet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1455--1465", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bernd Bohnet and Joakim Nivre. 2012. A transition- based system for joint part-of-speech tagging and la- beled non-projective dependency parsing. In Proceed- ings of the 2012 Joint Conference on Empirical Meth- ods in Natural Language Processing and Computa- tional Natural Language Learning (EMNLP-CoNLL), pages 1455-1465.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Top accuracy and fast dependency parsing is not a contradiction", |
|
"authors": [ |
|
{ |
|
"first": "Bernd", |
|
"middle": [], |
|
"last": "Bohnet", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 23rd International Conference on Computational Linguistics (COLING)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "89--97", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bernd Bohnet. 2010. Top accuracy and fast dependency parsing is not a contradiction. In Proceedings of the 23rd International Conference on Computational Lin- guistics (COLING), pages 89-97.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "TIGER treebank", |
|
"authors": [ |
|
{ |
|
"first": "Sabine", |
|
"middle": [], |
|
"last": "Brants", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefanie", |
|
"middle": [], |
|
"last": "Dipper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Silvia", |
|
"middle": [], |
|
"last": "Hansen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Lezius", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 1st Workshop on Treebanks and Linguistic Theories (TLT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "24--42", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sabine Brants, Stefanie Dipper, Silvia Hansen, Wolfgang Lezius, and George Smith. 2002. TIGER treebank. In Proceedings of the 1st Workshop on Treebanks and Linguistic Theories (TLT), pages 24-42.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Classbased n-gram models of natural language", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"Della" |
|
], |
|
"last": "Vincent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Pietra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennifer", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Desouza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mercer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "Computational Linguistics", |
|
"volume": "18", |
|
"issue": "", |
|
"pages": "467--479", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter F. Brown, Vincent J. Della Pietra, Peter V. deSouza, Jennifer C. Lai, and Robert L. Mercer. 1992. Class- based n-gram models of natural language. Computa- tional Linguistics, 18:467-479.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "CoNLL-X shared task on multilingual dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Sabine", |
|
"middle": [], |
|
"last": "Buchholz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erwin", |
|
"middle": [], |
|
"last": "Marsi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 10th Conference on Computational Natural Language Learning (CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "149--164", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sabine Buchholz and Erwin Marsi. 2006. CoNLL-X shared task on multilingual dependency parsing. In Proceedings of the 10th Conference on Computational Natural Language Learning (CoNLL), pages 149-164.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Joint morphological and syntactic disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Shay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "208--217", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shay B. Cohen and Noah A. Smith. 2007. Joint morpho- logical and syntactic disambiguation. In Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL), pages 208-217.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Exact inference for generative probabilistic non-projective dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Shay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giorgio", |
|
"middle": [], |
|
"last": "G\u00f3mez-Rodr\u00edguez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Satta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1234--1245", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shay B. Cohen, Carlos G\u00f3mez-Rodr\u00edguez, and Giorgio Satta. 2011. Exact inference for generative probabilis- tic non-projective dependency parsing. In Proceedings of the 2011 Conference on Empirical Methods in Nat- ural Language Processing, pages 1234-1245.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Incremental parsing with the perceptron algorithm", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Roark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 42nd Annual Meeting of the Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "112--119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Collins and Brian Roark. 2004. Incremental parsing with the perceptron algorithm. In Proceed- ings of the 42nd Annual Meeting of the Association for Computational Linguistics (ACL), pages 112-119.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Discriminative training methods for hidden markov models: Theory and experiments with perceptron algorithms", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Collins. 2002. Discriminative training meth- ods for hidden markov models: Theory and experi- ments with perceptron algorithms. In Proceedings of the Conference on Empirical Methods in Natural Lan- guage Processing (EMNLP), pages 1-8.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Morphology and reranking for the statistical parsing of spanish", |
|
"authors": [ |
|
{ |
|
"first": "Brooke", |
|
"middle": [], |
|
"last": "Cowan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the Human Language Technology Conference and the Conference on Empirical Methods in Natural Language Processing (HLT/EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "795--802", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brooke Cowan and Michael Collins. 2005. Morphology and reranking for the statistical parsing of spanish. In Proceedings of the Human Language Technology Con- ference and the Conference on Empirical Methods in Natural Language Processing (HLT/EMNLP), pages 795-802.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Shai Shalev-Shwartz, and Yoram Singer", |
|
"authors": [ |
|
{ |
|
"first": "Koby", |
|
"middle": [], |
|
"last": "Crammer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ofer", |
|
"middle": [], |
|
"last": "Dekel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Keshet", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "551--585", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Koby Crammer, Ofer Dekel, Joseph Keshet, Shai Shalev- Shwartz, and Yoram Singer. 2006. Online passive- aggressive algorithms. Journal of Machine Learning Research, 7:551-585.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Dependency parsing of hungarian: Baseline results and challenges", |
|
"authors": [ |
|
{ |
|
"first": "Rich\u00e1rd", |
|
"middle": [], |
|
"last": "Farkas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veronika", |
|
"middle": [], |
|
"last": "Vincze", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Helmut", |
|
"middle": [], |
|
"last": "Schmid", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 13th Conference of the European Chpater of the Association for Computational Linguistics (EACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "55--65", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rich\u00e1rd Farkas, Veronika Vincze, and Helmut Schmid. 2012. Dependency parsing of hungarian: Baseline re- sults and challenges. In Proceedings of the 13th Con- ference of the European Chpater of the Association for Computational Linguistics (EACL), pages 55-65.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A latent variable model of synchronous syntactic-semantic parsing for multiple languages", |
|
"authors": [ |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Gesmundo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Henderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paola", |
|
"middle": [], |
|
"last": "Merlo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Thirteenth Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "37--42", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrea Gesmundo, James Henderson, Paola Merlo, and Ivan Titov. 2009. A latent variable model of syn- chronous syntactic-semantic parsing for multiple lan- guages. In Proceedings of the Thirteenth Confer- ence on Computational Natural Language Learning (CoNLL 2009): Shared Task, pages 37-42.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Word segmentation, unknown-word resolution, and morphological agreement in a hebrew parsing system", |
|
"authors": [ |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Elhadad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Computational Linguistics", |
|
"volume": "39", |
|
"issue": "", |
|
"pages": "121--160", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoav Goldberg and Michael Elhadad. 2013. Word seg- mentation, unknown-word resolution, and morpholog- ical agreement in a hebrew parsing system. Computa- tional Linguistics, 39:121-160.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A single generative model for joint morphological segmentation and syntactic parsing", |
|
"authors": [ |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Reut", |
|
"middle": [], |
|
"last": "Tsarfaty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "371--379", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoav Goldberg and Reut Tsarfaty. 2008. A single gener- ative model for joint morphological segmentation and syntactic parsing. In Proceedings of the 46th Annual Meeting of the Association for Computational Linguis- tics (ACL), pages 371-379.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Tagging Inflective Languages: Prediction of Morphological Categories for a Rich, Structured Tagset", |
|
"authors": [ |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Haji\u010d", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barbora", |
|
"middle": [], |
|
"last": "Hladk\u00e1", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the 36th Annual Meeting of the Association for Computational Linguistics (ACL) and the 17th International Conference on Computational Linguistics (COLING)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "483--490", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jan Haji\u010d and Barbora Hladk\u00e1. 1998. Tagging Inflective Languages: Prediction of Morphological Categories for a Rich, Structured Tagset. In Proceedings of the 36th Annual Meeting of the Association for Compu- tational Linguistics (ACL) and the 17th International Conference on Computational Linguistics (COLING), pages 483-490.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Petr Sgall, and Petr Pajas", |
|
"authors": [ |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Haji\u010d", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jarmila", |
|
"middle": [], |
|
"last": "Barbora Vidova Hladka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eva", |
|
"middle": [], |
|
"last": "Panevov\u00e1", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Haji\u010dov\u00e1", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jan Haji\u010d, Barbora Vidova Hladka, Jarmila Panevov\u00e1, Eva Haji\u010dov\u00e1, Petr Sgall, and Petr Pajas. 2001. Prague Dependency Treebank 1.0. LDC, 2001T10.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "The conll-2009 shared task: Syntactic and semantic dependencies in multiple languages", |
|
"authors": [ |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Haji\u010d", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Massimiliano", |
|
"middle": [], |
|
"last": "Ciaramita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Johansson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daisuke", |
|
"middle": [], |
|
"last": "Kawahara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [ |
|
"Ant\u00f2nia" |
|
], |
|
"last": "Mart\u00ed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llu\u00eds", |
|
"middle": [], |
|
"last": "M\u00e0rquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Meyers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Pad\u00f3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "Jan\u0161t\u011bp\u00e1nek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Stra\u0148\u00e1k", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Thirteenth Conference on Computational Natural Language Learning (CoNLL): Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--18", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jan Haji\u010d, Massimiliano Ciaramita, Richard Johans- son, Daisuke Kawahara, Maria Ant\u00f2nia Mart\u00ed, Llu\u00eds M\u00e0rquez, Adam Meyers, Joakim Nivre, Sebastian Pad\u00f3, Jan\u0160t\u011bp\u00e1nek, Pavel Stra\u0148\u00e1k, Mihai Surdeanu, Nianwen Xue, and Yi Zhang. 2009. The conll- 2009 shared task: Syntactic and semantic dependen- cies in multiple languages. In Proceedings of the Thir- teenth Conference on Computational Natural Lan- guage Learning (CoNLL): Shared Task, pages 1-18.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Morphological tagging: Data vs. dictionaries", |
|
"authors": [], |
|
"year": 2000, |
|
"venue": "Proceedings of the First Meeting of the North American Chapter of the Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "94--101", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jan Haji\u010d. 2000. Morphological tagging: Data vs. dic- tionaries. In Proceedings of the First Meeting of the North American Chapter of the Association for Com- putational Linguistics (NAACL), pages 94-101.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "HunPos -an open source trigram tagger", |
|
"authors": [ |
|
{ |
|
"first": "P\u00e9ter", |
|
"middle": [], |
|
"last": "Hal\u00e1csy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e1s", |
|
"middle": [], |
|
"last": "Kornai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Csaba", |
|
"middle": [], |
|
"last": "Oravecz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics: Companion Volume Proceedings of the Demo and Poster Sessions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "209--212", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P\u00e9ter Hal\u00e1csy, Andr\u00e1s Kornai, and Csaba Oravecz. 2007. HunPos -an open source trigram tagger. In Proceed- ings of the 45th Annual Meeting of the Association for Computational Linguistics: Companion Volume Pro- ceedings of the Demo and Poster Sessions, pages 209- 212.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Incremental joint pos tagging and dependency parsing in chinese", |
|
"authors": [ |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Hatori", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takuya", |
|
"middle": [], |
|
"last": "Matsuzaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yusuke", |
|
"middle": [], |
|
"last": "Miyao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun'ichi", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of 5th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1216--1224", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jun Hatori, Takuya Matsuzaki, Yusuke Miyao, and Jun'ichi Tsujii. 2011. Incremental joint pos tagging and dependency parsing in chinese. In Proceedings of 5th International Joint Conference on Natural Lan- guage Processing, pages 1216-1224.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Tapio Salakoski, and Filip Ginter. 2013. Building the essential resources for Finnish: the Turku Dependency Treebank. Language Resources and Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Katri", |
|
"middle": [], |
|
"last": "Haverinen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jenna", |
|
"middle": [], |
|
"last": "Nyblom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timo", |
|
"middle": [], |
|
"last": "Viljanen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veronika", |
|
"middle": [], |
|
"last": "Laippala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Kohonen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Missil\u00e4", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stina", |
|
"middle": [], |
|
"last": "Ojala", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Katri Haverinen, Jenna Nyblom, Timo Viljanen, Veronika Laippala, Samuel Kohonen, Anna Missil\u00e4, Stina Ojala, Tapio Salakoski, and Filip Ginter. 2013. Building the essential resources for Finnish: the Turku Dependency Treebank. Language Resources and Evaluation.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Dynamic programming for linear-time incremental parsing", |
|
"authors": [ |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenji", |
|
"middle": [], |
|
"last": "Sagae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1077--1086", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liang Huang and Kenji Sagae. 2010. Dynamic program- ming for linear-time incremental parsing. In Proceed- ings of the 48th Annual Meeting of the Association for Computational Linguistics (ACL), pages 1077-1086.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Lexicographer's companion: A user-friendly software system for enlarging and updating high-profile computerized bilingual dictionaries", |
|
"authors": [ |
|
{ |
|
"first": "Leonid", |
|
"middle": [], |
|
"last": "Iomdin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Viktor", |
|
"middle": [], |
|
"last": "Sizov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Lexicographic Tools and Techniques. MONDILEX First Open Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "42--54", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leonid Iomdin and Viktor Sizov. 2008. Lexicographer's companion: A user-friendly software system for en- larging and updating high-profile computerized bilin- gual dictionaries. In Lexicographic Tools and Tech- niques. MONDILEX First Open Workshop, pages 42- 54.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Simple semi-supervised dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Terry", |
|
"middle": [], |
|
"last": "Koo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Carreras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "595--603", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Terry Koo, Xavier Carreras, and Michael Collins. 2008. Simple semi-supervised dependency parsing. In Pro- ceedings of the 46th Annual Meeting of the Association for Computational Linguistics (ACL), pages 595-603.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Dual decomposition for parsing with non-projective head automata", |
|
"authors": [ |
|
{ |
|
"first": "Terry", |
|
"middle": [], |
|
"last": "Koo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Rush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tommi", |
|
"middle": [], |
|
"last": "Jaakkola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Sontag", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1288--1298", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Terry Koo, Alexander M. Rush, Michael Collins, Tommi Jaakkola, and David Sontag. 2010. Dual decompo- sition for parsing with non-projective head automata. In Proceedings of the 2010 Conference on Empiri- cal Methods in Natural Language Processing, pages 1288-1298.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Dependency Parsing", |
|
"authors": [ |
|
{ |
|
"first": "Sandra", |
|
"middle": [], |
|
"last": "K\u00fcbler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sandra K\u00fcbler, Ryan McDonald, and Joakim Nivre. 2009. Dependency Parsing. Morgan and Claypool.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Dynamic programming algorithms for transition-based dependency parsers", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Kuhlmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "G\u00f3mez-Rodr\u00edguez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giorgio", |
|
"middle": [], |
|
"last": "Satta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "673--682", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Kuhlmann, Carlos G\u00f3mez-Rodr\u00edguez, and Gior- gio Satta. 2011. Dynamic programming algorithms for transition-based dependency parsers. In Proceed- ings of the 49th Annual Meeting of the Association for Computational Linguistics (ACL), pages 673-682.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "A discriminative model for joint morphological disambiguation and dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Naradowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "885--894", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Lee, Jason Naradowsky, and David A. Smith. 2011. A discriminative model for joint morphological disam- biguation and dependency parsing. In Proceedings of the 49th Annual Meeting of the Association for Com- putational Linguistics (ACL), pages 885-894.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Graph transformations in data-driven dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Nilsson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 21st International Conference on Computational Linguistics and the 44th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "257--264", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jens Nilsson, Joakim Nivre, and Johan Hall. 2006. Graph transformations in data-driven dependency parsing. In Proceedings of the 21st International Con- ference on Computational Linguistics and the 44th An- nual Meeting of the Association for Computational Linguistics, pages 257-264.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "The CoNLL 2007 shared task on dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [], |
|
"last": "K\u00fcbler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Nilsson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deniz", |
|
"middle": [], |
|
"last": "Yuret", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the CoNLL Shared Task of EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "915--932", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joakim Nivre, Johan Hall, Sandra K\u00fcbler, Ryan McDon- ald, Jens Nilsson, Sebastian Riedel, and Deniz Yuret. 2007. The CoNLL 2007 shared task on dependency parsing. In Proceedings of the CoNLL Shared Task of EMNLP-CoNLL 2007, pages 915-932.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "An improved oracle for dependency parsing with online reordering", |
|
"authors": [ |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Kuhlmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 11th International Conference on Parsing Technologies (IWPT'09)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "73--76", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joakim Nivre, Marco Kuhlmann, and Johan Hall. 2009. An improved oracle for dependency parsing with online reordering. In Proceedings of the 11th International Conference on Parsing Technologies (IWPT'09), pages 73-76.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "An efficient algorithm for projective dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the 8th International Workshop on Parsing Technologies (IWPT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "149--160", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joakim Nivre. 2003. An efficient algorithm for pro- jective dependency parsing. In Proceedings of the 8th International Workshop on Parsing Technologies (IWPT), pages 149-160.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Incrementality in deterministic dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the Workshop on Incremental Parsing: Bringing Engineering and Cognition Together (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "50--57", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joakim Nivre. 2004. Incrementality in deterministic de- pendency parsing. In Proceedings of the Workshop on Incremental Parsing: Bringing Engineering and Cog- nition Together (ACL), pages 50-57.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Non-projective dependency parsing in expected linear time", |
|
"authors": [ |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP (ACL-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "351--359", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joakim Nivre. 2009. Non-projective dependency parsing in expected linear time. In Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Lan- guage Processing of the AFNLP (ACL-IJCNLP), pages 351-359.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Modularisation of finnish finite-state language description -towards wide collaboration in open source development of a morphological analyser", |
|
"authors": [ |
|
{ |
|
"first": "Tommi", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Pirinen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 18th Nordic Conference of Computational Linguistics (NODAL-IDA)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "299--302", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tommi A. Pirinen. 2011. Modularisation of finnish finite-state language description -towards wide col- laboration in open source development of a morpho- logical analyser. In Proceedings of the 18th Nordic Conference of Computational Linguistics (NODAL- IDA), pages 299-302.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Clustering words by syntactic similarity improves dependency parsing of predicate-argument structures", |
|
"authors": [ |
|
{ |
|
"first": "Kenji", |
|
"middle": [], |
|
"last": "Sagae", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Gordon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 11th International Conference on Parsing Technologies (IWPT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "192--201", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenji Sagae and Andrew S. Gordon. 2009. Clustering words by syntactic similarity improves dependency parsing of predicate-argument structures. In Proceed- ings of the 11th International Conference on Parsing Technologies (IWPT), pages 192-201.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "SMOR: A German computational morphology covering derivation, composition and inflection", |
|
"authors": [ |
|
{ |
|
"first": "Helmut", |
|
"middle": [], |
|
"last": "Schmid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arne", |
|
"middle": [], |
|
"last": "Fitschen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulrich", |
|
"middle": [], |
|
"last": "Heid", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 4th International Conference on Language Resources and Evaluation (LREC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1263--1266", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Helmut Schmid, Arne Fitschen, and Ulrich Heid. 2004. SMOR: A German computational morphology cover- ing derivation, composition and inflection. In Pro- ceedings of the 4th International Conference on Lan- guage Resources and Evaluation (LREC), pages 1263- 1266.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Making ellipses explicit in dependency conversion for a german treebank", |
|
"authors": [ |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Seeker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonas", |
|
"middle": [], |
|
"last": "Kuhn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 8th International Conference on Language Resources and Evaluation (LREC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3132--3139", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wolfgang Seeker and Jonas Kuhn. 2012. Making el- lipses explicit in dependency conversion for a ger- man treebank. In Proceedings of the 8th International Conference on Language Resources and Evaluation (LREC), pages 3132-3139.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Informed ways of improving data-driven dependency parsing for german", |
|
"authors": [ |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Seeker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernd", |
|
"middle": [], |
|
"last": "Bohnet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lilja", |
|
"middle": [], |
|
"last": "\u00d8vrelid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonas", |
|
"middle": [], |
|
"last": "Kuhn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Coling 2010: Posters", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1122--1130", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wolfgang Seeker, Bernd Bohnet, Lilja \u00d8vrelid, and Jonas Kuhn. 2010. Informed ways of improving data-driven dependency parsing for german. In Coling 2010: Posters, pages 1122-1130.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "A highquality web corpus of czech", |
|
"authors": [ |
|
{ |
|
"first": "Johanka", |
|
"middle": [], |
|
"last": "Spoustov\u00e1", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miroslav", |
|
"middle": [], |
|
"last": "Spousta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 8th International Conference on Language Resources and Evaluation (LREC 2012)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--315", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Johanka Spoustov\u00e1 and Miroslav Spousta. 2012. A high- quality web corpus of czech. In Proceedings of the 8th International Conference on Language Resources and Evaluation (LREC 2012), pages 311-315.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "A latent variable model for generative dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Titov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Henderson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 10th International Conference on Parsing Technologies (IWPT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "144--155", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ivan Titov and James Henderson. 2007. A latent variable model for generative dependency parsing. In Proceed- ings of the 10th International Conference on Parsing Technologies (IWPT), pages 144-155.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Morphdb.hu: Hungarian lexical database and morphological grammar", |
|
"authors": [ |
|
{ |
|
"first": "P\u00e9ter", |
|
"middle": [], |
|
"last": "Viktor Tr\u00f3n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P\u00e9ter", |
|
"middle": [], |
|
"last": "Hal\u00e1csy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rebrus", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 5th International Conference on Language Resources and Evaluation (LREC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1670--1673", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Viktor Tr\u00f3n, P\u00e9ter Hal\u00e1csy, P\u00e9ter Rebrus, Andr\u00e1s Rung, Eszter Simon, and P\u00e9ter Vajda. 2006. Morphdb.hu: Hungarian lexical database and morphological gram- mar. In Proceedings of the 5th International Confer- ence on Language Resources and Evaluation (LREC), pages 1670-1673.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Statistical parsing of morphologically rich languages (spmrl) what, how and whither", |
|
"authors": [ |
|
{ |
|
"first": "Reut", |
|
"middle": [], |
|
"last": "Tsarfaty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Djam\u00e9", |
|
"middle": [], |
|
"last": "Seddah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [], |
|
"last": "Kuebler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yannick", |
|
"middle": [], |
|
"last": "Versley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie", |
|
"middle": [], |
|
"last": "Candito", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennifer", |
|
"middle": [], |
|
"last": "Foster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ines", |
|
"middle": [], |
|
"last": "Rehbein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lamia", |
|
"middle": [], |
|
"last": "Tounsi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the NAACL HLT 2010 First Workshop on Statistical Parsing of Morphologically-Rich Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--12", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reut Tsarfaty, Djam\u00e9 Seddah, Yoav Goldberg, San- dra Kuebler, Yannick Versley, Marie Candito, Jen- nifer Foster, Ines Rehbein, and Lamia Tounsi. 2010. Statistical parsing of morphologically rich languages (spmrl) what, how and whither. In Proceedings of the NAACL HLT 2010 First Workshop on Statistical Pars- ing of Morphologically-Rich Languages, pages 1-12.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Parsing morphologicall rich languages: Introduction to the special issue", |
|
"authors": [ |
|
{ |
|
"first": "Reut", |
|
"middle": [], |
|
"last": "Tsarfaty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Djam\u00e9", |
|
"middle": [], |
|
"last": "Seddah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [], |
|
"last": "K\u00fcbler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Computational Linguistics", |
|
"volume": "39", |
|
"issue": "", |
|
"pages": "15--22", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reut Tsarfaty, Djam\u00e9 Seddah, Sandra K\u00fcbler, and Joakim Nivre. 2013. Parsing morphologicall rich languages: Introduction to the special issue. Computational Lin- guistics, 39:15-22.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Integrated morphological and syntactic disambiguation for modern hebrew", |
|
"authors": [ |
|
{ |
|
"first": "Reut", |
|
"middle": [], |
|
"last": "Tsarfaty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the COLING/ACL 2006 Student Research Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "49--54", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reut Tsarfaty. 2006. Integrated morphological and syn- tactic disambiguation for modern hebrew. In Pro- ceedings of the COLING/ACL 2006 Student Research Workshop, pages 49-54.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "The hungarian national corpus", |
|
"authors": [ |
|
{ |
|
"first": "Tam\u00e1s", |
|
"middle": [], |
|
"last": "V\u00e1radi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 3rd International Conference on Language Resources and Evaluation (LREC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "385--389", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tam\u00e1s V\u00e1radi. 2002. The hungarian national corpus. In Proceedings of the 3rd International Conference on Language Resources and Evaluation (LREC), pages 385-389.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "Statistical dependency analysis with support vector machines", |
|
"authors": [ |
|
{ |
|
"first": "Hiroyasu", |
|
"middle": [], |
|
"last": "Yamada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuji", |
|
"middle": [], |
|
"last": "Matsumoto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the 8th International Workshop on Parsing Technologies (IWPT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "195--206", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hiroyasu Yamada and Yuji Matsumoto. 2003. Statisti- cal dependency analysis with support vector machines. In Proceedings of the 8th International Workshop on Parsing Technologies (IWPT), pages 195-206.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "A tale of two parsers: Investigating and combining graph-based and transition-based dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "562--571", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yue Zhang and Stephen Clark. 2008. A tale of two parsers: Investigating and combining graph-based and transition-based dependency parsing. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 562-571.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "Transition-based parsing with rich non-local features", |
|
"authors": [ |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yue Zhang and Joakim Nivre. 2011. Transition-based parsing with rich non-local features. In Proceedings of the 49th Annual Meeting of the Association for Com- putational Linguistics (ACL).", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "Analyzing the effect of global learning and beam-search on transitionbased dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of COL-ING 2012: Posters", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1391--1400", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yue Zhang and Joakim Nivre. 2012. Analyzing the ef- fect of global learning and beam-search on transition- based dependency parsing. In Proceedings of COL- ING 2012: Posters, pages 1391-1400.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "Beam search algorithm for finding the best MSparse for input sentence x with weight vector w. The symbols h.c, h.s and h.f denote, respectively, the configuration, score and feature vector of a hypothesis h; \u0393 c denotes the MS-parse defined by c.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null, |
|
"text": "Statistics about data sets and resources used in the experiments. Treebank: number of tokens in data sets; number of labels in label sets. Morphology: number of word forms and lemmas in treebank covered by morphological analyzer. Clusters: number of tokens and types in unlabeled corpus. treebank annotation we have to rely on a heuristic mapping between the two. Word clusters are derived from the so-called Huge German Corpus. 7" |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null, |
|
"text": "Test set results for all models. ORACLE = oracle scores for LEXSOFT; GOLD = accuracy for PIPELINE with gold POS, MOR, LEM. Bold marks best result per column and language (excluding ORACLE and GOLD)." |
|
} |
|
} |
|
} |
|
} |