|
{ |
|
"paper_id": "C16-1033", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:02:42.437513Z" |
|
}, |
|
"title": "Data-Driven Morphological Analysis and Disambiguation for Morphologically Rich Languages and Universal Dependencies", |
|
"authors": [ |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "More", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Open University of Israel", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Reut", |
|
"middle": [], |
|
"last": "Tsarfaty", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Open University of Israel", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Parsing texts into universal dependencies (UD) in realistic scenarios requires infrastructure for morphological analysis and disambiguation (MA&D) of typologically different languages as a first tier. MA&D is particularly challenging in morphologically rich languages (MRLs), where the ambiguous space-delimited tokens ought to be disambiguated with respect to their constituent morphemes. Here we present a novel, language-agnostic, framework for MA&D, based on a transition system with two variants, word-based and morpheme-based, and a dedicated transition to mitigate the biases of variable-length morpheme sequences. Our experiments on a Modern Hebrew case study outperform the state of the art, and we show that the morpheme-based MD consistently outperforms our word-based variant. We further illustrate the utility and multilingual coverage of our framework by morphologically analyzing and disambiguating the large set of languages in the UD treebanks.", |
|
"pdf_parse": { |
|
"paper_id": "C16-1033", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Parsing texts into universal dependencies (UD) in realistic scenarios requires infrastructure for morphological analysis and disambiguation (MA&D) of typologically different languages as a first tier. MA&D is particularly challenging in morphologically rich languages (MRLs), where the ambiguous space-delimited tokens ought to be disambiguated with respect to their constituent morphemes. Here we present a novel, language-agnostic, framework for MA&D, based on a transition system with two variants, word-based and morpheme-based, and a dedicated transition to mitigate the biases of variable-length morpheme sequences. Our experiments on a Modern Hebrew case study outperform the state of the art, and we show that the morpheme-based MD consistently outperforms our word-based variant. We further illustrate the utility and multilingual coverage of our framework by morphologically analyzing and disambiguating the large set of languages in the UD treebanks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "A decade following the emergence of statistical parsers for English (Charniak, 1996; Bod, 1995) , the CoNLL data sets presented a new challenge: the development of data-driven statistical parsers that can be trained to parse any language given an appropriately annotated treebank (Buchholz and Marsi, 2006; Nivre et al., 2007) . These data sets facilitated the development of accurate, language-agnostic, dependency parsers (Nivre et al. (2006) , McDonald (2006) etc.), but not without shortcomings: they require that input tokens be morphologically analyzed and disambiguated in advance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 84, |
|
"text": "(Charniak, 1996;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 85, |
|
"end": 95, |
|
"text": "Bod, 1995)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 280, |
|
"end": 306, |
|
"text": "(Buchholz and Marsi, 2006;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 307, |
|
"end": 326, |
|
"text": "Nivre et al., 2007)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 424, |
|
"end": 444, |
|
"text": "(Nivre et al. (2006)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 447, |
|
"end": 462, |
|
"text": "McDonald (2006)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This latter assumption breaks down in realistic parsing scenarios where the morphological analysis of an input token may consist of multiple syntactic words to participate in the parse tree (Tsarfaty et al., 2010) . The universal dependencies (UD) initiative aims to remedy this by presenting a harmonized set of treebanks, now 54 and counting, with a unified annotation scheme and multilayered annotation. Specifically, UD data distinguishes the input space-delimited tokens from the (morpho)syntactic words that participate in the parse tree (Nivre et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 190, |
|
"end": 213, |
|
"text": "(Tsarfaty et al., 2010)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 544, |
|
"end": 564, |
|
"text": "(Nivre et al., 2016)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Efforts towards parsing texts into universal dependencies in realistic scenarios thus require language-agnostic infrastructure for automatic morphological analysis and disambiguation (MA&D) of data from typologically different languages. MA&D is particularly challenging in morphologically rich languages (MRLs), where space-delimited input tokens may have multiple analyses, only one relevant in context. This morphological ambiguity of a token is typically represented as a lattice. The term Morphological Disambiguation (MD) refers to selecting a single path through the morphological analysis (MA) lattice.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In Semitic languages, MD is particularly challenging (Adler, 2007; Bar-haim et al., 2008; Shacham and Wintner, 2007; Pasha et al., 2014; Habash and Rambow, 2005) . To illustrate, Figure 1 shows the MA lattice of the Hebrew phrase 'bclm hneim' 1 (literally: in-shadow-of-them the-pleasant, meaning: in their pleasant shadow). Different paths in the lattice represent different disambiguation decisions. In This work is licenced under a Creative Commons Attribution 4.0 International License. License details: http:// creativecommons.org/licenses/by/4.0/", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 66, |
|
"text": "(Adler, 2007;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 67, |
|
"end": 89, |
|
"text": "Bar-haim et al., 2008;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 90, |
|
"end": 116, |
|
"text": "Shacham and Wintner, 2007;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 117, |
|
"end": 136, |
|
"text": "Pasha et al., 2014;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 137, |
|
"end": 161, |
|
"text": "Habash and Rambow, 2005)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 179, |
|
"end": 187, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "1 We transliterate as in Sima'an et al. (2001) . Figure 1 : An example of an MA lattice of the phrase \"bclm hneim\" (\"in their pleasant shadow\") in transliterated Hebrew. Edges mark syntactic words, and double circles mark white spaces.", |
|
"cite_spans": [ |
|
{ |
|
"start": 25, |
|
"end": 46, |
|
"text": "Sima'an et al. (2001)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 49, |
|
"end": 57, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "the context of 'bclm hneim', the correct path of 'bclm' is 'b-cl-(fl)-hm', \"in-shadow-(of)-them\". In the context of another sentence, the token 'bclm' may be \"Betzelem\", the name of a famous organization.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In MRLs, MD is also more subtle than simply segmenting the space-delimited tokens. Many MRLs are fusional, and clitics may be fused into hosts. In Figure 1 , in the phrase 'b-cl-(fl)-hm' (literally: in-shadow-(of)-them), the possessive 'fl' (of) is fused into the pronoun 'hm' (them) and remains implicit in the surface form. Such fusion results in an ambiguous number of morphosyntactic nodes that participate in the analysis, impacting downstream applications such as syntactic and semantic parsing, translation, etc.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 147, |
|
"end": 155, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Previous work on MA&D in MRLs, and in Semitic languages in particular (Adler, 2007; Bar-Haim et al., 2005; Shacham and Wintner, 2007; Pasha et al., 2014) , relied on language-specific lexica and cannot be executed cross-linguistically. General CRF implementations, such as MarMoT (M\u00fcller et al., 2013) , that can be applied across languages, assume an unrealistic, gold pre-segmented setting (Bjorkelund et al., 2013) . For generic morphological segmentation, Morfessor (Smit et al., 2014) uses a max-likelihood in semi-supervised settings, but it cannot handle the rich labeling of morphological segments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 83, |
|
"text": "(Adler, 2007;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 84, |
|
"end": 106, |
|
"text": "Bar-Haim et al., 2005;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 107, |
|
"end": 133, |
|
"text": "Shacham and Wintner, 2007;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 134, |
|
"end": 153, |
|
"text": "Pasha et al., 2014)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 280, |
|
"end": 301, |
|
"text": "(M\u00fcller et al., 2013)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 392, |
|
"end": 417, |
|
"text": "(Bjorkelund et al., 2013)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 470, |
|
"end": 489, |
|
"text": "(Smit et al., 2014)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper we present a general, language-agnostic solution the for the MA&D task. We target joint morphological segmentation and tagging, as has been advocated in monolingual cases (Zhang and Clark, 2011; Bar-haim et al., 2008; Adler and Elhadad, 2006; Habash and Rambow, 2005) , in universal settings. Our technical approach extends the transition-based framework for structured prediction of Zhang and Clark (2011) . We define and implement two MD variants: word-based and morpheme-based. We present the best MA&D results to date, and demonstrate that the morpheme-based variant consistently outperforms our word-based one, while providing state-of-the-art results on full-fledge, fine-grained, MD of Hebrew. Furthermore, our MA&D framework is intentionally designed with language independence in mind. Devoid of requiring language-specific resources, we show robust and competitive MA&D performance on MRLs and non-MRLs alike, for circa 50 languages in the most recent release of UD treebanks (Nivre et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 208, |
|
"text": "(Zhang and Clark, 2011;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 209, |
|
"end": 231, |
|
"text": "Bar-haim et al., 2008;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 232, |
|
"end": 256, |
|
"text": "Adler and Elhadad, 2006;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 257, |
|
"end": 281, |
|
"text": "Habash and Rambow, 2005)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 398, |
|
"end": 420, |
|
"text": "Zhang and Clark (2011)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 1000, |
|
"end": 1020, |
|
"text": "(Nivre et al., 2016)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We propose a data-driven framework for MA&D of MRLs and non-MRLs alike. The MA component implements a function that maps each input sentence to its MA lattice, and the MD component implements a transition-based model that accepts the lattice as input and returns a selected path as output.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Challenges and Formal Settings", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Formally, our transition system is a quadruple S = (C, T, c s , C t ), where C is a set of configurations, T is a set of transitions, c s is an initialization function, and C t \u2286 C is a set of terminal configurations. A transition sequence y of length n, y = c 0 , t 1 (c 0 ), ..., t n (c n\u22121 ), starts with an initial configuration c 0 = c s (x) for the input sentence x and ends with a terminal configuration c n \u2208 C t , where t i \u2208 T and c n = t n (c n\u22121 ) \u2208 C t . We employ an objective function F where x is the input sentence and GEN (x) is the set of possible transition sequences for x:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Challenges and Formal Settings", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "F (x) = argmax y\u2208GEN (x) Score(y)", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Challenges and Formal Settings", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To compute Score(y), y \u2208 GEN (x) is mapped to a global feature vector \u03a6(y) \u2208 N d , where each feature is a count of occurrences of a pattern defined by a feature function \u03c6. The feature vector \u03a6(y) is defined via a set of d feature functions", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Challenges and Formal Settings", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "{\u03c6 i } d i=1 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Challenges and Formal Settings", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The way \u03a6 is defined effectively determines the quality of the parser, since the feature model captures linguistic information to which the model learns to assign weights. Given this vector, Score(y) is computed by multiplying \u03a6(y) with a weights vector \u03c9 \u2208 R d .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Challenges and Formal Settings", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "Score(y) = \u03a6(y) \u2022 \u03c9 = c j \u2208y d i=1 \u03c9 i \u03c6 i (c j )", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Challenges and Formal Settings", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Following Zhang and Clark (2011) , our system learns the weights vector \u03c9 \u2208 R d via the generalized perceptron, using the early-update averaged variant of Collins and Roark (2004) . The algorithm iterates through a gold-annotated corpus, each sentence is disambiguated (decoded) with the last known weights, and if the decoded result differs from the gold standard, the weights are updated. As in Zhang and Clark (2011) , decoding is based on the beam search algorithm, where a number of possible parse sequences are evaluated concurrently to mitigate irrecoverable prediction errors. At each step, the transition system applies all valid applicable transitions to all candidates. The B highest scoring expanded candidates are maintained and passed on to the next step. Those that don't make the B mark, fall off the beam.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 32, |
|
"text": "Zhang and Clark (2011)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 155, |
|
"end": 179, |
|
"text": "Collins and Roark (2004)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 397, |
|
"end": 419, |
|
"text": "Zhang and Clark (2011)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Challenges and Formal Settings", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our MRL setting (cf. Tsarfaty et al. (2010) ) poses three technical challenges to this general scheme: (i) the formal challenge: how should we define a transition system for MA&D? (ii) the learning challenge: how can we define feature functions that learn morphological phenomena? (iii) the decoding challenge: how can we effectively compare morpheme sequences of variable length?", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 43, |
|
"text": "Tsarfaty et al. (2010)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Challenges and Formal Settings", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Let x = x 1 ...x k be an input sentence of k tokens and L = M A(x 1 ), ..., M A(x k ) be the morphological ambiguity lattice for x, where L is a contiguous series of word-lattices L i = M A(x i ) connected top to bottom, as illustrated in Figure 1 . Each word lattice L i is a set of sequences of morphemes, and each sequence is a single disambiguated analysis for x i . We define the morphosyntactic representation (MSR) of an arc in the lattice as a tuple m = (s, e, f, t, g) with lattice nodes s and e marking the start and end of a morpheme, a form f , a part-of-speech tag t, and a set g of attribute:value grammatical properties.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 239, |
|
"end": 247, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Our Proposed Solution", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Defining Configurations. A configuration for the MD transition system is a quadruple (L, n, i, M ) where L = M A(x) is the sentence-lattice, n is a node in L, i is the 0-based index of a word-lattice in L, and M is a set of disambiguated morphemes (i.e., selected arcs). The terminal configuration is defined to Defining Transitions. There are two conceivable ways to make morphological disambiguation decisions, in a word-based (WB), and in a morpheme-based (MB), fashion, in the terminology of . In WB models (a.k.a token-level in the UD terminology), the disambiguation decision determines a complete path of morphemes between token-boundaries. In the lattice, this refers to selecting a path between two token-boundary nodes (double circles). MB disambiguation decisions (also termed lexical-level, or word-level in UD) occur at any node in the lattice indicating a morpheme boundary, with more than one outgoing arc, choosing a specific arc m among them.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our Proposed Solution", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "be C t = {(L, top(L), tokens(L), M )} for any L, M ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our Proposed Solution", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Interim: Word-Based or Morpheme-Based? WB and MB strategies face contradicting, and complementary, challenges. In WB models, disambiguation decisions are complex, and learning how to score them is expected to suffer from data sparseness. MB models, on the other hand, over-generalize in terms of possible morphological combinations, and learning to score combinations may fail to generalize and be prone to over-fitting. On top of that, morpheme sequences are longer than word sequences, which, in a transition-based system, is known to be more error prone. Finally, variable-length sequences introduce length biases which negatively impact performance. Since MA&D is the base for the NLP pipeline, it is critical to settle this debate empirically and establish the basis for downstream tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our Proposed Solution", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Parameterizing Transitions. A transition system is required to distinguish between all possible decisions it can make at a given point. At the same time, the model should be able to generalize from seen decisions to unseen ones, and effectively learn to disambiguate open-class words and out-of-vocabulary items. To satisfy these desiderata, we define a delexicalization projection for a pre-defined set of parts-ofspeech tags O capturing open-class categories. Simply put, this projection neutralizes the lattice-nodes specific indices, and, for any tag t \u2208 O, it further neutralizes the lexical form. Formally:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our Proposed Solution", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "DLEX O (m) = ( , , , t, g) if t \u2208 O ( , , f, t, g) otherwise (3) 3.1 Word-Based Modeling", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our Proposed Solution", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The Transition System For word-based (WB) modeling, a single transition morphologically disambiguates whole word-lattices such that the node n of a configuration is always at a word boundary (a node that is a bottom, top, or both, of word-lattices of L). We define the transitions in the WB system as an open set of transitions termed M D s , specifying the parameter s as a single path:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our Proposed Solution", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "M D s : (L, n, i, M ) \u2192 (L, q, i + 1, M \u222a {m 0 , .., m j })", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Our Proposed Solution", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Here, {m 0 , ..., m j } \u2208 L form a contiguous path of arcs, where m 0 starts at node n, m j ends at node q (they can be the same arc), and s is the projected paths", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our Proposed Solution", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "s = DLEX O (m 0 ), ..., DLEX O (m j ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our Proposed Solution", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "A terminal configuration will therefore contain the union of contiguous paths of word-lattices in L, together forming a complete morphological disambiguation of the ambiguous tokens of the input sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our Proposed Solution", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Learning We define three types of word-lattice properties: o -the surface form of the token itself, a -the DLEX-projected lattice (all MSRs projected by the delexicalization function), and p -a chosen disambiguated path, which only exists for previously processed lattices. Using these properties, we define baseline feature templates modeled after POS tagging: unigram, bigram, and trigram combinations of o and a, and p-based features, which predict the next disambiguation decision based on the previous one(s).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Our Proposed Solution", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The Transition System For morpheme-based (MB) modeling, a single transition chooses an outgoing arc of the current node n in the lattice, requiring a disambiguation decision if (and only if) there is more than one outgoing arc. Again we define the transitions as an open set of transitions termed M D s , now specifying s as a single arc:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Morpheme-Based Modeling", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "M D s : (L, n, i, M ) \u2192 (L, q, j, M \u222a {m})", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Morpheme-Based Modeling", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Here, m is a morpheme (n, q, f, t, g) \u2208 L, and s = DLEX O (m). If node q is at a word boundary, then j = i + 1, otherwise j = i. For a terminal configuration, each m \u2208 M is an outgoing arc of the end node of another arc in M (with the exception of the first morpheme, starting at bottom(L)) forming a contiguous path that disambiguates x.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Morpheme-Based Modeling", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Learning In the MB model we can access specific information concerning the current node inside the word-lattice. We define the properties f , t and g, corresponding to arcs' form, part-of-speech and morphological attribute:value pairs. We use these properties in various unigram, bigram, and trigram combinations, in parallel with the WB model. As in the WB model we also define the property p as the path in the previously disambiguated word-lattices. We define the property n to be the set of DLEXprojected outgoing morphemes of the current node (this parallels the property a of WB models, but at morpheme granularity). Similarly to the WB case, we use unigram, bigram, and trigram combinations of these properties as well.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Morpheme-Based Modeling", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Decoding Since the number of arcs in lattices' paths for x may vary, so do the number of transitions in our morpheme-based transition system. This violates a basic assumption of standard beam search decoding -that the number of transitions is a deterministic function of the input. There are two inherent biases in varied-length transition sequences driven by the general perceptron algorithm. The beam search algorithm tests the best candidate after each step for goal fulfillment. A short sequence may temporarily be the best candidate and fulfill the goal, while longer (and possibly correct) sequences are incomplete and may be lost. On the other hand, long sequences have more features, therefore their score may be arbitrarily inflated. So the score may be higher for longer paths, even though a shorter one may be correct and may fall off the beam.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Morpheme-Based Modeling", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To address these challenges we introduce a special transition we call ENDTOKEN (ET), that explicitly increments i, instead of implicitly in M D s . So, in equation 4 we set j = i and apply:",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Morpheme-Based Modeling", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "ET : (L, n, i, M ) \u2192 (L, n, i + 1, M )", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Morpheme-Based Modeling", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "ET is required to occur exactly once at the end of the word-lattice, when n is the top of some word-lattice in L. Set aside from other transitions, ET has its own set of features. Other than incrementing i, ET has no effect on configurations, but it does cause a re-ordering of candidates in the beam during decoding, at each token boundary. Note that ET kicks in only for variable length lattices. On same-length lattices, ET is skipped and equation 4 remains as is -the process essentially falls back on the standard, same-length decoding.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Morpheme-Based Modeling", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "An MD transition sequence thus becomes the union of disjoint sets of configurations y = y md \u222a y et , and changes Score in Equation 2, where |y et | is the # of tokens in L with variable length paths:",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Morpheme-Based Modeling", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "d i=1 \u03c9 md i \u03c6 md i (y md ) + d j=1 \u03c9 et j \u03c6 et j (y et ) = c k \u2208y md d i=1 \u03c9 md i \u03c6 md i (c k ) + c l \u2208yet d j=1 \u03c9 et j \u03c6 et j (c l )", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Morpheme-Based Modeling", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "While the number of morphemes, and therefore |y md |, can vary, |y et | is deterministic per lattice. Using this anchor, the features of the ET transition provide a counter-balance to the effects of varied-length sequences by scoring fully disambiguated paths of each word-lattice individually, occurring a fixed amount of times for all paths.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Morpheme-Based Modeling", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "ENDTOKEN vs. IDLE transitions Variable-length sequences in beam search also exist in the structured prediction of constituency trees. Zhu et al. (2013) introduced an IDLE transition (also adopted in Honnibal and Johnson (2014) and Zhang et al. (2014) ) that, like ET, has no effect on configuration, but unlike ET, occurs only at the end of the parsing sequence, an arbitrary number of times, until all parsing sequences are complete.", |
|
"cite_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 151, |
|
"text": "Zhu et al. (2013)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 199, |
|
"end": 226, |
|
"text": "Honnibal and Johnson (2014)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 231, |
|
"end": 250, |
|
"text": "Zhang et al. (2014)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Morpheme-Based Modeling", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "While IDLE transitions make sense when applied after a complete hierarchical structure is predicted -where they may learn to rerank candidates based on features that are visible at the top of the structure (the root) -it is futile to use last-seen features that arbitrarily exist at the end of a morphological disambiguation (linear) sequence, to rerank candidates again and again. This is because at the end of the sequence, we can no longer save candidates that were lost earlier on due to length discrepancies.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Morpheme-Based Modeling", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To mitigate this, one might try to create IDLE padding with global features spanning the entire disambiguated path. Even then, the learned model parameters would not generalize well, since these features will be applied an arbitrary number of times -the maximal length of an occasional word lattice we are at -which has no linguistic significance, and may arbitrarily inflate certain scores.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Morpheme-Based Modeling", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "ET transitions, in contrast, occur right when they are needed -at the boundary of a word-lattice. This position enables the reordering of candidates right after a length discrepancy may have been introduced. Moreover, ET scores are counted against the global score a fixed number of times per lattice, for all, any length, candidates. This enables a fair comparison of all paths per lattice.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Morpheme-Based Modeling", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We empirically evaluate the proposed models, and investigate their strengths, weaknesses, and bounds. We start with a detailed investigation of MA&D in the Semitic language Modern Hebrew, which is known for its rich morphology and significant ambiguity. We then verify the cross-linguistic coverage of the models on the set of UD treebanks (Nivre et al., 2016) , to validate their efficacy.", |
|
"cite_spans": [ |
|
{ |
|
"start": 340, |
|
"end": 360, |
|
"text": "(Nivre et al., 2016)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Empirical Evaluation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We implement yap (yet another parser), a general-purpose transition-based framework for structured prediction, based on beam search and the generalized perceptron. We extend it with the models described herein. We implement the WB, MB variants, ET transitions, and evaluate different feature settings. We report the F 1 metric comparing the MSRs of predicted vs. gold lattice arcs, for full morphological disambiguation (segmentation, POS tags, and all morphological properties), and for segmentation and POS tags only. For comparison with previous work, we also report token-level accuracy (while F 1 awards partial success on word-lattices, token-level accuracy requires exact match on a whole path per token).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Empirical Evaluation", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Setup We evaluate MA&D performance on the Modern Hebrew section of the SPMRL 2014 Shared Task (Seddah et al., 2014) , which has been derived from the Unified-SD treebank of Tsarfaty (2013) . We updated the treebank to provide consistent theories for the treebank annotation and lexicographic resources (Itai and Wintner, 2008) , a consistency that we found lacking in the SPMRL 2014 Hebrew section. We use the standard split, and train on the standard train set (5k sentences). Here we provide results and in-depth analysis on dev and confirm our findings on test.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 115, |
|
"text": "(Seddah et al., 2014)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 173, |
|
"end": 188, |
|
"text": "Tsarfaty (2013)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 302, |
|
"end": 326, |
|
"text": "(Itai and Wintner, 2008)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Case for Modern Hebrew", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "A pre-condition for the execution of our MD models is an M A(x) function that generates word-lattices for x ( \u00a72). We start off with a morphological analyzer that we implemented, called HEBLEX, which relies on the Ben-Gurion Hebrew Lexicon used by Adler and Elhadad (2006) . The lexicon contains full analyses for 567,483 words and 102 prefixes. HEBLEX uses the lexicon to determine the various combinations of prefixes and words that form valid tokens. This process is far from trivial due to morphological fusion, as some morphemes are implicit ( \u00a71).", |
|
"cite_spans": [ |
|
{ |
|
"start": 248, |
|
"end": 272, |
|
"text": "Adler and Elhadad (2006)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Case for Modern Hebrew", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Although the lexicon is quite large, there are still tokens which are out-of-vocabulary (OOV). OOV tokens may be of two types: it may be that an entire string is out of the lexicon (mostly proper nouns) or that the affixes and the open class items are seen, but their combination has not yet been encountered. We address OOV by assigning proper noun analyses to entire tokens, as well as to all arcs combined with seen affixes. This adds ambiguity to the lattices, but gives the MD the chance to select a correct path.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Case for Modern Hebrew", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In our experiments we aim to quantify exactly the effect of lexical coverage of the MA on MD accuracy. To this end, we add an option to infuse missing gold analyses into the MA lattices provided by HEBLEX and present two sets of results: once disambiguating lattices with infused gold analyses (ideal MA), and once without infusing gold analyses (realistic MA).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Case for Modern Hebrew", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Results Tables (1a), (1b) present our investigation of the WB and MB models on the dev set, respectively, with different feature templates. Our results show that the MB disambiguation consistently outperforms our WB variant, in various feature template settings. Moreover, the ET transition consistently improves performance, with best results for Hebrew at F 1 scores of 94.3 (94.9) for full MD (seg/POS only). The token-level accuracy for our best results is 93.07 (93.9) for full MD (seg/POS).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Case for Modern Hebrew", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "These results were obtained on infused lattices, that include the gold path as one of the alternatives. In order to gauge the effect of incomplete lexical coverage, we disable infusion of the gold analyses into the HEBLEX lattices. We then observe a drop to F 1 scores of 89.62 (92.06) and token accuracy of 87.72(90.85). To set our results in context, we applied our best model in \"English-like\" settings for tagging, with gold pre-segmented text. F 1 then increases to 96.82 (97.44). That is, in \"English-like\" settings, our tagging accuracy (97.44) is as high as state-of-the-art English tagging (Manning, 2011) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 599, |
|
"end": 614, |
|
"text": "(Manning, 2011)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Case for Modern Hebrew", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "MarMoT (M\u00fcller et al., 2013) , a state of the art CRF tagger, obtains F 1 93.38 on full MD on this set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 28, |
|
"text": "(M\u00fcller et al., 2013)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Case for Modern Hebrew", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Next we aim to verify that ET transitions indeed act as intended. We classify a sequence length error as either an overshoot (predicted morphological disambiguation sequence is longer than gold) or undershoot (predicted shorter than gold). Without ET, in the infused setting, 36.8% of sentences have incorrect length and the overshoot:undershoot ratio is 6.6:1. Adding ET transitions results in 31.8% length errors, correcting 20.62% of the overshoot errors, resulting in a ratio of 4:1. In the un-infused setting, 41.4% of the sentences have incorrect length with a ratio of 6.39:1. Adding ET results in 36% length errors, correcting 20.67% of the overshoot errors, resulting in a ratio of 3.7:1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Case for Modern Hebrew", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Hebrew previous results are non-trivial to compare to due to significant changes of the treebank along the way and unavailable code of previous work. The most relevant results to ours are by Adler (2007) , who reports state-of-the-art results for Modern Hebrew in realistic (non-infused) setting, with self-reported token accuracy of 90% (93%) on a different evaluation set. For his prediction on our dev set, F 1 evaluation yields 85.74 (87.95), much lower than ours. Segmentation F 1 for Adler is 96.35, while ours is 97.6. We confirm our findings on the test set, for which Adler F 1 yields 82.91 (85.56). Our best model now yields 86.23 (88.85) and 92.96 (93.73) in realistic and infused settings, respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 191, |
|
"end": 203, |
|
"text": "Adler (2007)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Case for Modern Hebrew", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Setup We evaluate the cross-linguistic coverage of our MD models on the UD set. We parse 48 UD treebanks from the UD1.3 release (Nivre et al., 2016) , training on the train set and evaluating on test. 2 We implement a universal data-driven morphological analyzer, that can be trained out-of-the-box along with our MD models for any treebank in the CoNLL-U format. We generate a dictionary for each language from its train set by collecting all seen analyses of each token in the training data, where an analysis is composed of MSRs that contain a lemma, POS, and the full set of morphological features. The dictionary maps each token to a set of MSR sequences, which then compose their ambiguous MA lattices. For out-of-vocabulary (OOV) tokens, the MA pre-computes the cardinality of each coarse POS -the number of unique tokens per coarse POS -and consider the top 5 POS as \"open-class\". For these top 5 POS, the MA computes the 50 highest-frequency MSRs (POS + morph. properties) to be used as the OOV lattice of an OOV token. When applying MA to the training set, we add the OOV lattice to tokens whose known analysis contains an open-class POS. The model thus encounters during training a larger space of states than the observed one, and learns to accurately apply transitions in OOV circumstances at test time.", |
|
"cite_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 148, |
|
"text": "(Nivre et al., 2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 201, |
|
"end": 202, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Case for Universal Dependencies", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Results Table 2 reports F 1 accuracy for full MD and seg/POS for UD languages that do not require segmentation. For most large train sets (> 200k tokens), we observe 2-3 points absolute drop from infused (ideal) to uninfused (realistic) setting. This suggests that when large train sets exist, our datadriven MA is a viable economic alternative to costly hand-crafted monolingual lexical resources. To scrutinize our realistic results we also report non-OOV-only F 1 for 5k limited uninfused setting. Here we see that for~80% of languages, our results are on a par with a state-of-the-art tagger, MarMot, retrained on these data, within 0.035 (or less) points gap. This demonstrates that our disambiguation capacity is onpar with MarMot, where performance gaps come mostly from our OOV strategy (which is intentionally restrained, to allow handling of MRL segmentation that is handled by MarMot). Table 3 shows results for UD MRLs that require segmentation, contrasting results on gold pre-segmented input and un-segmented raw data. For raw data we see a minor,~0.02, drop in F 1 , compared to gold-segmented settings. That is, our model still retains the competitive MA&D performance, in a single, universal, trainable modelwe attribute this to our joint segmentation and tagging strategy, which overcomes error propagation. Table 3 : MA&D in segmented languages: F 1 scores of the languages in UD which require morphological segmentation. The upper line indicates full MD, the lower line indicates segmentation and POS only. The left hand side shows results for GOLD segmentation, the right hand side for input lattices. MM columns are results for MarMoT. Results in parentheses are F 1 scores for non-OOV-only tokens.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 15, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 897, |
|
"end": 904, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1326, |
|
"end": 1333, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The Case for Universal Dependencies", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We present an MD transition-based system that can effectively cope with extreme morphological ambiguities in MRLs. To the best of our knowledge, this is the first joint framework for MRL segmentation and tagging in a transition-based setup. Moreover, we present the best MA&D results for Modern Hebrew to date, and the first ever set of MA&D results for the most recent release of UD treebanks (UD1.3). 4 Our system provides a first tier for dependency parsing in real-world scenarios, dispensing with the need of external pre-processing. Furthermore, this transition-based model can be extended into a joint model for complete morphological and syntactic analysis, as has been previously advanced in phrase-based parsing (Tsarfaty, 2006; Cohen and Smith, 2007; Green and Manning, 2010) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 403, |
|
"end": 404, |
|
"text": "4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 722, |
|
"end": 738, |
|
"text": "(Tsarfaty, 2006;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 739, |
|
"end": 761, |
|
"text": "Cohen and Smith, 2007;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 762, |
|
"end": 786, |
|
"text": "Green and Manning, 2010)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We do not present results for 6 languages: cs,kk,es_ancora,en_esl,pt_br,ja_ktc, as some or all form fields are empty.3 Shortly before submitting this article, UDPipe (Straka et al., 2016), a tool for tokenization, morphological analysis, tagging and parsing, had been released. As its name suggests, UDPipe is a pipeline implementation packaging together separate tools for different tasks. Our approaches and ultimate goals are rather different. We present joint morphological segmentation and tagging, as opposed to a pipeline. Moreover, our framework can be extended into a single transition-based system performing all tasks jointly, overcoming overheads and error propagation, as we intend to address next.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Upon publication we will make our source code, executables and trained models available at https://github.com/habeanf/yap .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "An unsupervised morpheme-based hmm for Hebrew morphological disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Meni", |
|
"middle": [], |
|
"last": "Adler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Elhadad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "The Association for Computer Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Meni Adler and Michael Elhadad. 2006. An unsupervised morpheme-based hmm for Hebrew morphological disambiguation. In Nicoletta Calzolari, Claire Cardie, and Pierre Isabelle, editors, ACL. The Association for Computer Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Hebrew Morphological Disambiguation: An Unsupervised Stochastic Word-based Approach", |
|
"authors": [ |
|
{ |
|
"first": "Meni", |
|
"middle": [], |
|
"last": "Adler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Meni Adler. 2007. Hebrew Morphological Disambiguation: An Unsupervised Stochastic Word-based Approach. Ph.D. thesis, Ben-Gurion University of the Negev, Beer-Sheva, Israel.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Part-of-speech tagging for Hebrew and other semitic languages", |
|
"authors": [ |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Bar-Haim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Khalil", |
|
"middle": [], |
|
"last": "Simaan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoad", |
|
"middle": [], |
|
"last": "Winter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roy Bar-Haim, Khalil Simaan, and Yoad Winter. 2005. Part-of-speech tagging for Hebrew and other semitic languages. Master's thesis, Technion, Haifa, Israel.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Part-of-speech tagging of Modern Hebrew text", |
|
"authors": [ |
|
{ |
|
"first": "Roy", |
|
"middle": [], |
|
"last": "Bar-Haim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Khalil", |
|
"middle": [], |
|
"last": "Sima'an", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoad", |
|
"middle": [], |
|
"last": "Winter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Natural Language Engineering", |
|
"volume": "14", |
|
"issue": "2", |
|
"pages": "223--251", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roy Bar-haim, Khalil Sima'an, and Yoad Winter. 2008. Part-of-speech tagging of Modern Hebrew text. Natural Language Engineering, 14(2):223-251.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "(re)ranking meets morphosyntax: State-of-the-art results from the SPMRL 2013 shared task", |
|
"authors": [ |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "Bjorkelund", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ozlem", |
|
"middle": [], |
|
"last": "Cetinoglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Farkas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Mueller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Seeker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Parsing of Morphologically-Rich Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "135--145", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anders Bjorkelund, Ozlem Cetinoglu, Richard Farkas, Thomas Mueller, and Wolfgang Seeker. 2013. (re)ranking meets morphosyntax: State-of-the-art results from the SPMRL 2013 shared task. In Proceedings of the Fourth Workshop on Statistical Parsing of Morphologically-Rich Languages, pages 135-145, Seattle, Washington, USA, October. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Enriching Linguistics With Statistics", |
|
"authors": [ |
|
{ |
|
"first": "Rens", |
|
"middle": [], |
|
"last": "Bod", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rens Bod. 1995. Enriching Linguistics With Statistics. Ph.D. thesis, University of Amsterdam.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "CoNLL-X shared task on multilingual dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Sabine", |
|
"middle": [], |
|
"last": "Buchholz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erwin", |
|
"middle": [], |
|
"last": "Marsi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of CoNLL-X", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "149--164", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sabine Buchholz and Erwin Marsi. 2006. CoNLL-X shared task on multilingual dependency parsing. In Proceed- ings of CoNLL-X, pages 149-164.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Tree-Bank Grammars", |
|
"authors": [ |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Charniak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "AAAI/IAAI", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "1031--1036", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eugene Charniak. 1996. Tree-Bank Grammars. In AAAI/IAAI, Vol. 2, pages 1031-1036.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Joint morphological and syntactic disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. B. Cohen and N. A. Smith. 2007. Joint morphological and syntactic disambiguation. In Proceedings of EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Incremental parsing with the perceptron algorithm", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Roark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 42Nd Annual Meeting on Association for Computational Linguistics, ACL '04", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Collins and Brian Roark. 2004. Incremental parsing with the perceptron algorithm. In Proceedings of the 42Nd Annual Meeting on Association for Computational Linguistics, ACL '04, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "A single framework for joint morphological segmentation and syntactic parsing", |
|
"authors": [ |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Reut", |
|
"middle": [], |
|
"last": "Tsarfaty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoav Goldberg and Reut Tsarfaty. 2008. A single framework for joint morphological segmentation and syntactic parsing. In Proceedings of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Better Arabic parsing: Baselines, evaluations, and analysis", |
|
"authors": [ |
|
{ |
|
"first": "Spence", |
|
"middle": [], |
|
"last": "Green", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 23rd International Conference on Computational Linguistics, COLING '10", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "394--402", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Spence Green and Christopher D. Manning. 2010. Better Arabic parsing: Baselines, evaluations, and analysis. In Proceedings of the 23rd International Conference on Computational Linguistics, COLING '10, pages 394-402, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Arabic tokenization, part-of-speech tagging and morphological disambiguation in one fell swoop", |
|
"authors": [ |
|
{ |
|
"first": "Nizar", |
|
"middle": [], |
|
"last": "Habash", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Owen", |
|
"middle": [], |
|
"last": "Rambow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 43rd Annual Meeting on Association for Computational Linguistics, ACL '05", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "573--580", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nizar Habash and Owen Rambow. 2005. Arabic tokenization, part-of-speech tagging and morphological disam- biguation in one fell swoop. In Proceedings of the 43rd Annual Meeting on Association for Computational Linguistics, ACL '05, pages 573-580, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Joint incremental disfluency detection and dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Honnibal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Transactions of the Association of Computational Linguistics", |
|
"volume": "2", |
|
"issue": "1", |
|
"pages": "131--142", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Honnibal and Mark Johnson. 2014. Joint incremental disfluency detection and dependency parsing. Transactions of the Association of Computational Linguistics -Volume 2, Issue 1, pages 131-142.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Language resources for Hebrew. Language Resources and Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Itai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuly", |
|
"middle": [], |
|
"last": "Wintner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "42", |
|
"issue": "", |
|
"pages": "75--98", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alon Itai and Shuly Wintner. 2008. Language resources for Hebrew. Language Resources and Evaluation, 42(1):75-98, March.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Proceedings, Part I, chapter Part-of-Speech Tagging from 97% to 100%: Is It Time for Some Linguistics?", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "171--189", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher D. Manning, 2011. Computational Linguistics and Intelligent Text Processing: 12th International Conference, CICLing 2011, Tokyo, Japan, February 20-26, 2011. Proceedings, Part I, chapter Part-of-Speech Tagging from 97% to 100%: Is It Time for Some Linguistics?, pages 171-189. Springer Berlin Heidelberg, Berlin, Heidelberg.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Discriminative Learning and Spanning Tree Algorithms for Dependency Parsing", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan McDonald. 2006. Discriminative Learning and Spanning Tree Algorithms for Dependency Parsing. Ph.D. thesis, University of Pennsylvanya.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Efficient higher-order crfs for morphological tagging", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "M\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Helmut", |
|
"middle": [], |
|
"last": "Schmid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas M\u00fcller, Helmut Schmid, and Hinrich Sch\u00fctze. 2013. Efficient higher-order crfs for morphological tag- ging. In Proceedings of EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Maltparser: A data-driven parser-generator for dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Nilsson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2216--2219", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joakim Nivre, Johan Hall, and Jens Nilsson. 2006. Maltparser: A data-driven parser-generator for dependency parsing. In Proceedings of LREC, pages 2216-2219.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "The CoNLL 2007 shared task on dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [], |
|
"last": "K\u00fcbler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Nilsson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deniz", |
|
"middle": [], |
|
"last": "Yuret", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the CoNLL Shared Task Session of EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "915--932", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joakim Nivre, Johan Hall, Sandra K\u00fcbler, Ryan McDonald, Jens Nilsson, Sebastian Riedel, and Deniz Yuret. 2007. The CoNLL 2007 shared task on dependency parsing. In Proceedings of the CoNLL Shared Task Session of EMNLP-CoNLL 2007, pages 915-932.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Madamira: A fast, comprehensive tool for morphological analysis and disambiguation of Arabic", |
|
"authors": [ |
|
{ |
|
"first": "Arfath", |
|
"middle": [], |
|
"last": "Pasha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohamed", |
|
"middle": [], |
|
"last": "Al-Badrashiny", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [ |
|
"El" |
|
], |
|
"last": "Kholy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramy", |
|
"middle": [], |
|
"last": "Eskander", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nizar", |
|
"middle": [], |
|
"last": "Habash", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manoj", |
|
"middle": [], |
|
"last": "Pooleery", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Owen", |
|
"middle": [], |
|
"last": "Rambow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC-2014). European Language Resources Association (ELRA)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arfath Pasha, Mohamed Al-Badrashiny, Mona Diab, Ahmed El Kholy, Ramy Eskander, Nizar Habash, Manoj Pooleery, Owen Rambow, and Ryan Roth. 2014. Madamira: A fast, comprehensive tool for morphological analysis and disambiguation of Arabic. In Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC-2014). European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Introducing the spmrl 2014 shared task on parsing morphologically-rich languages", |
|
"authors": [ |
|
{ |
|
"first": "Djam\u00e9", |
|
"middle": [], |
|
"last": "Seddah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [], |
|
"last": "K\u00fcbler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Reut", |
|
"middle": [], |
|
"last": "Tsarfaty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "103--109", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Djam\u00e9 Seddah, Sandra K\u00fcbler, and Reut Tsarfaty. 2014. Introducing the spmrl 2014 shared task on parsing morphologically-rich languages. pages 103-109.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Morphological disambiguation of Hebrew: A case study in classifier combination", |
|
"authors": [ |
|
{ |
|
"first": "Danny", |
|
"middle": [], |
|
"last": "Shacham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuly", |
|
"middle": [], |
|
"last": "Wintner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danny Shacham and Shuly Wintner. 2007. Morphological disambiguation of Hebrew: A case study in classifier combination. In Proceedings of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Building a tree-bank of Modern Hebrew text", |
|
"authors": [ |
|
{ |
|
"first": "Khalil", |
|
"middle": [], |
|
"last": "Sima'an", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Itai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoad", |
|
"middle": [], |
|
"last": "Winter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Altman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Nativ", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
            "venue": "Traitement Automatique des Langues", |
|
"volume": "", |
|
"issue": "2", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
            "raw_text": "Khalil Sima'an, Alon Itai, Yoad Winter, Alon Altman, and N. Nativ. 2001. Building a tree-bank of Modern Hebrew text. Traitement Automatique des Langues, 42(2).", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Morfessor 2.0: Toolkit for statistical morphological segmentation", |
|
"authors": [ |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Smit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sami", |
|
"middle": [], |
|
"last": "Virpioja", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stig-Arne", |
|
"middle": [], |
|
"last": "Gr\u00f6nroos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mikko", |
|
"middle": [], |
|
"last": "Kurimo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Demonstrations at the 14th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "21--24", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter Smit, Sami Virpioja, Stig-Arne Gr\u00f6nroos, and Mikko Kurimo. 2014. Morfessor 2.0: Toolkit for statistical morphological segmentation. In Proceedings of the Demonstrations at the 14th Conference of the European Chapter of the Association for Computational Linguistics, pages 21-24. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Udpipe: Trainable pipeline for processing conll-u files performing tokenization, morphological analysis, pos tagging and parsing", |
|
"authors": [ |
|
{ |
|
"first": "Milan", |
|
"middle": [], |
|
"last": "Straka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Hajic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jana", |
|
"middle": [], |
|
"last": "Strakov\u00e1", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": ";", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Khalid", |
|
"middle": [], |
|
"last": "Choukri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thierry", |
|
"middle": [], |
|
"last": "Declerck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Goggi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marko", |
|
"middle": [], |
|
"last": "Grobelnik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bente", |
|
"middle": [], |
|
"last": "Maegaard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Mariani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Milan Straka, Jan Hajic, and Jana Strakov\u00e1. 2016. Udpipe: Trainable pipeline for processing conll-u files perform- ing tokenization, morphological analysis, pos tagging and parsing. In Nicoletta Calzolari (Conference Chair), Khalid Choukri, Thierry Declerck, Sara Goggi, Marko Grobelnik, Bente Maegaard, Joseph Mariani, Helene Mazo, Asuncion Moreno, Jan Odijk, and Stelios Piperidis, editors, Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016), Paris, France, may. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Word-based or morpheme-based? annotation strategies for modern Hebrew clitics", |
|
"authors": [ |
|
{ |
|
"first": "Reut", |
|
"middle": [], |
|
"last": "Tsarfaty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reut Tsarfaty and Yoav Goldberg. 2008. Word-based or morpheme-based? annotation strategies for modern Hebrew clitics. In Proceedings of LREC.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Statistical parsing for morphologically rich language (spmrl): What, how and whither", |
|
"authors": [ |
|
{ |
|
"first": "Reut", |
|
"middle": [], |
|
"last": "Tsarfaty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Djame", |
|
"middle": [], |
|
"last": "Seddah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [], |
|
"last": "Kuebler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie", |
|
"middle": [], |
|
"last": "Candito", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennifer", |
|
"middle": [], |
|
"last": "Foster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yannick", |
|
"middle": [], |
|
"last": "Versley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ines", |
|
"middle": [], |
|
"last": "Rehbein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lamia", |
|
"middle": [], |
|
"last": "Tounsi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the first workshop on Statistical Parsing of Morphologically Rich Languages (SPMRL) at NA-ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reut Tsarfaty, Djame Seddah, Yoav Goldberg, Sandra Kuebler, Marie Candito, Jennifer Foster, Yannick Versley, Ines Rehbein, and Lamia Tounsi. 2010. Statistical parsing for morphologically rich language (spmrl): What, how and whither. In Proceedings of the first workshop on Statistical Parsing of Morphologically Rich Languages (SPMRL) at NA-ACL.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Integrated morphological and syntactic disambiguation for modern Hebrew", |
|
"authors": [ |
|
{ |
|
"first": "Reut", |
|
"middle": [], |
|
"last": "Tsarfaty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 21st International Conference on Computational Linguistics and 44th Annual Meeting of the Association for Computational Linguistics: Student Research Workshop, COLING ACL '06", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "49--54", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reut Tsarfaty. 2006. Integrated morphological and syntactic disambiguation for modern Hebrew. In Proceedings of the 21st International Conference on Computational Linguistics and 44th Annual Meeting of the Association for Computational Linguistics: Student Research Workshop, COLING ACL '06, pages 49-54, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "A unified morphosyntactic scheme for stanford dependencies", |
|
"authors": [ |
|
{ |
|
"first": "Reut", |
|
"middle": [], |
|
"last": "Tsarfaty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reut Tsarfaty. 2013. A unified morphosyntactic scheme for stanford dependencies. In Proceedings of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Syntactic processing using the generalized perceptron and beam search", |
|
"authors": [ |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Comput. Linguist", |
|
"volume": "37", |
|
"issue": "1", |
|
"pages": "105--151", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yue Zhang and Stephen Clark. 2011. Syntactic processing using the generalized perceptron and beam search. Comput. Linguist., 37(1):105-151, March.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Character-level chinese dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Meishan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wanxiang", |
|
"middle": [], |
|
"last": "Che", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1326--1336", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Meishan Zhang, Yue Zhang, Wanxiang Che, and Ting Liu. 2014. Character-level chinese dependency parsing. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1326-1336, Baltimore, Maryland, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Fast and accurate shift-reduce constituent parsing", |
|
"authors": [ |
|
{ |
|
"first": "Muhua", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenliang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingbo", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "434--443", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Muhua Zhu, Yue Zhang, Wenliang Chen, Min Zhang, and Jingbo Zhu. 2013. Fast and accurate shift-reduce con- stituent parsing. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics, ACL 2013, 4-9 August 2013, Sofia, Bulgaria, Volume 1: Long Papers, pages 434-443. The Association for Computer Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "where tokens(L) is the number of word-lattices that form L. The initial configuration function c s concatenates the L i lattices of the tokens into a single structure L = M A(x 1 ) + ... + M A(x k ), and sets n = bottom(L), i = 0 and M = \u2205.", |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null |
|
}, |
|
"TABREF0": { |
|
"html": null, |
|
"text": "(86.72) 86.9 (87.88) 86.7 (87.66) 92.19 (92.76) 91.98 (92.59) +ET 89.09 (89.81) 89.93 (90.71) 90 (90.85)", |
|
"num": null, |
|
"content": "<table><tr><td/><td>Word-Based</td></tr><tr><td>(a)</td><td colspan=\"2\">unigram 85.73 93.39 (93.94) +bigram +trigram +next unigram +next with bigram 92.84 (93.46)</td></tr><tr><td/><td>Morpheme-Based</td></tr><tr><td>(b)</td><td colspan=\"2\">unigram 90.67 (91.41) 91.12 (91.88) 90.09 (91.82) 93.56 (94.16) +bigram +trigram +next unigram +next with bigram 93.89 (94.49)</td></tr><tr><td/><td>+ET 92.68 (93.36) 92.74 (93.55) 92.64 (93.47) 94.27 (94.92)</td><td>94.33 (94.9)</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
            "text": "Dev. set results for Word-Based (a) and Morpheme-based (b) MD: F 1 for full morphological disambiguation (form, part of speech, morphological properties). (In parenthesis: F 1 for form and POS only.) The +ET lines indicate a variant that employs the ENDTOKEN transition at token boundaries.", |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"text": "MA&D for unsegmented languages : F 1 scores of the languages in UD that do not require morphological segmentation, the upper line indicates full MD, the lower line indicates segmentation and POS only. MM columns are results for MarMoT. Results in parentheses are F 1 scores for non-OOV-only tokens. .956 0.956 0.948 0.948 (0.956) 0.956 (0.972) 0.959 0.959 0.951 0.951 0.959 0.929 0.933 0.882 0.882 ca 13123 0.953 0.953 0.919 0.919 (0.958) 0.957 (0.965) 0.963 0.963 0.939 0.939 0.968 0.961 0.961 0.938 0.937 429157 0.969 0.969 0.936 0.936 (0.972) 0.973 (0.977) 0.977 0.977 0.954 0.954 0.", |
|
"num": null, |
|
"content": "<table><tr><td>Lang.</td><td>sents words</td><td colspan=\"3\">Gold Segmented 5k Train. Set (non-OOV accuracy) inf. +ET no inf. +ET MM</td><td>inf.</td><td colspan=\"3\">Gold Segmented Full Trainset +ET no inf. +ET</td><td>MM</td><td>Un-Segmented Full Trainset inf. +ET no inf. +ET</td></tr><tr><td>ar</td><td>6174</td><td colspan=\"2\">0.887 0.887 0.853</td><td colspan=\"3\">0.853 (0.87) 0.903 (0.924) 0.892 0.892</td><td>0.86</td><td>0.86 0.907 0.867 0.871</td><td>0.8</td><td>0.799</td></tr><tr><td/><td colspan=\"8\">225853 098</td><td>0.975 0.975 0.952 0.951</td></tr><tr><td>cs</td><td>23478</td><td colspan=\"3\">0.865 0.857 0.758 0.758 (0.862) 0.86 (0.921)</td><td colspan=\"4\">0.901 0.901 0.831 0.831 0.904 0.897 0.899 0.827 0.827</td></tr><tr><td>cac</td><td colspan=\"2\">472608 0.973 0.969</td><td>0.93</td><td colspan=\"5\">0.928 (0.984) 0.975 (0.988) 0.983 0.983 0.961 0.961 0.987 0.983 0.983 0.961 0.961</td></tr><tr><td>cs</td><td>860</td><td colspan=\"7\">0.845 0.844 0.812 0.801 (0.871) 0.887 (0.922) 0.848 0.847 0.816 0.812 0.887 0.832 0.822 0.804</td><td>0.8</td></tr><tr><td>cltt</td><td>26234</td><td colspan=\"3\">0.956 0.959 0.942 0.938 (0.988) 0.98 (0.991)</td><td colspan=\"2\">0.958 0.957</td><td>0.94</td><td>0.942 0.98</td><td>0.953 0.951 0.937 0.938</td></tr><tr><td>de</td><td>14118</td><td colspan=\"7\">0.928 0.928 0.921 0.921 (0.936) 0.927 (0.941) 0.929 0.929 0.921 0.921 0.927</td><td>0.93 0.928 0.921</td><td>0.92</td></tr><tr><td/><td colspan=\"8\">269626 0.928 0.928 0.921 0.921 (0.936) 0.927 (0.941) 0.929 0.929 0.921 0.921 0.927</td><td>0.93 0.928 0.921</td><td>0.92</td></tr><tr><td>es</td><td>14187</td><td colspan=\"7\">0.929 0.928 0.888 0.888 (0.943) 0.927 (0.956) 0.939 0.939 0.908 0.908 0.936</td><td>0.93 0.933</td><td>0.9</td><td>0.903</td></tr><tr><td/><td colspan=\"8\">382436 0.947 0.947 0.931 0.931 (0.959) 0.945 (0.967) 0.955 0.955 0.943 0.943 0.953 0.948 0.951 0.935 0.938</td></tr><tr><td>fa</td><td>4798</td><td 
colspan=\"7\">0.953 0.961 0.958 0.958 (0.972) 0.963 (0.978) 0.954 0.953 0.956 0.957 0.963 0.957 0.956 0.947 0.949</td></tr><tr><td/><td>121020</td><td colspan=\"3\">0.96 0.968 0.964 0.964 (0.974) 0.969 (0.98)</td><td colspan=\"4\">0.96 0.959 0.962 0.963 0.969 0.962 0.962 0.954 0.955</td></tr><tr><td>fi</td><td>14981</td><td>0.876 0.88</td><td colspan=\"6\">0.794 0.793 (0.921) 0.858 (0.944) 0.856 0.847 0.811 0.809 0.915 0.916 0.929 0.814 0.856</td></tr><tr><td>ftb</td><td colspan=\"8\">127602 0.914 0.917 0.856 0.855 (0.953) 0.904 (0.957) 0.883 0.869 0.843 0.844 0.943 0.939 0.95</td><td>0.849 0.899</td></tr><tr><td>fr</td><td>14554</td><td>0.931 0.93</td><td colspan=\"6\">0.921 0.918 (0.935) 0.939 (0.954) 0.943 0.951 0.925 0.925 0.949 0.944 0.942</td><td>0.92</td><td>0.921</td></tr><tr><td/><td colspan=\"8\">356216 0.949 0.947 0.941 0.939 (0.952) 0.955 (0.967) 0.959 0.965 0.942 0.942 0.964 0.958 0.957 0.937 0.938</td></tr><tr><td>he</td><td>5241</td><td colspan=\"5\">0.934 0.933 0.888 0.888 (0.907) 0.921 (0.953) 0.934 0.93</td><td colspan=\"2\">0.891 0.886 0.922 0.914 0.917 0.724 0.724</td></tr><tr><td/><td colspan=\"8\">135496 0.968 0.968 0.937 0.938 (0.947) 0.955 (0.974) 0.967 0.966 0.939 0.937 0.957 0.945 0.947 0.768 0.769</td></tr><tr><td>it</td><td>11699</td><td>0.945 0.946</td><td>0.91</td><td>0.911 (0.953) 0.943 (0.962)</td><td colspan=\"4\">0.96 0.958 0.945 0.945 0.97</td><td>0.953 0.957 0.935 0.937</td></tr><tr><td/><td colspan=\"2\">249330 0.959 0.96</td><td colspan=\"2\">0.924 0.925 (0.961) 0.958 (0.972)</td><td colspan=\"4\">0.97 0.967 0.956 0.955 0.978 0.961 0.965 0.946 0.947</td></tr><tr><td>ta</td><td>400</td><td colspan=\"3\">0.761 0.793 0.761 0.732 (0.912) 0.82 (0.929)</td><td colspan=\"4\">0.794 0.797 0.757 0.756 0.82</td><td>0.72 0.722 0.659 0.664</td></tr><tr><td/><td>6329</td><td colspan=\"7\">0.835 0.859 0.825 0.813 (0.927) 0.877 (0.938) 0.856 0.861 0.827 0.821 0.877 0.765 0.772 0.714 0.717</td></tr><tr><td>tr</td><td>3947</td><td 
colspan=\"7\">0.781 0.873 0.765 0.806 (0.935) 0.855 (0.933) 0.794 0.787 0.768 0.805 0.855</td><td>0.84 0.781 0.742</td><td>0.78</td></tr><tr><td/><td>40609</td><td>0.884 0.938</td><td>0.87</td><td colspan=\"5\">0.897 (0.968) 0.934 (0.964) 0.893 0.885 0.879 0.899 0.934 0.907 0.87</td><td>0.846 0.871</td></tr></table>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |