|
{ |
|
"paper_id": "N10-1015", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:49:40.918075Z" |
|
}, |
|
"title": "Joint Parsing and Alignment with Weakly Synchronized Grammars", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Burkett", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of California", |
|
"location": { |
|
"settlement": "Berkeley" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Blitzer", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of California", |
|
"location": { |
|
"settlement": "Berkeley" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of California", |
|
"location": { |
|
"settlement": "Berkeley" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Syntactic machine translation systems extract rules from bilingual, word-aligned, syntactically parsed text, but current systems for parsing and word alignment are at best cascaded and at worst totally independent of one another. This work presents a unified joint model for simultaneous parsing and word alignment. To flexibly model syntactic divergence, we develop a discriminative log-linear model over two parse trees and an ITG derivation which is encouraged but not forced to synchronize with the parses. Our model gives absolute improvements of 3.3 F 1 for English parsing, 2.1 F 1 for Chinese parsing, and 5.5 F 1 for word alignment over each task's independent baseline, giving the best reported results for both Chinese-English word alignment and joint parsing on the parallel portion of the Chinese treebank. We also show an improvement of 1.2 BLEU in downstream MT evaluation over basic HMM alignments.", |
|
"pdf_parse": { |
|
"paper_id": "N10-1015", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Syntactic machine translation systems extract rules from bilingual, word-aligned, syntactically parsed text, but current systems for parsing and word alignment are at best cascaded and at worst totally independent of one another. This work presents a unified joint model for simultaneous parsing and word alignment. To flexibly model syntactic divergence, we develop a discriminative log-linear model over two parse trees and an ITG derivation which is encouraged but not forced to synchronize with the parses. Our model gives absolute improvements of 3.3 F 1 for English parsing, 2.1 F 1 for Chinese parsing, and 5.5 F 1 for word alignment over each task's independent baseline, giving the best reported results for both Chinese-English word alignment and joint parsing on the parallel portion of the Chinese treebank. We also show an improvement of 1.2 BLEU in downstream MT evaluation over basic HMM alignments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Current syntactic machine translation (MT) systems build synchronous context free grammars from aligned syntactic fragments (Galley et al., 2004; Zollmann et al., 2006) . Extracting such grammars requires that bilingual word alignments and monolingual syntactic parses be compatible. Because of this, much recent work in both word alignment and parsing has focused on changing aligners to make use of syntactic information (DeNero and Klein, 2007; May and Knight, 2007; Fossum et al., 2008) or changing parsers to make use of word alignments (Smith and Smith, 2004; Burkett and Klein, 2008; Snyder et al., 2009) . In the first case, however, parsers do not exploit bilingual information. In the second, word alignment is performed with a model that does not exploit syntactic information. This work presents a single, joint model for parsing and word alignment that allows both pieces to influence one another simultaneously.", |
|
"cite_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 145, |
|
"text": "(Galley et al., 2004;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 146, |
|
"end": 168, |
|
"text": "Zollmann et al., 2006)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 447, |
|
"text": "(DeNero and Klein, 2007;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 448, |
|
"end": 469, |
|
"text": "May and Knight, 2007;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 470, |
|
"end": 490, |
|
"text": "Fossum et al., 2008)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 542, |
|
"end": 565, |
|
"text": "(Smith and Smith, 2004;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 566, |
|
"end": 590, |
|
"text": "Burkett and Klein, 2008;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 591, |
|
"end": 611, |
|
"text": "Snyder et al., 2009)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "While building a joint model seems intuitive, there is no easy way to characterize how word alignments and syntactic parses should relate to each other in general. In the ideal situation, each pair of sentences in a bilingual corpus could be syntactically parsed using a synchronous context-free grammar. Of course, real translations are almost always at least partially syntactically divergent. Therefore, it is unreasonable to expect perfect matches of any kind between the two sides' syntactic trees, much less expect that those matches be well explained at a word level. Indeed, it is sometimes the case that large pieces of a sentence pair are completely asynchronous and can only be explained monolingually.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our model exploits synchronization where possible to perform more accurately on both word alignment and parsing, but also allows independent models to dictate pieces of parse trees and word alignments when synchronization is impossible. This notion of \"weak synchronization\" is parameterized and estimated from data to maximize the likelihood of the correct parses and word alignments. Weak synchronization is closely related to the quasi-synchronous models of Smith and Eisner (2006; and the bilingual parse reranking model of Burkett and Klein (2008) , but those models assume that the word alignment of a sentence pair is known and fixed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 461, |
|
"end": 484, |
|
"text": "Smith and Eisner (2006;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 528, |
|
"end": 552, |
|
"text": "Burkett and Klein (2008)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To simultaneously model both parses and align-ments, our model loosely couples three separate combinatorial structures: monolingual trees in the source and target languages, and a synchronous ITG alignment that links the two languages (but is not constrained to match linguistic syntax). The model has no hard constraints on how these three structures must align, but instead contains a set of \"synchronization\" features that are used to propagate influence between the three component grammars. The presence of synchronization features couples the parses and alignments, but makes exact inference in the model intractable; we show how to use a variational mean field approximation, both for computing approximate feature expectations during training, and for performing approximate joint inference at test time.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We train our joint model on the parallel, gold word-aligned portion of the Chinese treebank. When evaluated on parsing and word alignment, this model significantly improves over independentlytrained baselines: the monolingual parser of Petrov and Klein (2007) and the discriminative word aligner of Haghighi et al. (2009) . It also improves over the discriminative, bilingual parsing model of Burkett and Klein (2008) , yielding the highest joint parsing F 1 numbers on this data set. Finally, our model improves word alignment in the context of translation, leading to a 1.2 BLEU increase over using HMM word alignments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 236, |
|
"end": 259, |
|
"text": "Petrov and Klein (2007)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 299, |
|
"end": 321, |
|
"text": "Haghighi et al. (2009)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 393, |
|
"end": 417, |
|
"text": "Burkett and Klein (2008)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Given a source-language sentence, s, and a targetlanguage sentence, s , we wish to predict a source tree t, a target tree t , and some kind of alignment a between them. These structures are illustrated in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 205, |
|
"end": 213, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Joint Parsing and Alignment", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To facilitate these predictions, we define a conditional distribution P(t, a, t |s, s ). We begin with a generic conditional exponential form:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Parsing and Alignment", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "P(t, a, t |s, s ) \u221d exp \u03b8 \u03c6(t, a, t , s, s ) (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Parsing and Alignment", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Unfortunately, a generic model of this form is intractable, because we cannot efficiently sum over all triples (t, a, t ) without some assumptions about how the features \u03c6(t, a, t , s, s ) decompose.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Parsing and Alignment", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "One natural solution is to restrict our candidate triples to those given by a synchronous context free grammar (SCFG) (Shieber and Schabes, 1990) . Figure 1(a) gives a simple example of generation from a log-linearly parameterized synchronous grammar, together with its features. With the SCFG restriction, we can sum over the necessary structures using the O(n 6 ) bitext inside-outside algorithm, making P(t, a, t |s, s ) relatively efficient to compute expectations under.", |
|
"cite_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 145, |
|
"text": "(Shieber and Schabes, 1990)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 148, |
|
"end": 159, |
|
"text": "Figure 1(a)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Parsing and Alignment", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Unfortunately, an SCFG requires that all the constituents of each tree, from the root down to the words, are generated perfectly in tandem. The resulting inability to model any level of syntactic divergence prevents accurate modeling of the individual monolingual trees. We will consider the running example from Figure 2 throughout the paper. Here, for instance, the verb phrase established in such places as Quanzhou, Zhangzhou, etc. in English does not correspond to any single node in the Chinese tree. A synchronous grammar has no choice but to analyze this sentence incorrectly, either by ignoring this verb phrase in English or postulating an incorrect Chinese constituent that corresponds to it.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 313, |
|
"end": 321, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Joint Parsing and Alignment", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Therefore, instead of requiring strict synchronization, our model treats the two monolingual trees and the alignment as separate objects that can vary arbitrarily. However, the model rewards synchronization appropriately when the alignment brings the trees into correspondence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Parsing and Alignment", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We propose a joint model which still gives probabilities on triples (t, a, t ). However, instead of using SCFG rules to synchronously enforce the tree constraints on t and t , we only require that each of t and t be well-formed under separate monolingual CFGs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Weakly Synchronized Grammars", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In order to permit efficient enumeration of all possible alignments a, we also restrict a to the set of unlabeled ITG bitrees (Wu, 1997) , though again we do not require that a relate to t or t in any particular way. Although this assumption does limit the space of possible word-level alignments, for the domain we consider (Chinese-English word alignment), the reduced space still contains almost all empirically observed alignments (Haghighi et al., 2009 ", |
|
"cite_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 136, |
|
"text": "(Wu, 1997)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 435, |
|
"end": 457, |
|
"text": "(Haghighi et al., 2009", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Weakly Synchronized Grammars", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Features !( (IP, b0, S), s, s' ) !( (NP, b1, NP), s, s' ) !( (VP, b2, VP), s, s' ) NP VP S NP IP b0 b1 b2 VP AP Features Features (IP, s) (b0, s, s') (NP, s) (b1, s, s') (VP, s) (b2, s, s') (S, s') (IP, b0) (NP, s') (b0, S) (AP, s') (b1, NP) (VP, s') (IP, b0, S) Parsing Alignment Synchronization \u03c6 E \u03c6 E \u03c6 E \u03c6 E \u03c6 F \u03c6 F \u03c6 F \u03c6 A \u03c6 A \u03c6 A \u03c6 \u03c6 \u03c6 \u03c6 (a) Synchronous Rule (b) Asynchronous Rule", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Weakly Synchronized Grammars", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Figure 1: Source trees, t (right), alignments, a (grid), and target trees, t (top), and feature decompositions for synchronous (a) and weakly synchronous (b) grammars. Features always condition on bispans and/or anchored syntactic productions, but weakly synchronous grammars permit more general decompositions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Weakly Synchronized Grammars", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "example, in Figure 2 , the word alignment is ITGderivable, and each of the colored rectangles is a bispan in that derivation. There are no additional constraints beyond the independent, internal structural constraints on t, a, and t . This decoupling permits derivations like that in Figure 1(b) , where the top-level syntactic nodes align, but their children are allowed to diverge. With the three structures separated, our first model is a completely factored decomposition of (1).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 20, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 284, |
|
"end": 295, |
|
"text": "Figure 1(b)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Weakly Synchronized Grammars", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Formally, we represent a source tree t as a set of nodes {n}, each node representing a labeled span. Likewise, a target tree t is a set of nodes {n }. 2 We represent alignments a as sets of bispans {b}, indicated by rectangles in Figure 1 . 3 Using this notation, the initial model has the following form:", |
|
"cite_spans": [ |
|
{ |
|
"start": 151, |
|
"end": 152, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 230, |
|
"end": 238, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Weakly Synchronized Grammars", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "P(t, a, t |s, s ) \u221d exp \uf8ee \uf8f0 n\u2208t \u03b8 \u03c6 F (n, s)+ b\u2208a \u03b8 \u03c6 A (b, s, s )+ n \u2208t \u03b8 \u03c6 E (n , s ) \uf8f9 \uf8fb (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Weakly Synchronized Grammars", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Here \u03c6 F (n, s) indicates a vector of source node features, \u03c6 E (n , s ) is a vector of target node features, and \u03c6 A (b, s, s ) is a vector of alignment bispan features. Of course, this model is completely asyn-chronous so far, and fails to couple the trees and alignments at all. To permit soft constraints between the three structures we are modeling, we add a set of synchronization features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Weakly Synchronized Grammars", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "For n \u2208 t and b \u2208 a, we say that n \u00a3 b if n and b both map onto the same span of s. We define b \u00a1 n analogously for n \u2208 t . We now consider three different types of synchronization features. Sourcealignment synchronization features \u03c6 \u00a3 (n, b) are extracted whenever n \u00a3 b. Similarly, target-alignment features \u03c6 \u00a1 (b, n ) are extracted if b \u00a1 n . These features capture phenomena like that of bispan b 7 in Figure 2 . Here the Chinese noun \u5730 synchronizes with the ITG derivation, but the English projection of b 7 is a distituent. Finally, we extract source-target features \u03c6 (n, b, n ) whenever n\u00a3b\u00a1n . These features capture complete bispan synchrony (as in bispan b 8 ) and can be expressed over triples (n, b, n ) which happen to align, allowing us to reward synchrony, but not requiring it. All of these licensing conditions are illustrated in Figure 1 ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 407, |
|
"end": 415, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 849, |
|
"end": 857, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Weakly Synchronized Grammars", |
|
"sec_num": "3" |
|
}, |
|
|
{ |
|
"text": "With these features added, the final form of the model is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Weakly Synchronized Grammars", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "P(t, a, t |s, s ) \u221d exp \uf8ee \uf8f0 n\u2208t \u03b8 \u03c6 F (n, s)+ b\u2208a \u03b8 \u03c6 A (b, s, s )+ n \u2208t \u03b8 \u03c6 E (n , s )+ n\u00a3b \u03b8 \u03c6 \u00a3 (n, b)+ b\u00a1n \u03b8 \u03c6 \u00a1 (b, n )+ n\u00a3b\u00a1n \u03b8 \u03c6 (n, b, n ) \uf8f9 \uf8fb (3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Weakly Synchronized Grammars", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We emphasize that because of the synchronization features, this final form does not admit any known efficient dynamic programming for the exact computation of expectations. We will therefore turn to a variational inference method in Section 6.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Weakly Synchronized Grammars", |
|
"sec_num": "3" |
|
}, |
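
{

"text": "A minimal sketch of the scoring function in equation (3), assuming nodes are (label, i, j) triples over half-open spans, bispans are (i, j, k, l) tuples, and the feature functions return sparse dicts from feature name to value; this is an illustration of the decomposition, not the authors' implementation:\n\nfrom itertools import product\n\ndef dot(theta, feats):\n    # theta and feats are sparse dicts: feature name -> weight / value\n    return sum(theta.get(f, 0.0) * v for f, v in feats.items())\n\ndef src_sync(n, b):\n    # n \u25c1 b: node and bispan cover the same span of s\n    return (n[1], n[2]) == (b[0], b[1])\n\ndef tgt_sync(b, m):\n    # b \u25b7 n': bispan and target node cover the same span of s'\n    return (m[1], m[2]) == (b[2], b[3])\n\ndef log_score(theta, t, a, t2, phi_F, phi_A, phi_E, phi_src, phi_tgt, phi_both):\n    # unnormalized log score of (t, a, t'): monolingual terms, alignment\n    # terms, and synchronization terms that fire only when spans coincide\n    s = sum(dot(theta, phi_F(n)) for n in t)\n    s += sum(dot(theta, phi_A(b)) for b in a)\n    s += sum(dot(theta, phi_E(m)) for m in t2)\n    for n, b in product(t, a):\n        if src_sync(n, b):\n            s += dot(theta, phi_src(n, b))\n            s += sum(dot(theta, phi_both(n, b, m)) for m in t2 if tgt_sync(b, m))\n    for b, m in product(a, t2):\n        if tgt_sync(b, m):\n            s += dot(theta, phi_tgt(b, m))\n    return s",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Weakly Synchronized Grammars",

"sec_num": "3"

},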
|
{ |
|
"text": "With the model's locality structure defined, we just need to specify the actual feature function, \u03c6. We divide the features into three types: parsing features (\u03c6 F (n, s) and \u03c6 E (n , s )), alignment features (\u03c6 A (b, s, s )) and synchronization features (\u03c6 \u00a3 (n, b), \u03c6 \u00a1 (b, n ), and \u03c6 (n, b, n )). We detail each of these in turn here.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The monolingual parsing features we use are simply parsing model scores under the parser of Petrov and Klein (2007) . While that parser uses heavily refined PCFGs with rule probabilities defined at the refined symbol level, we interact with its posterior distribution via posterior marginal probabilities over unrefined symbols. In particular, to each unrefined anchored production i A j \u2192 i B k C j , we associate a single feature whose value is the marginal quantity ", |
|
"cite_spans": [ |
|
{ |
|
"start": 92, |
|
"end": 115, |
|
"text": "Petrov and Klein (2007)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parsing", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "log P( i B k C j | i A j ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parsing", |
|
"sec_num": "4.1" |
|
}, |
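
{

"text": "As a sketch, the parsing feature for one anchored production could be computed as below, where marginals is a hypothetical dict mapping anchored productions to their posterior marginals under the monolingual parser:\n\nimport math\n\ndef production_feature(marginals, parent, left_child, right_child):\n    # single feature for anchored production iAj -> iBk kCj: its log\n    # posterior marginal under the (unrefined) monolingual parser\n    p = marginals.get((parent, left_child, right_child), 0.0)\n    return math.log(p) if p > 0.0 else float('-inf')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Parsing",

"sec_num": "4.1"

},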
|
{ |
|
"text": "We begin with the same set of alignment features as Haghighi et al. (2009) , which are defined only for terminal bispans. In addition, we include features on nonterminal bispans, including a bias feature, features that measure the difference in size between the source and target spans, features that measure the difference in relative sentence position between the source and target spans, and features that measure the density of word-to-word alignment posteriors under a separate unsupervised word alignment model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 74, |
|
"text": "Haghighi et al. (2009)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Alignment", |
|
"sec_num": "4.2" |
|
}, |
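
{

"text": "As an illustration, a few of the nonterminal-bispan features might be computed as below; the feature names and exact definitions are assumptions for the example, and the alignment-density features (which need posteriors from the external unsupervised aligner) are omitted:\n\ndef nonterminal_bispan_features(b, src_len, tgt_len):\n    # b = (i, j, k, l): source span [i, j) aligned to target span [k, l)\n    i, j, k, l = b\n    return {\n        'bias': 1.0,\n        'size_difference': abs((j - i) - (l - k)),\n        'relative_position_difference': abs(\n            (i + j) / (2.0 * src_len) - (k + l) / (2.0 * tgt_len)),\n    }",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Alignment",

"sec_num": "4.2"

},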
|
{ |
|
"text": "Our synchronization features are indicators for the syntactic types of the participating nodes. We determine types at both a coarse (more collapsed than Treebank symbols) and fine (Treebank symbol) level. At the coarse level, we distinguish between phrasal nodes (e.g. S, NP), synthetic nodes introduced in the process of binarizing the grammar (e.g. S , NP ), and part-of-speech nodes (e.g. NN, VBZ) . At the fine level, we distinguish all nodes by their exact label. We use coarse and fine types for both partially synchronized (source-alignment or target-alignment) features and completely synchronized (source-alignment-target) features. The inset of Figure 2 shows some sample features. Of course, we could devise even more sophisticated features by using the input text itself. As we shall see, however, our model gives significant improvements with these simple features alone.", |
|
"cite_spans": [ |
|
{ |
|
"start": 392, |
|
"end": 400, |
|
"text": "NN, VBZ)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 655, |
|
"end": 663, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Synchronization", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We learn the parameters of our model on the parallel portion of the Chinese treebank. Although our model assigns probabilities to entire synchronous derivations of sentences, the parallel Chinese treebank gives alignments only at the word level (1 by 1 bispans in Figure 2 ). This means that our alignment variable a is not fully observed. Because of this, given a particular word alignment w, we maximize the marginal probability of the set of derivations A(w) that are consistent with w (Haghighi et al., 2009) . 5", |
|
"cite_spans": [ |
|
{ |
|
"start": 489, |
|
"end": 512, |
|
"text": "(Haghighi et al., 2009)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 264, |
|
"end": 272, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Learning", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "L(\u03b8) = log a\u2208A(w i ) P(t i , a, t i |s i , s i )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We maximize this objective using standard gradient methods (Nocedal and Wright, 1999) . As with fully visible log-linear models, the gradient for the i th sentence pair with respect to \u03b8 is a difference of feature expectations: 3) on these bispans. On this example, the monolingual English parser erroneously attached the lower PP to the VP headed by established, and the non-syntactic ITG word aligner misaligned \u7b49 to such instead of to etc. Our joint model corrected both of these mistakes because it was rewarded for the synchronization of the two NPs joined by b 8 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 85, |
|
"text": "(Nocedal and Wright, 1999)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2207L(\u03b8) = E P(a|t i ,w i ,t i ,s i ,s i ) \u03c6(t i , a, t i , s i , s i ) \u2212 E P(t,a,t |s i ,s i ) \u03c6(t, a, t , s i , s i )", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Learning", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We cannot efficiently compute the model expectations in this equation exactly. Therefore we turn next to an approximate inference method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning", |
|
"sec_num": "5" |
|
}, |
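
{

"text": "A sketch of the resulting update for one sentence pair, assuming two hypothetical helpers that return sparse dicts of expected feature counts (in our setting both are computed with the mean field approximation of Section 6):\n\ndef gradient_i(theta, example, clamped_expectations, model_expectations):\n    # equation (4): E[phi | gold trees and word alignment] - E[phi | model]\n    gold = clamped_expectations(theta, example)\n    free = model_expectations(theta, example)\n    return {f: gold.get(f, 0.0) - free.get(f, 0.0)\n            for f in set(gold) | set(free)}\n\ndef ascent_step(theta, grad, lr=0.1):\n    # simple gradient ascent on L(theta); the paper uses standard\n    # gradient methods (Nocedal and Wright, 1999)\n    for f, g in grad.items():\n        theta[f] = theta.get(f, 0.0) + lr * g",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Learning",

"sec_num": "5"

},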
|
{ |
|
"text": "Instead of computing the model expectations from (4), we compute the expectations for each sentence pair with respect to a simpler, fully factored distribution Q(t, a, t ) = q(t)q(a)q(t ). Rewriting Q in log-linear form, we have:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mean Field Inference", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Q(t, a, t ) \u221d exp \uf8ee \uf8f0 n\u2208t \u03c8 n + b\u2208a \u03c8 b + n \u2208t \u03c8 n \uf8f9 \uf8fb", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mean Field Inference", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Here, the \u03c8 n , \u03c8 b and \u03c8 n are variational parameters which we set to best approximate our weakly synchronized model from (3):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mean Field Inference", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "\u03c8 * = argmin \u03c8 KL Q \u03c8 ||P \u03b8 (t, a, t |s, s )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mean Field Inference", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Once we have found Q, we compute an approximate gradient by replacing the model expectations with expectations under Q:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mean Field Inference", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "E Q(a|w i ) \u03c6(t i , a, t i , s i , s i ) \u2212 E Q(t,a,t |s i ,s i ) \u03c6(t, a, t , s i , s i )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mean Field Inference", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Now, we will briefly describe how we compute Q. First, note that the parameters \u03c8 of Q factor along individual source nodes, target nodes, and bispans. The combination of the KL objective and our particular factored form of Q make our inference procedure a structured mean field algorithm (Saul and Jordan, 1996) . Structured mean field techniques are well-studied in graphical models, and our adaptation in this section to multiple grammars follows standard techniques (see e.g. Wainwright and Jordan, 2008). Rather than derive the mean field updates for \u03c8, we describe the algorithm (shown in Figure 3 ) procedurally. Similar to block Gibbs sampling, we iteratively optimize each component (source parse, target parse, and alignment) of the model in turn, conditioned on the others. Where block Gibbs sampling conditions on fixed trees or ITG derivations, our mean field algorithm maintains uncertainty in Input:", |
|
"cite_spans": [ |
|
{ |
|
"start": 289, |
|
"end": 312, |
|
"text": "(Saul and Jordan, 1996)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 595, |
|
"end": 603, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Mean Field Inference", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "sentence pair (s, s ) parameter vector \u03b8 Output: variational parameters \u03c8 1. Initialize \u03c8 0 n \u2190 \u03b8 \u03c6F (n, s) \u03c8 0 b \u2190 \u03b8 \u03c6A(b, s, s ) \u03c8 0 n \u2190 \u03b8 \u03c6E (n , s ) \u00b5 0 n \u2190 t q \u03c8 0 (t)I(n \u2208 t), etc for \u00b5 0 b , \u00b5 0 n 2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mean Field Inference", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "While not converged, for each n, n , b in the monolingual and ITG charts the form of monolingual parse forests or ITG forests. The key components to this uncertainty are the expected counts of particular source nodes, target nodes, and bispans under the mean field distribution:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mean Field Inference", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "\u03c8 i n \u2190 \u03b8 \u03c6F (n, s) + b,n\u00a3b \u00b5 i\u22121 b \u03c6\u00a3(n, b)+ b,n\u00a3b n ,b\u00a1n \u00b5 i\u22121 b \u00b5 i\u22121 n \u03c6 (n, b, n ) \u00b5 i n \u2190 t q \u03c8 (t)I(n \u2208 t) (inside-outside) \u03c8 i b \u2190 \u03b8 \u03c6A(b, s, s ) + n,n\u00a3b \u00b5 i\u22121 n \u03c6\u00a3(n, b)+ n ,b\u00a1n \u00b5 i\u22121 n \u03c6\u00a1(b, n )+ n,n\u00a3b n ,b\u00a1n \u00b5 i\u22121 n \u00b5 i\u22121 n \u03c6 (n, b, n ) \u00b5 b \u2190 a q \u03c8 (a)I(b \u2208 a) (bitext inside-outside) updates for \u03c8 i n , \u00b5 i n analogous to \u03c8 i n , \u00b5 i n 3. Return variational parameters \u03c8", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mean Field Inference", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "\u00b5 n = t q \u03c8 (t)I(n \u2208 t) \u00b5 n = t q \u03c8 (t )I(n \u2208 t ) \u00b5 b = a q \u03c8 (a)I(b \u2208 a)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mean Field Inference", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Since dynamic programs exist for summing over each of the individual factors, these expectations can be computed in polynomial time.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Mean Field Inference", |
|
"sec_num": "6" |
|
}, |
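
{

"text": "A sketch of the Figure 3 loop, assuming a caller-supplied run_inside_outside(psi, chart) that runs the appropriate (bitext) inside-outside pass over the named chart and returns expected counts mu; the phi bundle and the span encodings are illustrative assumptions, not the authors' code:\n\ndef mean_field(theta, t_nodes, t2_nodes, bispans, phi, run_inside_outside, iters=10):\n    dot = lambda fv: sum(theta.get(f, 0.0) * v for f, v in fv.items())\n    sl = lambda n, b: (n[1], n[2]) == (b[0], b[1])  # n \u25c1 b\n    sr = lambda b, m: (m[1], m[2]) == (b[2], b[3])  # b \u25b7 n'\n    psi = {'src': {n: dot(phi.F(n)) for n in t_nodes},\n           'itg': {b: dot(phi.A(b)) for b in bispans},\n           'tgt': {m: dot(phi.E(m)) for m in t2_nodes}}\n    mu = {c: run_inside_outside(psi[c], c) for c in psi}\n    for _ in range(iters):\n        for n in t_nodes:  # source potentials given current mu\n            psi['src'][n] = dot(phi.F(n)) + sum(\n                mu['itg'][b] * (dot(phi.SRC(n, b)) + sum(\n                    mu['tgt'][m] * dot(phi.BOTH(n, b, m))\n                    for m in t2_nodes if sr(b, m)))\n                for b in bispans if sl(n, b))\n        mu['src'] = run_inside_outside(psi['src'], 'src')\n        for b in bispans:  # alignment potentials\n            psi['itg'][b] = (dot(phi.A(b))\n                + sum(mu['src'][n] * dot(phi.SRC(n, b)) for n in t_nodes if sl(n, b))\n                + sum(mu['tgt'][m] * dot(phi.TGT(b, m)) for m in t2_nodes if sr(b, m))\n                + sum(mu['src'][n] * mu['tgt'][m] * dot(phi.BOTH(n, b, m))\n                      for n in t_nodes if sl(n, b) for m in t2_nodes if sr(b, m)))\n        mu['itg'] = run_inside_outside(psi['itg'], 'itg')\n        for m in t2_nodes:  # target potentials, mirroring the source side\n            psi['tgt'][m] = dot(phi.E(m)) + sum(\n                mu['itg'][b] * (dot(phi.TGT(b, m)) + sum(\n                    mu['src'][n] * dot(phi.BOTH(n, b, m))\n                    for n in t_nodes if sl(n, b)))\n                for b in bispans if sr(b, m))\n        mu['tgt'] = run_inside_outside(psi['tgt'], 'tgt')\n    return psi",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Mean Field Inference",

"sec_num": "6"

},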
|
{ |
|
"text": "Although we can approximate the expectations from (4) in polynomial time using our mean field distribution, in practice we must still prune the ITG forests and monolingual parse forests to allow tractable inference. We prune our ITG forests using the same basic idea as Haghighi et al. (2009) , but we employ a technique that allows us to be more aggressive. Where Haghighi et al. (2009) pruned bispans based on how many unsupervised HMM alignments were violated, we first train a maximum-matching word aligner (Taskar et al., 2005 ) using our supervised data set, which has only half the precision errors of the unsupervised HMM. We then prune every bispan which violates at least three alignments from the maximum-matching aligner. When compared to pruning the bitext forest of our model with Haghighi et al. (2009) 's HMM technique, this new technique allows us to maintain the same level of accuracy while cutting the number of bispans in half. In addition to pruning the bitext forests, we also prune the syntactic parse forests using the monolingual parsing model scores. For each unrefined anchored production i A j \u2192 i B k C j , we compute the marginal probability P( i A j , i B k , k C j |s) under the monolingual parser (these are equivalent to the maxrule scores from Petrov and Klein 2007) . We only include productions where this probability is greater than 10 \u221220 . Note that at training time, we are not guaranteed that the gold trees will be included in the pruned forest. Because of this, we replace the gold trees t i , t i with oracle trees from the pruned forest, which can be found efficiently using a variant of the inside algorithm (Huang, 2008) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 270, |
|
"end": 292, |
|
"text": "Haghighi et al. (2009)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 365, |
|
"end": 387, |
|
"text": "Haghighi et al. (2009)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 511, |
|
"end": 531, |
|
"text": "(Taskar et al., 2005", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 795, |
|
"end": 817, |
|
"text": "Haghighi et al. (2009)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1280, |
|
"end": 1302, |
|
"text": "Petrov and Klein 2007)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 1656, |
|
"end": 1669, |
|
"text": "(Huang, 2008)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pruning", |
|
"sec_num": "6.1" |
|
}, |
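
{

"text": "A sketch of both pruning steps, under the assumption that a bispan 'violates' a word link from the maximum-matching aligner when it covers the link's source word or target word but not both:\n\ndef prune_bispans(bispans, matching_links, max_violations=3):\n    # keep bispan (i, j, k, l) only if it violates fewer than\n    # max_violations links (s_idx, t_idx) from the matching aligner\n    def violations(b):\n        i, j, k, l = b\n        return sum(1 for s_idx, t_idx in matching_links\n                   if (i <= s_idx < j) != (k <= t_idx < l))\n    return [b for b in bispans if violations(b) < max_violations]\n\ndef prune_productions(productions, marginals, floor=1e-20):\n    # keep anchored productions whose posterior marginal under the\n    # monolingual parser exceeds the floor (10^-20 in Section 6.1)\n    return [p for p in productions if marginals.get(p, 0.0) > floor]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pruning",

"sec_num": "6.1"

},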
|
{ |
|
"text": "Once the model has been trained, we still need to determine how to use it to predict parses and word alignments for our test sentence pairs. Ideally, given the sentence pair (s, s ), we would find:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Testing", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "(t * , w * , t * ) = argmax t,w,t P(t, w, t |s, s ) = argmax t,w,t a\u2208A(w) P(t, a, t |s, s )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Testing", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Of course, this is also intractable, so we once again resort to our mean field approximation. This yields the approximate solution:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Testing", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "(t * , w * , t * ) = argmax t,w,t a\u2208A(w) Q(t, a, t )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Testing", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "However, recall that Q incorporates the model's mutual constraint into the variational parameters, which factor into q(t), q(a), and q(t ). This allows us to simplify further, and find the maximum a posteriori assignments under the variational distribution. The trees can be found quickly using the Viterbi inside algorithm on their respective qs. However, the sum for computing w * under q is still intractable. As we cannot find the maximum probability word alignment, we provide two alternative approaches for finding w * . The first is to just find the Viterbi ITG derivation a * = argmax a q(a) and then set w * to contain exactly the 1x1 bispans in a * . The second method, posterior thresholding, is to compute posterior marginal probabilities under q for each 1x1 cell beginning at position i, j in the word alignment grid:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Testing", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "m(i, j) = a q(a)I((i, i + 1, j, j + 1) \u2208 a)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Testing", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We then include w(i, j) in w * if m(w(i, j)) > \u03c4 , where \u03c4 is a threshold chosen to trade off precision and recall. For our experiments, we found that the Viterbi alignment was uniformly worse than posterior thresholding. All the results from the next section use the threshold \u03c4 = 0.25.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Testing", |
|
"sec_num": "7" |
|
}, |
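
{

"text": "A sketch of the posterior-thresholding decoder, assuming the 1x1 bispan marginals under q have already been collected (by bitext inside-outside) into a dict:\n\ndef threshold_alignment(marginals, src_len, tgt_len, tau=0.25):\n    # marginals maps 1x1 bispans (i, i+1, j, j+1) to their mass m(i, j)\n    # under q(a); keep word link (i, j) whenever m(i, j) > tau\n    return {(i, j)\n            for i in range(src_len)\n            for j in range(tgt_len)\n            if marginals.get((i, i + 1, j, j + 1), 0.0) > tau}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Testing",

"sec_num": "7"

},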
|
{ |
|
"text": "We trained and tested our model on the translated portion of the Chinese treebank (Bies et al., 2007) , which includes hand annotated Chinese and English parses and word alignments. We separated the data into three sets: train, dev, and test, according to the standard Chinese treebank split. To speed up training, we only used training sentences of length \u2264 50 words, which left us with 1974 of 2261 sentences. We measured the results in two ways. First, we directly measured F 1 for English parsing, Chinese parsing, and word alignment on a held out section of the hand annotated corpus used to train the model. Next, we further evaluated the quality of the word alignments produced by our model by using them as input for a machine translation system.", |
|
"cite_spans": [ |
|
{ |
|
"start": 82, |
|
"end": 101, |
|
"text": "(Bies et al., 2007)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "The Chinese treebank gold word alignments include significantly more many-to-many word alignments than those used by Haghighi et al. (2009) . We are able to produce some of these many-to-many alignments by including new many-to-many terminals in our ITG word aligner, as shown in Figure 4 . Our terminal productions sometimes capture non-literal translation like both sides or in recent years. They also can allow us to capture particular, systematic changes in the annotation standard. For example, the gapped pattern from Figure 4 captures the standard that English word the is always aligned to the Chinese head noun in a noun phrase. We featurize these non-terminals with features similar to those of Haghighi et al. (2009) , and all of the alignment results we report in Section 8.2 (both joint and ITG) employ these features.", |
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 139, |
|
"text": "Haghighi et al. (2009)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 705, |
|
"end": 727, |
|
"text": "Haghighi et al. (2009)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 280, |
|
"end": 288, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 524, |
|
"end": 532, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset-specific ITG Terminals", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "To compute features that depend on external models, we needed to train an unsupervised word aligner and monolingual English and Chinese parsers. The unsupervised word aligner was a pair of jointly trained HMMs (Liang et al., 2006) , trained on the FBIS corpus. We used the Berkeley Parser (Petrov and Klein, 2007) for both monolingual parsers, with the Chinese parser trained on the full Chinese treebank, and the English parser trained on a concatenation of the Penn WSJ corpus (Marcus et al., 1993) and the English side of train. 6 We compare our parsing results to the monolingual parsing models and to the English-Chinese bilingual reranker of Burkett and Klein (2008) , trained on the same dataset. The results are in Table 1 . For word alignment, we compare to 6 To avoid overlap in the data used to train the monolingual parsers and the joint model, at training time, we used a separate version of the Chinese parser, trained only on articles 400-1151 (omitting articles in train). For English parsing, we deemed it insufficient to entirely omit the Chinese treebank data from the monolingual parser's training set, as otherwise the monolingual parser would be trained entirely on out-of-domain data. Therefore, at training time we used two separate English parsers: to compute model scores for the first half of train, we used a parser trained on a concatenation of the WSJ corpus and the second half of train, and vice versa for the remaining sentences. the baseline unsupervised HMM word aligner and to the English-Chinese ITG-based word aligner of Haghighi et al. (2009) . The results are in Table 2 . As can be seen, our model makes substantial improvements over the independent models. For parsing, we improve absolute F 1 over the monolingual parsers by 2.1 in Chinese, and by 3.3 in English. For word alignment, we improve absolute F 1 by 5.5 over the non-syntactic ITG word aligner. In addition, our English parsing results are better than those of the Burkett and Klein (2008) bilingual reranker, the current top-performing English-Chinese bilingual parser, despite ours using a much simpler set of synchronization features.", |
|
"cite_spans": [ |
|
{ |
|
"start": 210, |
|
"end": 230, |
|
"text": "(Liang et al., 2006)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 289, |
|
"end": 313, |
|
"text": "(Petrov and Klein, 2007)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 479, |
|
"end": 500, |
|
"text": "(Marcus et al., 1993)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 532, |
|
"end": 533, |
|
"text": "6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 648, |
|
"end": 672, |
|
"text": "Burkett and Klein (2008)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 767, |
|
"end": 768, |
|
"text": "6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1559, |
|
"end": 1581, |
|
"text": "Haghighi et al. (2009)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1969, |
|
"end": 1993, |
|
"text": "Burkett and Klein (2008)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 723, |
|
"end": 730, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1603, |
|
"end": 1610, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Parsing and Word Alignment", |
|
"sec_num": "8.2" |
|
}, |
|
{ |
|
"text": "We further tested our alignments by using them to train the Joshua machine translation system (Li and Khudanpur, 2008) . on 1000 sentences of the NIST 2004 and 2005 machine translation evaluations, and tested on 400 sentences of the NIST 2006 MT evaluation. Our training set consisted of 250k sentences of newswire distributed with the GALE project, all of which were sub-sampled to have high Ngram overlap with the tune and test sets. All of our sentences were of length at most 40 words. When building the translation grammars, we used Joshua's default \"tight\" phrase extraction option. We ran MERT for 4 iterations, optimizing 20 weight vectors per iteration on a 200-best list. Table 3 gives the results. On the test set, we also ran the approximate randomization test suggested by Riezler and Maxwell (2005) . We found that our joint parsing and alignment system significantly outperformed the HMM aligner, but the improvement over the ITG aligner was not statistically significant.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 118, |
|
"text": "(Li and Khudanpur, 2008)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 786, |
|
"end": 812, |
|
"text": "Riezler and Maxwell (2005)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 682, |
|
"end": 689, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Machine Translation", |
|
"sec_num": "8.3" |
|
}, |
|
{ |
|
"text": "The quality of statistical machine translation models depends crucially on the quality of word alignments and syntactic parses for the bilingual training corpus. Our work presented the first joint model for parsing and alignment, demonstrating that we can improve results on both of these tasks, as well as on downstream machine translation, by allowing parsers and word aligners to simultaneously inform one another. Crucial to this improved performance is a notion of weak synchronization, which allows our model to learn when pieces of a grammar are synchronized and when they are not. Although exact inference in the weakly synchronized model is intractable, we developed a mean field approximate inference scheme based on monolingual and bitext parsing, allowing for efficient inference.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "See Section 8.1 for some new terminal productions required to make this true for the parallel Chinese treebank.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For expositional clarity, we describe n and n as labeled spans only. However, in general, features that depend on n or n are permitted to depend on the entire rule, and do in our final system.3 Alignments a link arbitrary spans of s and s (including non-constituents and individual words). We discuss the relation to word-level alignments in Section 4.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Of course the structure of our model permits any of the additional rule-factored monolingual parsing features that have been described in the parsing literature, but in the present work we focus on the contributions of joint modeling.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We also learn from non-ITG alignments by maximizing the marginal probability of the set of minimum-recall error alignments in the same way asHaghighi et al. (2009)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank Adam Pauls and John DeNero for their help in running machine translation experiments. We also thank the three anonymous reviewers for their helpful comments on an earlier draft of this paper. This project is funded in part by NSF grants 0915265 and 0643742, an NSF graduate research fellowship, the CIA under grant HM1582-09-1-0021, and BBN under DARPA contract HR0011-06-C-0022.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "English Chinese translation treebank v 1.0. Web download", |
|
"authors": [ |
|
{ |
|
"first": "Ann", |
|
"middle": [], |
|
"last": "Bies", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martha", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Justin", |
|
"middle": [], |
|
"last": "Mott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Warner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2007--2009", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ann Bies, Martha Palmer, Justin Mott, and Colin Warner. 2007. English Chinese translation treebank v 1.0. Web download. LDC2007T02.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Two languages are better than one (for syntactic parsing)", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Burkett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Burkett and Dan Klein. 2008. Two languages are better than one (for syntactic parsing). In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Tailoring word alignments to syntactic machine translation", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Denero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John DeNero and Dan Klein. 2007. Tailoring word alignments to syntactic machine translation. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Using syntax to improve word alignment for syntaxbased statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Victoria", |
|
"middle": [], |
|
"last": "Fossum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Abney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "ACL MT Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victoria Fossum, Kevin Knight, and Steven Abney. 2008. Using syntax to improve word alignment for syntax- based statistical machine translation. In ACL MT Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "What's in a translation rule", |
|
"authors": [ |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Hopkins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "HLT-NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michel Galley, Mark Hopkins, Kevin Knight, and Daniel Marcu. 2004. What's in a translation rule? In HLT- NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Better word alignments with supervised ITG models", |
|
"authors": [ |
|
{ |
|
"first": "Aria", |
|
"middle": [], |
|
"last": "Haghighi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Blitzer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Denero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aria Haghighi, John Blitzer, John DeNero, and Dan Klein. 2009. Better word alignments with supervised ITG models. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Forest reranking: Discriminative parsing with non-local features", |
|
"authors": [ |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liang Huang. 2008. Forest reranking: Discriminative parsing with non-local features. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A scalable decoder for parsing-based machine translation with equivalent language model state maintenance", |
|
"authors": [ |
|
{ |
|
"first": "Zhifei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjeev", |
|
"middle": [], |
|
"last": "Khudanpur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "ACL SSST", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhifei Li and Sanjeev Khudanpur. 2008. A scalable decoder for parsing-based machine translation with equivalent language model state maintenance. In ACL SSST.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Alignment by agreement", |
|
"authors": [ |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Taskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "HLT-NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Percy Liang, Ben Taskar, and Dan Klein. 2006. Align- ment by agreement. In HLT-NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Building a large annotated corpus of English: The Penn Treebank", |
|
"authors": [ |
|
{ |
|
"first": "Mitchell", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Marcus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mary", |
|
"middle": [ |
|
"Ann" |
|
], |
|
"last": "Marcinkiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beatrice", |
|
"middle": [], |
|
"last": "Santorini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Computational Linguistics", |
|
"volume": "19", |
|
"issue": "2", |
|
"pages": "313--330", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mitchell P. Marcus, Mary Ann Marcinkiewicz, and Beat- rice Santorini. 1993. Building a large annotated cor- pus of English: The Penn Treebank. Computational Linguistics, 19(2):313-330.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Probabilistic CFG with latent annotations", |
|
"authors": [ |
|
{ |
|
"first": "Takuya", |
|
"middle": [], |
|
"last": "Matsuzaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yusuki", |
|
"middle": [], |
|
"last": "Miyao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun'ichi", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Takuya Matsuzaki, Yusuki Miyao, and Jun'ichi Tsujii. 2005. Probabilistic CFG with latent annotations. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Syntactic realignment models for machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Jon", |
|
"middle": [], |
|
"last": "May", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jon May and Kevin Knight. 2007. Syntactic re- alignment models for machine translation. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Numerical Optimization", |
|
"authors": [ |
|
{ |
|
"first": "Jorge", |
|
"middle": [], |
|
"last": "Nocedal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Wright", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jorge Nocedal and Stephen J. Wright. 1999. Numerical Optimization. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Improved inference for unlexicalized parsing", |
|
"authors": [ |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "HLT-NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Slav Petrov and Dan Klein. 2007. Improved inference for unlexicalized parsing. In HLT-NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "On some pitfalls in automatic evaluation and significance testing for MT", |
|
"authors": [ |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Riezler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Maxwell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Workshop on Intrinsic and Extrinsic Evaluation Methods for MT and Summarization, ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stefan Riezler and John Maxwell. 2005. On some pit- falls in automatic evaluation and significance testing for MT. In Workshop on Intrinsic and Extrinsic Eval- uation Methods for MT and Summarization, ACL.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Exploiting tractable substructures in intractable networks", |
|
"authors": [ |
|
{ |
|
"first": "Lawrence", |
|
"middle": [], |
|
"last": "Saul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Jordan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1996, |
|
"venue": "NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lawrence Saul and Michael Jordan. 1996. Exploit- ing tractable substructures in intractable networks. In NIPS.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Synchronous tree-adjoining grammars", |
|
"authors": [ |
|
{ |
|
"first": "Stuart", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Shieber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yves", |
|
"middle": [], |
|
"last": "Schabes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stuart M. Shieber and Yves Schabes. 1990. Synchronous tree-adjoining grammars. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Quasisynchronous grammars: Alignment by soft projection of syntactic dependencies", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Eisner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "HLT-NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David A. Smith and Jason Eisner. 2006. Quasi- synchronous grammars: Alignment by soft projection of syntactic dependencies. In HLT-NAACL.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Parser adaptation and projection with quasi-synchronous grammar features", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Eisner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David A. Smith and Jason Eisner. 2009. Parser adapta- tion and projection with quasi-synchronous grammar features. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Bilingual parsing with factored estimation: using English to parse Korean", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David A. Smith and Noah A. Smith. 2004. Bilin- gual parsing with factored estimation: using English to parse Korean. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Unsupervised multilingual grammar induction", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Snyder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tahira", |
|
"middle": [], |
|
"last": "Naseem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Regina", |
|
"middle": [], |
|
"last": "Barzilay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benjamin Snyder, Tahira Naseem, and Regina Barzilay. 2009. Unsupervised multilingual grammar induction. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "A discriminative matching approach to word alignment", |
|
"authors": [ |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Taskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Lacoste-Julien", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ben Taskar, Simon Lacoste-Julien, and Dan Klein. 2005. A discriminative matching approach to word align- ment. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Graphical Models, Exponential Families, and Variational Inference", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Wainwright", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Jordan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin J Wainwright and Michael I Jordan. 2008. Graphical Models, Exponential Families, and Varia- tional Inference. Now Publishers Inc., Hanover, MA, USA.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Stochastic inversion transduction grammars and bilingual parsing of parallel corpora", |
|
"authors": [ |
|
{ |
|
"first": "Dekai", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Computational Linguistics", |
|
"volume": "23", |
|
"issue": "3", |
|
"pages": "377--404", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dekai Wu. 1997. Stochastic inversion transduction grammars and bilingual parsing of parallel corpora. Computational Linguistics, 23(3):377-404.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "The CMU-AKA syntax augmented machine translation system for IWSLT-06", |
|
"authors": [ |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Zollmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Venugopal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Vogel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Waibel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "IWSLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andreas Zollmann, Ashish Venugopal, Stephan Vogel, and Alex Waibel. 2006. The CMU-AKA syntax aug- mented machine translation system for IWSLT-06. In IWSLT.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "s) under the monolingual parser. These scores are the same as the variational rule scores ofMatsuzaki et al. (2005).4" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "An example of a Chinese-English sentence pair with parses, word alignments, and a subset of the full optimal ITG derivation, including one totally unsynchronized bispan (b 4 ), one partially synchronized bispan (b 7 ), and and fully synchronized bispan (b 8 ). The inset provides some examples of active synchronization features (see Section 4." |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Structured mean field inference for the weakly synchronized model. I(n \u2208 t) is an indicator value for the presence of node n in source tree t." |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Examples of phrasal alignments that can be represented by our new ITG terminal bispans." |
|
}, |
|
"TABREF2": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Word alignment results. Our joint model has the highest reported F 1 for English-Chinese word alignment.", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"content": "<table><tr><td/><td>Rules</td><td>Tune</td><td>Test</td></tr><tr><td>HMM</td><td>1.1M</td><td>29.0</td><td>29.4</td></tr><tr><td>ITG</td><td>1.5M</td><td>29.9</td><td>30.4 \u2020</td></tr><tr><td>Joint</td><td>1.5M</td><td>29.6</td><td>30.6</td></tr></table>", |
|
"type_str": "table", |
|
"num": null, |
|
"text": "describes the results of our experiments. For all of the systems, we tuned", |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Tune and test BLEU results for machine translation systems built with different alignment tools. \u2020 indicates a statistically significant difference between a system's test performance and the one above it.", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |