|
{ |
|
"paper_id": "D12-1023", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:23:23.123462Z" |
|
}, |
|
"title": "Minimal Dependency Length in Realization Ranking", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "White", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The Ohio State University Columbus", |
|
"location": { |
|
"region": "OH", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Rajakrishnan", |
|
"middle": [], |
|
"last": "Rajkumar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The Ohio State University Columbus", |
|
"location": { |
|
"region": "OH", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Comprehension and corpus studies have found that the tendency to minimize dependency length has a strong influence on constituent ordering choices. In this paper, we investigate dependency length minimization in the context of discriminative realization ranking, focusing on its potential to eliminate egregious ordering errors as well as better match the distributional characteristics of sentence orderings in news text. We find that with a stateof-the-art, comprehensive realization ranking model, dependency length minimization yields statistically significant improvements in BLEU scores and significantly reduces the number of heavy/light ordering errors. Through distributional analyses, we also show that with simpler ranking models, dependency length minimization can go overboard, too often sacrificing canonical word order to shorten dependencies, while richer models manage to better counterbalance the dependency length minimization preference against (sometimes) competing canonical word order preferences.", |
|
"pdf_parse": { |
|
"paper_id": "D12-1023", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Comprehension and corpus studies have found that the tendency to minimize dependency length has a strong influence on constituent ordering choices. In this paper, we investigate dependency length minimization in the context of discriminative realization ranking, focusing on its potential to eliminate egregious ordering errors as well as better match the distributional characteristics of sentence orderings in news text. We find that with a stateof-the-art, comprehensive realization ranking model, dependency length minimization yields statistically significant improvements in BLEU scores and significantly reduces the number of heavy/light ordering errors. Through distributional analyses, we also show that with simpler ranking models, dependency length minimization can go overboard, too often sacrificing canonical word order to shorten dependencies, while richer models manage to better counterbalance the dependency length minimization preference against (sometimes) competing canonical word order preferences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In this paper, we show that for the constituent ordering problem in surface realization, incorporating insights from the minimal dependency length theory of language production (Temperley, 2007) into a discriminative realization ranking model yields significant improvements upon a state-of-the-art baseline. We demonstrate empirically using OpenCCG, our CCG-based (Steedman, 2000) surface realization system, the utility of a global feature encoding the total dependency length of a given derivation. Although other works in the realization literature have used phrase length or head-dependent distances in their models (Filippova and Strube, 2009; Velldal and Oepen, 2005; White and Rajkumar, 2009, i.a.) , to the best of our knowledge, this paper is the first to use insights from the minimal dependency length theory directly and study their effects, both qualitatively and quantitatively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 177, |
|
"end": 194, |
|
"text": "(Temperley, 2007)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 365, |
|
"end": 381, |
|
"text": "(Steedman, 2000)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 621, |
|
"end": 649, |
|
"text": "(Filippova and Strube, 2009;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 650, |
|
"end": 674, |
|
"text": "Velldal and Oepen, 2005;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 675, |
|
"end": 706, |
|
"text": "White and Rajkumar, 2009, i.a.)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The impetus for this paper was the discovery that despite incorporating a sophisticated syntactic model borrowed from the parsing literatureincluding features with head-dependent distances at various scales- realization ranking model still often performed poorly on weight-related decisions such as when to employ heavy-NP shift. Table 1 illustrates this point. In wsj 0034.9, the full model (incorporating numerous syntactic features) succeeds in reproducing the reference sentence, which is clearly preferable to the rather awkward variant selected by the baseline model (using various n-gram models). However, in wsj 0013.16, the full model fails to shift the temporal modifier for now next to the phrasal verb turned down, leaving it at the end of its very long verb phrase where it is highly ambiguous (with multiple intervening attachment sites). Conversely, in wsj 0044.3, the full model shifts before next to the verb, despite the NP cheating being very light, yielding a very confusing ordering given that before is meant to be intransitive.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 330, |
|
"end": 337, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The syntactic features in realization ranking model are taken from Clark & Curran's (2007) wsj 0044.3 she had seen cheating before , but these notes were uncanny .", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 90, |
|
"text": "Clark & Curran's (2007)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "she had seen before cheating , but these notes were uncanny . models-the first represents a successful case, the latter two egregious ordering errors (Table 3 ; see Section 3). In this model, headdependenct distances are considered in conjunction with lexicalized and unlexicalized CCG derivation steps, thereby appearing in numerous features.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 150, |
|
"end": 158, |
|
"text": "(Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "FULL", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As such, the model takes into account the interaction of dependency length with derivation steps, but in essence does not consider the main effect of dependency length itself. In this light, our investigation of dependency length minimization can be viewed as examining the question of whether realization ranking models can be made more accurate-and in particular, avoid egregious ordering errors-by incorporating a feature to account for the main effect of dependency length. It is important to observe at this point that dependency length minimization is more of a preference than an optimization objective, which must be balanced against other order preferences at times. A closer reading of Temperley's (2007) study reveals that dependency length can sometimes run counter to many canonical word order choices. A case in point is the class of examples involving pre-modifying adjunct sequences that precede both the subject and the verb. Assuming that their parent head is the main verb of the sentence, a longshort sequence would minimize overall dependency length. However, in 613 examples found in the Penn Treebank, the average length of the first adjunct was 3.15 words while the second adjunct was 3.48 words long, thus reflecting a short-long pattern, as illustrated in the Temperley p.c. example in Table 2 . Apart from these, Hawkins (2001) shows that arguments are generally located closer to the verb than adjuncts. Gildea and Temperley (2007) also suggest that adverb placement might involve cases which go against dependency length minimization. An examination of 295 legitimate long-short post-verbal constituent orders (counter to dependency length) from Section 00 of the Penn Treebank revealed that temporal adverb phrases are often involved in long-short orders, as shown in wsj 0075.13 in Table 2 . In our setup, the preference to minimize dependency length can be balanced by features capturing preferences for alternate choices (e.g. 
the argument-adjunct distinction in our dependency ordering model, Table 4 ). Via distributional analyses, we show that while simpler realization ranking models can go overboard in minimizing dependency length, richer models largely succeed in overcoming this issue, while still taking advantage of dependency length minimization to avoid egregious ordering errors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 696, |
|
"end": 714, |
|
"text": "Temperley's (2007)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 1340, |
|
"end": 1354, |
|
"text": "Hawkins (2001)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1432, |
|
"end": 1459, |
|
"text": "Gildea and Temperley (2007)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1312, |
|
"end": 1319, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 1813, |
|
"end": 1820, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 2027, |
|
"end": 2034, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "FULL", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Comprehension and corpus studies (Gibson, 1998; Gibson, 2000; Temperley, 2007) point to the tendency of production and comprehension systems to adhere to principles of dependency length minimization. The idea of dependency length minimization is based on Gibson's (1998) Dependency Locality Theory (DLT) of comprehension, which predicts that longer dependencies are more difficult to process. DLT predictions have been further validated using comprehension studies involving eye-tracking corpora (Demberg and Keller, 2008) . DLT metrics also correlate reasonably well with activation decay over time expressed in computational models of Temperley (p.c.) (Lewis et al., 2006; Lewis and Vasishth, 2005) . Extending these ideas from comprehension, Temperley (2007) poses the question: Does language production reflect a preference for shorter dependencies as well so as to facilitate comprehension? By means of a study of Penn Treebank data, Temperley shows that English sentences do display a tendency to minimize the sum of all their head-dependent distances as illustrated by a variety of constructions. Further, Gildea and Temperley (2007) report that random linearizations have higher dependency lengths compared to actual English, while an \"optimal\" algorithm (from the perspective of dependency length minimization), which places dependents on either sides of a head in order of increasing length, is closer to actual English. Tily (2010) also applies insights from the above cited papers to show that dependency length constitutes a significant pressure towards language change. For head-final languages (e.g., Japanese), dependency length minimization results in the \"long-short\" constituent ordering in language production (Yamashita and Chang, 2001 ). More generally, Hawkins's (1994; processing domains, dependency length minimization and endweight effects in constituent ordering (Wasow and Arnold, 2003) are all very closely related. 
The dependency length hypothesis goes beyond the predictions made by Hawkins' Minimize Domains principle in the case of English clauses with three postverbal adjuncts: Gibson's DLT correctly predicts that the first constituent tends to be shorter than the second, while Hawkins' approach does not make predictions about the relative orders of the first two constituents.", |
|
"cite_spans": [ |
|
{ |
|
"start": 33, |
|
"end": 47, |
|
"text": "(Gibson, 1998;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 48, |
|
"end": 61, |
|
"text": "Gibson, 2000;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 62, |
|
"end": 78, |
|
"text": "Temperley, 2007)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 255, |
|
"end": 270, |
|
"text": "Gibson's (1998)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 496, |
|
"end": 522, |
|
"text": "(Demberg and Keller, 2008)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 637, |
|
"end": 653, |
|
"text": "Temperley (p.c.)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 654, |
|
"end": 674, |
|
"text": "(Lewis et al., 2006;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 675, |
|
"end": 700, |
|
"text": "Lewis and Vasishth, 2005)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1113, |
|
"end": 1140, |
|
"text": "Gildea and Temperley (2007)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1730, |
|
"end": 1756, |
|
"text": "(Yamashita and Chang, 2001", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 1776, |
|
"end": 1792, |
|
"text": "Hawkins's (1994;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1890, |
|
"end": 1914, |
|
"text": "(Wasow and Arnold, 2003)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Minimal Dependency Length", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "However, it would be very reductive to consider dependency length minimization as the sole factor in language production. In fact, a large body of prior work discusses a variety of other factors involved in language production. These other prefer-ences are either correlated with dependency length or can override the minimal dependency length preference. Complexity (Wasow, 2002; Wasow and Arnold, 2003) , animacy (Snider and Zaenen, 2006; Branigan et al., 2008) , information status considerations (Wasow and Arnold, 2003; Arnold et al., 2000) , the argument-adjunct distinction (Hawkins, 2001) and lexical bias (Wasow and Arnold, 2003; Bresnan et al., 2007) are a few prominent factors. More recently, Anttila et al. (2010) argued that the principle of end weight can be revised by calculating weight in prosodic terms to provide more explanatory power. As Temperley (2007) suggests, a satisactory model should combine insights from multiple approaches, a theme which we investigate in this work by means of a rich feature set adapted from the parsing and realization literature. Our feature design has been inspired by the conclusions of the above-cited works pertaining to the role of dependency length minimization in syntactic choice in conjuction with other factors influencing constituent order. However, going beyond Temperley's corpus study, we confirm the utility of incorporating a feature for minimizing dependency length into machine-learned models with hundreds of thousands of features found to be useful in previous parsing and realization work, and investigate the extent to which these features can counterbalance a dependency length minimization preference in cases where canonical word order considerations should prevail.", |
|
"cite_spans": [ |
|
{ |
|
"start": 367, |
|
"end": 380, |
|
"text": "(Wasow, 2002;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 381, |
|
"end": 404, |
|
"text": "Wasow and Arnold, 2003)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 415, |
|
"end": 440, |
|
"text": "(Snider and Zaenen, 2006;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 441, |
|
"end": 463, |
|
"text": "Branigan et al., 2008)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 500, |
|
"end": 524, |
|
"text": "(Wasow and Arnold, 2003;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 525, |
|
"end": 545, |
|
"text": "Arnold et al., 2000)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 581, |
|
"end": 596, |
|
"text": "(Hawkins, 2001)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 614, |
|
"end": 638, |
|
"text": "(Wasow and Arnold, 2003;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 639, |
|
"end": 660, |
|
"text": "Bresnan et al., 2007)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 705, |
|
"end": 726, |
|
"text": "Anttila et al. (2010)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 860, |
|
"end": 876, |
|
"text": "Temperley (2007)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Minimal Dependency Length", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Categorial Grammar (CCG)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Surface Realization with Combinatory", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "We provide here a brief overview of CCG and the OpenCCG realizer; for further details, see the works cited below. CCG (Steedman, 2000) is a unification-based categorial grammar formalism defined almost entirely in terms of lexical entries that encode sub- categorization as well as syntactic features (e.g. number and agreement). OpenCCG is a parsing/generation library which includes a hybrid symbolic-statistical chart realizer (White, 2006; . The input to the OpenCCG realizer is a semantic graph, where each node has a lexical predication and a set of semantic features; nodes are connected via dependency relations. Internally, such graphs are represented using Hybrid Logic Dependency Semantics (HLDS), a dependency-based approach to representing linguistic meaning (Baldridge and Kruijff, 2002) . Alternative realizations are ranked using integrated ngram or averaged perceptron scoring models. In the experiments reported below, the inputs are derived from the gold standard derivations in the CCGbank (Hockenmaier and Steedman, 2007) , and the outputs are the highest-scoring realizations found during the realizer's chart-based search. 1", |
|
"cite_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 134, |
|
"text": "(Steedman, 2000)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 430, |
|
"end": 443, |
|
"text": "(White, 2006;", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 772, |
|
"end": 801, |
|
"text": "(Baldridge and Kruijff, 2002)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1010, |
|
"end": 1042, |
|
"text": "(Hockenmaier and Steedman, 2007)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Surface Realization with Combinatory", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "s dcl \u2192 np s dcl \\np + VBD Word-Word company, s dcl \u2192 np s dcl \\np, bought Word-POS company, s dcl \u2192 np s dcl \\np, VBD POS-Word NN, s dcl \u2192 np s dcl \\np, bought Word + \u2206 w bought, s dcl \u2192 np s dcl \\np + d w POS + \u2206 w VBD, s dcl \u2192 np s dcl \\np + d w Word + \u2206 p bought, s dcl \u2192 np s dcl \\np + d p POS + \u2206 p VBD, s dcl \u2192 np s dcl \\np + d p Word + \u2206 v bought, s dcl \u2192 np s dcl \\np + d v POS + \u2206 v VBD, s dcl \u2192 np s dcl \\np + d v", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Surface Realization with Combinatory", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In the realm of paraphrasing using tree linearization, Kempen and Harbusch (2004) explore features which have later been appropriated into classification approaches for surface realization (Filippova and Strube, 2007) . Prominent features include in-formation status, animacy and phrase length. In the case of ranking models for surface realization, by far the most comprehensive experiments involving linguistically motivated features are reported in work of Cahill for German realization ranking Cahill and Riester, 2009) . Apart from language model and Lexical Functional Grammar (LFG) c-structure and f -structure based features, Cahill also designed and incorporated features modeling information status considerations. The feature sets explored in this paper extend those in previous work on realization ranking with OpenCCG using averaged perceptron models Rajkumar and White, 2010) to include more comprehensive ordering features. The feature classes are listed below, where DEPLEN, HOCKENMAIER and DEPORD are novel, and the rest are as in earlier OpenCCG models. The inclusion of the DE-PORD features is intended to yield a model with a similarly rich set of ordering features as Cahill and Forster's (2009) C&C NF DISTANCE The distance features from the C&C normal form model, where the distance between a head and its dependent is measured in intervening words, punctuation marks or verbs; caps of 3, 3 and 2 (resp.) on the distances have the effect of binning longer distances.", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 81, |
|
"text": "Kempen and Harbusch (2004)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 189, |
|
"end": 217, |
|
"text": "(Filippova and Strube, 2007)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 498, |
|
"end": 523, |
|
"text": "Cahill and Riester, 2009)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 864, |
|
"end": 889, |
|
"text": "Rajkumar and White, 2010)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 1189, |
|
"end": 1216, |
|
"text": "Cahill and Forster's (2009)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Design", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "DEPORD Several classes of features for ordering heads and dependents as well as sibling dependents on the same side of the head. The basic features-using words, POS tags and dependency relations, grouped by the broad POS tag of the head-are shown in We followed the averaged perceptron training procedure of with a couple of updates. First, as noted earlier, we used a reimplementation of Hockenmaier's (2003) generative syntactic model as an extra component of our generative baseline; and second, only five epochs of training were used, which was found to work as well as using additional epochs on the development set. As in the earlier work, the models were trained on the standard training sections (02-21) of an enhanced version of the CCGbank, using a lexico-grammar extracted from these sections. The models tested in the experiments reported below are summarized in Table 5 . The three groups of models are designed to test the impact of the dependency length feature when added to feature sets of increasing complexity. In more detail, the GLOBAL and DEPLEN-GLOBAL models contain dense features on entire derivations; their values are the log probabilities of the three n-gram mod- In the final group, DEPORD-NF contains all the features examined in this paper except the dependency length feature, while DEPLEN contains all the features including the dependency length feature. Note that the learned weight of the total dependency length feature was negative in each case, as expected. Table 6 shows the sizes of the various models. For each model, the alphabet-whose size increases to over a million features-is the set of applicable features found to have discriminative value in at least 5 training examples; from these, a subset are made active (i.e., take on a non-zero weight) through perceptron updates when the feature value differs between the model-best and oracle-best realization.", |
|
"cite_spans": [ |
|
{ |
|
"start": 389, |
|
"end": 409, |
|
"text": "Hockenmaier's (2003)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 875, |
|
"end": 882, |
|
"text": "Table 5", |
|
"ref_id": "TABREF11" |
|
}, |
|
{ |
|
"start": 1497, |
|
"end": 1504, |
|
"text": "Table 6", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Feature Design", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "N Y Y N N N N N DEPLEN-GLOBAL Y Y Y N N N N N DEPORD-NONF N Y Y Y Y N N Y DEPORD-NODIST N Y Y Y Y Y N Y DEPLEN-NODIST Y Y Y Y Y Y N Y DEPORD-NF N Y Y Y Y Y Y Y DEPLEN Y Y Y Y Y Y Y Y", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Design", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Following the usual practice in the realization ranking, we first evaluate our results quantitatively using exact matches and BLEU (Papineni et al., 2002 the new dependency ordering model, as DEPORD-NONF is significantly worse than DEPORD-NODIST (the impact of the distance features is evident in the increases from the second group to the third group).", |
|
"cite_spans": [ |
|
{ |
|
"start": 131, |
|
"end": 153, |
|
"text": "(Papineni et al., 2002", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BLEU Results", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "As with the dev set, the dependency length feature yielded a significant increase in BLEU scores for each comparison on the test set also. For each group, the statistical significance of the difference in BLEU scores between a model and the unmarked model (-) is determined by bootstrap resampling (Koehn, 2004) . 3 Note that although the differences in BLEU scores are small, they end up being statistically significant because the models frequently yield the same top scoring realization, and reliably deliver improvements in the cases where they differ. In particular, note that DEPLEN and DEPORD-NF agree on the best realization 81% of the time, while DEPLEN-NODIST and DEPORD-NODIST have 78.1% agreement, and DEPLEN-GLOBAL and GLOBAL show 77.4% agreement; by comparison, DEPORD-NODIST and GLOBAL only agree on the best realization 51.1% of the time.", |
|
"cite_spans": [ |
|
{ |
|
"start": 298, |
|
"end": 311, |
|
"text": "(Koehn, 2004)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 314, |
|
"end": 315, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BLEU Results", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The effect of the dependency length feature on the distribution of dependency lengths is illustrated in Table 9 : Distribution of various kinds of post-verbal constituents in the development set (Section 00); 4692 gold cases considered pared to the corresponding gold standard derivation, as well as the number of derivations with greater and lower dependency length. According to paired ttests, the mean dependency lengths for the DEPLEN-NODIST and DEPLEN models do not differ significantly from the gold standard. In contrast, the mean dependency length of all the models that do not include the dependency length feature does differ significantly (p < 0.001) from the gold standard. Additionally, all these models have more realizations with dependency length greater than the gold standard, in comparison to the dependency length minimizing models; this shows the efficacy of the dependency length feature in approximating the gold standard. Interestingly, the DEPLEN-GLOBAL model significantly undershoots the gold standard on mean dependency length, and has the most skewed distribution of sentences with greater vs. lesser dependency length than the gold standard. Apart from studying dependency length directly, we also looked at one of the attested effects of dependency length minimization, viz. the tendency to prefer short-long post-verbal constituents in production (Temperley, 2007) . The relative lengths of adjacent post-verbal constituents were computed and their distribution is shown in Table 9 . While calculating length, punctuation marks were excluded. Four kinds of constituents were found in the postverbal domain. For every verb, apart from single constituents and equal length constituents, shortlong and long-short sequences were also observed. Table 9 demonstrates that for both the gold standard corpus as well as the realizer models, short-long constituents were more frequent than long-short or equal length constituents. 
This follows the trend re- perley, 2007; Wasow and Arnold, 2003) . The figures reported here show the tendency of the DEPLEN* models to be closer to the gold standard than the other models, especially in the case of short-long constituents.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1379, |
|
"end": 1396, |
|
"text": "(Temperley, 2007)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 1980, |
|
"end": 1993, |
|
"text": "perley, 2007;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1994, |
|
"end": 2017, |
|
"text": "Wasow and Arnold, 2003)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 111, |
|
"text": "Table 9", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1506, |
|
"end": 1513, |
|
"text": "Table 9", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1772, |
|
"end": 1779, |
|
"text": "Table 9", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Detailed Analyses", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We also performed an analysis of relative constituent lengths focusing on light-heavy and heavylight cases; specifically, we examined unequal length constituent sequences where the length difference of the constituents was greater than 5, and the shorter constituent was under 5 words. Table 10 shows the results. Using a \u03c7-square test, the distribution of heavy unequal length constituent counts in the DEPLEN-NODIST and DEPLEN models does not significantly differ from that of the gold standard. In contrast, for all the other models, the counts do differ significantly from the gold standard. Table 11 shows examples of how the dependency length feature (DEPLEN) affects the output even in comparison to a model (DEPORD) with a rich set of discriminative syntactic and dependency ordering features, but no features directly targeting relative weight. In wsj 0015.7, the dependency length model produces an exact match, while the DEPORD model fails to shift the short temporal adverbial next year next to the verb, leaving a confusingly repetitive this year next year at the end of the sentence. In wsj 0020.1, the dependency length model produces a nearly exact match with just an equally ac-ceptable inversion of closely watching. By contrast, the DEPORD model mistakenly shifts the direct object South Korea, Taiwan and Saudia Arabia to the end of the sentence where it is difficult to understand following two very long intervening phrases. In wsj 0021.8, both models mysteriously put not in front of the auxiliary and leave out the complementizer, but DEPORD also mistakenly leaves before at the end of the verb phrase where it is again apt to be interpreted as modifying the preceding verb. In wsj 0075.13, both models put the temporal modifier on Thursday in its canonical VP-final position, despite this order running counter to dependency length minimization. 
Finally, wsj 0014.2 shows a case where DEPORD is nearly an exact match (except for a missing comma), but the dependency length model fronts the PP on the 12-member board, where it is grammatical but rather marked (and not motivated in the discourse context).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 286, |
|
"end": 294, |
|
"text": "Table 10", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 596, |
|
"end": 604, |
|
"text": "Table 11", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Detailed Analyses", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The experiments show a consistent positive effect of the dependency length feature in improving BLEU scores and achieving a better match with the corpus distributions of dependency length and short/long constituent orders. The results in Table 10 are particulary encouraging, as they show that minimizing dependency length reduces the number of realizations in which a heavy constituent precedes a light one down to essentially the level of the corpus, thereby eliminating many realizations that can be expected to have egregious errors like those shown in Table 11 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 238, |
|
"end": 246, |
|
"text": "Table 10", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 557, |
|
"end": 565, |
|
"text": "Table 11", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Interim Discussion", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "Intriguingly, there is some evidence that a negatively weighted total dependency length feature can go too far in minimizing dependency length, in the absence of other informative features to counterbalance it. In particular, the DEPLEN-GLOBAL model in Table 8 has significantly lower dependency length than the corpus, but in the richer models with discriminative synactic and dependency ordering features, there are no significant differences. It may still be though that additional features are necessary to counteract the tendency towards dependency length minimization, for example to ensure that initial constituents play their intended role in establishing and continuing topics in discourse, as also observed in Table 11. wsj 0015.7 the exact amount of the refund will be determined next year based on actual collections made until Dec. 31 of this year .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 253, |
|
"end": 260, |
|
"text": "Table 8", |
|
"ref_id": "TABREF13" |
|
}, |
|
{ |
|
"start": 720, |
|
"end": 729, |
|
"text": "Table 11.", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Interim Discussion", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "DEPORD the exact amount of the refund will be determined based on actual collections made until Dec. 31 of this year next year . wsj 0020.1 the U.S. , claiming some success in its trade diplomacy , removed South Korea , Taiwan and Saudi Arabia from a list of countries it is closely watching for allegedly failing to honor U.S. patents , copyrights and other intellectual-property rights . ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DEPLEN [same]", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To determine whether heavy-light ordering differences often represent ordering errors (including egregious ones), rather than simply representing acceptable variation, we conducted a targeted human evaluation on examples of this kind. Specifically, for each of the DEPLEN* models and their corresponding models without the dependency length feature, we chose the 25 sentences from the development section whose realizations exhibited the greatest difference in dependency length between sibling constituents appearing in opposite orders, and asked two judges (not the authors) to choose which of the two realizations best expressed the meaning of the reference sentence in a grammatical and fluent way, with the choice forced (2AFC). with only one disagreement on the realizations from the DEPLEN and DEPORD-NF models (involving an acceptable paraphrase in our judgment), and only four disagreements on the DEPLEN-GLOBAL and GLOBAL realizations. Pooling the judgments, the preference for the DEPLEN* models was well above the chance level of 50% according to a binomial test (p < 0.001 in each case). Inspecting the data ourselves, we found that many of the items did indeed involve egregious ordering errors that the DEPLEN* models managed to avoid.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Targeted Human Evaluation", |
|
"sec_num": "4.6" |
|
}, |
|
{ |
|
"text": "As noted in the introduction, to the best of our knowledge this paper is the first to examine the impact of dependency length minimization on realization ranking. While there have been quite a few papers to date reporting results on Penn Treebank data, since the various systems make different assumptions regarding the specificity of their inputs, all but the most broad-brushed comparisons remain impossible at present, and thus detailed studies such as the present one can only be made within the context of different models for the same system. Some progress on this issue has been made in the context of the Generation Challenges Surface Realization Shared Task (Belz et al., 2011) , but it remains to be seen to what extent fair cross-system comparisons using common inputs can be achieved. For (very) rough comparison purposes, Table 13 lists our results in the context of those reported for various other systems on PTB Section 23. As the table shows, the OpenCCG scores are quite competitive, exceeded only by Callaway's (2005) extensively hand-crafted system as well as Bohnet et al.'s (2011) system on shared task shallow inputs (-S), which performs much better than their system on deep inputs (-D) that more closely resemble OpenCCG's.", |
|
"cite_spans": [ |
|
{ |
|
"start": 667, |
|
"end": 686, |
|
"text": "(Belz et al., 2011)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1019, |
|
"end": 1036, |
|
"text": "Callaway's (2005)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 835, |
|
"end": 843, |
|
"text": "Table 13", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this paper, we have investigated dependency length minimization in the context of realization ranking, focusing on its potential to eliminate egregious ordering errors as well as better match the distributional characteristics of sentence orderings in news text. When added to a state-of-the-art, com- prehensive realization ranking model, we showed that including a dense, global feature for minimizing total dependency length yields statistically significant improvements in BLEU scores and significantly reduces the number of heavy-light ordering errors. Going beyond the BLEU metric, we also conducted a targeted human evaluation to confirm the utility of the dependency length feature in models of varying richness. Interestingly, even with the richest model, in some cases we found that the dependency length feature still appears to go too far in minimizing dependency length, suggesting that further counter-balancing features-especially ones for the sentence-initial position (Filippova and Strube, 2009) -warrant investigation in future work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 988, |
|
"end": 1016, |
|
"text": "(Filippova and Strube, 2009)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The realizer can also be run using inputs derived from OpenCCG's parser, though informal experiments suggest that parse errors tend to decrease generation quality.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We also experimented with two other definitions of dependency length described in the literature, namely (1) counting only nouns and verbs to approximate counting by discourse referents(Gibson, 1998) and (2) omitting function words to approximate prosodic weight(Anttila et al., 2010); however, realization ranking accuracy was slightly worse than counting all non-punctuation words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was supported in part by NSF grants no. IIS-1143635 and IIS-0812297. We thank the anonymous reviewers for helpful comments and discussion, and Scott Martin and Dennis Mehay for their participation in the targeted human evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "The role of prosody in the English dative alternation. Language and Cognitive Processes", |
|
"authors": [ |
|
{ |
|
"first": "Arto", |
|
"middle": [], |
|
"last": "Anttila", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Adams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Speriosu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arto Anttila, Matthew Adams, and Mike Speriosu. 2010. The role of prosody in the English dative alternation. Language and Cognitive Processes.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Heaviness vs. newness: The effects of structural complexity and discourse status on constituent ordering", |
|
"authors": [ |
|
{ |
|
"first": "Jennifer", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Arnold", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wasow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Losongco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Ginstrom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Language", |
|
"volume": "76", |
|
"issue": "", |
|
"pages": "28--55", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jennifer E. Arnold, Thomas Wasow, Anthony Losongco, and Ryan Ginstrom. 2000. Heaviness vs. newness: The effects of structural complexity and discourse sta- tus on constituent ordering. Language, 76:28-55.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Coupling CCG and Hybrid Logic Dependency Semantics", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Baldridge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "-", |
|
"middle": [], |
|
"last": "Geert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proc. ACL-02", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Baldridge and Geert-Jan Kruijff. 2002. Coupling CCG and Hybrid Logic Dependency Semantics. In Proc. ACL-02.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "The first surface realisation shared task: Overview and evaluation results", |
|
"authors": [ |
|
{ |
|
"first": "Anja", |
|
"middle": [], |
|
"last": "Belz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "White", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dominic", |
|
"middle": [], |
|
"last": "Espinosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Kow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deirdre", |
|
"middle": [], |
|
"last": "Hogan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanda", |
|
"middle": [], |
|
"last": "Stent", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the Generation Challenges Session at the 13th European Workshop on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "217--226", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anja Belz, Mike White, Dominic Espinosa, Eric Kow, Deirdre Hogan, and Amanda Stent. 2011. The first surface realisation shared task: Overview and evaluation results. In Proceedings of the Genera- tion Challenges Session at the 13th European Work- shop on Natural Language Generation, pages 217- 226, Nancy, France, September. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "<stumaba >: From deep representation to surface", |
|
"authors": [ |
|
{ |
|
"first": "Bernd", |
|
"middle": [], |
|
"last": "Bohnet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Mille", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beno\u00eet", |
|
"middle": [], |
|
"last": "Favre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leo", |
|
"middle": [], |
|
"last": "Wanner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the Generation Challenges Session at the 13th European Workshop on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "232--235", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bernd Bohnet, Simon Mille, Beno\u00eet Favre, and Leo Wan- ner. 2011. <stumaba >: From deep representation to surface. In Proceedings of the Generation Challenges Session at the 13th European Workshop on Natural Language Generation, pages 232-235, Nancy, France, September. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Contributions of animacy to grammatical function assignment and word order during production", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Branigan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Pickering", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Tanaka", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Lingua", |
|
"volume": "118", |
|
"issue": "2", |
|
"pages": "172--189", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H Branigan, M Pickering, and M Tanaka. 2008. Con- tributions of animacy to grammatical function assign- ment and word order during production. Lingua, 118(2):172-189.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Predicting the Dative Alternation. Cognitive Foundations of Interpretation", |
|
"authors": [ |
|
{ |
|
"first": "Joan", |
|
"middle": [], |
|
"last": "Bresnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Cueni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tatiana", |
|
"middle": [], |
|
"last": "Nikitina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"Harald" |
|
], |
|
"last": "Baayen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "69--94", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joan Bresnan, Anna Cueni, Tatiana Nikitina, and R. Har- ald Baayen. 2007. Predicting the Dative Alternation. Cognitive Foundations of Interpretation, pages 69-94.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Incorporating information status into generation ranking", |
|
"authors": [ |
|
{ |
|
"first": "Aoife", |
|
"middle": [], |
|
"last": "Cahill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arndt", |
|
"middle": [], |
|
"last": "Riester", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of, ACL-IJCNLP '09", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "817--825", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aoife Cahill and Arndt Riester. 2009. Incorporating in- formation status into generation ranking. In Proceed- ings of, ACL-IJCNLP '09, pages 817-825, Morris- town, NJ, USA. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Designing features for parse disambiguation and realisation ranking", |
|
"authors": [ |
|
{ |
|
"first": "Aoife", |
|
"middle": [], |
|
"last": "Cahill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Forst", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Rohrer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 12th International Lexical Functional Grammar Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "128--147", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aoife Cahill, Martin Forst, and Christian Rohrer. 2007. Designing features for parse disambiguation and real- isation ranking. In Miriam Butt and Tracy Holloway King, editors, Proceedings of the 12th International Lexical Functional Grammar Conference, pages 128- 147. CSLI Publications, Stanford.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "The types and distributions of errors in a wide coverage surface realizer evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Charles", |
|
"middle": [], |
|
"last": "Callaway", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 10th European Workshop on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Charles Callaway. 2005. The types and distributions of errors in a wide coverage surface realizer evalua- tion. In Proceedings of the 10th European Workshop on Natural Language Generation.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Wide-Coverage Efficient Statistical Parsing with CCG and Log-Linear Models", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Curran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Computational Linguistics", |
|
"volume": "33", |
|
"issue": "4", |
|
"pages": "493--552", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Clark and James R. Curran. 2007. Wide- Coverage Efficient Statistical Parsing with CCG and Log-Linear Models. Computational Linguistics, 33(4):493-552.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Data from eyetracking corpora as evidence for theories of syntactic processing complexity", |
|
"authors": [ |
|
{ |
|
"first": "Vera", |
|
"middle": [], |
|
"last": "Demberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Keller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Cognition", |
|
"volume": "109", |
|
"issue": "2", |
|
"pages": "193--210", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vera Demberg and Frank Keller. 2008. Data from eye- tracking corpora as evidence for theories of syntactic processing complexity. Cognition, 109(2):193-210.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Generating constituent order in German clauses", |
|
"authors": [ |
|
{ |
|
"first": "Katja", |
|
"middle": [], |
|
"last": "Filippova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Strube", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "ACL 2007, Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Katja Filippova and Michael Strube. 2007. Generating constituent order in German clauses. In ACL 2007, Proceedings of the 45th Annual Meeting of the As- sociation for Computational Linguistics, June 23-30, 2007, Prague, Czech Republic. The Association for Computer Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Tree linearization in English: Improving language model based approaches", |
|
"authors": [ |
|
{ |
|
"first": "Katja", |
|
"middle": [], |
|
"last": "Filippova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Strube", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of Human Language Technologies: The 2009 Annual Conference of the North American Chapter of the Association for Computational Linguistics, Companion Volume: Short Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "225--228", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Katja Filippova and Michael Strube. 2009. Tree lin- earization in English: Improving language model based approaches. In Proceedings of Human Lan- guage Technologies: The 2009 Annual Conference of the North American Chapter of the Association for Computational Linguistics, Companion Volume: Short Papers, pages 225-228, Boulder, Colorado, June. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Linguistic complexity: Locality of syntactic dependencies", |
|
"authors": [ |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Gibson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Cognition", |
|
"volume": "68", |
|
"issue": "", |
|
"pages": "1--76", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edward Gibson. 1998. Linguistic complexity: Locality of syntactic dependencies. Cognition, 68:1-76.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Dependency locality theory: A distance-based theory of linguistic complexity", |
|
"authors": [ |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Gibson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Image, Language, brain: Papers from the First Mind Articulation Project Symposium", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edward Gibson. 2000. Dependency locality theory: A distance-based theory of linguistic complexity. In Alec Marantz, Yasushi Miyashita, and Wayne O'Neil, editors, Image, Language, brain: Papers from the First Mind Articulation Project Symposium. MIT Press, Cambridge, MA.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Optimizing grammars for minimum dependency length", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Gildea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Temperley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "184--191", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Gildea and David Temperley. 2007. Optimizing grammars for minimum dependency length. In Pro- ceedings of the 45th Annual Meeting of the Association of Computational Linguistics, pages 184-191, Prague, Czech Republic, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Dependency-based n-gram models for general purpose sentence realisation", |
|
"authors": [ |
|
{ |
|
"first": "Yuqing", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Van Genabith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haifeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proc. COLING-08", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuqing Guo, Josef van Genabith, and Haifeng Wang. 2008. Dependency-based n-gram models for general purpose sentence realisation. In Proc. COLING-08.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A Performance Theory of Order and Constituency", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Hawkins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John A. Hawkins. 1994. A Performance Theory of Order and Constituency. Cambridge University Press, New York.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "The relative order of prepositional phrases in English: Going beyond manner-place-time", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "John", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hawkins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Language Variation and Change", |
|
"volume": "11", |
|
"issue": "03", |
|
"pages": "231--266", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John A. Hawkins. 2000. The relative order of prepositional phrases in English: Going beyond manner-place-time. Language Variation and Change, 11(03):231-266.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Why are categories adjacent", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Hawkins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Journal of Linguistics", |
|
"volume": "37", |
|
"issue": "", |
|
"pages": "1--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John A. Hawkins. 2001. Why are categories adjacent? Journal of Linguistics, 37:1-34.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "CCGbank: A Corpus of CCG Derivations and Dependency Structures Extracted from the Penn Treebank", |
|
"authors": [ |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Hockenmaier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Steedman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Computational Linguistics", |
|
"volume": "33", |
|
"issue": "3", |
|
"pages": "355--396", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julia Hockenmaier and Mark Steedman. 2007. CCG- bank: A Corpus of CCG Derivations and Dependency Structures Extracted from the Penn Treebank. Com- putational Linguistics, 33(3):355-396.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Data and models for statistical parsing with Combinatory Categorial Grammar", |
|
"authors": [ |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Hockenmaier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julia Hockenmaier. 2003. Data and models for statis- tical parsing with Combinatory Categorial Grammar. Ph.D. thesis, University of Edinburgh.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Exploiting multi-word units in history-based probabilistic generation", |
|
"authors": [ |
|
{ |
|
"first": "Deirdre", |
|
"middle": [], |
|
"last": "Hogan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Conor", |
|
"middle": [], |
|
"last": "Cafferkey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aoife", |
|
"middle": [], |
|
"last": "Cahill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Van Genabith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deirdre Hogan, Conor Cafferkey, Aoife Cahill, and Josef van Genabith. 2007. Exploiting multi-word units in history-based probabilistic generation. In Proc. EMNLP-CoNLL.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Generating natural word orders in a semi-free word order language: Treebank-based linearization preferences for German", |
|
"authors": [ |
|
{ |
|
"first": "Gerard", |
|
"middle": [], |
|
"last": "Kempen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karin", |
|
"middle": [], |
|
"last": "Harbusch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Lecture Notes in Computer Science", |
|
"volume": "2945", |
|
"issue": "", |
|
"pages": "350--354", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gerard Kempen and Karin Harbusch. 2004. Generat- ing natural word orders in a semi-free word order lan- guage: Treebank-based linearization preferences for German. In Alexander F. Gelbukh, editor, CICLing, volume 2945 of Lecture Notes in Computer Science, pages 350-354. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Statistical significance tests for machine translation evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of EMNLP 2004", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "388--395", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn. 2004. Statistical significance tests for machine translation evaluation. In Dekang Lin and Dekai Wu, editors, Proceedings of EMNLP 2004, pages 388-395, Barcelona, Spain, July. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "An empirical verification of coverage and correctness for a general-purpose sentence generator", |
|
"authors": [ |
|
{ |
|
"first": "Irene", |
|
"middle": [], |
|
"last": "Langkilde-Geary", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proc. INLG-02", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Irene Langkilde-Geary. 2002. An empirical verification of coverage and correctness for a general-purpose sen- tence generator. In Proc. INLG-02.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "An activation-based model of sentence processing as skilled memory retrieval", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Vasishth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Cognitive Science", |
|
"volume": "29", |
|
"issue": "", |
|
"pages": "1--45", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. L. Lewis and S. Vasishth. 2005. An activation-based model of sentence processing as skilled memory re- trieval. Cognitive Science, 29:1-45, May.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Computational principles of working memory in sentence comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shravan", |
|
"middle": [], |
|
"last": "Vasishth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julie", |
|
"middle": [], |
|
"last": "Van Dyke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Trends in Cognitive Sciences", |
|
"volume": "10", |
|
"issue": "10", |
|
"pages": "447--454", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard L. Lewis, Shravan Vasishth, and Julie Van Dyke. 2006. Computational principles of working memory in sentence comprehension. Trends in Cognitive Sci- ences, 10(10):447-454.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Probabilistic methods for disambiguation of an HPSG-based chart generator", |
|
"authors": [ |
|
{ |
|
"first": "Hiroko", |
|
"middle": [], |
|
"last": "Nakanishi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yusuke", |
|
"middle": [], |
|
"last": "Miyao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun'ichi", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proc. IWPT-05", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hiroko Nakanishi, Yusuke Miyao, and Jun'ichi Tsujii. 2005. Probabilistic methods for disambiguation of an HPSG-based chart generator. In Proc. IWPT-05.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "BLEU: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proc. ACL-02", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. BLEU: a method for automatic eval- uation of machine translation. In Proc. ACL-02.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Designing agreement features for realization ranking", |
|
"authors": [ |
|
{ |
|
"first": "Rajakrishnan", |
|
"middle": [], |
|
"last": "Rajkumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "White", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Coling 2010: Posters", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1032--1040", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rajakrishnan Rajkumar and Michael White. 2010. De- signing agreement features for realization ranking. In Coling 2010: Posters, pages 1032-1040, Beijing, China, August. Coling 2010 Organizing Committee.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Exploiting named entity classes in CCG surface realization", |
|
"authors": [ |
|
{ |
|
"first": "Rajakrishnan", |
|
"middle": [], |
|
"last": "Rajkumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "White", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dominic", |
|
"middle": [], |
|
"last": "Espinosa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of Human Language Technologies: The 2009 Annual Conference of the North American Chapter of the Association for Computational Linguistics, Companion Volume: Short Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "161--164", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rajakrishnan Rajkumar, Michael White, and Dominic Espinosa. 2009. Exploiting named entity classes in CCG surface realization. In Proceedings of Human Language Technologies: The 2009 Annual Conference of the North American Chapter of the Association for Computational Linguistics, Companion Volume: Short Papers, pages 161-164, Boulder, Colorado, June. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Linguistically informed statistical models of constituent structure for ordering in sentence realization", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Ringger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Gamon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Moore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Rojas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martine", |
|
"middle": [], |
|
"last": "Smets", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Corston-Oliver", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. COLING-04", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Ringger, Michael Gamon, Robert C. Moore, David Rojas, Martine Smets, and Simon Corston-Oliver. 2004. Linguistically informed statistical models of constituent structure for ordering in sentence realiza- tion. In Proc. COLING-04.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Animacy and syntactic structure: Fronted NPs in English", |
|
"authors": [ |
|
{ |
|
"first": "Neal", |
|
"middle": [], |
|
"last": "Snider", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Annie", |
|
"middle": [], |
|
"last": "Zaenen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Intelligent Linguistic Architectures: Variations on Themes by Ronald M", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Neal Snider and Annie Zaenen. 2006. Animacy and syn- tactic structure: Fronted NPs in English. In M. Butt, M. Dalrymple, and T.H. King, editors, Intelligent Lin- guistic Architectures: Variations on Themes by Ronald M. Kaplan. CSLI Publications, Stanford.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "The Syntactic Process", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Steedman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Steedman. 2000. The Syntactic Process. MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Minimization of dependency length in written English", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Temperley", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Cognition", |
|
"volume": "105", |
|
"issue": "2", |
|
"pages": "300--333", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Temperley. 2007. Minimization of dependency length in written English. Cognition, 105(2):300 - 333.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "The Role of Processing Complexity in Word Order Variation and Change", |
|
"authors": [ |
|
{ |
|
"first": "Harry", |
|
"middle": [], |
|
"last": "Tily", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Harry Tily. 2010. The Role of Processing Complexity in Word Order Variation and Change. Ph.D. thesis, Stanford University.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Maximum entropy models for realization ranking", |
|
"authors": [ |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Velldal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Oepen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proc. MT-Summit X", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik Velldal and Stefan Oepen. 2005. Maximum entropy models for realization ranking. In Proc. MT-Summit X.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Post-verbal Constituent Ordering in English", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wasow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennifer", |
|
"middle": [], |
|
"last": "Arnold", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wasow and Jennifer Arnold. 2003. Post-verbal Constituent Ordering in English. Mouton.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Postverbal Behavior",
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Wasow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom Wasow. 2002. Postverbal Behavior. CSLI Publica- tions, Stanford.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Perceptron reranking for CCG realization", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "White", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajakrishnan", |
|
"middle": [], |
|
"last": "Rajkumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "410--419", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael White and Rajakrishnan Rajkumar. 2009. Per- ceptron reranking for CCG realization. In Proceedings of the 2009 Conference on Empirical Methods in Nat- ural Language Processing, pages 410-419, Singapore, August. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Efficient Realization of Coordinate Structures in Combinatory Categorial Grammar", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "White", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Research on Language & Computation", |
|
"volume": "4", |
|
"issue": "1", |
|
"pages": "39--75", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael White. 2006. Efficient Realization of Coordi- nate Structures in Combinatory Categorial Grammar. Research on Language & Computation, 4(1):39-75.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "\"Long before short\" preference in the production of a head-final language",
|
"authors": [ |
|
{ |
|
"first": "Hiroko", |
|
"middle": [], |
|
"last": "Yamashita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franklin", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Cognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hiroko Yamashita and Franklin Chang. 2001. \"Long before short\" preference in the production of a head- final language. Cognition, 81.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF0": { |
|
"type_str": "table", |
|
"text": "normal form model wsj 0034.9 they fell into oblivion after the 1929 crash . they fell after the 1929 crash into oblivion . wsj 0013.16 separately , the Federal Energy Regulatory Commission [ V P turned down for now [ N P a request by Northeast [ V P seeking approval of [ N P its possible purchase of PS of New Hampshire]]]] . FULL separately , the Federal Energy Regulatory Commission [ V P turned down [ N P a request by Northeast [ V P seeking approval of [ N P its possible purchase of PS of New Hampshire]]] for now] .", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"text": "Examples of OpenCCG output with", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"text": "Counter-examples to dependency length minimization comprehension", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"text": "Word s/s/np + before LexCat + POS s/s/np + IN Rule s dcl \u2192 np s dcl \\np Rule + Word s dcl \u2192 np s dcl \\np + bought Rule + POS", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>Feature Type</td><td>Example</td></tr><tr><td>LexCat +</td><td/></tr></table>" |
|
}, |
|
"TABREF5": { |
|
"type_str": "table", |
|
"text": "", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF6": { |
|
"type_str": "table", |
|
"text": "realization ranking model for German. Except where otherwise indicated, features are integer-valued, representing counts of occurrences in a derivation. The total of the length between all semantic heads and dependents for a realization, where length is in intervening words 2 excluding punctuation. For length purposes, collapsed named entities were counted as a single word in the experiments reported here.", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>Feature Type</td><td>Example</td></tr><tr><td>HeadBroadPos + Rel + Precedes + HeadWord + DepWord</td><td>VB, Arg0, dep, wants, he</td></tr><tr><td>. . . + HeadWord + DepPOS</td><td>VB, Arg0, dep, wants, PRP</td></tr><tr><td>. . . + HeadPOS + DepWord</td><td>VB, Arg0, dep, VBZ, he</td></tr><tr><td>. . . + HeadWord + DepPOS</td><td>VB, Arg0, dep, VBZ, PRP</td></tr><tr><td>HeadBroadPos + Side + DepWord1 + DepWord2</td><td>NN, left, an, important</td></tr><tr><td>. . . + DepWord1 + DepPOS2</td><td>NN, left, an, JJ</td></tr><tr><td>. . . + DepPOS1 + DepWord2</td><td>NN, left, DT, important</td></tr><tr><td>. . . + DepPOS1 + DepPOS2</td><td>NN, left, DT, JJ</td></tr><tr><td>. . . + Rel1 + Rel2</td><td>NN, left, Det, Mod</td></tr><tr><td colspan=\"2\">NGRAMS The log probabilities of the word se-</td></tr><tr><td colspan=\"2\">quence scored using three different n-gram</td></tr><tr><td colspan=\"2\">models: a trigram word model, a trigram</td></tr><tr><td colspan=\"2\">word model with named entity classes replac-</td></tr><tr><td colspan=\"2\">ing words, and a trigram model over POS tags</td></tr><tr><td colspan=\"2\">and supertags.</td></tr><tr><td colspan=\"2\">HOCKENMAIER As an extra component of the</td></tr><tr><td colspan=\"2\">generative baseline, the log probability of the</td></tr><tr><td colspan=\"2\">derivation according to (a reimplementation</td></tr></table>" |
|
}, |
|
"TABREF7": { |
|
"type_str": "table", |
|
"text": "Basic head-dependent and sibling dependent ordering features", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>of) Hockenmaier's (2003) generative syntactic</td></tr><tr><td>model.</td></tr></table>" |
|
}, |
|
"TABREF8": { |
|
"type_str": "table", |
|
"text": "There are also similar features using words and a word class (instead of words and POS tags), where the class is either the named entity class, COLOR for color words, PRO for pronouns, one of 60-odd suffixes culled from the web, or HYPHEN or CAP for hyphenated or capitalized words. Additionally, there are features for detecting definiteness of an NP or PP (where the definiteness value is used in place of the POS tag).", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>Model</td><td colspan=\"2\"># Alph Feats # Model Feats</td></tr><tr><td>GLOBAL</td><td>4</td><td>4</td></tr><tr><td>DEPLEN-GLOBAL</td><td>5</td><td>5</td></tr><tr><td>DEPORD-NONF</td><td>790,887</td><td>269,249</td></tr><tr><td>DEPORD-NODIST</td><td>1,035,915</td><td>365,287</td></tr><tr><td>DEPLEN-NODIST</td><td>1,035,916</td><td>366,094</td></tr><tr><td>DEPORD-NF</td><td>1,173,815</td><td>431,226</td></tr><tr><td>DEPLEN</td><td>1,173,816</td><td>428,775</td></tr></table>" |
|
}, |
|
"TABREF9": { |
|
"type_str": "table", |
|
"text": "", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>4 Evaluation</td></tr><tr><td>4.1 Experimental Conditions</td></tr></table>" |
|
}, |
|
"TABREF11": { |
|
"type_str": "table", |
|
"text": "Legend for experimental conditions els used in the earlier work along with the Hockenmaier model (and the dependency length feature,", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF13": { |
|
"type_str": "table", |
|
"text": "", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>: Dependency length compared to corpus-</td></tr><tr><td>percentage of realizations with dependency length less</td></tr><tr><td>than and greater than gold standard, along with mean</td></tr><tr><td>dependency length, whose significance is tested against</td></tr><tr><td>gold; 1671 development set (Section 00) complete real-</td></tr><tr><td>izations analyzed</td></tr></table>" |
|
}, |
|
"TABREF14": { |
|
"type_str": "table", |
|
"text": "The table shows the mean of the total dependency length of each realized derivation com-3 Kudos to Kevin Gimpel for making his resampling scripts available from http://www.ark.cs.cmu.edu/ MT/paired_bootstrap_v13a.tar.gz.", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>Model</td><td colspan=\"4\">% Short % Long % Eq % Single</td></tr><tr><td/><td>/ Long</td><td>/ Short</td><td/><td>Constit</td></tr><tr><td>GOLD</td><td>25.25</td><td>4.87</td><td>4.08</td><td>65.79</td></tr><tr><td>GLOBAL</td><td>23.15</td><td>7.86</td><td>3.94</td><td>65.04</td></tr><tr><td>DEPLEN-GLOBAL</td><td>24.58</td><td>5.57</td><td>4.09</td><td>65.76</td></tr><tr><td>DEPORD-NONF</td><td>23.13</td><td>6.61</td><td>4.03</td><td>66.23</td></tr><tr><td>DEPORD-NODIST</td><td>23.38</td><td>6.52</td><td>3.94</td><td>66.15</td></tr><tr><td>DEPLEN-NODIST</td><td>24.03</td><td>5.38</td><td>4.01</td><td>66.58</td></tr><tr><td>DEPORD-NF</td><td>23.74</td><td>5.92</td><td>3.96</td><td>66.40</td></tr><tr><td>DEPLEN</td><td>24.36</td><td>5.36</td><td>4.07</td><td>66.21</td></tr></table>" |
|
}, |
|
"TABREF16": { |
|
"type_str": "table", |
|
"text": "", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>: Distribution of heavy unequal constituents</td></tr><tr><td>(length difference > 5) in Section 00; 4692 gold cases</td></tr><tr><td>considered and significance tested against the gold stan-</td></tr><tr><td>dard using a \u03c7-square test</td></tr><tr><td>ported by previous corpus studies of English (Tem</td></tr></table>" |
|
}, |
|
"TABREF17": { |
|
"type_str": "table", |
|
"text": "DEPLENthe U.S. claiming some success in its trade diplomacy , removed South Korea , Taiwan and Saudi Arabia from a list of countries it is watching closely for allegedly failing to honor U.S. patents , copyrights and other intellectual-property rights . the U.S. removed from a list of countries it is watching closely for allegedly failing to honor U.S. patents , copyrights and other intellectual-property rights , claiming some success in its trade diplomacy , South Korea , Taiwan and Saudi Arabia . wsj 0021.8 but he has not said before that the country wants half the debt forgiven .but he not has said before \u2205 the country wants half the debt forgiven .but he not has said \u2205 the country wants half the debt forgiven before . Rexinger , retired Circuit City executive vice president , and Robert R. Glauber , U.S. Treasury undersecretary , on the 12-member board .on the 12-member board they succeed Daniel M. Rexinger , retired Circuit City executive vice president , and Robert R. Glauber , U.S. Treasury undersecretary .", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td colspan=\"2\">wsj 0075.13 The Treasury also said it plans to sell [$ 10 billion] [in 36-day cash management bills] [on Thurs-</td></tr><tr><td/><td>day].</td></tr><tr><td>DEPLEN</td><td>[same]</td></tr><tr><td>DEPORD</td><td>[same]</td></tr><tr><td>wsj 0014.2</td><td>they succeed Daniel M.</td></tr></table>" |
|
}, |
|
"TABREF18": { |
|
"type_str": "table", |
|
"text": "Examples of realized output for full models with and without the dependency length feature", |
|
"num": null, |
|
"html": null, |
|
"content": "<table/>" |
|
}, |
|
"TABREF19": { |
|
"type_str": "table", |
|
"text": "", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>Model</td><td colspan=\"3\">% Preferred % Agr Signif</td></tr><tr><td>GLOBAL</td><td>22</td><td>-</td><td>-</td></tr><tr><td>DEPLEN-GLOBAL</td><td>78</td><td>84</td><td>***</td></tr><tr><td>DEPORD-NODIST</td><td>24</td><td>-</td><td>-</td></tr><tr><td>DEPLEN-NODIST</td><td>76</td><td>92</td><td>***</td></tr><tr><td>DEPORD-NF</td><td>26</td><td>-</td><td>-</td></tr><tr><td>DEPLEN</td><td>74</td><td>96</td><td>***</td></tr><tr><td>shows the</td><td/><td/><td/></tr><tr><td>results. Agreement between the judges was high,</td><td/><td/><td/></tr></table>" |
|
}, |
|
"TABREF20": { |
|
"type_str": "table", |
|
"text": "", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td>: Targeted human evaluation-percentage of re-</td></tr><tr><td>alizations preferred by two human judges in a 2AFC test</td></tr><tr><td>among the 25 development set sentences with the great-</td></tr><tr><td>est differences in dependency length, with a binomial test</td></tr><tr><td>for significance</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |