|
{ |
|
"paper_id": "U05-1029", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T03:08:22.317087Z" |
|
}, |
|
"title": "A Distributed Architecture for Interactive Parse Annotation", |
|
"authors": [ |
|
{ |
|
"first": "Baden", |
|
"middle": [], |
|
"last": "Hughes", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "The University of Melbourne Victoria", |
|
"location": { |
|
"postCode": "3010", |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Haggerty", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Sydney", |
|
"location": { |
|
"postCode": "2006", |
|
"region": "NSW", |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Saritha", |
|
"middle": [], |
|
"last": "Manickam", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Sydney", |
|
"location": { |
|
"postCode": "2006", |
|
"region": "NSW", |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Nothman", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Sydney", |
|
"location": { |
|
"postCode": "2006", |
|
"region": "NSW", |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Curran", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Sydney", |
|
"location": { |
|
"postCode": "2006", |
|
"region": "NSW", |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In this paper we describe a modular system architecture for distributed parse annotation using interactive correction. This involves interactively adding constraints to an existing parse until the returned parse is correct. Using a mixed initiative approach, human annotators interact live with distributed ccg parser servers through an annotation gui. The examples presented to each annotator are selected by an active learning framework to maximise the value of the annotated corpus for machine learners. We report on an initial implementation based on a distributed workflow architecture.", |
|
"pdf_parse": { |
|
"paper_id": "U05-1029", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In this paper we describe a modular system architecture for distributed parse annotation using interactive correction. This involves interactively adding constraints to an existing parse until the returned parse is correct. Using a mixed initiative approach, human annotators interact live with distributed ccg parser servers through an annotation gui. The examples presented to each annotator are selected by an active learning framework to maximise the value of the annotated corpus for machine learners. We report on an initial implementation based on a distributed workflow architecture.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Annotating sentences with parse trees is perhaps the most complex and intensive linguistic annotation. The time and expense of developing parsed corpora is almost prohibitive. As a result there are only a small number of such corpora, including the Penn Treebank (Marcus et al., 1994) , the German TiGer Corpus (Skut et al., 1997) and more recently the LinGO Redwoods Treebank (Oepen et al., 2002) . These corpora are also limited in size, typically around one million words.", |
|
"cite_spans": [ |
|
{ |
|
"start": 263, |
|
"end": 284, |
|
"text": "(Marcus et al., 1994)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 311, |
|
"end": 330, |
|
"text": "(Skut et al., 1997)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 377, |
|
"end": 397, |
|
"text": "(Oepen et al., 2002)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Unfortunately, the statistical approaches to parsing which have been most successful rely heavily on both the quality and quantity of annotated resources. Also, these approaches are very sensitive to the statistical properties of the corpus, and so a parser trained on one genre may perform badly on another (Gildea, 2001) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 308, |
|
"end": 322, |
|
"text": "(Gildea, 2001)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Another major problem with parsed corpora is that they must, at least to some extent, follow a particular syntactic theory or formalism. This is a major difficulty for two reasons: firstly, it means we need separate annotated corpora for each formalism; and secondly, it means that comparing parser evaluations across formalisms is difficult.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Fully automated conversion of parse trees between formalisms is difficult because each analyses certain constructs in idiosyncratic ways. An example is CCGbank (Hockenmaier and Steedman, 2002) , a treebank of Combinatory Categorial Grammar (Steedman, 2000) derivations which were converted semi-automatically from the Penn Treebank trees. The result still required laborious editing to produce idiomatic ccg derivations (Hockenmaier, 2003) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 192, |
|
"text": "(Hockenmaier and Steedman, 2002)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 240, |
|
"end": 256, |
|
"text": "(Steedman, 2000)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 420, |
|
"end": 439, |
|
"text": "(Hockenmaier, 2003)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We intend to create a new corpus of ccg derivations on a wide range of text. We face three key problems: 1) selecting sentence to annotate which creates the most useful corpus for statistical parsers. 2) maximising the annotator efficiency and minimising error; 3) allowing distributed annotators to share expertise.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The selection problem is addressed using active learning (al). Active learning involves computing which training instances provide the most new information to one (or more) machine learners (Cohn et al., 1995; Dagan and Engelson, 1995) . The annotators become oracles answering specific queries posed by the learners.", |
|
"cite_spans": [ |
|
{ |
|
"start": 190, |
|
"end": 209, |
|
"text": "(Cohn et al., 1995;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 210, |
|
"end": 235, |
|
"text": "Dagan and Engelson, 1995)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The annotation problem is addressed by interactive correction of the output of our statistical ccg parser. This is similar to the discriminant strategy employed for Redwoods annotation (Oepen et al., 2002) but generalises to grammars where parse enumeration is infeasible. Annotators interactively add constraints to the parser which returns the most probable parse satisfying the constraints.", |
|
"cite_spans": [ |
|
{ |
|
"start": 185, |
|
"end": 205, |
|
"text": "(Oepen et al., 2002)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The distributed expertise problem is addressed using a workflow manager. Annotators will be able to add comments and queries to derivations and have them sent to (potentially remote) experienced annotators for verification. The workflow manager will also handle scheduling for the active learning infrastructure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper describes the architecture and initial implementation of a system which addresses these problems for distributed parse annotation. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Since annotating parse trees is a significant bottleneck in nlp there have been several attempts to make the process more efficient. In this work we exploit two approaches: using choice points to select the correct parse and using active learning to select sentences to parse. A discriminant is a property that distinguishes between a set of interpretations. They can be designed for linguistic non-experts (Carter, 1997) . In the Redwoods project, the annotator is presented with discriminants on the trees themselves which eventually lead to the correct hpsg parse (Oepen et al., 2002) . These discriminants are calculated from the enumerated set of all parses. Unfortunately, our automatically extracted ccg grammar produces far too many derivations (billions) for enumeration to be feasible. Baldridge and Osborne (2004) demonstrate how active learning (al) can be used to significantly reduce the annotation cost for annotating text with hpsg parses. They compare random selection with approaches based on uncertainty sampling (Cohn et al., 1995) and committee based sampling (Dagan and Engelson, 1995) and demonstrate a reduction in annotation effort of 72%. A key point that Baldridge and Osborne identify is that each sentence cannot be treated as equally difficult to annotate. Tang et al. (2002) also evaluate al on statistical parsing and find the total cost of annotation can be reduced to one third. Finally, Becker et al. (2005) compares bootstrapping techniques including al for developing new named entity corpora.", |
|
"cite_spans": [ |
|
{ |
|
"start": 407, |
|
"end": 421, |
|
"text": "(Carter, 1997)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 567, |
|
"end": 587, |
|
"text": "(Oepen et al., 2002)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 796, |
|
"end": 824, |
|
"text": "Baldridge and Osborne (2004)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1032, |
|
"end": 1051, |
|
"text": "(Cohn et al., 1995)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 1081, |
|
"end": 1107, |
|
"text": "(Dagan and Engelson, 1995)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1287, |
|
"end": 1305, |
|
"text": "Tang et al. (2002)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1422, |
|
"end": 1442, |
|
"text": "Becker et al. (2005)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Formally introduced in Day et al. (1997) , mixed initiative annotation (where the division of labour between computational facilities and human effort is coordinated for increased efficiency) has become an increasingly common methodology for the preparation of large corpora. Typically however, mixed initiative approaches have largely decoupled human and machine effort, even for larger scale tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 23, |
|
"end": 40, |
|
"text": "Day et al. (1997)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Extending the mixed initiative model specifically to a distributed environment, Hughes and Bird (2003) offer a model for the type of solution we implement here. Additionally, the ar-chitecture advocated by Curran (2003) allows us flexibility in designing individual components of this system independently, and then marshalling them into a single application instance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 80, |
|
"end": 102, |
|
"text": "Hughes and Bird (2003)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 206, |
|
"end": 219, |
|
"text": "Curran (2003)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Experiments with distributed NLP tasks of building n-gram language models (Hughes et al., 2004a) and generalised textual indexing and linguistically motivated retrieval (Hughes et al., 2004b) are broadly indicative of other work in this area. To date, however we are not aware of any work in this vein specifically involving mixed initiative annotation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 96, |
|
"text": "(Hughes et al., 2004a)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 169, |
|
"end": 191, |
|
"text": "(Hughes et al., 2004b)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Combinatory Categorial Grammar (Steedman, 2000) is a type-driven lexicalized theory of grammar based on categorial grammar. Almost all of the grammatical information in ccg is represented in the categories assigned to each word, which are either simple atomic categories (e.g. NP ) or complex functor categories (e.g. (S [dcl]\\NP )/NP a transitive declarative verb). An example sentence with lexical categories is shown in Figure 1 . These categories are combined together according to a small number of combinatory rules.", |
|
"cite_spans": [ |
|
{ |
|
"start": 31, |
|
"end": 47, |
|
"text": "(Steedman, 2000)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 423, |
|
"end": 431, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Combinatory Categorial Grammar", |
|
"sec_num": "3" |
|
}, |
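The category notation used above can be illustrated with a small sketch. The following Python fragment is illustrative only (it is not the c&c parser's internal representation): it reads a category string such as (S[dcl]\NP)/NP into atomic and functor categories, with slashes associating to the left.

```python
# Illustrative sketch only: a tiny reader for CCG category strings such as
# "(S[dcl]\NP)/NP". It is not the representation used inside the c&c parser.
from dataclasses import dataclass
from typing import Union

@dataclass
class Atom:
    base: str          # e.g. "S", "NP"
    feature: str = ""  # e.g. "dcl" in S[dcl]

@dataclass
class Functor:
    result: "Category"
    slash: str         # "/" looks for its argument to the right, "\" to the left
    argument: "Category"

Category = Union[Atom, Functor]

def parse_category(s: str) -> Category:
    cat, rest = _cat(s)
    if rest:
        raise ValueError(f"trailing input: {rest!r}")
    return cat

def _cat(s):
    # Cat := Basic (Slash Basic)*, with slashes associating to the left.
    left, s = _basic(s)
    while s and s[0] in "/\\":
        slash, s = s[0], s[1:]
        right, s = _basic(s)
        left = Functor(left, slash, right)
    return left, s

def _basic(s):
    # Basic := "(" Cat ")" | Atom [ "[" feature "]" ]
    if s.startswith("("):
        cat, s = _cat(s[1:])
        if not s.startswith(")"):
            raise ValueError("unbalanced brackets")
        return cat, s[1:]
    i = 0
    while i < len(s) and s[i].isalnum():
        i += 1
    base, s = s[:i], s[i:]
    feature = ""
    if s.startswith("["):
        j = s.index("]")
        feature, s = s[1:j], s[j + 1:]
    return Atom(base, feature), s

print(parse_category("(S[dcl]\\NP)/NP"))
```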
|
{ |
|
"text": "The set of these lexical categories is obtained from CCGbank (Hockenmaier and Steedman, 2002; Hockenmaier, 2003) , a corpus of ccg normal-form derivations derived semiautomatically from the Penn Treebank. The category set consists of those category types which occur at least 10 times in sections 2-21 of CCGbank, which results in a set of 409 categories. Clark and Curran (2004a) demonstrates that this relatively small set has high coverage on unseen data and can be used to create a robust and accurate parser. In order to obtain semantic representations for a particular formalism, only 409 categories have to be annotated.", |
|
"cite_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 93, |
|
"text": "(Hockenmaier and Steedman, 2002;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 94, |
|
"end": 112, |
|
"text": "Hockenmaier, 2003)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 356, |
|
"end": 380, |
|
"text": "Clark and Curran (2004a)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Combinatory Categorial Grammar", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In our system we are using the c&c ccg parser (Clark and Curran, 2004b) , which uses a loglinear model over normal-form derivations to select an analysis. The parser takes a pos tagged sentence as input with a set of one ore more categories assigned to each word. A ccg supertagger (Clark and Curran, 2004a) assigns the lexi-cal categories, using a log-linear model to identify the most probable categories. Clark and Curran (2004a) show how dynamic use of the supertagger -starting off with a small number of categories assigned to each word and gradually increasing the number until an analysis is found -can lead to a highly efficient and robust parser.", |
|
"cite_spans": [ |
|
{ |
|
"start": 46, |
|
"end": 71, |
|
"text": "(Clark and Curran, 2004b)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 282, |
|
"end": 307, |
|
"text": "(Clark and Curran, 2004a)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 408, |
|
"end": 432, |
|
"text": "Clark and Curran (2004a)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CCG Parsing", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The parser uses the cky chart-parsing algorithm from Steedman (2000) . The combinatory rules used by the parser are functional application (forward and backward), generalised forward composition, backward composition, generalised backward-crossed composition, and type raising. There is also a coordination rule which conjoins categories of the same type.", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 68, |
|
"text": "Steedman (2000)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CCG Parsing", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Lexicalised grammar formalisms, such as ltag and ccg, assign one or more syntactic structures to each word which are then manipulated by the parser. Supertagging was introduced for ltag to increase parsing efficiency by reducing the number of structures assigned to each word (Bangalore and Joshi, 1999 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 276, |
|
"end": 302, |
|
"text": "(Bangalore and Joshi, 1999", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CCG Supertagging", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The parser model parameters are estimated using a discriminative method, that is, one which requires statistics across all incorrect parses for a sentence as well as the correct parse. Since an automatically extracted ccg grammar can produce an extremely large number of parses, the use of a supertagger is crucial in limiting the total number of parses for the training data to a computationally manageable number.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CCG Supertagging", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The supertagger is also crucial for increasing the speed of the parser. We have shown that spectacular increases in speed can be obtained, without affecting accuracy or coverage, by tightly integrating the supertagger with the ccg grammar and parser (Clark and Curran, 2004a) . To achieve maximum speed, the supertagger initially assigns only a small number of ccg categories to each word, and the parser only requests more categories from the supertagger if it cannot provide an analysis. Clark et al. (2004) has demonstrated that annotating new data at just the lexical category level can be enough to significantly improve the performance of a parser on a new domain.", |
|
"cite_spans": [ |
|
{ |
|
"start": 250, |
|
"end": 275, |
|
"text": "(Clark and Curran, 2004a)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 490, |
|
"end": 509, |
|
"text": "Clark et al. (2004)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CCG Supertagging", |
|
"sec_num": "3.2" |
|
}, |
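The adaptive supertagger-parser interaction described above amounts to a simple retry loop. The sketch below is schematic only: supertag(), parse() and the beta values are placeholders standing in for the real c&c components, not their actual API.

```python
# Schematic of the adaptive supertagging strategy described above; supertag()
# and parse() are placeholder callables, and the beta values are illustrative.
def parse_with_adaptive_supertagging(sentence, supertag, parse,
                                     betas=(0.075, 0.03, 0.01, 0.005, 0.001)):
    """Try increasingly permissive category assignments until a parse is found.

    supertag(sentence, beta) -> one candidate category set per word, keeping
        categories whose probability is within beta of the best category.
    parse(sentence, categories) -> a derivation, or None if no analysis
        spans the whole sentence.
    """
    for beta in betas:  # start restrictive, relax only if the parser fails
        categories = supertag(sentence, beta)
        derivation = parse(sentence, categories)
        if derivation is not None:
            return derivation
    return None  # no analysis found even at the most permissive setting
```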
|
{ |
|
"text": "For a given sentence, the automatically extracted grammar can produce a very large number of derivations. Clark and Curran (2004b) describes how a packed chart can be used to efficiently represent the derivation space, and also efficient algorithms for finding the most probable derivation. Unfortunately, this massive derivation space means it is not possible to enumerate all parses, so the discriminant strategy for interactive annotation outlined previous is infeasible.", |
|
"cite_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 130, |
|
"text": "Clark and Curran (2004b)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interactive Correction", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We therefore introduce the idea of interactive correction where the parser is given a number of constraints by the annotator. Rather than enumerate the parse, the process only involves finding the most probable parse that satisfies the expressed constraints. This can be performed efficiently as part of the dynamic programming algorithm which finds the highest probability derivation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interactive Correction", |
|
"sec_num": "3.3" |
|
}, |
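One way to picture this constrained search: run the usual dynamic program over the packed derivation forest, but discard any chart item that violates a constraint before it can take part in a larger analysis. The sketch below is illustrative; the PackedNode structure and the constraints dictionary are invented for the example and are not the c&c parser's data structures.

```python
# Illustrative sketch of finding the most probable derivation subject to
# annotator constraints. constraints = {"banned": {(span, category), ...},
# "forced_lexical": {span: category}, "forced_spans": [span, ...]}.
class PackedNode:
    def __init__(self, span, category, analyses):
        self.span = span            # (start, end) token offsets
        self.category = category    # category of this chart item
        self.analyses = analyses    # list of (score, children); children == () for lexical items

def crosses(a, b):
    # True when spans a and b partially overlap (neither contains the other).
    return a[0] < b[0] < a[1] < b[1] or b[0] < a[0] < b[1] < a[1]

def allowed(node, constraints):
    if (node.span, node.category) in constraints["banned"]:
        return False
    # A forced span rules out any constituent that crosses it.
    return not any(crosses(node.span, s) for s in constraints["forced_spans"])

def best(node, constraints, memo=None):
    """Most probable constrained derivation under node, or None if none exists."""
    if memo is None:
        memo = {}
    if id(node) in memo:
        return memo[id(node)]
    forced = constraints["forced_lexical"].get(node.span)
    result = None
    if allowed(node, constraints):
        for score, children in node.analyses:
            if not children and forced is not None and node.category != forced:
                continue  # lexical choice conflicts with a forced lexical category
            kids = [best(child, constraints, memo) for child in children]
            if any(k is None for k in kids):
                continue  # a child subtree cannot satisfy the constraints
            total = score + sum(k[0] for k in kids)
            if result is None or total > result[0]:
                result = (total, (node.category, [k[1] for k in kids]))
    memo[id(node)] = result
    return result
```

Because the filtering is local to each chart item, adding or removing a constraint only requires re-running this search over the existing forest, not re-parsing the sentence from scratch.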
|
{ |
|
"text": "Given that the ccg categories contain so much information we expect that it will only require annotators to constrain the lexical categories on a few words to reach a correct parse.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interactive Correction", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The annotation process begins by the annotator requesting a sentence to annotate. The active learning component determines which sentence from a large corpus of raw sentences may provide the most new information if it were to be annotated. al can be very computationally intensive process and so will only occur after a given number of sentences have been annotated. The al component will return a queue of sentences that then scheduled to be annotated.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Use Case", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The annotator will receive the top sentence on the queue along with the most probable derivation for that sentence. They can add the following constraints to a given sentence:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Example Use Case", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "force a specific lexical category ban a current lexical category ban a current non-leaf category force a specific chart span ban a current chart span Adding one (or more) new constraints will cause the derivation to be returned which satisfies the existing and new constraints (if such a derivation exists). This process continues until the correct derivation is reached and the parse is checked in as correct. Once enough new annotated sentences have been completed the al component regenerates a new queue of sentences based on the retrained statistical parser model. An alternative case is that the annotator is not sure about the correct derivation for the sentence. They can then annotate the derivation Figure 2 : System Architecture with a comment/question and it will be scheduled on the queue for other more experienced annotators. The experienced annotator will see the constraints and the comments added by the original annotator. They can make a decision or propagate it to some other annotator. Once a decision has been reached the information is returned to all annotators. This process is handled by the workflow manager.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 709, |
|
"end": 717, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Example Use Case", |
|
"sec_num": "4" |
|
}, |
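As an illustration, the five constraint types listed above could be carried between the gui and the workflow manager in a structure like the following; the field names are hypothetical, not the system's actual wire format.

```python
# Hypothetical encoding of the five constraint types listed above; the field
# names are illustrative, not the actual format used by the system.
from dataclasses import dataclass
from typing import Optional, Tuple

@dataclass
class Constraint:
    action: str                     # "force" or "ban"
    kind: str                       # "lexical", "category" (non-leaf) or "span"
    span: Tuple[int, int]           # token span the constraint applies to
    category: Optional[str] = None  # category string, where relevant

# Example: force token 2 to be a transitive verb and ban an unwanted span.
constraints = [
    Constraint("force", "lexical", (2, 3), "(S[dcl]\\NP)/NP"),
    Constraint("ban", "span", (1, 4)),
]
```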
|
{ |
|
"text": "The system architecture for distributed annotation and parsing with active learning can be seen in Figure 1 . The Visualization and Analysis module provides the end user interface by which a human annotator can review and revise the parser output. The actual content rendered by this module is provided by the Workflow Management module.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 107, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System Architecture", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The Workflow Management module has three main roles: first to interact with the Visualization and Analysis, providing parses to be visualised and refined; second to manage the user and tasks in the process of analysis; and third to interact with the Computational Management module by instantiating the active learning framework for incremental parsing of the corpus data, and subsequent grid execution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Architecture", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The Computational Management module has two sub-modules. The Active Learning submodule allows for incremental application of refined parses as training data for subsequent iterations of the parser. The Grid sub-module handles low level execution including the queuing, dispatch and execution of analysis tasks, and fetching the results from the distributed computation environment.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Architecture", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Having described the high level architecture of the system, we now turn to an in depth discussion of each of the components in turn.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Architecture", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The visualisation gui is implemented in wx-Python (Dunn, 2005) , an extension of the crossplatform gui toolkit wxWidgets (Smart et al., 2005) for Python. wxWidgets is particularly notable for its use of native graphical components for a given operating system platform, allowing the interface a native look and feel when run on Windows, Mac or Linux environments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 50, |
|
"end": 62, |
|
"text": "(Dunn, 2005)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 121, |
|
"end": 141, |
|
"text": "(Smart et al., 2005)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visualization and Analysis Module", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Both the gui and the export to file functionality are built on a flexible cross-platform code base. As can be seen in Figure 3 an initial implementation already succeeds in displaying the ccg parser output in a user-friendly form similar to that used by (Steedman, 2000) and widely adopted as a standard format.", |
|
"cite_spans": [ |
|
{ |
|
"start": 254, |
|
"end": 270, |
|
"text": "(Steedman, 2000)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 126, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Visualization and Analysis Module", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "To facilitate the re-use of the rendered parse tree, our gui uses the ReportLab Toolkit (Rep, 2005) to facilitate export to common vector graphics formats such as PDF, PostScript, PNG and SVG. These are useful for inclusion in publications and presentations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 99, |
|
"text": "(Rep, 2005)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visualization and Analysis Module", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "By using basically the same rendering code, consistent output is provided to the gui and the export formats, including plain-text. Each provides a Canvas object whose role is simply to provide metric information for displaying text on that output device, and then to place the text at given locations. All calculations for positioning text nodes are done external to the canvas, so new output formats can easily be supported. Similarly, if a user selects a change in font-styling, this is reflected in all graphical export formats and on-screen.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visualization and Analysis Module", |
|
"sec_num": "6" |
|
}, |
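A minimal sketch of this Canvas abstraction (with illustrative names, not the actual implementation): each backend only measures and places text, while all layout decisions stay in shared, backend-independent code.

```python
# Minimal sketch of the Canvas abstraction described above; class and method
# names are illustrative rather than the actual implementation.
class Canvas:
    """One Canvas per output device (screen, PDF, PNG, plain text, ...)."""
    def text_width(self, text: str, font) -> float:
        raise NotImplementedError   # metric information only
    def draw_text(self, text: str, x: float, y: float, font) -> None:
        raise NotImplementedError   # placement only

def render_leaf(canvas: Canvas, word: str, category: str, x: float, y: float, font):
    # All positioning decisions happen outside the canvas, so any backend that
    # implements the two methods above renders identically.
    width = max(canvas.text_width(word, font), canvas.text_width(category, font))
    canvas.draw_text(word, x + (width - canvas.text_width(word, font)) / 2, y, font)
    canvas.draw_text(category, x + (width - canvas.text_width(category, font)) / 2,
                     y + 1.5 * font.size, font)
    return width
```

Keeping the canvas this small is what allows a new export format to be added by implementing just two methods.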
|
{ |
|
"text": "The graphical interface provides some interactivity in order to assist a user in viewing, manipulating and annotating a parsed sentence Most importantly, an annotator can change or constrain the available categories for sentence Figure 3 : Screenshot from gui constituents. As soon as these constraints are added, they are passed back in real-time to the Workflow Management module, and the gui is updated to reflect the results of those changes. Thus, in almost all cases, correct parses can be generated without the need for the user to laboriously construct an entire derivation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 229, |
|
"end": 237, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Visualization and Analysis Module", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "To simplify the viewing of large parse trees, a user has the facility to 'collapse' chosen sections of the tree with a simple point-and-click operation. Hiding most collapsed words from view and only showing the derived category can significantly reduce the horizontal and vertical space occupied by the parse image. This feature is particularly useful when an annotator needs to focus on a particular section of a given parse: once a certain partial derivation is checked, it may be collapsed and will remain fixed there through other parse modifications.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visualization and Analysis Module", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Collectively these features should make the job of annotators far less painful. The graphical rendering of the ccg parser's output makes the incorrect grouping of words obvious, and by using the parser in collaboration with the user as described above, annotating a sentence correctly will usually be a matter of seconds rather than minutes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visualization and Analysis Module", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The basic workflow of the system is as follows. Sentences are parsed from a corpus by the ccg parser on the grid, results being added to review stack. A user logs in and starts a session, requesting the next review parse. The user reviews parse, and either confirms parse, or modifies and submits a revised parse or promotes the candidate parse to new reviewer. Accepted parses are sent back to the active learner for subsequent retraining of the ccg parser.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Workflow Management Module", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "As mentioned earlier, the Workflow Management module has two interfaces: one to the Visualization and Analysis module, and the other to the Execution module; as well as an internal user and task management function. We will first discuss the latter, before returning to the interfaces themselves. It is however, important to note that the workflow here is analytical, as distinct from computational.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Workflow Management Module", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The user registry allows for the tracking of user (i.e. annotator) names, together with corresponding passwords and user level attributes. In addition, a log is kept of the activity of each user, in particular annotation times, which are useful for monitoring the effectiveness of the interactive correction approach.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "User Management Sub-module", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "The task queue sub-module is basically a parse review queue contains the list of sentences pending review together with an user allocation, and a parse/sentence status (pending, reviewed).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Queue Sub-module", |
|
"sec_num": "7.2" |
|
}, |
|
{ |
|
"text": "Analysis To facilitate communication the Simple Object Access Protocol (soap) (Gudgin et al., 2003) has been used to implement a lightweight interface. The soap implementation supports 5 basic functions:", |
|
"cite_spans": [ |
|
{ |
|
"start": 78, |
|
"end": 99, |
|
"text": "(Gudgin et al., 2003)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interface to Visualization and", |
|
"sec_num": "7.3" |
|
}, |
|
{ |
|
"text": "\u2022 authenticate user implements a lightweight user authentication protocol based on a username and password;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interface to Visualization and", |
|
"sec_num": "7.3" |
|
}, |
|
{ |
|
"text": "\u2022 submit accepted parse is used when the current parse and constraints are acceptable, and is parameterised by the sentence ID, a parse ID, and the set of constraints with any associated commentary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interface to Visualization and", |
|
"sec_num": "7.3" |
|
}, |
|
{ |
|
"text": "\u2022 submit uncertain parse is used when the current parse is not fully understood by the user, and a second opinion is required. It is is parameterised by the sentence ID, a parse ID, and the set of constraints with any associated commentary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interface to Visualization and", |
|
"sec_num": "7.3" |
|
}, |
|
{ |
|
"text": "\u2022 get next parse is the stack retrieval method, used to retrieve the next parsed sentence in the individual user queue. It returns a the next sentence ID, sentence, parse ID, and set of constraints and associated commentary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interface to Visualization and", |
|
"sec_num": "7.3" |
|
}, |
|
{ |
|
"text": "\u2022 get modified parse allows the user to get a subsequent sentence and parse matching a revised set of constraints. It takes a parse ID and a set of constraints.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interface to Visualization and", |
|
"sec_num": "7.3" |
|
}, |
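The five methods above suggest a thin client-side wrapper in the gui. The sketch below only shows the shape such a wrapper might take; call_soap stands in for whatever SOAP client library is used and is not a real API.

```python
# Shape of a client-side wrapper for the five soap methods above; call_soap is
# a stand-in for an actual SOAP client library, not a real API.
class AnnotationClient:
    def __init__(self, call_soap):
        self.call = call_soap   # call(method_name, **params) -> response

    def authenticate_user(self, username, password):
        return self.call("authenticate_user", username=username, password=password)

    def submit_accepted_parse(self, sentence_id, parse_id, constraints, comments=""):
        return self.call("submit_accepted_parse", sentence_id=sentence_id,
                         parse_id=parse_id, constraints=constraints, comments=comments)

    def submit_uncertain_parse(self, sentence_id, parse_id, constraints, comments=""):
        return self.call("submit_uncertain_parse", sentence_id=sentence_id,
                         parse_id=parse_id, constraints=constraints, comments=comments)

    def get_next_parse(self, username):
        # Returns the next sentence ID, sentence, parse ID, constraints and comments.
        return self.call("get_next_parse", username=username)

    def get_modified_parse(self, parse_id, constraints):
        return self.call("get_modified_parse", parse_id=parse_id,
                         constraints=constraints)
```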
|
{ |
|
"text": "Management Again to facilitate communication the Simple Object Access Protocol (soap) (Gudgin et al., 2003) has been used to implement a lightweight interface between the Workflow Management module and the Computational Management module.", |
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 107, |
|
"text": "(Gudgin et al., 2003)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interface to Computational", |
|
"sec_num": "7.4" |
|
}, |
|
{ |
|
"text": "The soap implementation supports 2 basic functions:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interface to Computational", |
|
"sec_num": "7.4" |
|
}, |
|
{ |
|
"text": "submit sentence for parsing and get next sentence for review.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interface to Computational", |
|
"sec_num": "7.4" |
|
}, |
|
{ |
|
"text": "\u2022 submit sentence for parsing is used for transferring the sentences from the reviewed sentences queue to the active learning framework; and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interface to Computational", |
|
"sec_num": "7.4" |
|
}, |
|
{ |
|
"text": "\u2022 get next sentence for review is used for transferring the parsed sentences from the active learning framework to the Workflow Management module", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interface to Computational", |
|
"sec_num": "7.4" |
|
}, |
|
{ |
|
"text": "As mentioned earlier, the Computational Management Module consists of two further submodules, one for active learning and the other for computational grid interaction management.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Computational Management Module", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Our implementation of active learning involves a variety of differently parameterised instances of the ccg parser, with the view that an evaluation of each model will identify the best parse and constraints for a given sentence. This module is instantiated based on some threshold -either time based (e.g. once every 24 hours) or queue based (e.g. when there are 100 modified parses). This asynchronous server side component allows discontinuity between the user-centric review process and the computational impact of large scale re-parsing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Active Learning Sub-Module", |
|
"sec_num": "8.1" |
|
}, |
|
{ |
|
"text": "Our active learner uses committee-based sampling (Dagan and Engelson, 1995) using differing supertagging and parsing statistical models as committee members. Where there are many annotation options, the most popular alternatives from the committee will be passed to annotators to help select the correct annotation more efficiently. This will minimise the cognitive load of selecting between too many alternatives.", |
|
"cite_spans": [ |
|
{ |
|
"start": 49, |
|
"end": 75, |
|
"text": "(Dagan and Engelson, 1995)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Active Learning Sub-Module", |
|
"sec_num": "8.1" |
|
}, |
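Committee-based selection can be sketched as follows: each committee member (a differently parameterised supertagger/parser model) proposes an analysis for every candidate sentence, and the sentences with the highest disagreement, measured here by vote entropy, are queued first. This is a schematic under those assumptions, not the system's actual selection code.

```python
# Schematic of committee-based sample selection (vote entropy); the committee
# members are placeholders for differently parameterised supertagger/parser models.
import math
from collections import Counter

def vote_entropy(votes):
    """Entropy of the committee's votes; higher means more disagreement."""
    counts = Counter(votes)
    total = len(votes)
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

def select_for_annotation(sentences, committee, batch_size=100):
    """Rank unannotated sentences by committee disagreement.

    committee: list of callables, each mapping a sentence to some comparable
    summary of its preferred analysis (e.g. a supertag sequence).
    """
    scored = []
    for sentence in sentences:
        votes = [tuple(member(sentence)) for member in committee]
        scored.append((vote_entropy(votes), sentence))
    scored.sort(key=lambda pair: pair[0], reverse=True)  # most informative first
    return [sentence for _, sentence in scored[:batch_size]]
```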
|
{ |
|
"text": "The purpose of the Grid component is to manage all aspects of interaction with the distributed computational environment in which the parser itself is running. The Grid submodule handles low level execution including the queuing, dispatch and execution of analysis tasks, and fetching the results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Grid Sub-Module", |
|
"sec_num": "8.2" |
|
}, |
|
{ |
|
"text": "The experimental environment is setup with compute infrastructure in Sydney and in Melbourne. At the Sydney node, the system environment is a cluster of 9 dual-cpu Linux machines running MPI middleware. At the Melbourne node the system environment is a cluster of machines running Linux, managed by the NorduGrid Advanced Resource Connector (ARC). On each node, the ccg parser is installed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Grid Sub-Module", |
|
"sec_num": "8.2" |
|
}, |
|
{ |
|
"text": "The ccg toolkit is installed on the respective clusters and simply instantiated by the active learning framework as threshold boundaries are reached.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Grid Sub-Module", |
|
"sec_num": "8.2" |
|
}, |
|
{ |
|
"text": "The Grid sub module selects the relevant compute node for execution of the current parse task. (In the simplest case, perhaps a round robin approach to selecting the compute facility for subsequent re-parsing runs would appear to be sufficient, although for more intensive human annotation sessions, batch mode parser execution with probe-based load measurement is probably desirable for a scalable and robust implementation). A job description is then created specific to the node requirements. The job is then passed to the head node of the cluster.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Grid Sub-Module", |
|
"sec_num": "8.2" |
|
}, |
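In its simplest round-robin form, the node-selection step described above might look like the sketch below; the job-description fields and the submit() hook are invented for illustration, and real submission would go through the MPI or NorduGrid ARC machinery.

```python
# Illustrative round-robin node selection and job-description construction;
# field names are invented, and submit() stands in for real MPI/ARC submission.
import itertools

def make_dispatcher(nodes):
    cycle = itertools.cycle(nodes)   # simple round-robin over compute nodes

    def dispatch(sentences, model, submit):
        node = next(cycle)
        job = {
            "node": node["head"],             # head node that receives the job
            "executable": "ccg-parse",        # parser entry point on that cluster
            "model": model,                   # current statistical model to load
            "input": sentences,               # batch of sentences to (re)parse
            "middleware": node["middleware"]  # e.g. "MPI" or "NorduGrid ARC"
        }
        return submit(job)                    # hand the job to the head node

    return dispatch
```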
|
{ |
|
"text": "The previous sections describe an architecture for distributed, computationally intensive, mixed initiative linguistic analysis. We believe this contribution is notable for a number of reasons including:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "\u2022 a completely modular systems architecture, in contrast with tightly bound end-to-end systems which typically dominate this application space;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "\u2022 coordinated yet distinctly decoupled computational and human effort, allowing both parties to contribute to the overall effort with maximum efficiency;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "\u2022 re-usable, open sourced components which are sufficiently flexible to allow other interested parties to build from an established base, rather than the ground up", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "\u2022 an instantiation of service oriented nlp via open standards", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "We are motivated to modularise the overall system as much as possible to allow maximum flexibility for future extensions. In particular, our selection of the ccg parser is relatively arbitrary; any parser should be able to be swapped in for the ccg parser (e.g. an hpsg parser) with the only overhead being support for parallelisation and an api which can be functionally mapped to our soap based interface. Correspondingly, we envisage that the gui component should be generalised sufficiently to allow for the rendering of a variety of different parse tree representations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "9" |
|
}, |
|
{ |
|
"text": "At the time of writing the status of the components required is as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Status", |
|
"sec_num": "10" |
|
}, |
|
{ |
|
"text": "\u2022 Visualization and Analysis module A prototype gui has been implemented which can render ccg derivations in several formats. Lexical categories can now be modified in the gui and the parse regenerated directly with the new constraints. This does not currently use soap for getting next parse.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Status", |
|
"sec_num": "10" |
|
}, |
|
{ |
|
"text": "\u2022 Workflow Management module The 7 soap methods (5 for Visualization and Analysis module interface, and 2 for Computational Management module interface) are implemented as a cgi application in Python. Basic user and task management implemented.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Status", |
|
"sec_num": "10" |
|
}, |
|
{ |
|
"text": "The parameterisation and brokering framework for grid execution is deployed in production.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Computational Management module", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Production grids are operational (building over existing infrastructure) at both Melbourne and Sydney sites. The ccg parser on these systems; in the Melbourne case, the active learning framework can be instatiated by a web services / SOAP based interface to NorduGrid's native job brokering system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Computational Management module", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We have proposed an architecture for performing distributed annotation of ccg derivations. This architecture attempts to solve three key problems in the efficient preparation of large scale NLP resources: 1) selecting sentence to annotate which creates the most useful corpus for statistical parsers. 2) maximising the annotator efficiency and minimising error; 3) allowing distributed annotators to share expertise.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "11" |
|
}, |
|
{ |
|
"text": "We have attempted to address these problems using a combination of machine learning techniques and grid computing infrastructure. In particular, Active Learning will identify the best sentences to annotate; interactive correction will make the most of our annotators time; and our workflow manager will allow (even remote) annotators to share their expertise more effectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "11" |
|
}, |
|
{ |
|
"text": "While our implementation is relatively immature at this point, we believe the architecture proposed in this paper, along with the specific components, will be able to be reused in multiple contexts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "11" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank the anonymous reviewers for their helpful feedback, and to David Vadas and Toby Hawker for testing the ccg gui. This work has been supported by the Australian Research Council under Discovery Project DP0453131.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Active learning and the total cost of annotation", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Baldridge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miles", |
|
"middle": [], |
|
"last": "Osborne", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the EMNLP Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Baldridge and Miles Osborne. 2004. Active learning and the total cost of annotation. In Pro- ceedings of the EMNLP Conference, pages 9-16, Barcelona, Spain.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Supertagging: An approach to almost parsing", |
|
"authors": [ |
|
{ |
|
"first": "Srinivas", |
|
"middle": [], |
|
"last": "Bangalore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aravind", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Computational Linguistics", |
|
"volume": "25", |
|
"issue": "2", |
|
"pages": "237--265", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Srinivas Bangalore and Aravind Joshi. 1999. Su- pertagging: An approach to almost parsing. Computational Linguistics, 25(2):237-265.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Optimising selective sampling for bootstrapping named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Marcus", |
|
"middle": [], |
|
"last": "Becker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Hachey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beatrice", |
|
"middle": [ |
|
"Alex" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Grover", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proc. of the ICML-2005 Workshop on Learning with Multiple Views", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcus Becker, Ben Hachey, Beatrice Alex, and Claire Grover. 2005. Optimising selective sam- pling for bootstrapping named entity recognition. In Proc. of the ICML-2005 Workshop on Learning with Multiple Views.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "The treebanker: a tool for supervised training of parsed corpora", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Carter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proc. of the Workshop on Computational Environments for Grammar Development and Language Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Carter. 1997. The treebanker: a tool for supervised training of parsed corpora. In Proc. of the Workshop on Computational Environments for Grammar Development and Language Engi- neering, Madrid, Spain.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "The importance of supertagging for wide-coverage CCG parsing", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Curran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. of the 20th COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "282--288", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Clark and James R. Curran. 2004a. The importance of supertagging for wide-coverage CCG parsing. In Proc. of the 20th COLING, pages 282-288, Geneva, Switzerland.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Parsing the WSJ using CCG and log-linear models", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Curran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 42nd Annual Meeting of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "103--110", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Clark and James R. Curran. 2004b. Pars- ing the WSJ using CCG and log-linear models. In Proceedings of the 42nd Annual Meeting of the ACL, pages 103-110.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Object-extraction and questionparsing using CCG", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Steedman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Curran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. of the EMNLP Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "111--118", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Clark, Mark Steedman, and James R. Curran. 2004. Object-extraction and question- parsing using CCG. In Proc. of the EMNLP Con- ference, pages 111-118, Barcelona, Spain.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Active learning with statistical models", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Cohn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zoubin", |
|
"middle": [], |
|
"last": "Ghahramani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"I" |
|
], |
|
"last": "Jordan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "705--712", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David A. Cohn, Zoubin Ghahramani, and Michael I. Jordan. 1995. Active learning with statisti- cal models. In G. Tesauro, D. Touretzky, and T. Leen, editors, Advances in Neural Informa- tion Processing Systems, volume 7, pages 705- 712. MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Blueprint for a high performance nlp infrastructure", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Curran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proc. of the Workshop on Software Engineering and Architecture of Language Technology Systems (SEALTS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Curran. 2003. Blueprint for a high perfor- mance nlp infrastructure. In Proc. of the Work- shop on Software Engineering and Architecture of Language Technology Systems (SEALTS), Ed- monton, Canada.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Committeebased sampling for training probabilistic classifiers", |
|
"authors": [ |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sean", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Engelson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Proc. of the ICML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "150--157", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ido Dagan and Sean P. Engelson. 1995. Committee- based sampling for training probabilistic classi- fiers. In Proc. of the ICML, pages 150-157.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Mixed-initiative development of language processing systems", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Day", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Aberdeen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lynette", |
|
"middle": [], |
|
"last": "Hirschman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robyn", |
|
"middle": [], |
|
"last": "Kozierok", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patricia", |
|
"middle": [], |
|
"last": "Robinson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Vilain", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proc. of the 5th conference on Applied NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "348--355", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Day, John Aberdeen, Lynette Hirschman, Robyn Kozierok, Patricia Robinson, and Marc Vi- lain. 1997. Mixed-initiative development of lan- guage processing systems. In Proc. of the 5th con- ference on Applied NLP, pages 348-355.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Corpus variation and parser performance", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Gildea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the EMNLP Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "167--202", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Gildea. 2001. Corpus variation and parser performance. In Proceedings of the EMNLP Con- ference, pages 167-202, Pittsburgh, PA.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "SOAP version 1.2 part 1: Messaging framework", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Gudgin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Hadley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Mendelsohn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean-Jacques", |
|
"middle": [], |
|
"last": "Moreau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Henrik", |
|
"middle": [ |
|
"Frystyk" |
|
], |
|
"last": "Nielsen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Gudgin, Marc Hadley, Noah Mendel- sohn, Jean-Jacques Moreau, and Henrik Frystyk Nielsen. 2003. SOAP version 1.2 part 1: Messag- ing framework. http://www.w3.org/TR/2003/ REC-soap12-part1-20030624/.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Data and Models for Statistical Parsing with Combinatory Categorial Grammar", |
|
"authors": [ |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Hockenmaier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Steedman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 3rd LREC Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1974--1981", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julia Hockenmaier and Mark Steedman. 2002. Ac- quiring compact lexicalized grammars from a cleaner treebank. In Proceedings of the 3rd LREC Conference, pages 1974-1981, Las Palmas, Spain. Julia Hockenmaier. 2003. Data and Models for Statistical Parsing with Combinatory Categorial Grammar. Ph.D. thesis, University of Edinburgh.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Gridenabling natural language engineering by stealth", |
|
"authors": [ |
|
{ |
|
"first": "Baden", |
|
"middle": [], |
|
"last": "Hughes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bird", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proc. of the Workshop on Software Engineering and Architecture of Language Technology Systems (SEALTS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Baden Hughes and Steven Bird. 2003. Grid- enabling natural language engineering by stealth. In Proc. of the Workshop on Software Engineering and Architecture of Language Technology Systems (SEALTS), Edmonton, Canada.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Experiments with data intensive nlp on a computational grid", |
|
"authors": [ |
|
{ |
|
"first": "Baden", |
|
"middle": [], |
|
"last": "Hughes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bird", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ewan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haejoong", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. of the 2004 Hong Kong International Workshop on Language Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Baden Hughes, Steven Bird, Ewan Klein, and Hae- joong Lee. 2004a. Experiments with data inten- sive nlp on a computational grid. In Proc. of the 2004 Hong Kong International Workshop on Lan- guage Technology.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Grid-based indexing of a newswire corpus", |
|
"authors": [ |
|
{ |
|
"first": "Baden", |
|
"middle": [], |
|
"last": "Hughes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Srikumar", |
|
"middle": [], |
|
"last": "Venugopal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajkumar", |
|
"middle": [], |
|
"last": "Buyya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. of the 5th IEEE Workshop on Grid Computing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Baden Hughes, Srikumar Venugopal, and Rajkumar Buyya. 2004b. Grid-based indexing of a newswire corpus. In Proc. of the 5th IEEE Workshop on Grid Computing.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Building a large annotated corpus of English: the Penn Treebank", |
|
"authors": [ |
|
{ |
|
"first": "Mitchell", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Marcus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beatrice", |
|
"middle": [], |
|
"last": "Santorini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mary", |
|
"middle": [ |
|
"Ann" |
|
], |
|
"last": "Marcinkiewicz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Computational Linguistics", |
|
"volume": "19", |
|
"issue": "2", |
|
"pages": "313--330", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mitchell P. Marcus, Beatrice Santorini, and Mary Ann Marcinkiewicz. 1994. Building a large annotated corpus of English: the Penn Treebank. Computational Linguistics, 19(2):313-330.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "The LinGO Redwoods Treebank: Motivation and preliminary applications", |
|
"authors": [ |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Oepen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stuart", |
|
"middle": [], |
|
"last": "Shieber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Flickinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thorsten", |
|
"middle": [], |
|
"last": "Brants", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 19th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1253--1257", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephan Oepen, Kristina Toutanova, Stuart Shieber, Christopher Manning, Dan Flickinger, and Thorsten Brants. 2002. The LinGO Redwoods Treebank: Motivation and preliminary applica- tions. In Proceedings of the 19th International Conference on Computational Linguistics, pages 1253-1257, Taipei, Taiwan.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "An annotation scheme for free word order languages", |
|
"authors": [ |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Skut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brigitte", |
|
"middle": [], |
|
"last": "Krenn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thorsten", |
|
"middle": [], |
|
"last": "Brants", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hans", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proceedings of the 5th ACL Conference on Applied NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "88--95", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wojciech Skut, Brigitte Krenn, Thorsten Brants, and Hans Uszkoreit. 1997. An annotation scheme for free word order languages. In Proceedings of the 5th ACL Conference on Applied NLP, pages 88-95, Washington, DC.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Cross-Platform GUI Programming with wxWidgets", |
|
"authors": [ |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Smart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Hock", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Csomor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julian Smart, Kevin Hock, and Stefan Csomor. 2005. Cross-Platform GUI Programming with wxWidgets. Prentice Hall.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "The Syntactic Process", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Steedman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Steedman. 2000. The Syntactic Process. The MIT Press, Cambridge, MA.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Active learning for statistical natural language parsing", |
|
"authors": [ |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoqing", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proc. of the 40th Annual Meeting of the ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "120--127", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Min Tang, Xiaoqing Luo, and Salim Roukos. 2002. Active learning for statistical natural language parsing. In Proc. of the 40th Annual Meeting of the ACL, pages 120-127, Philadelphia, PA USA.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Example sentence with ccg lexical categories" |
|
} |
|
} |
|
} |
|
} |