|
{ |
|
"paper_id": "E17-1021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T10:51:55.651544Z" |
|
}, |
|
"title": "Cross-Lingual Dependency Parsing with Late Decoding for Truly Low-Resource Languages", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"Sejr" |
|
], |
|
"last": "Schlichtkrull", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Amsterdam", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Copenhagen", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In cross-lingual dependency annotation projection, information is often lost during transfer because of early decoding. We present an end-to-end graph-based neural network dependency parser that can be trained to reproduce matrices of edge scores, which can be directly projected across word alignments. We show that our approach to cross-lingual dependency parsing is not only simpler, but also achieves an absolute improvement of 2.25% averaged across 10 languages compared to the previous state of the art.", |
|
"pdf_parse": { |
|
"paper_id": "E17-1021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In cross-lingual dependency annotation projection, information is often lost during transfer because of early decoding. We present an end-to-end graph-based neural network dependency parser that can be trained to reproduce matrices of edge scores, which can be directly projected across word alignments. We show that our approach to cross-lingual dependency parsing is not only simpler, but also achieves an absolute improvement of 2.25% averaged across 10 languages compared to the previous state of the art.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Dependency parsing is an integral part of many natural language processing systems. However, most research into dependency parsing has focused on learning from treebanks, i.e. collections of manually annotated, well-formed syntactic trees. In this paper, we develop and evaluate a graph-based parser which does not require the training data to be well-formed trees. We show that such a parser has an important application in cross-lingual learning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Annotation projection is a method for developing parsers for low-resource languages, relying on aligned translations from resource-rich source languages into the target language, rather than linguistic resources such as treebanks or dictionaries. The Bible has been translated completely into 542 languages, and partially translated into a further 2344 languages. As such, the assumption that we have access to parallel Bible data is much less constraining than the assumption of access to linguistic resources. Furthermore, for truly lowresource languages, relying upon the Bible scales better than relying on less biased data such as the EuroParl corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In Agi\u0107 et al. (2016) , a projection scheme is proposed wherein labels are collected from many sources, projected into a target language, and then averaged. Crucially, the paper demonstrates how projecting and averaging edge scores from a graph-based parser before decoding improves performance. Even so, decoding is still a requirement between projecting labels and retraining from the projected data, since their parser (TurboParser) requires well-formed input trees. This introduces a potential source of noise and loss of information that may be important for finding the best target sentence parse.", |
|
"cite_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 21, |
|
"text": "Agi\u0107 et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our approach circumvents the need for decoding prior to training, thereby surpassing a stateof-the-art dependency parser trained on decoded multi-source annotation projections as done by Agi\u0107 et al. We first evaluate the model across several languages, demonstrating results comparable to the state of the art on the Universal Dependencies (McDonald et al., 2013) dataset. Then, we evaluate the same model by inducing labels from cross-lingual multi-source annotation projection, comparing the performance of a model with early decoding to a model with late decoding.", |
|
"cite_spans": [ |
|
{ |
|
"start": 187, |
|
"end": 198, |
|
"text": "Agi\u0107 et al.", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 340, |
|
"end": 363, |
|
"text": "(McDonald et al., 2013)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Contributions We present a novel end-to-end neural graph-based dependency parser and apply it in a cross-lingual setting where the task is to induce models for truly low-resource languages, assuming only parallel Bible text. Our parser is more flexible than similar parsers, and accepts any weighted or non-weighted graph over a token sequence as input. In our setting, the input is a dense weighted graph, and we show that our parser is superior to previous best approaches to cross-lingual parsing. The code is made available on GitHub. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The goal of this section is to construct a first-order graph-based dependency parser capable of learning directly from potentially incomplete matrices of edge scores produced by another first-order graph-based parser. Our approach is to treat the encoding stage of the parser as a tensor transformation problem, wherein tensors of edge features are mapped to matrices of edge scores. This allows our model to approximate sets of scoring matrices generated by another parser directly through non-linear regression. The core component of the model is a layered sequence of recurrent neural network transformations applied to the axes of an input tensor.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "More formally, any digraph G = (V, E) can be expressed as a binary |V | \u00d7 |V |-matrix M , where M ij = 1 if and only if (j, i) \u2208 E -that is, if i has an ingoing edge from j. If G is a tree rooted at v 0 , v 0 has no ingoing edges. Hence, it suffices to use a (|V |\u22121)\u00d7|V |-matrix. In dependency parsing, every sentence is expressed as a matrix S \u2208 R w\u00d7f , where w is the number of words in the sentence and f is the width of a feature vector corresponding to each word. The goal is to learn a function P :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "R w\u00d7f \u2192 Z w\u00d7(w+1) 2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": ", such that P (S) corresponds to the matrix representation of the correct parse tree for that sentence -see Figure 1 for an example.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 116, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "John walks his dog Figure 1 : An example dependency tree and the corresponding parse matrix.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 27, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\uf8ee \uf8ef \uf8ef \uf8f0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 1 0 0 1 0 0 \uf8f9 \uf8fa \uf8fa \uf8fb", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
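
{

"text": "As a minimal illustration (a NumPy sketch, not code from the paper; the function name parse_matrix is hypothetical), the parse matrix above can be produced from a list of head indices, with 0 denoting the root:\n\nimport numpy as np\n\ndef parse_matrix(heads):\n    # heads[i] is the head of word i+1; 0 means the word attaches to the root.\n    w = len(heads)\n    M = np.zeros((w, w + 1), dtype=int)\n    for dependent, head in enumerate(heads):\n        M[dependent, head] = 1  # column 0 is the root, columns 1..w the words\n    return M\n\n# John <- walks, walks <- root, his <- dog, dog <- walks\nprint(parse_matrix([2, 0, 4, 2]))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "root",

"sec_num": null

},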
|
{ |
|
"text": "In the arc-factored (first-order), graph-based model, P is a composite function P = D \u2022 E where the encoder E : R w\u00d7f \u2192 R w\u00d7(w+1) is a real-valued scoring function and the decoder D :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "R w\u00d7(w+1) \u2192 Z w\u00d7(w+1) 2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "is a minimum spanning tree algorithm (McDonald et al., 2005) . Commonly, the encoder includes only local information -that is, E ij is only dependent on S i and S j , where S i and S j are feature vectors corresponding to dependent and head. Our contribution is the introduction of an LSTM-based global encoder where the entirety of S is represented in the calculation of E ij .", |
|
"cite_spans": [ |
|
{ |
|
"start": 37, |
|
"end": 60, |
|
"text": "(McDonald et al., 2005)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We begin by extending S to a (w +1)\u00d7(f +1)matrix S * with an additional row corresponding to the root node and a single binary feature denoting whether a node is the root. We now compute a 3-tensor F = S S * of dimension w \u00d7 (w + 1) \u00d7 (2f + 1) consisting of concatenations of all combinations of rows in S and S * . This tensor effectively contains a featurization of every edge (u, v) in the complete digraph over the sentence, consisting of the features of the parent word u and child word v. These edge-wise feature vectors are organized in the tensor exactly as the dependency arcs in a parse matrix such as the one shown in the example in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 644, |
|
"end": 652, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
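
{

"text": "As a rough sketch of this construction (NumPy, not the authors' code; edge_feature_tensor is a hypothetical name, and the ordering of dependent and head features within each concatenation is an assumption), the tensor F can be built as follows:\n\nimport numpy as np\n\ndef edge_feature_tensor(S):\n    # S has shape (w, f): one feature vector per word.\n    w, f = S.shape\n    # Extend with a root row and a binary root-indicator feature.\n    S_star = np.zeros((w + 1, f + 1))\n    S_star[1:, :f] = S\n    S_star[0, f] = 1.0  # the indicator is 1 only for the root row\n    # F[i, j] concatenates the features of word i and of candidate head j.\n    F = np.zeros((w, w + 1, 2 * f + 1))\n    for i in range(w):\n        for j in range(w + 1):\n            F[i, j] = np.concatenate([S[i], S_star[j]])\n    return F  # shape (w, w+1, 2f+1)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "root",

"sec_num": null

},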
|
{ |
|
"text": "The edges represented by elements F ij can as such easily be interpreted in the context of related edges represented by the row i and the column j in which that edge occurs. The classical arc-factored parsing algorithm of McDonald et al. (2005) corresponds to applying a function O : R 2f +1 \u2192 R pointwise to S S * , then decoding the resulting w \u00d7 (w + 1)-matrix. Our model diverges by applying an LSTM-based transformation Q : R w\u00d7(w+1)\u00d7(2f +1) \u2192 R w\u00d7(w+1)\u00d7d to S S * before applying an analogous transformation O :", |
|
"cite_spans": [ |
|
{ |
|
"start": 222, |
|
"end": 244, |
|
"text": "McDonald et al. (2005)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "R d \u2192 R.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The Long Short-Term Memory (LSTM) unit is a function LST M (x, h t\u22121 , c t\u22121 ) = (h t , c t ) defined through the use of several intermediary steps, following Hochreiter et al. (2001) . A concatenated input vector I = x \u2295 h prev is constructed, where \u2295 represents vector concatenation. Then, functions corresponding to input, forget, and output gates are defined following the form g input = \u03c3(W input I +b input ). Finally, the internal cell state c t and the output vector h t at time t are defined using the Hadamard (pointwise) product \u2022:", |
|
"cite_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 183, |
|
"text": "Hochreiter et al. (2001)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "c t = g f orget \u2022 c prev + g input \u2022 tanh(W cell I + b cell ) h t = g output \u2022 tanh(c t )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
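
{

"text": "For concreteness, a minimal NumPy sketch of a single LSTM step with the gate formulation above (an illustration, not the authors' implementation; the dictionary-based parameter layout is a simplifying assumption):\n\nimport numpy as np\n\ndef sigmoid(z):\n    return 1.0 / (1.0 + np.exp(-z))\n\ndef lstm_step(x, h_prev, c_prev, W, b):\n    # W and b hold one weight matrix and bias vector per gate and for the cell.\n    I = np.concatenate([x, h_prev])  # concatenated input vector (x followed by h_prev)\n    g_input = sigmoid(W['input'] @ I + b['input'])\n    g_forget = sigmoid(W['forget'] @ I + b['forget'])\n    g_output = sigmoid(W['output'] @ I + b['output'])\n    c_t = g_forget * c_prev + g_input * np.tanh(W['cell'] @ I + b['cell'])\n    h_t = g_output * np.tanh(c_t)\n    return h_t, c_t",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "root",

"sec_num": null

},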
|
{ |
|
"text": "We define a function Matrix-LSTM inductively, that applies an LSTM to the rows of a matrix X. Formally, Matrix-LSTM is a function M : Figure 1 . The wordpair tensor S S * is represented with blue units (horizontal lines), a hidden Tensor-LSTM layer H with green units (vertical lines), and the output layer with white units. The recurrent connections in the hidden layer along H and H T (2,1,3) are illustrated respectively with dotted and fully drawn lines.", |
|
"cite_spans": [ |
|
{ |
|
"start": 385, |
|
"end": 394, |
|
"text": "T (2,1,3)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 134, |
|
"end": 142, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "R a\u00d7b \u2192 R a\u00d7c such that (h 1 , c 1 ) = LST M (X 1 , 0, 0), \u22001 < i \u2264 n (h i , c i ) = LST M (X i , h i\u22121 , c i\u22121 ), and M(X) i = h i .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "An effective extension is the bidirectional LSTM, wherein the LSTM-function is applied to the sequence both in the forward and in the backward direction, and the results are concatenated. In the matrix formulation, reversing a sequence corresponds to inverting the order of the rows. This is most naturally accomplished through leftmultiplication with an exchange matrix J m \u2208 R m\u00d7m such that:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "J m = \uf8ee \uf8ef \uf8f0 0 \u2022 \u2022 \u2022 1 . . . . . . . . . 1 \u2022 \u2022 \u2022 0 \uf8f9 \uf8fa \uf8fb Bidirectional Matrix-LSTM is therefore defined as a function M 2d : R a\u00d7b \u2192 R a\u00d72c such that: M 2d (S) = M(S) \u2295 2 J a M(J a S)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
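
{

"text": "A minimal sketch of the Matrix-LSTM and its bidirectional variant (NumPy; lstm_step is the single-step function sketched above, and all names here are hypothetical rather than taken from the released code):\n\nimport numpy as np\n\ndef matrix_lstm(X, lstm_step, W, b, hidden):\n    # Run an LSTM over the rows of X and return the matrix of hidden states.\n    h = np.zeros(hidden)\n    c = np.zeros(hidden)\n    rows = []\n    for x in X:\n        h, c = lstm_step(x, h, c, W, b)\n        rows.append(h)\n    return np.stack(rows)  # shape (a, hidden)\n\ndef bidirectional_matrix_lstm(X, lstm_step, Wf, bf, Wb, bb, hidden):\n    J = np.flipud(np.eye(len(X)))  # exchange matrix J_a\n    forward = matrix_lstm(X, lstm_step, Wf, bf, hidden)\n    backward = J @ matrix_lstm(J @ X, lstm_step, Wb, bb, hidden)\n    return np.concatenate([forward, backward], axis=1)  # shape (a, 2*hidden)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "root",

"sec_num": null

},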
|
{ |
|
"text": "Here, \u2295 2 refers to concatenation along the second axis of the matrix. Keeping in mind the goal of constructing a tensor transformation Q capable of propagating information in an LSTM-like manner between any two elements of the input tensor, we are interested in constructing an equivalent of the Matrix-LSTMmodel operating on 3-tensors rather than matrices. This construct, when applied to the edge tensor F = S S * , can then provide a means of interpreting edges in the context of related edges.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "A very simple variant of such an LSTMfunction operating on 3-tensors can be constructed by applying a bidirectional Matrix-LSTM to every matrix along the first axis of the tensor. This forms the center of our approach. Formally, bidirectional Tensor-LSTM is a function T 2d : R a\u00d7b\u00d7c \u2192 R a\u00d7b\u00d72h such that:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "T 2d (T ) i = M 2d (T i )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This definition allows information to flow within the matrices of the first axis of the tensor, but not between them -corresponding in Figure 2 to horizontal connection along the rows, but no vertical connections along the columns. To fully cover the tensor structure, we must extend this model to include connections along columns. This is accomplished through tensor transposition. Formally, tensor transposition is an operator T T \u03c3 where \u03c3 is a permutation on the set {1, ..., rank(T )}. The last axis of the tensor contains the feature representations, which we are not interested in scrambling. For the Matrix-LSTM, this leaves only one option -M T (1,2) . When the LSTM is operating on a 3-tensor, we have two options -T T (2,1,3) and T T (1,2,3) . This leads to the following definition of four-directional Tensor-LSTM as a function T 4d : R a\u00d7b\u00d7c \u2192 R a\u00d7b\u00d74h analogous to bidirectional Sequence-LSTM:", |
|
"cite_spans": [ |
|
{ |
|
"start": 654, |
|
"end": 661, |
|
"text": "T (1,2)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 745, |
|
"end": 754, |
|
"text": "T (1,2,3)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 135, |
|
"end": 144, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "T 4d (T ) = T 2d (T ) \u2295 3 T 2d (T T (2,1,3) ) T (2,1,3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
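
{

"text": "A minimal NumPy sketch of the two tensor transformations defined above, assuming m2d denotes a bidirectional Matrix-LSTM as sketched earlier (function names are hypothetical):\n\nimport numpy as np\n\ndef tensor_lstm_2d(T, m2d):\n    # Apply the bidirectional Matrix-LSTM to every matrix along the first axis.\n    return np.stack([m2d(T_i) for T_i in T])  # shape (a, b, 2h)\n\ndef tensor_lstm_4d(T, m2d_rows, m2d_cols):\n    straight = tensor_lstm_2d(T, m2d_rows)  # recurrence along the rows\n    swapped = tensor_lstm_2d(T.transpose(1, 0, 2), m2d_cols)  # along the columns\n    return np.concatenate([straight, swapped.transpose(1, 0, 2)], axis=2)  # (a, b, 4h)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "root",

"sec_num": null

},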
|
{ |
|
"text": "Calculating the LSTM-function on T T (1,2,3) and T T (2,1,3) can be thought of as constructing the recurrent links either \"side-wards\" or \"downwards\" in the tensor -or, equivalently, constructing recurrent links either between the outgoing or between the in-going edges of every vertex in the dependency graph. In Figure 2 , we illustrate the two directions respectively with full or dotted edges in the hidden layer.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 314, |
|
"end": 322, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The output of Tensor-LSTM is itself a tensor. In our experiments, we use a multi-layered variation implemented by stacking layers of models: T 4d,stack (T ) = T 4d (T 4d (...T 4d (T )...)). We do not share parameters between stacked layers. Training the model is done by minimizing the value E(G, O(Q(S S * ))) of some loss function E for each sentence S with gold tensor G. We experiment with two loss functions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In our monolingual set-up, we exploit the fact that parse matrices by virtue of depicting trees are right stochastic matrices. Following this observation, we constrain each row of O(Q(S S * )) under a softmax-function and use as loss the rowwise cross entropy. In our cross-lingual set-up, we use mean squared error. In both cases, predictiontime decoding is done with Chu-Liu-Edmonds algorithm (Edmonds, 1968) following McDonald et al. (2005) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 395, |
|
"end": 410, |
|
"text": "(Edmonds, 1968)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 421, |
|
"end": 443, |
|
"text": "McDonald et al. (2005)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
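
{

"text": "As a minimal sketch of the two objectives (NumPy; P is the predicted score matrix O(Q(S S*)) and G the gold or projected target matrix, both of shape w \u00d7 (w+1); this is an illustration, not the released training code):\n\nimport numpy as np\n\ndef row_softmax_cross_entropy(P, G):\n    # Monolingual setting: each gold row is one-hot over candidate heads.\n    P = P - P.max(axis=1, keepdims=True)  # for numerical stability\n    log_probs = P - np.log(np.exp(P).sum(axis=1, keepdims=True))\n    return -(G * log_probs).sum(axis=1).mean()\n\ndef mean_squared_loss(P, G):\n    # Cross-lingual setting: G holds projected edge scores, not a well-formed tree.\n    return ((P - G) ** 2).mean()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "root",

"sec_num": null

},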
|
{ |
|
"text": "3 Cross-lingual parsing Hwa et al. (2005) is a seminal paper for crosslingual dependency parsing, but they use very detailed heuristics to ensure that the projected syntactic structures are well-formed. Agi\u0107 et al. (2016) is the latest continuation of their work, presenting a new approach to cross-lingual projection, projecting edge scores rather than subtrees. Agi\u0107 et al. (2016) construct target-language treebanks by aggregating scores from multiple source languages, before decoding. Averaging before decoding is especially beneficial when the parallel data is of low quality, as the decoder introduces errors, when edge scores are missing. Despite averaging, there will still be scores missing from the input weight matrices, especially when the source and target languages are very distant. Below, we show that we can circumvent error-inducing early decoding by training directly on the projected edge scores.", |
|
"cite_spans": [ |
|
{ |
|
"start": 24, |
|
"end": 41, |
|
"text": "Hwa et al. (2005)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 203, |
|
"end": 221, |
|
"text": "Agi\u0107 et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 364, |
|
"end": 382, |
|
"text": "Agi\u0107 et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We assume source language datasets L 1 , ..., L n , parsed by monolingual arc-factored parsers. In our case, this data comes from the Bible. We assume access to a set of sentence alignment functions", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "A s : L s \u00d7 L t \u2192 R 0,1 where A s (S s , S t )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "is the confidence that S t is the translation of S s . Similarly, we have access to a set of word alignment functions W Ls,Ss,St : ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "S s \u00d7 S t \u2192 R 0,1 such that S s \u2208 L s , S t \u2208 L t ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "((u s , v s ), (u t , v t ))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "operating on a source language edge (u s , v s ) \u2208 S s and a target language edge (u t , u t ) \u2208 S t . Intuitively, every source language edge votes for every target language edge with a score proportional to the confidence of the edges aligning and the score given in the source language. For every target language edge", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(u t , v t ) \u2208 S t : vote Ss ((u s , v s ), (u t , v t )) = W Ls,Ss,St (u s , u t ) \u2022 W Ls,Ss,St (v s , v t ) \u2022 score Ls (u s , v s )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Following Agi\u0107 et al. (2016) , a sentence-wise voting function is then constructed as the highest contribution from a source-language edge:", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 28, |
|
"text": "Agi\u0107 et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "vote Ss (u t , v t ) = max us,vs\u2208Ss vote Ss ((u s , v s ), (u t , v t ))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The final contribution of each source language dataset L s to a target language edge (u t , v t ) is then calculated as the sum for all sentences S s \u2208 L s over vote Ss (u t , v t ) multiplied by the confidence that the source language sentence aligns with the target language sentence. For an edge (u t , v t ) in a target language sentence S t \u2208 L t :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "vote Ls (u t , v t ) = Ss\u2208Ls A s (S s , S t ) vote Ss (u t , v t )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Finally, we can compute a target language scoring function by summing over the votes for every source language:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "score(u t , v t ) = n i=1 vote L i (u t , v t ) Z St", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Here, Z St is a normalization constant ensuring that the target-language scores are proportional to those created by the source-language scoring functions. As such, Z St should consist of the sum over the weights for each sentence contributing to the scoring function. We can compute this as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Z St = n i=1 Ss\u2208L i A s (S s , S t )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The sentence alignment function is not a probability distribution; it may be the case that no sourcelanguage sentences contribute to a target language sentence, causing the sum of the weights and the sum of the votes to approach zero. In this case, we define score(u t , v t ) = 0. Before projection, the source language scores are all standardized to have 0 as the mean and 1 as the standard deviation. Hence, this corresponds to assuming neither positive nor negative evidence concerning the edge.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
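
{

"text": "A schematic NumPy sketch of the projection for a single target sentence, under the simplifying assumptions that word_align[s] is a word-alignment confidence matrix, sent_align[s] a sentence-alignment confidence, and src_scores[s] a standardized source score matrix for each aligned source sentence s (all names hypothetical; not the authors' implementation):\n\nimport numpy as np\n\ndef project_scores(src_scores, word_align, sent_align, n_tgt):\n    scores = np.zeros((n_tgt, n_tgt))\n    Z = sum(sent_align)  # normalization constant Z_{S_t}\n    if Z == 0.0:\n        return scores  # no aligned sources: assume no evidence for any edge\n    for A, W, S in zip(sent_align, word_align, src_scores):\n        n_src = S.shape[0]\n        for ut in range(n_tgt):\n            for vt in range(n_tgt):\n                # highest-scoring source edge voting for the target edge (ut, vt)\n                votes = [W[us, ut] * W[vs, vt] * S[us, vs]\n                         for us in range(n_src) for vs in range(n_src)]\n                scores[ut, vt] += A * max(votes)\n    return scores / Z",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "root",

"sec_num": null

},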
|
{ |
|
"text": "We experiment with two methods of learning from the projected data -decoding with Chu-Liu-Edmonds algorithm and then training as proposed in Agi\u0107 et al. (2016) , or directly learning to reproduce the matrices of edge scores. For alignment, we use the sentence-level hunalign algorithm introduced in Varga et al. (2005) and the token-level model presented in\u00d6stling (2015).", |
|
"cite_spans": [ |
|
{ |
|
"start": 141, |
|
"end": 159, |
|
"text": "Agi\u0107 et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 299, |
|
"end": 318, |
|
"text": "Varga et al. (2005)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "root", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We conduct two sets of experiments. First, we evaluate the Tensor-LSTM-parser in the monolingual setting. We compare Tensor-LSTM to the TurboParser (Martins et al., 2010) on several languages from the Universal Dependencies dataset. In the second experiment, we evaluate Tensor-LSTM in the cross-lingual setting. We include as baselines the delexicalized parser of McDonald et al. (2011) , and the approach of Agi\u0107 et al. (2016) using TurboParser. To demonstrate the effectiveness of circumventing the decoding step, we conduct the cross-lingual evaluation of Tensor-LSTM using cross entropy loss with early decoding, and using mean squared loss with late decoding.", |
|
"cite_spans": [ |
|
{ |
|
"start": 365, |
|
"end": 387, |
|
"text": "McDonald et al. (2011)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 410, |
|
"end": 428, |
|
"text": "Agi\u0107 et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our features consist of 500-dimensional word embeddings trained on translations of the Bible. The word embeddings were trained using skipgram with negative sampling on a word-by-sentence PMI matrix induced from the Edinburgh Bible Corpus, following (Levy et al., 2017) . Our embeddings are not trainable, but fixed representations throughout the learning process. Unknown tokens were represented by zero-vectors.", |
|
"cite_spans": [ |
|
{ |
|
"start": 249, |
|
"end": 268, |
|
"text": "(Levy et al., 2017)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model selection and training", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We combined the word embeddings with onehot-encodings of POS-tags, projected across word alignments following the method of Agi\u0107 et al. (2016) . To verify the value of the POS-features, we conducted preliminary experiments on English development data. When including POS- tags, we found small, non-significant improvements for monolingual parsing, but significant improvements for cross-lingual parsing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 142, |
|
"text": "Agi\u0107 et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model selection and training", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The weights were initialized using the normalized values suggested in Glorot and Bengio (2010) . Following Jozefowicz et al. 2015, we add 1 to the initial forget gate bias. We trained the network using RMSprop (Tieleman and Hinton, 2012) with hyperparameters \u03b1 = 0.1 and \u03b3 = 0.9, using minibatches of 64 sentences. Following Neelakantan et al. (2015), we added a noise factor n \u223c N (0, 1 (1+t) 0.55 ) to the gradient in each update. We applied dropouts after each LSTMlayer with a dropout probability p = 0.5, and between the input layer and the first LSTM-layer with a dropout probability of p = 0.2 (Bluche et al., 2015) . As proposed in Pascanu et al. (2012), we employed a gradient clipping factor of 15. In the monolingual setting, we used early stopping on the development set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 94, |
|
"text": "Glorot and Bengio (2010)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 601, |
|
"end": 622, |
|
"text": "(Bluche et al., 2015)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model selection and training", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We experimented with 10, 50, 100, and 200 hidden units per layer, and with up to 6 layers. Using greedy search on monolingual parsing and evaluating on the English development data, we determined the optimal network shape to contain 100 units per direction per hidden layer, and a total of 4 layers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model selection and training", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For the cross-lingual setting, we used two additional hyper-parameters. We used the development data from one of our target languages (German) to determine the optimal number of epochs before stopping. Furthermore, we trained only on a subset of the projected sentences, choosing the size of the subset using the development data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model selection and training", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We experimented with either 5000 or 10000 randomly sampled sentences. There are two motivating factors behind this subsampling. First, while the Bible in general consists of about 30000 sentences, for many low-resource languages we do not have access to annotation projections for the full Bible, because parts were never translated, and because of varying projection quality. Second, subsampling speeds up the training, which was necessary to make our experiments practical: At 10000 sentences and on a single GPU, each epoch takes approximately 2.5 hours. As such, training for a single language could be completed in less than a day. We plot the results in Figure 3 . We see that the best performance is achieved at 10000 sentences, and with respectively 6 and 5 epochs for cross entropy and mean squared loss.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 660, |
|
"end": 668, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model selection and training", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In the monolingual setting, we compare our parser to TurboParser (Martins et al., 2010) -a fast, capable graph-based parser used as a component in many larger systems. TurboParser is also the system of choice for the cross-lingual pipeline of Agi\u0107 et al. (2016) . It is therefore interesting to make a direct comparison between the two. The results can be seen in Table 1 Note that in order for a parser to be directly applicable to the annotation projection setup explored in the secondary experiment, it must be a first-order graph-based parser. In the monolingual setting, the best results reported so far (84.74, on average) for the above selection of treebanks were by the Parsito system (Straka et al., 2015) , a transition-based parser using a dynamic oracle.", |
|
"cite_spans": [ |
|
{ |
|
"start": 243, |
|
"end": 261, |
|
"text": "Agi\u0107 et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 693, |
|
"end": 714, |
|
"text": "(Straka et al., 2015)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 364, |
|
"end": 371, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "For the cross-lingual annotation projection experiments, we use the delexicalized system suggested by McDonald et al. (2011) as a baseline. We also compare against the annotation projection scheme using TurboParser suggested in Agi\u0107 et al. (2016) , representing the previous state of the art for truly low-resource cross-lingual dependency parsing. Note that while our results for the TurboParser-based system use the same training data, test data, and model as in Agi\u0107 et al., our results differ due to the use of the Bible corpus rather than a Watchtower publications corpus as parallel data. The authors made results available using the Edinburgh Bible Corpus for unlabeled data. The two tested conditions of Tensor-LSTM are the mean squared loss model without intermediary decoding, and the cross entropy model with intermediary decoding. The results of the crosslingual experiment can be seen in Table 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 124, |
|
"text": "McDonald et al. (2011)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 228, |
|
"end": 246, |
|
"text": "Agi\u0107 et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 901, |
|
"end": 908, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "As is evident from Table 2, the variation in performance across different languages is large for all systems. This is to be expected, as the quality of the projected label sets vary widely due to linguistic differences. On average, Tensor-LSTM with mean squared loss outperforms all other systems. In Section 1, we hypothesized that incomplete projected scorings would have a larger impact upon systems reliant on an intermediary decoding step. To investigate this claim, we plot in Figure 4 the performance difference with mean squared loss and cross entropy loss for each language versus the percentage of missing edge scores. Table 2 : Unlabeled attachment scores for the various systems. Tensor-LSTM is evaluated using cross entropy and mean squared loss. We include the results of two baselines -the delexicalized system of McDonald et al. (2011) and the Turbo-based projection scheme of Agi\u0107 et al. (2016) . English and German development data was used for hyperparameter tuning (marked *).", |
|
"cite_spans": [ |
|
{ |
|
"start": 829, |
|
"end": 851, |
|
"text": "McDonald et al. (2011)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 893, |
|
"end": 911, |
|
"text": "Agi\u0107 et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 483, |
|
"end": 491, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 629, |
|
"end": 636, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "For languages outside the Germanic and Latin families, our claim holds -the performance of the cross entropy loss system decreases faster with the percentage of missing labels than the performance of the mean squared loss system. To an extent, this confirms our hypothesis, as we for the average language observe an improvement by circumventing the decoding step. French and Spanish, however, do not follow the same trend, with cross entropy loss outperforming mean squared loss despite the high number of missing labels.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In Table 2 , performance on French and Spanish for both systems can be seen to be very high. It may be the case that Indo-European target languages are not as affected by missing labels as most of the source languages are themselves Indo-European. Another explanation could be that some feature of the cross entropy loss function makes it especially well suited for Latin languages -as seen in Table 1 , French and Spanish are also two of the languages for which Tensor-LSTM yields the highest performance improvement.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 394, |
|
"end": 401, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "To compare the effect of missing edge scores upon performance without influence from linguistic factors such as language similarity, we repeat the cross-lingual experiment on one language with respectively 10%, 20%, 30%, and 40% of the projected and averaged edge scores artificially set to 0, simulating missing data. We choose the English data for this experiment, as the English projected data has the lowest percentage of missing labels across any of the languages. In Figure 5 , we plot the performance for each of the two systems versus the percentage of deleted values. As can be clearly seen, performance drops faster with the percentage of deleted labels for the cross entropy model. This confirms our intuition that the initially lower performance using mean squared loss compared to cross entropy loss is mitigated by a greater robustness towards missing labels, gained by circumventing the decoding step in the training process. In Table 2 , this is reflected as dramatic performance increases using mean squared error for Finnish, Persian, Hindi, and Hebrew -the four languages furthest removed from the predominantly Indo-European source languages and therefore the four languages with the poorest projected label quality.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 473, |
|
"end": 481, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF5" |
|
}, |
|
{ |
|
"start": 944, |
|
"end": 951, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Several possible avenues for future work on this project are available. In this paper, we used an extremely simple feature function. More complex feature functions is one potential source of improvement. Another interesting direction for future work would be to include POS-tagging directly as a component of Tensor-LSTM prior to the construction of S S * in a multi-task learning framework. Similarly, incorporating semantic tasks on top of dependency parsing could lead to interesting results. Finally, extensions of the Tensor-LSTM function to deeper models, wider models, or more connected models as seen in e.g. Kalchbrenner et al. (2015) may yield further performance gains.", |
|
"cite_spans": [ |
|
{ |
|
"start": 617, |
|
"end": 643, |
|
"text": "Kalchbrenner et al. (2015)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Experiments with neural networks for dependency parsing have focused mostly on learning higherorder scoring functions and creating efficient feature representations, with the notable exception of Fonseca et al. (2015) . In their paper, a convolutional neural network is used to evaluate local edge scores based on global information. In Zhang and Zhao (2015) and Pei et al. (2015) , neural networks are used to simultaneously evaluate first-order and higher-order scores for graph-based parsing, demonstrating good results. Bidirectional LSTM-models have been successfully applied to feature generation (Kiperwasser and Goldberg, 2016) . Such LSTM-based features could in future work be employed and trained in conjunction with Tensor-LSTM, incorporating global information both in parsing and in featurization.", |
|
"cite_spans": [ |
|
{ |
|
"start": 196, |
|
"end": 217, |
|
"text": "Fonseca et al. (2015)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 358, |
|
"text": "Zhang and Zhao (2015)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 363, |
|
"end": 380, |
|
"text": "Pei et al. (2015)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 603, |
|
"end": 635, |
|
"text": "(Kiperwasser and Goldberg, 2016)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "An extension of LSTM to tensor-structured data has been explored in Graves et al. (2007) , and further improved upon in Kalchbrenner et al. (2015) in the form of GridLSTM. Our approach is similar, but simpler and computationally more efficient as no within-layer connections between the first and the second axes of the tensor are required.", |
|
"cite_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 88, |
|
"text": "Graves et al. (2007)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 120, |
|
"end": 146, |
|
"text": "Kalchbrenner et al. (2015)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Annotation projection for dependency parsing has been explored in a number of papers, starting with Hwa et al. (2005) . In Tiedemann (2014) and Tiedemann (2015) the process in extended and evaluated across many languages. Li et al. (2014) follows the method of Hwa et al. (2005) and adds a probabilistic target-language classifier to deter-mine and filter out high-uncertainty trees. In Ma and Xia (2014) , performance on projected data is used as an additional objective for unsupervised learning through a combined loss function.", |
|
"cite_spans": [ |
|
{ |
|
"start": 100, |
|
"end": 117, |
|
"text": "Hwa et al. (2005)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 123, |
|
"end": 139, |
|
"text": "Tiedemann (2014)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 144, |
|
"end": 160, |
|
"text": "Tiedemann (2015)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 222, |
|
"end": 238, |
|
"text": "Li et al. (2014)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 278, |
|
"text": "Hwa et al. (2005)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 387, |
|
"end": 404, |
|
"text": "Ma and Xia (2014)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "A common thread in these papers is the use of high-quality parallel data such as the EuroParl corpus. For truly low-resource target languages, this setting is unrealistic as parallel resources may be restricted to biased data such as the Bible. In Agi\u0107 et al. (2016) this problem is addressed, and a parser is constructed which utilizes averaging over edge posteriors for many source languages to compensate for low-quality projected data. Our work builds upon their contribution by constructing a more flexible parser which can bypass a source of bias in their projected labels, and we therefore compared our results directly to theirs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 248, |
|
"end": 266, |
|
"text": "Agi\u0107 et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Annotation projection procedures for crosslingual dependency parsing has been the focus of several other recent papers (Guo et al., 2015; Zhang and Barzilay, 2015; Duong et al., 2015; Rasooli and Collins, 2015) . In Guo et al. (2015) , distributed, language-independent feature representations are used to train shared parsers. Zhang and Barzilay (2015) introduce a tensor-based feature representation capable of incorporating prior knowledge about feature interactions learned from source languages. In Duong et al. (2015) , a neural network parser is built wherein higher-level layers are shared between languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 137, |
|
"text": "(Guo et al., 2015;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 163, |
|
"text": "Zhang and Barzilay, 2015;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 164, |
|
"end": 183, |
|
"text": "Duong et al., 2015;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 184, |
|
"end": 210, |
|
"text": "Rasooli and Collins, 2015)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 216, |
|
"end": 233, |
|
"text": "Guo et al. (2015)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 504, |
|
"end": 523, |
|
"text": "Duong et al. (2015)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Finally, Rasooli and Collins (2015) leverage dense information in high-quality sentence translations to improve performance. Their work can be seen as opposite to ours -whereas Rasooli and Collins leverage high-quality translations to improve performance when such are available, we focus on improving performance in the absence of high-quality translations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We have introduced a novel algorithm for graphbased dependency parsing based on an extension of sequence-LSTM to the more general Tensor-LSTM. We have shown how the parser with a cross entropy loss function performs comparably to state of the art for monolingual parsing. Furthermore, we have demonstrated that the flexibility of our parser enables learning from non wellformed data and from the output of other parsers. Using this property, we have applied our parser to a cross-lingual annotation projection problem for truly low-resource languages, demonstrating an average target-language unlabeled attachment score of 48.54, which to the best of our knowledge are the best results yet for the task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "https://github.com/MichSchli/Tensor-LSTM", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The second author was supported by ERC Starting Grant No. 313695. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Multilingual projection for parsing truly low-resource languages", |
|
"authors": [ |
|
{ |
|
"first": "Zeljko", |
|
"middle": [], |
|
"last": "Agi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "Johannsen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barbara", |
|
"middle": [], |
|
"last": "Plank", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Natalie", |
|
"middle": [], |
|
"last": "H\u00e9ctor Mart\u00ednez Alonso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "Schluter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zeljko Agi\u0107, Anders Johannsen, Barbara Plank, H\u00e9ctor Mart\u00ednez Alonso, Natalie Schluter, and Anders S\u00f8gaard. 2016. Multilingual projection for pars- ing truly low-resource languages. Transactions of the Association for Computational Linguistics, 4.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Where to apply dropout in recurrent neural networks for handwriting recognition?", |
|
"authors": [ |
|
{ |
|
"first": "Theodore", |
|
"middle": [], |
|
"last": "Bluche", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Kermorvant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jerome", |
|
"middle": [], |
|
"last": "Louradour", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Document Analysis and Recognition (IC-DAR)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "681--685", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Theodore Bluche, Christopher Kermorvant, and Jerome Louradour. 2015. Where to apply dropout in recurrent neural networks for handwriting recog- nition? In Document Analysis and Recognition (IC- DAR), 2015 13th International Conference on, pages 681-685. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Low resource dependency parsing: Cross-lingual parameter sharing in a neural network parser", |
|
"authors": [ |
|
{ |
|
"first": "Long", |
|
"middle": [], |
|
"last": "Duong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Cohn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bird", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Cook", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "845--850", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Long Duong, Trevor Cohn, Steven Bird, and Paul Cook. 2015. Low resource dependency parsing: Cross-lingual parameter sharing in a neural network parser. In Proceedings of the 53rd Annual Meet- ing of the Association for Computational Linguistics and the 7th International Joint Conference on Natu- ral Language Processing (Short Papers), pages 845- 850. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Optimum branchings", |
|
"authors": [], |
|
"year": 1968, |
|
"venue": "Mathematics and the Decision Sciences, Part 1", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "335--345", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jack Edmonds. 1968. Optimum branchings. In Math- ematics and the Decision Sciences, Part 1, pages 335-345. American Mathematical Society.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "A deep architecture for non-projective dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Erick", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Fonseca", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Avenida", |
|
"middle": [], |
|
"last": "Trabalhador S\u00e3o-Carlense", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Alu\u00edsio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 NAACL-HLT Workshop on Vector Space Modeling for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "56--61", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erick R. Fonseca, Avenida Trabalhador S\u00e3o-carlense, and Sandra M. Alu\u00edsio. 2015. A deep architecture for non-projective dependency parsing. In Proceed- ings of the 2015 NAACL-HLT Workshop on Vector Space Modeling for NLP, pages 56-61. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Understanding the difficulty of training deep feedforward neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Glorot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 2010 International conference on Artificial Intelligence and Statistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "249--256", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xavier Glorot and Yoshua Bengio. 2010. Understand- ing the difficulty of training deep feedforward neural networks. In Proceedings of the 2010 International conference on Artificial Intelligence and Statistics, pages 249-256. Society for Artificial Intelligence and Statistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Multi-dimensional recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Graves", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Santiago", |
|
"middle": [], |
|
"last": "Fern\u00e1ndez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:0705.2011" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Graves, Santiago Fern\u00e1ndez, and J\u00fcrgen Schmid- huber. 2007. Multi-dimensional recurrent neural networks. arXiv preprint arXiv:0705.2011.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Cross-lingual dependency parsing based on distributed representations", |
|
"authors": [ |
|
{ |
|
"first": "Jiang", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wanxiang", |
|
"middle": [], |
|
"last": "Che", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Yarowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haifeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1234--1244", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiang Guo, Wanxiang Che, David Yarowsky, Haifeng Wang, and Ting Liu. 2015. Cross-lingual depen- dency parsing based on distributed representations. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Lan- guage Processing, pages 1234-1244. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Gradient flow in recurrent nets: the difficulty of learning long-term dependencies", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Frasconi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "A Field Guide to Dynamic Recurrent Neural Networks", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter, Yoshua Bengio, Paolo Frasconi, and J\u00fcrgen Schmidhuber. 2001. Gradient flow in re- current nets: the difficulty of learning long-term de- pendencies. In A Field Guide to Dynamic Recurrent Neural Networks. IEEE press.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Bootstrapping parsers via syntactic projection across parallel texts", |
|
"authors": [ |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Hwa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amy", |
|
"middle": [], |
|
"last": "Weinberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clara", |
|
"middle": [], |
|
"last": "Cabezas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Okan", |
|
"middle": [], |
|
"last": "Kolak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Natural language engineering", |
|
"volume": "11", |
|
"issue": "03", |
|
"pages": "311--325", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rebecca Hwa, Philip Resnik, Amy Weinberg, Clara Cabezas, and Okan Kolak. 2005. Bootstrapping parsers via syntactic projection across parallel texts. Natural language engineering, 11(03):311-325.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "An empirical exploration of recurrent network architectures", |
|
"authors": [ |
|
{ |
|
"first": "Rafal", |
|
"middle": [], |
|
"last": "Jozefowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wojciech", |
|
"middle": [], |
|
"last": "Zaremba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 32nd International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2342--2350", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rafal Jozefowicz, Wojciech Zaremba, and Ilya Sutskever. 2015. An empirical exploration of re- current network architectures. In Proceedings of the 32nd International Conference on Machine Learn- ing, pages 2342-2350. International Machine Learn- ing Society.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Grid long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Nal", |
|
"middle": [], |
|
"last": "Kalchbrenner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivo", |
|
"middle": [], |
|
"last": "Danihelka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Graves", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1507.01526" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nal Kalchbrenner, Ivo Danihelka, and Alex Graves. 2015. Grid long short-term memory. arXiv preprint arXiv:1507.01526.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Simple and accurate dependency parsing using bidirectional lstm feature representations", |
|
"authors": [ |
|
{ |
|
"first": "Eliyahu", |
|
"middle": [], |
|
"last": "Kiperwasser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1603.04351" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eliyahu Kiperwasser and Yoav Goldberg. 2016. Sim- ple and accurate dependency parsing using bidirec- tional lstm feature representations. arXiv preprint arXiv:1603.04351.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A strong baseline for learning cross-lingual word representations from sentence alignments", |
|
"authors": [ |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "EACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Omer Levy, Anders S\u00f8gaard, and Yoav Goldberg. 2017. A strong baseline for learning cross-lingual word representations from sentence alignments. In EACL.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Soft cross-lingual syntax projection for dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Zhenghua", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenliang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 25th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "783--793", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhenghua Li, Min Zhang, and Wenliang Chen. 2014. Soft cross-lingual syntax projection for dependency parsing. In Proceedings of the 25th International Conference on Computational Linguistics, pages 783-793. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Unsupervised dependency parsing with transferring distribution via parallel guidance and entropy regularization", |
|
"authors": [ |
|
{ |
|
"first": "Xuezhe", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Xia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1337--1348", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xuezhe Ma and Fei Xia. 2014. Unsupervised depen- dency parsing with transferring distribution via par- allel guidance and entropy regularization. In Pro- ceedings of the 52nd Annual Meeting of the Asso- ciation for Computational Linguistics, pages 1337- -1348. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Turbo parsers: Dependency parsing by approximate variational inference", |
|
"authors": [ |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [ |
|
"F T" |
|
], |
|
"last": "Martins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Xing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pedro", |
|
"middle": [ |
|
"M Q" |
|
], |
|
"last": "Aguiar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M\u00e1rio", |
|
"middle": [ |
|
"A T" |
|
], |
|
"last": "Figueiredo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "34--44", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andr\u00e9 F.T. Martins, Noah A. Smith, Eric P. Xing, Pe- dro M.Q. Aguiar, and M\u00e1rio A.T. Figueiredo. 2010. Turbo parsers: Dependency parsing by approxi- mate variational inference. In Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing, pages 34-44. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Non-projective dependency parsing using spanning tree algorithms", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kiril", |
|
"middle": [], |
|
"last": "Ribarov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Haji\u010d", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the conference on Human Language Technology and Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "523--530", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan McDonald, Fernando Pereira, Kiril Ribarov, and Jan Haji\u010d. 2005. Non-projective dependency pars- ing using spanning tree algorithms. In Proceedings of the conference on Human Language Technology and Empirical Methods in Natural Language Pro- cessing, pages 523-530. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Multi-source transfer of delexicalized dependency parsers", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keith", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "62--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan McDonald, Slav Petrov, and Keith Hall. 2011. Multi-source transfer of delexicalized dependency parsers. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Process- ing, pages 62-72. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Universal dependency annotation for multilingual parsing", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yvonne", |
|
"middle": [], |
|
"last": "Quirmbach-Brundage", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kuzman", |
|
"middle": [], |
|
"last": "Ganchev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keith", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oscar", |
|
"middle": [], |
|
"last": "T\u00e4ckstr\u00f6m", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan T. McDonald, Joakim Nivre, Yvonne Quirmbach-Brundage, Yoav Goldberg, Dipan- jan Das, Kuzman Ganchev, Keith B. Hall, Slav Petrov, Hao Zhang, Oscar T\u00e4ckstr\u00f6m, et al. 2013. Universal dependency annotation for multilingual parsing. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Adding gradient noise improves learning for very deep networks", |
|
"authors": [ |
|
{ |
|
"first": "Arvind", |
|
"middle": [], |
|
"last": "Neelakantan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Vilnis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karol", |
|
"middle": [], |
|
"last": "Kurach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Martens", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1511.06807" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arvind Neelakantan, Luke Vilnis, Quoc V. Le, Ilya Sutskever, Lukasz Kaiser, Karol Kurach, and James Martens. 2015. Adding gradient noise improves learning for very deep networks. arXiv preprint arXiv:1511.06807.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Bayesian models for multilingual word alignment", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Robert\u00f6stling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert\u00d6stling. 2015. Bayesian models for multilin- gual word alignment. Ph.D. thesis, Department of Linguistics, Stockholm University.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "On the difficulty of training recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Razvan", |
|
"middle": [], |
|
"last": "Pascanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1211.5063" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Razvan Pascanu, Tomas Mikolov, and Yoshua Bengio. 2012. On the difficulty of training recurrent neural networks. arXiv preprint arXiv:1211.5063.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "An effective neural network model for graph-based dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Wenzhe", |
|
"middle": [], |
|
"last": "Pei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Ge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Baobao", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "313--322", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenzhe Pei, Tao Ge, and Baobao Chang. 2015. An effective neural network model for graph-based de- pendency parsing. In Proceedings of the 53rd An- nual Meeting of the Association for Computational Linguistics and the 7th International Joint Confer- ence on Natural Language Processing, pages 313- 322. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Density-driven cross-lingual transfer of dependency parsers", |
|
"authors": [ |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Sadegh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rasooli", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "328--338", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad Sadegh Rasooli and Michael Collins. 2015. Density-driven cross-lingual transfer of de- pendency parsers. In Proceedings of the 2015 Con- ference on Empirical Methods in Natural Language Processing, pages 328-338. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Parsing universal dependency treebanks using neural networks and search-based oracle", |
|
"authors": [ |
|
{ |
|
"first": "Milan", |
|
"middle": [], |
|
"last": "Straka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Haji\u010d", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jana", |
|
"middle": [], |
|
"last": "Strakov\u00e1", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Haji\u010d Jr", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 14th International Workshop on Treebanks and Linguistic Theories", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "208--220", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Milan Straka, Jan Haji\u010d, Jana Strakov\u00e1, and Jan Haji\u010d jr. 2015. Parsing universal dependency treebanks using neural networks and search-based oracle. In Proceedings of the 14th International Workshop on Treebanks and Linguistic Theories, pages 208-220. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Rediscovering annotation projection for cross-lingual parser induction", |
|
"authors": [ |
|
{ |
|
"first": "J\u00f6rg", |
|
"middle": [], |
|
"last": "Tiedemann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1854--1864", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J\u00f6rg Tiedemann. 2014. Rediscovering annotation pro- jection for cross-lingual parser induction. In Pro- ceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Techni- cal Papers, pages 1854-1864. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Cross-lingual dependency parsing with universal dependencies and predicted pos labels", |
|
"authors": [ |
|
{ |
|
"first": "J\u00f6rg", |
|
"middle": [], |
|
"last": "Tiedemann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Third International Conference on Dependency Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "340--349", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J\u00f6rg Tiedemann. 2015. Cross-lingual dependency parsing with universal dependencies and predicted pos labels. Proceedings of the Third International Conference on Dependency Linguistics, pages 340- 349.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude", |
|
"authors": [ |
|
{ |
|
"first": "Tijmen", |
|
"middle": [], |
|
"last": "Tieleman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "COURSERA: Neural Networks for Machine Learning", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tijmen Tieleman and Geoffrey Hinton. 2012. Lecture 6.5-rmsprop: Divide the gradient by a running av- erage of its recent magnitude. COURSERA: Neural Networks for Machine Learning, 4:2.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Parallel corpora for medium density languages", |
|
"authors": [ |
|
{ |
|
"first": "D\u00e1niel", |
|
"middle": [], |
|
"last": "Varga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P\u00e9ter", |
|
"middle": [], |
|
"last": "Hal\u00e1csy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e1s", |
|
"middle": [], |
|
"last": "Kornai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Viktor", |
|
"middle": [], |
|
"last": "Nagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L\u00e1szl\u00f3", |
|
"middle": [], |
|
"last": "N\u00e9meth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Viktor", |
|
"middle": [], |
|
"last": "Tr\u00f3n", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 2005 Conference on Recent Advances in Natural Language Processing. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D\u00e1niel Varga, P\u00e9ter Hal\u00e1csy, Andr\u00e1s Kornai, Viktor Nagy, L\u00e1szl\u00f3 N\u00e9meth, and Viktor Tr\u00f3n. 2005. Par- allel corpora for medium density languages. In Proceedings of the 2005 Conference on Recent Ad- vances in Natural Language Processing. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Hierarchical low-rank tensors for multilingual transfer parsing", |
|
"authors": [ |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Regina", |
|
"middle": [], |
|
"last": "Barzilay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1857--1867", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuan Zhang and Regina Barzilay. 2015. Hierarchical low-rank tensors for multilingual transfer parsing. In Proceedings of the 2015 Conference on Empiri- cal Methods in Natural Language Processing, pages 1857-1867. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "High-order graph-based neural dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Zhisong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 29th Pacific Asia Conference on Language, Information and Computation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "114--123", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhisong Zhang and Hai Zhao. 2015. High-order graph-based neural dependency parsing. In Pro- ceedings of the 29th Pacific Asia Conference on Lan- guage, Information and Computation, pages 114- 123. Association for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Four-directional Tensor-LSTM applied to the example sentence seen in", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"text": "UAS per epoch on German development data training from 5000 or 10000 randomly sampled sentences with projected annotations.", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF3": { |
|
"text": "Percentage of missing edge scores versus performance difference for Tensor-LSTM with mean squared loss and cross entropy loss.", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF5": { |
|
"text": "Performance for Tensor-LSTM on English test data with 0-40% of the edge scores artificially maintained at 0.", |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"TABREF0": { |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "and W (w s , w t ) represents the confidence that w s aligns to w t given that S t is the translation of S s For each source language L s with a scoring function score Ls , we define a local edge-wise voting function vote Ss", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |