|
{ |
|
"paper_id": "K16-1018", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T07:11:28.324272Z" |
|
}, |
|
"title": "Learning when to trust distant supervision: An application to low-resource POS tagging using cross-lingual projection", |
|
"authors": [ |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Cohn", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Cross lingual projection of linguistic annotation suffers from many sources of bias and noise, leading to unreliable annotations that cannot be used directly. In this paper, we introduce a novel approach to sequence tagging that learns to correct the errors from cross-lingual projection using an explicit debiasing layer. This is framed as joint learning over two corpora, one tagged with gold standard and the other with projected tags. We evaluated with only 1,000 tokens tagged with gold standard tags, along with more plentiful parallel data. Our system equals or exceeds the state-of-the-art on eight simulated lowresource settings, as well as two real lowresource languages, Malagasy and Kinyarwanda.", |
|
"pdf_parse": { |
|
"paper_id": "K16-1018", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Cross lingual projection of linguistic annotation suffers from many sources of bias and noise, leading to unreliable annotations that cannot be used directly. In this paper, we introduce a novel approach to sequence tagging that learns to correct the errors from cross-lingual projection using an explicit debiasing layer. This is framed as joint learning over two corpora, one tagged with gold standard and the other with projected tags. We evaluated with only 1,000 tokens tagged with gold standard tags, along with more plentiful parallel data. Our system equals or exceeds the state-of-the-art on eight simulated lowresource settings, as well as two real lowresource languages, Malagasy and Kinyarwanda.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Part-of-speech (POS) tagging is a critical task for natural language processing (NLP) applications, providing lexical syntactic information. Automatic POS tagging has been extremely successful for many rich resource languages through the use of supervised learning over large training corpora (McCallum et al., 2000; Lafferty et al., 2001; Ammar et al., 2016) . However, learning POS taggers for low-resource languages from small amounts of annotated data is very challenging Duong et al., 2014) . For such problems, distant supervision via heuristic methods can provide cheap but inaccurately labelled data (Mintz et al., 2009; Takamatsu et al., 2012; Ritter et al., 2013; Plank et al., 2014) . A compromise, considered here, is to use a mixture of both resources: a small collection of clean annotated data and noisy \"distant\" data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 293, |
|
"end": 316, |
|
"text": "(McCallum et al., 2000;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 317, |
|
"end": 339, |
|
"text": "Lafferty et al., 2001;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 340, |
|
"end": 359, |
|
"text": "Ammar et al., 2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 476, |
|
"end": 495, |
|
"text": "Duong et al., 2014)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 608, |
|
"end": 628, |
|
"text": "(Mintz et al., 2009;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 629, |
|
"end": 652, |
|
"text": "Takamatsu et al., 2012;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 653, |
|
"end": 673, |
|
"text": "Ritter et al., 2013;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 674, |
|
"end": 693, |
|
"text": "Plank et al., 2014)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A popular method for distant supervision is to use parallel data between a low-resource language and a rich-resource language. Although annotated data in low-resource languages is difficult to obtain, bilingual resources are more plentiful. For example parallel translations into English are often available, in the form of news reports, novels or the Bible. Parallel data allows annotation from a high-resource language to be projected across alignments to the low-resource language, which has been shown to be effective for several language processing tasks including POS tagging (Yarowsky and Ngai, 2001 ; , named entity recognition (Wang and Manning, 2014) and dependency parsing .", |
|
"cite_spans": [ |
|
{ |
|
"start": 582, |
|
"end": 606, |
|
"text": "(Yarowsky and Ngai, 2001", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 636, |
|
"end": 660, |
|
"text": "(Wang and Manning, 2014)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Although cross-lingual POS projection is popular it has several problems, including errors from poor word alignments and cross-lingual syntactic divergence . Previous work has proposed heuristics or constraints to clean the projected tag before or during learning. In contrast, we consider compensating for these problems explicitly, by learning a bias transformation to encode the mapping between 'clean' tags and the kinds of tags produced from projection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We propose a new neural network model for sequence tagging in a low-resource language, suitable for training with both a tiny gold standard annotated corpus, as well as distant supervision using cross-lingual tag projection. Our model uses a bidirectional Long Short-Term Memory (BiL-STM), which produces two types of output: gold tags generated directly from the hidden states of a neural network, and uncertain projected tags generated after applying a further linear transformation. This transformation, encodes the mapping between the projected tags from the high-resource language, and the gold tags in the target low-resource language, and learns when and how much to trust the projected data. For example, for languages without determiners, the model can learn to map projected determiner tags to nouns, or if verbs are often poorly aligned, the model can learn to effectively ignore the projected verb tag, by associating all tags with verbs. Our model is trained jointly on gold and distant projected annotations, and can be trained end-to-end with backpropagation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our approach captures the relations among tokens, noisy projected POS tags and ground truth POS tags. Our work differs in the use of projection, in that we explicitly model the transformation between tagsets as part of a more expressive deep learning neural network. We make three main contributions. First, we study the noise of projected data in word alignments and describe it with an additional transformation layer in the model. Second, we integrate the model into a deep neural network and jointly train the model on both annotated and projected data to make the model learn from better supervision. Finally, evaluating on eight simulated and two real-world low-resource languages, experimental results demonstrate that our approach uniformly equals or exceeds existing methods on simulated languages, and achieves 86.7% accuracy for Malagasy and 82.6% on Kinyarwanda, exceeding the state-of-the-art results of Duong et al. (2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 917, |
|
"end": 936, |
|
"text": "Duong et al. (2014)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "For most natural language processing tasks, the conventional approach to developing a system is to use supervised learning algorithms trained on a set of annotated data. However, this approach is inappropriate for low-resource languages due to the lack of annotated data. An alternative approach is to harness different source of information aside from annotated text. Knowledge-bases such as dictionaries are one such source, which can be used to inform or constrain models, such as limiting the search space for POS tagging (Banko and Moore, 2004; Goldberg et al., 2008; Li et al., 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 526, |
|
"end": 549, |
|
"text": "(Banko and Moore, 2004;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 550, |
|
"end": 572, |
|
"text": "Goldberg et al., 2008;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 573, |
|
"end": 589, |
|
"text": "Li et al., 2012)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Parallel bilingual corpora provide another important source of information. These corpora are often plentiful even for many low-resource languages in the form of multilingual government documents, book translations, multilingual websites, etc. Word alignments can provide a bridge to project information from a resource-rich source language to a resource-poor target language. For example, parallel data has been used for named entity recognition (Wang and Manning, 2014) based on the observation that named entities are most often preserved in translation and also in syntactic tasks such as POS tagging (Yarowsky and Ngai, 2001; and dependency parsing . Clues from related languages can also compensate for the lack of annotated data, as we expect there to be information shared between closely related languages in terms of the lexical items, morphology and syntactic structure. Some successful applications using language relatedness information are dependency parsing and POS tagging (Hana et al., 2004) . However, these approaches are limited to closely related languages such as Czech and Russian, or Telugu and Kannada, and it is unclear whether these techniques will work well in situations where parallel data only exists for less-related languages, as is often the case in practice.", |
|
"cite_spans": [ |
|
{ |
|
"start": 447, |
|
"end": 471, |
|
"text": "(Wang and Manning, 2014)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 605, |
|
"end": 630, |
|
"text": "(Yarowsky and Ngai, 2001;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 989, |
|
"end": 1008, |
|
"text": "(Hana et al., 2004)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To summarize, for all these mentioned tasks, lexical resources are valuable sources of knowledge, but are also costly to build. Language relatedness information is applicable for closely related languages, but it is often the case that a given low-resource language does not have a closely-related, resource-rich language. Parallel data therefore appears to be the most realistic additional source of information for developing NLP systems for low-resource languages (Yarowsky and Ngai, 2001; Duong et al., 2014; Guo et al., 2015) , and here we primarily investigate methods to exploit parallel texts. Yarowsky and Ngai (2001) pioneered the use of parallel data for projecting POS tag information from a resource-rich language to a resourcepoor language. Duong et al. (2014) proposed an approach using a maximum entropy classifier trained on 1000 tagged tokens, and used projected tags as auxiliary outputs. used parallel data and exploited graph-based label propagation to expand the coverage of labelled tokens. Our work is closest to Duong et al. (2014) , and we share the same evaluation setting, which we believe is well suited to the lowresource applications. Our approach differs from theirs in two ways: first we propose a deep learning model based on a long short-term memory re-current structure versus their maximum entropy classifier, and secondly we model the projection tag explicitly as a biased variant of the classification output, while they attempt to capture the correlations between tagsets only implicitly through a joint feature set over both tags. We believe that our work is the first to explicitly model the bias affecting cross-lingual projected annotations, thereby allowing this rich data resource to be better exploited for learning NLP models in low-resource languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 467, |
|
"end": 492, |
|
"text": "(Yarowsky and Ngai, 2001;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 493, |
|
"end": 512, |
|
"text": "Duong et al., 2014;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 513, |
|
"end": 530, |
|
"text": "Guo et al., 2015)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 602, |
|
"end": 626, |
|
"text": "Yarowsky and Ngai (2001)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 755, |
|
"end": 774, |
|
"text": "Duong et al. (2014)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1037, |
|
"end": 1056, |
|
"text": "Duong et al. (2014)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this work, we consider the POS tagging problem for a low-resource language using both the gold annotated and distant projected corpora. For a low-resource language, we assume two sets of data. First, there is a small conventional corpus for the low-resource language, annotated with gold tags. Second, there is a parallel corpus between the language and English, where we can reliably tag the English side and project these annotations across the word alignments. Then based on the annotated and the projected data, we learn a deep neural model for the POS tagging. The goal of learning here is to improve the POS tagging accuracy on the low-resource language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Framework", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Parallel data is often available for low-resource languages. For example, for Malagasy we can obtain bilingual documents with English directly from the web. This provides ample opportunity for projecting annotations from English into the low-resource language. Although the POS tags can be projected, given sentence and wordalignments, direct projection has several issues and results in noisy, biased and often unreliable annotations (Yarowsky and Ngai, 2001; Duong et al., 2014) . One source of error are the word alignments. These errors arise from words in the source language that are not aligned to any words in the target language, which might be due to them not being translated well enough, errors in alignments, or translation phenomena that do not fit the assumptions underlying the word based alignment models (e.g., many-to-many translations cannot be captured).", |
|
"cite_spans": [ |
|
{ |
|
"start": 435, |
|
"end": 460, |
|
"text": "(Yarowsky and Ngai, 2001;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 461, |
|
"end": 480, |
|
"text": "Duong et al., 2014)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "POS projection via word alignments", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "An example of POS projection via word alignments between Malagasy and English is shown in word in English or the NULL word. Thus there exist words in the target language which are not aligned to a word in the source language, for example ny in Figure 1 . Previous work has either used the majority projected POS tag for a token or used a default value to represent the token (Duong et al., 2014; . Another problem are errors in the projected tags: for example, in this sentence, fanomezan-kevitra is labelled as VERB incorrectly, but should be NOUN, a consequence of a non-literal translation. We now turn to the labelling of the projected data. For the parallel data, we consider each token in the low-resource language. Where this token is aligned to a single token in English, we assign the tag for that English token. For tokens that are aligned to many English words or none at all (NULL), we assign a distribution over tags according to the tag frequency distribution over the whole English sentence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 375, |
|
"end": 395, |
|
"text": "(Duong et al., 2014;", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 244, |
|
"end": 252, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "POS projection via word alignments", |
|
"sec_num": "3.1" |
|
}, |
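{
"text": "As a concrete sketch of this labelling rule (our illustrative Python, not code from the paper; the alignment format and function name are hypothetical), the function below assigns a one-hot distribution to one-to-one aligned tokens and falls back to the sentence-level tag frequency distribution for unaligned or multiply aligned tokens:\nfrom collections import Counter\n\ndef project_tags(target_len, alignment, source_tags, tagset):\n    # alignment: dict mapping a target index to a source index, present\n    # only for tokens with a single (one-to-one) alignment link.\n    counts = Counter(source_tags)\n    # Fallback: tag frequencies over the whole English sentence.\n    fallback = [counts[t] / len(source_tags) for t in tagset]\n    distributions = []\n    for i in range(target_len):\n        if i in alignment:\n            # One-to-one aligned: copy the English tag as a one-hot vector.\n            one_hot = [0.0] * len(tagset)\n            one_hot[tagset.index(source_tags[alignment[i]])] = 1.0\n            distributions.append(one_hot)\n        else:\n            # NULL or multiply aligned: use the sentence-level distribution.\n            distributions.append(list(fallback))\n    return distributions",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "POS projection via word alignments",
"sec_num": "3.1"
},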
|
{ |
|
"text": "A natural question is whether this projected labelling might be suitable for use directly in supervised learning of a POS tagger. To test this, we compare training a bidirectional Long Short-Term Memory (BiLSTM) tagger on this data, a small 1000 token dataset with gold-standard tags, and the union of the two. 1 Evaluating the tagging accuracy against gold standard tags, we observe in Tables 1 and 2 (top section, rows labelled BiLSTM) that the use of the gold-standard (Annotated) data is considerably superior to training on the directly Projected data, despite the smaller amount of Annotated data, while using the union of the two datasets results in mild improvements in a few languages, but worsens performance for others.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "POS projection via word alignments", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "These sobering results raise the question of how we might use the bilingual resources in a more effective manner than direct projection. Clearly projections contain useful information, as the tagging accuracy is well above chance. However, they are riddled with noise and biases, which need to be accounted for to improve performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "POS projection via word alignments", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "To address this problem, we propose a model that jointly models the clean annotated data and the projected data. For this we use a bidirectional LSTM tagger, as illustrated on the left in Figure 2, although other classifiers could be easily used in its place. The BiLSTM offers access to both the left and right lexical contexts around a given word (Graves et al., 2013) , which are likely be of considerable use in POS tagging where context of central importance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 349, |
|
"end": 370, |
|
"text": "(Graves et al., 2013)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 188, |
|
"end": 194, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "BiLSTM with bias layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Let x t indicate a word in a sentence and y t indicate its corresponding POS tag, and K denotes the size of the tagset. 2 The recurrent layer is designed to store contextual information, while the values in the hidden and output layers are computed as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 121, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BiLSTM with bias layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2212 \u2192 h t = lstm( \u2212 \u2192 h t\u22121 , x t ) \u2190 \u2212 h t = lstm( \u2190 \u2212 h t+1 , x t ) o t = softmax(W \u2192 \u2212 \u2192 h t + W \u2190 \u2190 \u2212 h t + b) (1) y t \u223c Multinomial(o t ) .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BiLSTM with bias layer", |
|
"sec_num": "3.2" |
|
}, |
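{
"text": "For concreteness, a minimal PyTorch sketch of the tagger in equation (1) follows; this is our illustration (the paper's implementation used the cnn toolkit), with hyperparameters mirroring those reported in Section 4.2 (128-dimensional embeddings and hidden layers):\nimport torch\nimport torch.nn as nn\n\nclass BiLSTMTagger(nn.Module):\n    def __init__(self, vocab_size, tagset_size, dim=128):\n        super().__init__()\n        self.embed = nn.Embedding(vocab_size, dim)\n        # Bidirectional LSTM provides the forward and backward states h_t.\n        self.lstm = nn.LSTM(dim, dim, bidirectional=True, batch_first=True)\n        # One linear layer over the concatenated states plays the role of\n        # W_> h_t + W_< h_t + b in equation (1).\n        self.out = nn.Linear(2 * dim, tagset_size)\n\n    def forward(self, word_ids):  # word_ids: (batch, time)\n        states, _ = self.lstm(self.embed(word_ids))\n        return torch.softmax(self.out(states), dim=-1)  # o_t per token",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "BiLSTM with bias layer",
"sec_num": "3.2"
},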
|
{ |
|
"text": "This supervised model is trained on annotated gold data in the standard manner using a cross-entropy objective with stochastic gradient descent through the use of gradient backpropagation. The projected data, however, needs to be treated differently to the annotated data: the tagging is often uncertain, as tokens may have been aligned to words with different parts of speech, or multiply aligned, or left as an unaligned word. These tags are not to be trusted in the same way as the gold annotated data. Our work accounts for bias explicitly in the training objective, by modelling the correspondence between the true tags and the errorful projected tags. The projected data consists of pairs, (x t ,\u1ef9), where\u1ef9 denotes the projected POS tag or tag distribution. In this setting, we assume that the true label, y t , is latent variable and both\u1ef9 and y are K-dimensional binary random variables: y t is a vector representation of a projected tag, and y t is a one-hot representation of a gold tag.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BiLSTM with bias layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We augment the deep neural network model to include a bias transformation such that its prediction matches the distribution of the projected tags, as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BiLSTM with bias layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "p(\u1ef8 t = j|x t , \u03b8, A) = softmax i a i,j o t,i , (2) where o t,i = p(Y t = i|x t , \u03b8)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BiLSTM with bias layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "is the probability of tag i in position t according to (1). This equation is parameterized by a K \u00d7 K matrix A. 3 Each cell a i,j denotes the confusion score between classes i and j, with negative values quashing the correspondance, and positive values rewarding a pairing; in the situations where the projected tags closely match the supervised tagging, we expect that A \u221d I.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BiLSTM with bias layer", |
|
"sec_num": "3.2" |
|
}, |
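{
"text": "A sketch of the bias layer of equation (2), again ours rather than the paper's code: the K × K matrix A is a learnable parameter, and initializing it at the identity encodes the prior that projected tags mostly match true tags (A ∝ I):\nimport torch\nimport torch.nn as nn\n\nclass DebiasLayer(nn.Module):\n    def __init__(self, tagset_size):\n        super().__init__()\n        # a_{i,j} scores the correspondence between true tag i and\n        # projected tag j; identity initialization trusts the projection.\n        self.A = nn.Parameter(torch.eye(tagset_size))\n\n    def forward(self, o):  # o: (..., K) tag probabilities from the tagger\n        # softmax_j(sum_i a_{i,j} o_i), i.e. equation (2).\n        return torch.softmax(o @ self.A, dim=-1)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "BiLSTM with bias layer",
"sec_num": "3.2"
},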
|
{ |
|
"text": "Joint modelling of the gold supervision and projected data gives rise to a training objective combining two cross-entropy terms,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BiLSTM with bias layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "L(\u03b8, A) = \u2212 1 |T p | t\u2208T p \u1ef9 t , log softmax (Ao t ) \u2212 1 |T t | t\u2208T t y t , log o t ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BiLSTM with bias layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where T p indexes all the token positions in the projected dataset, and T t does similarly for the annotated training set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BiLSTM with bias layer", |
|
"sec_num": "3.2" |
|
}, |
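{
"text": "As a sketch (our notation, assuming the tagger outputs probabilities o and the projected supervision is a batch of tag distributions), the objective can be written directly as the two cross-entropy terms above:\nimport torch\n\ndef joint_loss(o_gold, y_gold_onehot, o_proj, y_proj_dist, A, eps=1e-9):\n    # Gold term: cross-entropy of the base output against one-hot tags.\n    gold = -(y_gold_onehot * torch.log(o_gold + eps)).sum(-1).mean()\n    # Projected term: cross-entropy of the debiased output against the\n    # projected tag (or tag distribution).\n    debiased = torch.softmax(o_proj @ A, dim=-1)\n    proj = -(y_proj_dist * torch.log(debiased + eps)).sum(-1).mean()\n    return gold + proj",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "BiLSTM with bias layer",
"sec_num": "3.2"
},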
|
{ |
|
"text": "We illustrate the combined model in Figure 2 , showing on the left the gold supervised model and on the right the distant supervised components. The distant model builds on the base part by feeding the output through a bias layer, which is finally used in a softmax to produce the biased output layer. The matrix A parameterizes the final layer, to adjust the tag probabilities from the supervised model into a distribution that better matches the projected POS tags. However, the ultimate goal is Figure 2 : Illustration of the model architecture, which uses a bidirectional LSTM recurrent network, with a tag classification output. The left part illustrates the supervised training scenario and test setting, where each word x is assigned a tag y; the right part shows the projection training setting, with a bias layer, where the supervision is either a projected label or label distribution (used for NULL aligned words).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 44, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 498, |
|
"end": 506, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "BiLSTM with bias layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "to predict the POS tag y t . Consider the training effect of the projected POS tags: when performing error backpropagation, the cross-entropy error signal must pass through the tag transformation linking\u00f5 with o, which can be seen as a debiasing step, after which the cleaned error signal can be further backpropagated to the rest of the model. Provided there are consistent patterns of errors in the projection output, this technique can readily model these sources of variation with a tiny handful of parameters, and thus greatly improve the utility of this form of distant supervision.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BiLSTM with bias layer", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Directly training the whole deep neural network with random initialization is impractical, because without a good estimate for the A matrix, the errors from the projected tags may misdirect training result in a poor local optima. For this reason the training process contains two stages. In the first stage we use the clean annotated data to pretrain the network. In the second stage we jointly use both projected and annotated data to continue training the model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BiLSTM with bias layer", |
|
"sec_num": "3.2" |
|
}, |
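{
"text": "A schematic of this two-stage regime (a sketch under our assumptions: 'model' is the BiLSTM tagger, 'debias' the bias layer, and the data loaders yield one sentence at a time; the learning rate of 1.0 matches Section 4.2):\nimport torch\n\ndef train(model, debias, gold_data, proj_data, pre_epochs, joint_epochs):\n    params = list(model.parameters()) + list(debias.parameters())\n    opt = torch.optim.SGD(params, lr=1.0)\n    # Stage 1: pretrain on clean annotated data only, so that stage 2\n    # starts from a sensible tagger rather than a random one.\n    for _ in range(pre_epochs):\n        for words, tags in gold_data:  # tags: gold tag indices\n            opt.zero_grad()\n            o = model(words)  # probabilities, shape (1, T, K)\n            torch.nn.functional.nll_loss(torch.log(o).squeeze(0), tags).backward()\n            opt.step()\n    # Stage 2: continue jointly on gold and projected data.\n    for _ in range(joint_epochs):\n        for (words, tags), (p_words, p_dist) in zip(gold_data, proj_data):\n            opt.zero_grad()\n            gold = torch.nn.functional.nll_loss(torch.log(model(words)).squeeze(0), tags)\n            proj = -(p_dist * torch.log(debias(model(p_words)).squeeze(0))).sum(-1).mean()\n            (gold + proj).backward()\n            opt.step()",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "BiLSTM with bias layer",
"sec_num": "3.2"
},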
|
{ |
|
"text": "We evaluate our algorithm using two kinds of experimental setups, simulation experiments and real-world experiments. For the simulation experiments, we use the following eight European languages: Danish (da), Dutch (nl), German (de), Greek (el), Italian (it), Portuguese (pt), Spanish (es), Swedish (sv). These languages are obviously not low-resource languages, however we can use this data to simulate the low-resource setting by only using a small 1,000 tokens of the gold annotations for training. This evaluation technique is widely used in previous work, and allows us to compare our results with prior stateof-the-art algorithms. For the real-world experiments, we use the following two low-resource languages: Malagasy, an Austronesian language spoken in Madagascar, and Kinyarwanda, a Niger-Congo language spoken in Rwanda.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For the simulation experiments, we use the Europarl v7 corpus, with English as the source language and each of languages as the target language. There are an average of 1.85 million parallel sentences for each of the eight language pairs. For the real-world experiments, the parallel data is smaller and generally of a lower quality. For Malagasy, we use a web-sourced collection of parallel texts. 4 The parallel data of Malagasy has 100k sentences and 1,231k tokens. For Kinyarwanda, we obtained parallel texts from ARL MURI project. 5 Table 1 : The POS tagging accuracy for various models in eight languages: Danish (da), Dutch (nl), German (de), Greek (el), Italian (it), Portuguese (pt), Spanish (es), Swedish (sv). The top results of the second part are taken from Duong et al. (2014) , evaluated on the same data split.", |
|
"cite_spans": [ |
|
{ |
|
"start": 399, |
|
"end": 400, |
|
"text": "4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 536, |
|
"end": 537, |
|
"text": "5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 771, |
|
"end": 790, |
|
"text": "Duong et al. (2014)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 538, |
|
"end": 545, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Parallel data", |
|
"sec_num": "4.1.1" |
|
}, |
|
|
{ |
|
"text": "We use GIZA++ to induce word alignments on the parallel data (Och and Ney, 2003), using IBM model 3 (Brown et al., 1993) . Following prior work (Duong et al., 2014) , we retain only one-toone alignments. Using all alignments (i.e., manyto-one and one-to-many), would result in many more POS-tagged tokens, but also bring considerable additional noise. For example, the English laws (NNS) aligned to French les (DT) lois (NNS) would end up incorrectly tagging the French determiner les as a noun (NNS). We use the Stanford POS tagger (Toutanova et al., 2003) to tag the English side of the parallel data and then project the labels to the target side. As we show in the following section, and confirmed in many studies , the directly projected labels have many errors and therefore it is unwise to use the tags directly. We further filter the corpus using the approach of Yarowsky and Ngai (2001) which selects sentences with the highest sentence alignment scores from IBM model 3. For the European languages, we retain 200k sentences for each language, while for the low-resource languages, we use all the parallel data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 69, |
|
"text": "(Och and", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 70, |
|
"end": 120, |
|
"text": "Ney, 2003), using IBM model 3 (Brown et al., 1993)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 144, |
|
"end": 164, |
|
"text": "(Duong et al., 2014)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 533, |
|
"end": 557, |
|
"text": "(Toutanova et al., 2003)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 871, |
|
"end": 895, |
|
"text": "Yarowsky and Ngai (2001)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "POS projection", |
|
"sec_num": "4.1.2" |
|
}, |
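{
"text": "The one-to-one filter can be sketched as follows (our code; 'links' is assumed to be GIZA++ output parsed into (source_index, target_index) pairs). Under this rule, the English laws aligned to both les and lois would be dropped rather than mislabelling the determiner:\nfrom collections import Counter\n\ndef one_to_one(links):\n    # Keep only links whose source and target tokens each participate in\n    # exactly one alignment link.\n    src = Counter(s for s, _ in links)\n    tgt = Counter(t for _, t in links)\n    return [(s, t) for s, t in links if src[s] == 1 and tgt[t] == 1]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "POS projection",
"sec_num": "4.1.2"
},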
|
{ |
|
"text": "Gold annotated data is expensive and difficult to obtain, and thus we assume that only a small annotated dataset is available. For the simulation experiments, annotated data is obtained from the CoNLL-X shared tasks (Buchholz and Marsi, 2006) . To simulate the low-resource setting, we take the first 1,000 tagged tokens for training and the remaining data is split equally between development and testing sets, following Duong et al. (2014) . For the real-world experiments, we use the Malagasy and Kinyarwanda data from Garrette and Baldridge 2013, who showed that a small annotated dataset could be collected very cheaply, requiring less than 2 hours of non-expert time to tag 1,000 tokens. This constitutes a reasonable demand for cheap portability to other lowresource languages. We use the datasets from , constituting annotated datasets of 383 sentences and 5,294 tokens in Malagasy and 196 sentences and 4,882 tokens for Kinyarwanda. We use 1,000 tokens as training set and the rest is used for testing for each language.", |
|
"cite_spans": [ |
|
{ |
|
"start": 216, |
|
"end": 242, |
|
"text": "(Buchholz and Marsi, 2006)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 422, |
|
"end": 441, |
|
"text": "Duong et al. (2014)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotated data", |
|
"sec_num": "4.1.3" |
|
}, |
|
{ |
|
"text": "We compare our algorithm with several baselines, including the state-of-the-art algorithm from Duong et al. (2014) , a two-output maxent model, their reported baseline method of a supervised maximum entropy model trained on the annotated data, and our BiLSTM POS tagger trained directly from the annotated and/or projected data (denoted BiLSTM Annotated, Projected and Ann+Proj for the model trained on the union of the two datasets). For the real low-resource languages, we also compare our algorithm with , who reported good results on the two low-resource languages. Our implementation is based on the cnn toolkit. 6 In all cases, the BiLSTM models use 128 dimensional word embeddings and 128 dimensional hidden layers. We set the learning rate to 1.0 and use stochastic gradient descent model to learn the parameters. We evaluate all algorithms on the gold testing sets, evaluating in terms of tagging accuracy. Following standard practice in POS tagging, we report results using per-token accuracy (i.e., the fraction of predicted tags that exactly match the gold standard tags). Note that for all our experiments, Figure 3: Bias transformation matrix A between POS tags and projection outputs, shown respectively as columns and rows for the eight languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 114, |
|
"text": "Duong et al. (2014)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 618, |
|
"end": 619, |
|
"text": "6", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup and baselines", |
|
"sec_num": "4.2" |
|
}, |
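{
"text": "For clarity, per-token accuracy is simply the proportion of exact matches against the gold tags; a minimal sketch:\ndef token_accuracy(predicted, gold):\n    # predicted, gold: flat, equal-length lists of tag labels.\n    return sum(p == g for p, g in zip(predicted, gold)) / len(gold)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Setup and baselines",
"sec_num": "4.2"
},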
|
{ |
|
"text": "we work with the universal POS tags and accordingly accuracy is measured against the gold tags after automatic mapping into the universal tagset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup and baselines", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "First, we present the results for the 8 simulation languages in Table 1 . For most of the languages our method performs better than that of Duong et al. (2014) and the three naive BiLSTM baselines. Directly training on projected data hurts the performance, which can be seen by comparing BiLSTM Projected and BiLSTM Ann+Proj. BiL-STM Annotated mostly outperforms MaxEnt Supervised, but both methods are worse than Duong et al. and our BiLSTM Debias, which both use the projected data more effectively. The results show the debiasing layer makes more effective use of the projected data, improving the POS tagging accuracy. We show the learned bias transformation matrices for the different languages in Figure 3 . The blue (dark) cells in the grids denote values that are most highly weighted. Note the strong diagonal, showing that the tags are mostly trusted, although there is also evidence of significant mass in offdiagonal entries. The worst case is in Greek (el) with many weak values on the diagonal. In this case, PRON and X appear to be confused for one another. The light cells are also important, show- 86.7 82.6 81.2 81.9 ing tag combinations that the model learns to ignore, such as CONJ vs DET in Spanish (es) and PRON vs ADP in Swedish (sv). The tokens that are CONJ in Spanish (es) are seldom projected as DET. Overall, for most of languages the level of debiasing is modest, which might not come as a surprise given the large, clean parallel corpus for learning word alignments. Now we present results for the two low-resource languages, Malagasy and Kinyarwanda, which Figure 4: Bias transformation matrix A between POS tags and projected outputs, shown respectively as columns and rows for the two low-resource languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 159, |
|
"text": "Duong et al. (2014)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 64, |
|
"end": 71, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 703, |
|
"end": 711, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "both have much smaller parallel corpora. The results in Table 2 show that our method works better than all others in both languages, with a similar pattern of results as for the European languages. We also used the original Penn treebank tagset for both two languages. The results of BiLSTM Debias (Penn) show a small improvement, presumably due to the information loss in the mapping to the universal tagset. Note that our method outperforms the state of the art on both languages (Duong et al., 2014; . To better understand the effect of the bias layer, we present the learned transformation matrices A in Figure 4 . Note the strong diagonal for Malagasy in Figure 4 , showing that each tag is most likely to map to itself, however there are also many high magnitude off-diagonal elements. For instance nouns map to not just nouns, but also adjectives and numbers, but never pronouns (which are presumably well aligned). Comparing results of Malagasy and Kinyarwanda in Figure 4 , we can see the divergence between the gold and projected tags is much greater in Kinyarwanda. This tallies with the performance results, in which we get stronger results and a greater improvement on Malagasy from using projection data where we had more parallel data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 482, |
|
"end": 502, |
|
"text": "(Duong et al., 2014;", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 56, |
|
"end": 63, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 608, |
|
"end": 616, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 660, |
|
"end": 668, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 972, |
|
"end": 980, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "In this paper we presented a technique for exploiting errorful cross-lingual projected annotations alongside a small amount of annotated data in the context of POS tagging. Projection on its own is unreliable and simple combination with gold is not sufficient to improve accuracy, even with only a tiny handful of gold annotations. To utilize both sources of data, we proposed a new model based on a bidirectional long short-term memory recurrent neural network, with a layer for explicitly handling projection labels. Over eight European and two real low-resource languages, our methods outperform other algorithms. Our technique is general, and is likely to prove useful for exploiting other noisy and biased annotations such as distant supervision and crowd-sourced annotations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "See \u00a73.2 for the model details, and \u00a74.1 for a description of the datasets and evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use the universal tagset from, enabling easier comparison with related work, although this is not a requirement of our work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our approach also supports mismatching tagsets, in which case A would be rectangular with dimensions based on the sizes of the two tag sets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.cs.cmu.edu/\u02dcark/ global-voices 5 The dataset was provided directly by Noah Smith.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/clab/cnn", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Michele Banko and Robert C Moore. 2004. Part of speech tagging in context. In Proceedings of COL-ING, page 556.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "The mathematics of statistical machine translation: Parameter estimation", |
|
"authors": [ |
|
{ |
|
"first": "Vincent J Della", |
|
"middle": [], |
|
"last": "Peter F Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen A Della", |
|
"middle": [], |
|
"last": "Pietra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert L", |
|
"middle": [], |
|
"last": "Pietra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mercer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Computational linguistics", |
|
"volume": "19", |
|
"issue": "2", |
|
"pages": "263--311", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter F Brown, Vincent J Della Pietra, Stephen A Della Pietra, and Robert L Mercer. 1993. The mathemat- ics of statistical machine translation: Parameter esti- mation. Computational linguistics, 19(2):263-311.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Conll-x shared task on multilingual dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Sabine", |
|
"middle": [], |
|
"last": "Buchholz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erwin", |
|
"middle": [], |
|
"last": "Marsi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "149--164", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sabine Buchholz and Erwin Marsi. 2006. Conll-x shared task on multilingual dependency parsing. In Proceedings of CoNLL, pages 149-164.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Unsupervised part-of-speech tagging with bilingual graph-based projections", |
|
"authors": [ |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "600--609", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dipanjan Das and Slav Petrov. 2011. Unsupervised part-of-speech tagging with bilingual graph-based projections. In Proceedings of ACL, pages 600-609.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "What can we get from 1000 tokens? a case study of multilingual pos tagging for resource-poor languages", |
|
"authors": [ |
|
{ |
|
"first": "Long", |
|
"middle": [], |
|
"last": "Duong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Cohn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karin", |
|
"middle": [], |
|
"last": "Verspoor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bird", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Cook", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Long Duong, Trevor Cohn, Karin Verspoor, Steven Bird, and Paul Cook. 2014. What can we get from 1000 tokens? a case study of multilingual pos tag- ging for resource-poor languages. In Proceedings of EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Learning a part-of-speech tagger from two hours of annotation", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Garrette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Baldridge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "138--147", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Garrette and Jason Baldridge. 2013. Learning a part-of-speech tagger from two hours of annotation. In Proceedings of NAACL, pages 138-147.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Real-world semi-supervised learning of postaggers for low-resource languages", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Garrette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Mielens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Baldridge", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "583--592", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Garrette, Jason Mielens, and Jason Baldridge. 2013. Real-world semi-supervised learning of pos- taggers for low-resource languages. In Proceedings of ACL, pages 583-592.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "EM can find pretty good HMM POS-taggers (when given a good start)", |
|
"authors": [ |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meni", |
|
"middle": [], |
|
"last": "Adler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Elhadad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "746--754", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoav Goldberg, Meni Adler, and Michael Elhadad. 2008. EM can find pretty good HMM POS-taggers (when given a good start). In Proceedings of ACL, pages 746-754.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Speech recognition with deep recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Graves", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohamed", |
|
"middle": [], |
|
"last": "Abdel-Rahman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of ICASSP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6645--6649", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alan Graves, Abdel-rahman Mohamed, and Geoffrey Hinton. 2013. Speech recognition with deep recur- rent neural networks. In Proceedings of ICASSP, pages 6645-6649.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Cross-lingual dependency parsing based on distributed representations", |
|
"authors": [ |
|
{ |
|
"first": "Jiang", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wanxiang", |
|
"middle": [], |
|
"last": "Che", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Yarowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haifeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of ACL-CoNLL", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1234--1244", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiang Guo, Wanxiang Che, David Yarowsky, Haifeng Wang, and Ting Liu. 2015. Cross-lingual depen- dency parsing based on distributed representations. In Proceedings of ACL-CoNLL, volume 1, pages 1234-1244.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "A resource-light approach to russian morphology: Tagging russian using czech resources", |
|
"authors": [ |
|
{ |
|
"first": "Jiri", |
|
"middle": [], |
|
"last": "Hana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Feldman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Brew", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "222--229", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiri Hana, Anna Feldman, and Chris Brew. 2004. A resource-light approach to russian morphology: Tagging russian using czech resources. In Proceed- ings of EMNLP, pages 222-229.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Conditional random fields: Probabilistic models for segmenting and labeling sequence data", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Lafferty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of ICML", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "282--289", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Lafferty, Andrew McCallum, and Fernando Pereira. 2001. Conditional random fields: Prob- abilistic models for segmenting and labeling se- quence data. In Proceedings of ICML, volume 1, pages 282-289.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Wiki-ly supervised part-of-speech tagging", |
|
"authors": [ |
|
{ |
|
"first": "Shen", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joao", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Gra\u00e7a", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Taskar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1389--1398", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shen Li, Joao V Gra\u00e7a, and Ben Taskar. 2012. Wiki-ly supervised part-of-speech tagging. In Proceedings of EMNLP-CoNLL, pages 1389-1398.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Maximum entropy markov models for information extraction and segmentation", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dayne", |
|
"middle": [], |
|
"last": "Freitag", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando Cn", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of ICML", |
|
"volume": "17", |
|
"issue": "", |
|
"pages": "591--598", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew McCallum, Dayne Freitag, and Fernando CN Pereira. 2000. Maximum entropy markov mod- els for information extraction and segmentation. In Proceedings of ICML, volume 17, pages 591-598.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Multi-source transfer of delexicalized dependency parsers", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keith", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "62--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan McDonald, Slav Petrov, and Keith Hall. 2011. Multi-source transfer of delexicalized dependency parsers. In Proceedings of EMNLP, pages 62-72.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Universal dependency annotation for multilingual parsing", |
|
"authors": [ |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Ryan T Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yvonne", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Quirmbach-Brundage", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kuzman", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ganchev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Keith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oscar", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "T\u00e4ckstr\u00f6m", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "92--97", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan T McDonald, Joakim Nivre, Yvonne Quirmbach- Brundage, Yoav Goldberg, Dipanjan Das, Kuzman Ganchev, Keith B Hall, Slav Petrov, Hao Zhang, Os- car T\u00e4ckstr\u00f6m, et al. 2013. Universal dependency annotation for multilingual parsing. In Proceedings of ACL, pages 92-97.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Distant supervision for relation extraction without labeled data", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Mintz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bills", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of ACL-IJCNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1003--1011", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Mintz, Steven Bills, Rion Snow, and Dan Juraf- sky. 2009. Distant supervision for relation extrac- tion without labeled data. In Proceedings of ACL- IJCNLP, pages 1003-1011.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "A systematic comparison of various statistical alignment models", |
|
"authors": [ |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Franz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Och", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Computational linguistics", |
|
"volume": "29", |
|
"issue": "1", |
|
"pages": "19--51", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franz Josef Och and Hermann Ney. 2003. A sys- tematic comparison of various statistical alignment models. Computational linguistics, 29(1):19-51.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A universal part-of-speech tagset", |
|
"authors": [ |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Slav Petrov, Dipanjan Das, and Ryan McDonald. 2011. A universal part-of-speech tagset. In Proceedings of ACL.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Adapting taggers to twitter with not-so-distant supervision", |
|
"authors": [ |
|
{ |
|
"first": "Barbara", |
|
"middle": [], |
|
"last": "Plank", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Ryan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1783--1792", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Barbara Plank, Dirk Hovy, Ryan T McDonald, and An- ders S\u00f8gaard. 2014. Adapting taggers to twitter with not-so-distant supervision. In Proceedings of COLING, pages 1783-1792.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Modeling missing data in distant supervision for information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Ritter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "367--378", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alan Ritter, Luke Zettlemoyer, Oren Etzioni, et al. 2013. Modeling missing data in distant supervision for information extraction. Transactions of the As- sociation for Computational Linguistics, 1:367-378.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Token and type constraints for cross-lingual part-of-speech tagging", |
|
"authors": [ |
|
{ |
|
"first": "Oscar", |
|
"middle": [], |
|
"last": "T\u00e4ckstr\u00f6m", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1--12", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oscar T\u00e4ckstr\u00f6m, Dipanjan Das, Slav Petrov, Ryan McDonald, and Joakim Nivre. 2013. Token and type constraints for cross-lingual part-of-speech tag- ging. Transactions of the Association for Computa- tional Linguistics, 1:1-12.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Reducing wrong labels in distant supervision for relation extraction", |
|
"authors": [ |
|
{ |
|
"first": "Shingo", |
|
"middle": [], |
|
"last": "Takamatsu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Issei", |
|
"middle": [], |
|
"last": "Sato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroshi", |
|
"middle": [], |
|
"last": "Nakagawa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "721--729", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shingo Takamatsu, Issei Sato, and Hiroshi Nakagawa. 2012. Reducing wrong labels in distant supervi- sion for relation extraction. In Proceedings of ACL, pages 721-729.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Feature-rich part-ofspeech tagging with a cyclic dependency network", |
|
"authors": [ |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoram", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Singer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "173--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kristina Toutanova, Dan Klein, Christopher D Man- ning, and Yoram Singer. 2003. Feature-rich part-of- speech tagging with a cyclic dependency network. In Proceedings of NAACL, pages 173-180.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Cross-lingual pseudo-projected expectation regularization for weakly supervised learning", |
|
"authors": [ |
|
{ |
|
"first": "Mengqiu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Transactions of the Association of Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mengqiu Wang and Christopher D Manning. 2014. Cross-lingual pseudo-projected expectation regular- ization for weakly supervised learning. Transac- tions of the Association of Computational Linguis- tics, 2.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Inducing multilingual POS taggers and NP brackets via robust projection across aligned corpora", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Yarowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Grace", |
|
"middle": [], |
|
"last": "Ngai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Yarowsky and Grace Ngai. 2001. Inducing mul- tilingual POS taggers and NP brackets via robust projection across aligned corpora. In Proceedings of NAACL.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null, |
|
"text": "Figure 1. A word in Malagasy is connected to a fanomezan-kevitra ny vaovao (NULL) inspiring a new generation . An example of POS projection via word alignments. * indicates unknown POS tag, which we treat as having a tag distribution over all tokens in the source sentence (in the example, a uniform mix of VERB, DET, ADJ, NOUN and '.')." |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"type_str": "table", |
|
"text": "The POS tagging accuracy for various models in Malagasy and Kinyarwanda. The top results of the second part are taken fromDuong et al. (2014), evaluated on the same data split. Penn indicates the Penn treebank tagset. The proposed BiLSTM Debias can use different tagsets for the source language.", |
|
"content": "<table/>", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |