|
{ |
|
"paper_id": "2017", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:00:53.202597Z" |
|
}, |
|
"title": "Using Deep Neural Networks to Learn Syntactic Agreement", |
|
"authors": [ |
|
{ |
|
"first": "Jean-Phillipe", |
|
"middle": [], |
|
"last": "Bernardy", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "LAPPIN", |
|
"institution": "Mary University of London", |
|
"location": { |
|
"region": "Queen" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We consider the extent to which different deep neural network (DNN) configurations can learn syntactic relations, by taking up Linzen et al.'s (2016) work on subject-verb agreement with LSTM RNNs. We test their methods on a much larger corpus than they used (a \u21e024 million example part of the WaCky corpus, instead of their \u21e01.35 million example corpus, both drawn from Wikipedia). We experiment with several different DNN architectures (LSTM RNNs, GRUs, and CNNs), and alternative parameter settings for these systems (vocabulary size, training to test ratio, number of layers, memory size, drop out rate, and lexical embedding dimension size). We also try out our own unsupervised DNN language model. Our results are broadly compatible with those that Linzen et al. report. However, we discovered some interesting, and in some cases, surprising features of DNNs and language models in their performance of the agreement learning task. In particular, we found that DNNs require large vocabularies to form substantive lexical embeddings in order to learn structural patterns. This finding has interesting consequences for our understanding of the way in which DNNs represent syntactic information. It suggests that DNNs learn syntactic patterns more efficiently through rich lexical embeddings, with semantic as well as syntactic cues, than from training on lexically impoverished strings that highlight structural patterns.", |
|
"pdf_parse": { |
|
"paper_id": "2017", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We consider the extent to which different deep neural network (DNN) configurations can learn syntactic relations, by taking up Linzen et al.'s (2016) work on subject-verb agreement with LSTM RNNs. We test their methods on a much larger corpus than they used (a \u21e024 million example part of the WaCky corpus, instead of their \u21e01.35 million example corpus, both drawn from Wikipedia). We experiment with several different DNN architectures (LSTM RNNs, GRUs, and CNNs), and alternative parameter settings for these systems (vocabulary size, training to test ratio, number of layers, memory size, drop out rate, and lexical embedding dimension size). We also try out our own unsupervised DNN language model. Our results are broadly compatible with those that Linzen et al. report. However, we discovered some interesting, and in some cases, surprising features of DNNs and language models in their performance of the agreement learning task. In particular, we found that DNNs require large vocabularies to form substantive lexical embeddings in order to learn structural patterns. This finding has interesting consequences for our understanding of the way in which DNNs represent syntactic information. It suggests that DNNs learn syntactic patterns more efficiently through rich lexical embeddings, with semantic as well as syntactic cues, than from training on lexically impoverished strings that highlight structural patterns.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The extent to which machine learning systems in general, and neural networks in particular, can learn regularities that depend on syntactic structure is a topic of ongoing debate. 1 Subject-verb number agreement is a paradigmatic case of a syntactic feature that depends on structural properties of a sentence. Linzen et al. (2016) train a Long Short Term Memory Recurrent Neural Network (LSTM RNN) on a subset of a Wikipedia corpus to predict the number of a verb. As they observe, the task increases in difficulty in relation to the length of the sequence of NPs with the wrong number feature that occur between a subject and the verb that it controls. They refer to such intervening NPs as attractors. 2 In (1) the attractors for the The students-submit pair are indicated in boldface.", |
|
"cite_spans": [ |
|
{ |
|
"start": 311, |
|
"end": 331, |
|
"text": "Linzen et al. (2016)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(1a) The students submit a final project to complete the course.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "(b) The students enrolled in the program submit a final project to complete the course. (c) The students enrolled in the program in the Department submit a final project to complete the course. (d) The students enrolled in the program in the Department where my colleague teaches submit a final project to complete the course. Linzen et. al use a dependency parser to identify the controlling subject of each verb in their corpus of examples. This identification is necessary to compute the number of attractors, but it is not used in the training for the number prediction task, because the number of the verb is morphologically manifest in the raw data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "They train their LSTM RNN on \u21e0121,500 examples (9% of the total corpus) by supervised learning, in which the system is shown the correct number feature of the verb. They test the number predictions of their RNN on \u21e01.21 million examples (90% of their corpus). They encode the input words as vectors in 50 dimensions, and their RNN has 50 hidden units. They report that their system achieves 99% accuracy in the number prediction task for cases with 0 attractors between the subject and its verb, and declines to 83% accuracy for examples with 4 attractors. They do not report scores for examples with more than 4 attractors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In addition to the supervised number prediction task, they train a generative language model (predicting the next word) and use it to predict the number of the verb in an unsupervised manner. By contrast to their supervised LSTM RNN, Linzen et al.'s language model goes below chance in its predictions for 4 attractor cases. While the much larger Google LM (J\u00f3zefowicz et al., 2016) does better, at a \u21e045% error for 4 attractors, it also performs well below their supervised RNN. They conclude that a considerable amount of syntactic structure is accessible to DNN learning if training is properly supervised.", |
|
"cite_spans": [ |
|
{ |
|
"start": 357, |
|
"end": 382, |
|
"text": "(J\u00f3zefowicz et al., 2016)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our main objective in the work that we report here is to explore the capabilities, and the limitations of DNNs for learning complex syntactic relations which depend on structural properties of sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We used methods similar to Linzen et al.'s to test several DNN models on a much larger corpus. We experimented with different DNN architectures, and with alternative values for the following parameters:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": ". Ratio of training to testing as a partition of the corpus . Number of hidden units (memory size) . Vocabulary size . Number of layers . Dropout rate . Lexical embedding dimension size", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In addition we applied our own language model to the number prediction task, testing two distinct methods of predicting verb number from the model's probability distribution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our results are broadly compatible with those that Linzen et al. present. However, some of the correlations that we observed between certain parameter settings and levels of performance surprised us.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Specifically, by working with different vocabulary sizes, lexical encodings, and embedding sizes we discovered that our supervised DNN models learn agreement patterns more effectively from rich word embeddings than from abstract syntactically annotated input. We also found that, in general, our models required larger amounts of training relative to testing than Linzen et al. describe for their system, in order to reach the performance that they report. 3 We have observed that increasing the values of hyperparameters (and thereby the number of degrees of freedom in the network itself) generally improves accuracy, even if after a certain point, overfitting is observed. But changes to any individual hyperparameter do not create dramatic effects. However, the total effect of hypeparameter optimisation is quite significant.", |
|
"cite_spans": [ |
|
{ |
|
"start": 457, |
|
"end": 458, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Finally, we were able to construct a language model with significantly better (unsupervised) prediction. Yet, our model is much smaller than the Google LM. All of our supervised DNNs outperformed our language model on the number prediction task. 4 1 Experimental Design 1.1 Corpus For our experiments, we used the WaCkypedia English corpus (Baroni et al., 2009) , which contains \u21e024 million example cases of present tense subjectverb agreement. 5 The corpus is annotated for POS by TreeTagger (Schmid, 1995) , and for dependency relations by the MaltParser (Nivre et al., 2007) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 246, |
|
"end": 247, |
|
"text": "4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 340, |
|
"end": 361, |
|
"text": "(Baroni et al., 2009)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 493, |
|
"end": 507, |
|
"text": "(Schmid, 1995)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 557, |
|
"end": 577, |
|
"text": "(Nivre et al., 2007)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Linzen et al. restricted training and testing to one agreement case per sentence in their corpus. We used the full set of number agreement relations in the sentences of our corpus for our experimental work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "They also limit their test, but not their training examples to cases in which all NPs intervening between the subject and the verb that it controls are attractors. We did not adopt this constraint on our test sets of examples. We included the cases in which agreeing, as well as non-agreeing NPs intervened between the subject and its verb. We are interested in measuring the accuracy with which a DNN predicts verb number in complex and possibly confusing syntactic sequences. Training and testing less filtered data of the kind that involves discarding the non-agreement constraint that Linzen et al. apply to their test set provides a realistic indication of the success with which different types of DNN learn number agreement patterns.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Therefore, our test procedures depart slightly from those of Linzen et al. in relaxing both the single agreement case and the (non-) agreement conditions on the test set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We experiment with three types of DNN. We use a version of Linzen et al.'s LSTM RNN, a Convolutionnal Neural Network (CNN) similar to that presented in Kalchbrenner et al. (2016) , and a Gated Recurrent Unit (GRU) network of the kind described in Cho et al. (2014) We implemented our own versions of an LSTM RNN, GRU RNN, and CNN, using the Keras library (Chollet, 2015) with a TensorFlow backend. Our LSTM RNN and GRU RNN have standard architectures. We apply a dense layer with sigmoid activation to the output of the latest RNN cell to obtain the verb-number classifier.", |
|
"cite_spans": [ |
|
{ |
|
"start": 152, |
|
"end": 178, |
|
"text": "Kalchbrenner et al. (2016)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 264, |
|
"text": "Cho et al. (2014)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 355, |
|
"end": 370, |
|
"text": "(Chollet, 2015)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models Models Trained on the Inflection Task", |
|
"sec_num": "1.2" |
|
}, |
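
{

"text": "The following is a minimal sketch, in Keras (the library we use), of the kind of recurrent verb-number classifier just described: an embedding layer feeding a single recurrent layer, with a dense sigmoid unit applied to its final output. The layer sizes follow our benchmark configuration, but the prefix length and all variable names are our own illustrative assumptions; this is not the code released with the paper.\n\n# Illustrative sketch of the supervised verb-number classifier (assumed sizes).\nfrom keras.models import Sequential\nfrom keras.layers import Embedding, LSTM, GRU, Dense\n\nvocab_size = 10000       # benchmark vocabulary size\nembedding_dim = 50       # benchmark lexical embedding dimension\nmemory_units = 150       # benchmark number of recurrent units\nmax_prefix_len = 50      # assumed maximum length of the pre-verb prefix\n\nmodel = Sequential()\nmodel.add(Embedding(vocab_size, embedding_dim, input_length=max_prefix_len))\nmodel.add(LSTM(memory_units))              # swap in GRU(memory_units) for the GRU variant\nmodel.add(Dense(1, activation='sigmoid'))  # verb-number classifier: plural vs. singular\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Models Models Trained on the Inflection Task",

"sec_num": "1.2"

},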
|
{ |
|
"text": ". 6 class R 5 R 10 R 15 R 20 R 50 {0, 1}", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models Models Trained on the Inflection Task", |
|
"sec_num": "1.2" |
|
}, |
|
{ |
|
"text": "Our CNN has 6 levels with filtering successively compressing vector dimensions from 15 through 20, 15, 10 to 5. Convolution at these levels yields 7, 5, 5, and 3 features, respectively. Every convolution layer uses a ReLU activation function. The last layer is a dense layer with sigmoid activation. We represent this CNN graphically in fig.1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 337, |
|
"end": 342, |
|
"text": "fig.1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Models Models Trained on the Inflection Task", |
|
"sec_num": "1.2" |
|
}, |
|
{ |
|
"text": "In addition to models which predict verb numbers, we have trained a generative language model. For this purpose we use an LSTM RNN with two layers, 1200 units per layer, and a dropout rate of 0.5 to generate a language model from the WaCky corpus of sentences. 7 We did this by employing the 100 most common words and substituting corresponding POS tags for the others in the sentences. This design is intended both to improve the performance of the language model by restricting the number of possible outputs. It is targetted to focus on syntactic patterns that we seek to recognise by removing most of the semantic (lexical) features in the corpus. We have performed experiments with 100, 10k and 100k common words on the supervised number prediction task, to control for the role of the lexicon in supervised learning.", |
|
"cite_spans": [ |
|
{ |
|
"start": 261, |
|
"end": 262, |
|
"text": "7", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A Generative Language Model", |
|
"sec_num": null |
|
}, |
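
{

"text": "A minimal sketch, under our own assumptions about input windowing and vocabulary encoding, of the generative language model described above: two stacked LSTM layers of 1200 units with a dropout rate of 0.5, and a softmax over the reduced vocabulary of the 100 most common words plus POS tags. The exact dropout placement, window length, and embedding size are illustrative assumptions, not values taken from the paper.\n\n# Illustrative sketch of the LSTM language model (assumed input handling).\nfrom keras.models import Sequential\nfrom keras.layers import Embedding, LSTM, Dropout, Dense\n\nvocab_size = 120    # assumed: roughly 100 frequent words plus the POS-tag symbols\nembedding_dim = 50  # assumed embedding size for the LM\nwindow = 40         # assumed length of the training window\n\nmodel = Sequential()\nmodel.add(Embedding(vocab_size, embedding_dim, input_length=window))\nmodel.add(LSTM(1200, return_sequences=True))\nmodel.add(Dropout(0.5))   # the paper uses dropout 0.5; its placement here is an approximation\nmodel.add(LSTM(1200))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(vocab_size, activation='softmax'))  # distribution over the next token\nmodel.compile(loss='sparse_categorical_crossentropy', optimizer='adam')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "A Generative Language Model",

"sec_num": null

},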
|
{ |
|
"text": "For our supervised learning experiments we followed Linzen et al. in training and testing procedures. We trained each DNN on a portion of our agreement example corpus using customary back propagation and gradient descent, with the Adam (Kingma and Ba, 2014) optimizer. The training set was subdivided into a proper training and validation set. After each run through the proper training set, the average loss was computed for the validation set. We ended training when the validation loss function reached a local minimum value.", |
|
"cite_spans": [ |
|
{ |
|
"start": 236, |
|
"end": 257, |
|
"text": "(Kingma and Ba, 2014)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods", |
|
"sec_num": "1.3" |
|
}, |
|
{ |
|
"text": "Contrary to Linzen et al. we applied a relatively large batch size (1024 instead of 16). We tested the resulting model on the remainder of the corpus by having it predict the number feature of the verb in each example of the test set. As stated above, the dependency parse structures were not used in the training. They were applied only to determine the number of attractors between the subject and the verb that it controls, as well as all other intervening nouns in this sequence. The part-of-speech tagging was employed both to determine the correct number of the verb and to limit the vocabulary in a syntactically meaningful way, as explained below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods", |
|
"sec_num": "1.3" |
|
}, |
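
{

"text": "The training regimen just described can be sketched as follows, reusing the classifier model from the earlier sketch; X_train and y_train are hypothetical arrays of encoded pre-verb prefixes and binary number labels, and the epoch cap and validation split are our own assumptions rather than values reported in the paper.\n\n# Illustrative training setup: Adam, batch size 1024, and early stopping at the\n# first rise of the validation loss (i.e. at a local minimum).\nfrom keras.callbacks import EarlyStopping\n\nearly_stop = EarlyStopping(monitor='val_loss', patience=0)\n\nmodel.fit(\n    X_train, y_train,        # hypothetical encoded prefixes and verb-number labels\n    batch_size=1024,         # our batch size (Linzen et al. used 16)\n    epochs=50,               # assumed upper bound; early stopping normally ends training sooner\n    validation_split=0.1,    # assumed size of the validation slice of the training data\n    callbacks=[early_stop])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Methods",

"sec_num": "1.3"

},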
|
{ |
|
"text": "We first identified a benchmark of reasonable performance for our DNN configuration and training. We settled on an LSTM with one layer of 150 units and no dropout, a data-set constructed with 10000 words, lexical embeddings of dimension 50, and a training regimen of 90% of the corpus. We then conducted experiments varying each of these parameters independently, holding the others constant. We ran each DNN with training on 10%, 50%, and 90% of the corpus, testing on the remainder for each split. We tested 50, 150, 450 and 1350 units for the LSTM layers. We experimented with embedding vocabulary sizes of 100, 10k and 100k words, substituting corresponding POS tags for the rest. We used 1, 2 and 4 layers for our LSTM RNN. We tested dropout rates of 0, 0.1, 0.2 and 0.5. These dropouts applied to the weights within the LSTM layers, but not at the final dense layer. Finally we tested lexical embedding dimension sizes of 17, 50, 150, and 450.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods", |
|
"sec_num": "1.3" |
|
}, |
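
{

"text": "For reference, the parameter values explored above can be written out as a simple grid; the dictionary below only restates the values listed in the text, with illustrative key names.\n\n# Parameter values varied one at a time around the benchmark configuration\n# (LSTM, 1 layer, 150 units, no dropout, 10,000-word vocabulary, embedding 50, 90% training).\nparam_grid = {\n    'training_fraction': [0.10, 0.50, 0.90],\n    'memory_units': [50, 150, 450, 1350],\n    'vocabulary_size': [100, 10000, 100000],\n    'num_layers': [1, 2, 4],\n    'dropout_rate': [0.0, 0.1, 0.2, 0.5],\n    'embedding_dim': [17, 50, 150, 450],\n}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Methods",

"sec_num": "1.3"

},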
|
{ |
|
"text": "Our working hypothesis for the reduced vocabulary experiment was that a DNN would learn the target syntactic dependency pattern more efficiently if it was exposed to input consisting largely of POS sequences in which number features are marked on noun and verb tags. We thought that such input would highlight the dependency relations more clearly by abstracting away from possibly confounding distributional lexical information contained in richer embeddings. Specifically, we conjectured that if impoverished lexical sequences are used to exhibit a structural relation, with the relevant number feature spotlighted, it would be easier for a DNN to discern the relevant agreement pattern. Adding rich lexical content that includes semantic cues might conceal this pattern by adding irrelevant distributional information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods", |
|
"sec_num": "1.3" |
|
}, |
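
{

"text": "A sketch of the vocabulary-reduction step assumed in these experiments: tokens outside the N most frequent words are replaced by their POS tags (which, in the corpus annotation, mark number on nouns and verbs). The function and variable names, and the toy tag set in the example, are our own.\n\n# Illustrative vocabulary reduction: keep the N most frequent words and\n# substitute the POS tag for every other token.\nfrom collections import Counter\n\ndef build_vocab(tagged_sentences, n_words):\n    counts = Counter(word.lower() for sent in tagged_sentences for word, tag in sent)\n    return {w for w, _ in counts.most_common(n_words)}\n\ndef reduce_sentence(tagged_sentence, vocab):\n    return [word.lower() if word.lower() in vocab else tag\n            for word, tag in tagged_sentence]\n\n# Hypothetical input: one sentence as a list of (word, POS tag) pairs.\nsents = [[('The', 'DT'), ('students', 'NNS'), ('submit', 'VVP'), ('a', 'DT'), ('project', 'NN')]]\nvocab = build_vocab(sents, n_words=3)\nprint(reduce_sentence(sents[0], vocab))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Methods",

"sec_num": "1.3"

},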
|
{ |
|
"text": "Our language model provides us with an unsupervised system for predicting agreement. We tested two ways for doing this. Let p(w i |w i 1 , ..., w i k ) be the predicted probability of a word w in a string, given the prefix of w i 1 , ..., w i k of preceding words in that string.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Predictions Using a Generative Language Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For the first way of predicting verb agreement we determine for each example in our test set if (1.1) holds, when V n ranges over verbs with the correct number feature (n) in a particular string (i.e. the number agreeing with that of the verb's controlling subject), and V \u00acn ranges over verbs with the wrong number feature in that string. X", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Predictions Using a Generative Language Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "V n p(V n i |w i 1 , ..., w i k ) > X V \u00acn p(V \u00acn i |w i 1 , ..., w i k ) (1.1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Predictions Using a Generative Language Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The second way of predicting verb agreement is to test if (1.2) applies. In this case we check wether the model gives a higher probability for occurrence of the inflected V erb i which is found in the test set versus the same verb with the opposite inflection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Predictions Using a Generative Language Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "p(V erb n i |w i 1 , ..., w i k ) > p(V erb \u00acn i |w i 1 , ..., w i k ) (1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Predictions Using a Generative Language Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "2) The prefix w i 1 , ..., w i k is identical on both sides of the inequality in each case.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Predictions Using a Generative Language Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The first method measures the summed conditional probabilities of all correctly number inflected verbs occurring after the prefix in a string against the summed predicted probabilities of all incorrectly number inflected verbs appearing in this position. We will refer to this as the summing method of predicting verb number from a LM.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Predictions Using a Generative Language Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The second procedure compares only the predicted probability of the correctly number-marked form of the actual verb in this position with that of its incorrectly marked form. We will describe it as the verb targeted method of predicting verb number from a LM.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Predictions Using a Generative Language Model", |
|
"sec_num": null |
|
}, |
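
{

"text": "The two decision rules can be sketched as follows, assuming the language model returns a probability distribution over the vocabulary for the position following the pre-verb prefix, and that we know which vocabulary indices correspond to correctly and incorrectly inflected verb forms. All names and the toy distribution are illustrative.\n\nimport numpy as np\n\ndef summing_method(probs, correct_ids, incorrect_ids):\n    # (1.1): total mass of all correctly inflected verb forms vs. all incorrectly inflected ones.\n    return probs[correct_ids].sum() > probs[incorrect_ids].sum()\n\ndef verb_targeted_method(probs, correct_verb_id, opposite_verb_id):\n    # (1.2): probability of the actual verb form vs. the same verb with the opposite number.\n    return probs[correct_verb_id] > probs[opposite_verb_id]\n\n# Hypothetical next-word distribution after a pre-verb prefix.\nprobs = np.array([0.05, 0.40, 0.10, 0.30, 0.15])\nprint(summing_method(probs, correct_ids=np.array([1, 3]), incorrect_ids=np.array([2, 4])))\nprint(verb_targeted_method(probs, correct_verb_id=1, opposite_verb_id=2))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Predictions Using a Generative Language Model",

"sec_num": null

},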
|
{ |
|
"text": "In the summing method the LM is not given any semantic cue, whereas in the verb targeted method method it is given the cue of which verb is expected. Yet in our 100-word vocabulary there are only 4 inflectible verb forms: \"to be\", \"to have\", \"to state\", and other verbs lumped together in the \"VV\" part of speech code, so this semantic information is relatively limited.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Predictions Using a Generative Language Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We present our results in figures 2 to 6. In all the figures, except for fig. 6 , the y-axis gives the accuracy rate, and the x-axis the number of NP attractors. We found that 73% of the verbs in our test sets are singular. This provides a majority choice baseline, which is indicated by a straight horizontal line in these figures. Figures 2a and 2b shows that using a reduced vocabulary of 100 most common words, substituting POS tags for the remainder, consistently reduces accuracy across DNN architectures, for the supervised learning task. Increasing the vocabulary to 100k words generally yields a significant improvement in performance for the LSTM RNN, but gives mixed results for our CNN. Fig. 3a indicates that increasing the ratio of training to testing examples from 10% to 50% significantly improves the performance of the LSTM RNN (with 150 units and a vocabulary of 10,000 word embeddings). Further increasing it to 90% does not make much of a difference, even degrading accuracy slightly at 6 attractors. Fig. 3b reveals that the LSTM and GRU RNNs perform at a comparable level, and both achieve significantly better accuracy than our CNN. Fig. 3c suggests that increasing the number of units in an LSTM RNN from 50 to 150 improves its performance on the task, while further expanding this number to 450 yields greater accuracy in relation to the number to attractors. Each three-fold increase in the number of units achieves a similar improvement in percentage points for a higher number of attractors. A further increase to 1350 provides only a small overall improvement. Fig. 3d indicates that increasing the number of layers for an LSTM RNN from 1 150-unit layer to 2 such layers marginally improves its performance. A further increase to 4 150-unit layers makes no clear difference. Fig. 3e shows that by introducing a dropout rate of 0.1 for the LSTM RNN (1 layer with 150 units) we improve its performance slightly. An increase to 0.2 provides no clear benefit. Furthuer increasing the dropout rate to 0.5 degrades performance. Fig. 3f indicates that decreasing lexical embedding dimensions from our benchmark 50 to 17 decreases the performance of our LSTM RNN. Increasing dimension size to 150 improves performance, while further expanding it to 450 contributes no benefit, and, for some cases, reduces accuracy. using the summing method (inequality (1.1)) yields results comparable to Linzen et al.'s LM, the verb targeted method (inequality (1.2)) achieves a far higher level of accuracy than their model, and than the results that they give for the much larger Google LM (J\u00f3zefowicz et al., 2016) . We note that Linzen et al. report using the verb targeted method for their results. However the accuracy of our LM with the verb targeted method is still below our best supervised results for the LSTM RNN. Its performance with the verb targeted method is roughly comparable to that of our CNN. Fig. 4 shows the performance of a DNN configured with the best observed parameter values, which are model = LSTM RNN, layers = 2, number of units = 1350, dropout rate = 0.1, vocabulary size = 100k, training = 90%, of the corpus, and lexical embedding size = 150 dimensions. This DNN provides the highest level of accuracy of all the systems that we tested. Finally fig. 6 displays the inverse relation between the number of examples and the number of attractor NPs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 2599, |
|
"end": 2624, |
|
"text": "(J\u00f3zefowicz et al., 2016)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 73, |
|
"end": 79, |
|
"text": "fig. 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 333, |
|
"end": 350, |
|
"text": "Figures 2a and 2b", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 699, |
|
"end": 706, |
|
"text": "Fig. 3a", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1022, |
|
"end": 1029, |
|
"text": "Fig. 3b", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1157, |
|
"end": 1164, |
|
"text": "Fig. 3c", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1591, |
|
"end": 1598, |
|
"text": "Fig. 3d", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1805, |
|
"end": 1812, |
|
"text": "Fig. 3e", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2052, |
|
"end": 2059, |
|
"text": "Fig. 3f", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2921, |
|
"end": 2927, |
|
"text": "Fig. 4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 3286, |
|
"end": 3292, |
|
"text": "fig. 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our results support Linzen et al.'s finding that RNNs, both of the LSTM and GRU variety, are well suited to the task of identifying long distance syntactic dependencies, even when an extended, complex sequence of expressions that could cause confusion intervenes between a controller of a syntactic feature, and the lexical item on which it is realised. However, their success in learning subject-verb agreement scales with the size of the data set on which they train.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Given that RNNs rely on sequential composition of layers of neural units, it is not clear how much hierarchical structure they can identify in their recog- nition of these dependencies. It is also worth noting that we used fairly simple RNNs. It could be that an RNN with a more structured memory that incorporates the equivalent of a stack for encoding the beginning of a dependency and a pop mechanism for releasing it later in a sequence (Grefenstette et al., 2015) would yield even better results. CNNs have been highly successful in image recognition, which involves constructing levels of successfully more abstract visual feature patterns. Therefore it is surprising that our CNN did not perform particularly well on a task that would, at first glance, appear to require learning hierarchical phrase structure.", |
|
"cite_spans": [ |
|
{ |
|
"start": 441, |
|
"end": 468, |
|
"text": "(Grefenstette et al., 2015)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "It is important to recall that we employed a fairly simple, static CNN model. One with a dynamic memory might yield better performance for this task. In general, there is considerable room for exploring alternative architectures before drawing strong conclusions on the capabilities of the entire class of DNNs for learning syntactic relations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "One of the most striking results to emerge from our experiments is that training DNNs on data that is lexically impoverished, but highlights the syntactic elements between which a relation is to be acquired does not, at least in the current case, facilitate learning. On the contrary, it degrades it. DNNs learn better from data populated by richer lexical sequences. This suggests that DNNs are not efficient at picking up abstract syntactic patterns when they are explicitly marked in the data. Instead they extract them incrementally from lexical embeddings through recognition of their distributional regularities. It is also possible that they use the lexical semantic cues that larger vocabularies introduce to determine agreement preferences for a verb.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "It is an open question as to how DNN modes of learning resemble and diverge from human learning. We are making no cognitive claims here. However, it is interesting to note that some recent work in neurolinguistics indicates that syntactic knowledge is distributed through different language centres in the brain, and closely integrated with lexical-semantic representations (Blank et al., 2016) . This lexically encoded and distributed way of representing syntactic information is consistent with the role of rich lexical embeddings in DNN syntactic learning. Finally our results show that a language model can achieve not entirely unreasonable results on the number agreement prediction task, if an appropriate method is applied for comparing the conditional probabilities of alternative number markings on verbs. Our LM is trained on only 100 words, with POS tags substituted for the others in order to highlight the agreement dependency relations that we are seeking to model. This strategy did not improve performance for supervised learning, but it does appear to have been successful for unsupervised learning with a language model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 374, |
|
"end": 394, |
|
"text": "(Blank et al., 2016)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our experimental results strengthen Linzen et al.'s conclusion that DNNs are able to learn long distance syntactic relations to a fairly high degree of accuracy, across extended complex sequences of potentially distracting phrases. We also found that accuracy in the supervised version of this task scales with the amount of training data used, and with the size of the lexical embedding vocabulary.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Performance improves with an increase in the number of hidden units. This effect may be even more pronounced when tracking more complex syntactic relations with multiple features. This is a question that we will explore in future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We also found that it is possible to obtain reasonable results with unsupervised learning through a comparatively small language model, when we use a targeted procedure for predicting verb number. The performance of this model, with this procedure, significantly exceeds that of the two models that Linzen et al. present. In furture work we will experiment with alternative DNN architectures. We are particularly interested in incorporating a structured memory that simulates a stack and pop mechanism for handling long distance dependencies.", |
|
"cite_spans": [ |
|
{ |
|
"start": 299, |
|
"end": 321, |
|
"text": "Linzen et al. present.", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We will also test larger language models trained on bigger vocabularies, using our targeted prediction procedure, to see if our conjecture that a smaller vocabulary produces better probabilistic predictions. This proposal did not hold for the supervised learning case, but we do not know if it will be sustained for unsupervised learning with a language model. One of our main concerns will be to explore syntactic dependencies involv-ing several agreement features. In languages in which gender, and person, as well as number are morphologically realised on verbs the agreement prediction task is more difficult. It requires accuracy across three feature dimensions rather than one. Testing DNNs on agreement in such languages will provide a better sense of their capacity to learn and represent syntactic information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "See Lau et al. (2016) for recent discussion and references. 2 It would actually be more perspicuous to describe these NPs as distractors, given that they are marked for the non-agreeing number. But to avoid confusion, we retain Linzen et al.'s original term.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We could not replicate their near perfect accuracy for zero attractors. This could be because we trained and tested our DNNs on much larger corpora with less filtered dependency sequences. 4 / LILT VOLUME 15, ISSUE 2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "However, by limiting the vocabulary of our LM to the 100 most frequent words we have, in effect, introduced an element of supervision in learning. This feature of the model forces it to attend to POS sequences and abstract away from lexical semantic properties (as well as real world knowledge) that would be introduced with a larger vocabulary.5 We are grateful to the administrators of the WaCKy corpora (http://wacky.sslmit.unibo.it) for giving us access to to this corpus .6 We are grateful to Tal Linzen for sharing the code that Linzen et al. used in their work. We note that our experiments were performed from scratch: both the data and our code are new. The code for our models and our data sets are available at https://github.com/GU-CLASP/ DNNSyntax.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
|
{ |
|
"text": "We also experimented with a dropout rate of 0.1 with our LM, but the results were significantly worse than those that we achieved with 0.5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}
|
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The research reported here was supported by a grant from the Swedish Research Council, which funds the Centre for Linguistic Theory and Studies in Probability in the Department of Philosophy, Linguistics, and Theory of Science at the University of Gothenburg.We are grateful to an anonymous reviewer for helpful comments on a previous draft of this paper. We thank Tal Linzen for valuable discussion of some of the ideas presented in this paper. Earlier versions of the paper were presented to the Chalmers Machine Learning Seminar in April 2017 and the colloquium of the Cambridge Linguistics Society in May 2017. We thank the audiences of these venues for helpful comments and suggestions. We bear sole responsibility for any remaining errors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "The WaCky Wide Web: A collection of very large linguistically processed web-crawled corpora", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Bernardini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Ferraresi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Zanchetta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Language Resources and Evaluation", |
|
"volume": "43", |
|
"issue": "", |
|
"pages": "209--226", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Baroni, M., S. Bernardini, A. Ferraresi, and E. Zanchetta (2009). The WaCky Wide Web: A collection of very large linguistically processed web-crawled corpora. Lan- guage Resources and Evaluation 43, 209-226.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Syntactic processing is distributed across the language system", |
|
"authors": [ |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Blank", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Balewski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Mahowald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Fedorenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "NeuroImage", |
|
"volume": "127", |
|
"issue": "", |
|
"pages": "307--323", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Blank, I., Z. Balewski, K. Mahowald, and E. Fedorenko (2016). Syntactic processing is distributed across the language system. NeuroImage 127, 307-323.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Learning phrase representations using RNN encoder-decoder for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Van Merri\u00ebnboer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00c7", |
|
"middle": [], |
|
"last": "G\u00fcl\u00e7ehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Bougares", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cho, K., B. van Merri\u00ebnboer, \u00c7 . G\u00fcl\u00e7ehre, D. Bahdanau, F. Bougares, H. Schwenk, and Y. Bengio (2014, October). Learning phrase representations using RNN encoder-decoder for statistical machine translation. In Proceedings of the 2014", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1724--1734", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Conference on Empirical Methods in Natural Language Processing (EMNLP), Doha, Qatar, pp. 1724-1734. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Learning to transduce with unbounded memory", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Grefenstette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Hermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Suleyman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 28th International Conference on Neural Information Processing Systems, NIPS'15", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1828--1836", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Grefenstette, E., K. M. Hermann, M. Suleyman, and P. Blunsom (2015). Learning to transduce with unbounded memory. In Proceedings of the 28th International Conference on Neural Information Processing Systems, NIPS'15, Cambridge, MA, USA, pp. 1828-1836. MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Exploring the limits of language modeling", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "J\u00f3zefowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J\u00f3zefowicz, R., O. Vinyals, M. Schuster, N. Shazeer, and Y. Wu (2016). Exploring the limits of language modeling. CoRR abs/1602.02410.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Neural machine translation in linear time", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Neural machine translation in linear time.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kingma, D. P. and J. Ba (2014). Adam: A method for stochastic optimization. CoRR abs/1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Grammaticality, acceptability, and probability: A probabilistic view of linguistic knowledge", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Lau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Lappin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Cognitive Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--40", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lau, J. H., A. Clark, and S. Lappin (2016). Grammaticality, acceptability, and proba- bility: A probabilistic view of linguistic knowledge. Cognitive Science, 1-40.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Assessing the ability of LSTMs to learn syntax-sensitive dependencies", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Linzen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Dupoux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Golberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Transactions of the Association of Computational Linguistics", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "521--535", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Linzen, T., E. Dupoux, and Y. Golberg (2016). Assessing the ability of LSTMs to learn syntax-sensitive dependencies. Transactions of the Association of Computational Linguistics 4, 521-535.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Maltparser: A language-independent system for data-driven dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Nilsson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Chanev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Eryigit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "K\u00fcbler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Marinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Marsi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Natural Language Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "95--135", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nivre, J., J. Hall, J. Nilsson, A. Chanev, G. Eryigit, S. K\u00fcbler, S. Marinov, and E. Marsi (2007). Maltparser: A language-independent system for data-driven dependency parsing. Natural Language Engineering, 95-135.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Improvements in part-of-speech tagging with an application to German", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Schmid", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Proceedings of the ACL SIGDAT-Workshop. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Schmid, H. (1995). Improvements in part-of-speech tagging with an application to German. In Proceedings of the ACL SIGDAT-Workshop. Association for Computa- tional Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "FIGURE 1: Our CNN Architecture" |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "shows that while predicting verb number with our language model For CNN architectureFIGURE 2: Comparing effect of vocabulary size across architectures." |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Comparing embedding dimensions FIGURE 3: Results of testing the effect of various hyper-parameters. The reference (solid blue line) is an LSTM architecture, vocabulary size 10000, training set 90%, single layer, 150 memory units, no drop out." |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Results for a configuration with best parameters values: LSTM RNN with 2 layers of 1350 units, dropout rate 0.1, vocabulary size 100k, training on 90%, and lexical embedding dimension size 150 Comparing LSTM trained language model (with voc. size 100 and 1000 units) for the two methods of predicting verb number. The solid blue line represents our (supervised) benchmark LSTM RNN. Number of test examples per number of attractors" |
|
} |
|
} |
|
} |
|
} |