|
{ |
|
"paper_id": "E17-1027", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T10:52:02.891654Z" |
|
}, |
|
"title": "A Systematic Study of Neural Discourse Models for Implicit Discourse Relation", |
|
"authors": [ |
|
{ |
|
"first": "Attapol", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Rutherford", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Vera", |
|
"middle": [], |
|
"last": "Demberg", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Saarland University", |
|
"location": { |
|
"settlement": "Saarbr\u00fccken", |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Brandeis University Waltham", |
|
"location": { |
|
"region": "MA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Inferring implicit discourse relations in natural language text is the most difficult subtask in discourse parsing. Many neural network models have been proposed to tackle this problem. However, the comparison for this task is not unified, so we could hardly draw clear conclusions about the effectiveness of various architectures. Here, we propose neural network models that are based on feedforward and long-short term memory architecture and systematically study the effects of varying structures. To our surprise, the best-configured feedforward architecture outperforms LSTM-based model in most cases despite thorough tuning. Further, we compare our best feedforward system with competitive convolutional and recurrent networks and find that feedforward can actually be more effective. For the first time for this task, we compile and publish outputs from previous neural and nonneural systems to establish the standard for further comparison.", |
|
"pdf_parse": { |
|
"paper_id": "E17-1027", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Inferring implicit discourse relations in natural language text is the most difficult subtask in discourse parsing. Many neural network models have been proposed to tackle this problem. However, the comparison for this task is not unified, so we could hardly draw clear conclusions about the effectiveness of various architectures. Here, we propose neural network models that are based on feedforward and long-short term memory architecture and systematically study the effects of varying structures. To our surprise, the best-configured feedforward architecture outperforms LSTM-based model in most cases despite thorough tuning. Further, we compare our best feedforward system with competitive convolutional and recurrent networks and find that feedforward can actually be more effective. For the first time for this task, we compile and publish outputs from previous neural and nonneural systems to establish the standard for further comparison.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The discourse structure of a natural language text has been analyzed and conceptualized under various frameworks (Mann and Thompson, 1988; Lascarides and Asher, 2007; Prasad et al., 2008) . The Penn Discourse TreeBank (PDTB) and the Chinese Discourse Treebank (CDTB), currently the largest corpora annotated with discourse structures in English and Chinese respectively, view the discourse structure of a text as a set of discourse relations (Prasad et al., 2008; Zhou and Xue, 2012) . Each discourse relation (e.g. causal or temporal) is grounded by a discourse connective (e.g. because or meanwhile) taking two text segments as argu-ments (Prasad et al., 2008) . Implicit discourse relations are those where discourse connectives are omitted from the text and yet the discourse relations still hold.", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 138, |
|
"text": "(Mann and Thompson, 1988;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 139, |
|
"end": 166, |
|
"text": "Lascarides and Asher, 2007;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 167, |
|
"end": 187, |
|
"text": "Prasad et al., 2008)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 442, |
|
"end": 463, |
|
"text": "(Prasad et al., 2008;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 464, |
|
"end": 483, |
|
"text": "Zhou and Xue, 2012)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 641, |
|
"end": 662, |
|
"text": "(Prasad et al., 2008)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "While classifying explicit discourse relations is relatively easy, as the discourse connective itself provides a strong cue for the discourse relation (Pitler et al., 2008) , the classification of implicit discourse relations has proved to be notoriously hard and remained one of the last missing pieces in an end-to-end discourse parser . In the absence of explicit discourse connectives, implicit discourse relations have to be inferred from their two arguments. Previous approaches on inferring implicit discourse relations have typically relied on features extracted from their two arguments. These features include the Cartesian products of the word tokens in the two arguments as well as features manually crafted from various lexicons such as verb classes and sentiment lexicons (Pitler et al., 2009; Rutherford and Xue, 2014) . These lexicons are used mainly to offset the data sparsity problem created by pairs of word tokens used directly as features.", |
|
"cite_spans": [ |
|
{ |
|
"start": 151, |
|
"end": 172, |
|
"text": "(Pitler et al., 2008)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 786, |
|
"end": 807, |
|
"text": "(Pitler et al., 2009;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 808, |
|
"end": 833, |
|
"text": "Rutherford and Xue, 2014)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Neural network models are an attractive alternative for this task, but it is not clear how well they will fare with a small dataset, typically found in discourse annotation projects. Many neural approaches have been proposed. However, we lack a unified standard comparison to really learn whether we make any progress at all because not all past studies agree on the same experimental settings such as label sets to use. Previous work used four binary classification (Pitler et al., 2008; Rutherford and Xue, 2014) , 4-way coarse sense classification , and intermediate sense classification (Lin et al., 2009) . CoNLL Shared Task introduces a unified scheme for evaluation along with a new unseen test set in English in 2015 and in Chinese in 2016 (Xue et al., 2016) . We want to corrobo-rate this new evaluation scheme by running more benchmark results and providing the output under this evaluation scheme. We systematically compare the relative advantages of different neural architectures and publish the outputs from the systems for the research community to conduct further analysis.", |
|
"cite_spans": [ |
|
{ |
|
"start": 467, |
|
"end": 488, |
|
"text": "(Pitler et al., 2008;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 489, |
|
"end": 514, |
|
"text": "Rutherford and Xue, 2014)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 591, |
|
"end": 609, |
|
"text": "(Lin et al., 2009)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 748, |
|
"end": 766, |
|
"text": "(Xue et al., 2016)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we explore multiple neural architectures in an attempt to find the best distributed representation and neural network architecture suitable for this task in both English and Chinese. We do this by probing the different points on the spectrum of structurality from structureless bag-of-words models to sequential and tree-structured models. We use feedforward, sequential long short-term memory (LSTM), and tree-structured LSTM models to represent these three points on the spectrum. To the best of our knowledge, there is no prior study that investigates the contribution of the different architectures in neural discourse analysis.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our main contributions and findings from this work can be summarized as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We establish that the simplest feedforward discourse model outperforms systems with surface features and perform comparably with or even outperforms recurrent and convolutional architectures. This holds across different label sets in English and in Chinese.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We investigate the contribution of the linguistic structures in neural discourse modeling and found that high-dimensional word vectors trained on a large corpus can compensate for the lack of structures in the model, given the small amount of annotated data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We collect and publish the system outputs from many neural architectures on the standard experimental settings for the community to conduct more error analysis. These are made available on the author's website.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Following previous work, we assume that the two arguments of an implicit discourse relation are given so that we can focus on predicting the senses of the implicit discourse relations. The input to our model is a pair of text segments called Arg1 and Arg2, and the label is one of the senses defined in the Penn Discourse Treebank as in the example below: Input: Arg1 Senator Pete Domenici calls this effort \"the first gift of democracy\" Arg2 The Poles might do better to view it as a Trojan Horse. Output: Sense Comparison.Contrast In all architectures, each word in the argument is represented as a k-dimensional word vector trained on an unannotated data set. We use various model architectures to transform the semantics represented by the word vectors into distributed continuous-valued features. In the rest of the section, we explain the details of the neural network architectures that we design for the implicit discourse relations classification task. The models are summarized schematically in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1005, |
|
"end": 1013, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model Architectures", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "This model does not model the structure or word order of a sentence. The features are simply obtained through element-wise pooling functions. Pooling is one of the key techniques in neural network modeling of computer vision (Krizhevsky et al., 2012; LeCun et al., 2010) . Max pooling is known to be very effective in vision, but it is unclear what pooling function works well when it comes to pooling word vectors. Summation pooling and mean pooling have been claimed to perform well at composing meaning of a short phrase from individual word vectors (Le and Mikolov, 2014; Blacoe and Lapata, 2012; Mikolov et al., 2013b; Braud and Denis, 2015) . The Arg1 vector a 1 and Arg2 vector a 2 are computed by applying element-wise pooling function f on all of the N 1 word vectors in Arg1 w 1 1:N 1 and all of the N 2 word vectors in Arg2 w 2 1:N 2 respectively:", |
|
"cite_spans": [ |
|
{ |
|
"start": 225, |
|
"end": 250, |
|
"text": "(Krizhevsky et al., 2012;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 251, |
|
"end": 270, |
|
"text": "LeCun et al., 2010)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 561, |
|
"end": 575, |
|
"text": "Mikolov, 2014;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 576, |
|
"end": 600, |
|
"text": "Blacoe and Lapata, 2012;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 601, |
|
"end": 623, |
|
"text": "Mikolov et al., 2013b;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 624, |
|
"end": 646, |
|
"text": "Braud and Denis, 2015)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bag-of-words Feedforward Model", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "a 1 i = f (w 1 1:N 1 ,i ) a 2 i = f (w 2 1:N 2 ,i )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bag-of-words Feedforward Model", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We consider three different pooling functions namely max, summation, and mean pooling functions:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bag-of-words Feedforward Model", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "f max (w 1:N , i) = N max j=1 w j,i f sum (w 1:N , i) = N j=1 w j,i f mean (w 1:N , i) = N j=1 w j,i /N", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bag-of-words Feedforward Model", |
|
"sec_num": "2.1" |
|
}, |
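
{

"text": "To make the pooling step concrete, the following is a minimal illustrative sketch (our own, not the authors' released code) of the three pooling functions applied to one argument, assuming NumPy and an argument stored as an N x k matrix of word vectors; the names pool_argument and arg1_words are ours:

import numpy as np

def pool_argument(word_vectors, mode='sum'):
    # word_vectors: array of shape (N, k), one k-dimensional vector per word.
    # Returns a single k-dimensional argument vector (a^1 or a^2 above).
    if mode == 'max':
        return word_vectors.max(axis=0)
    if mode == 'sum':
        return word_vectors.sum(axis=0)
    if mode == 'mean':
        return word_vectors.mean(axis=0)
    raise ValueError('unknown pooling mode: ' + mode)

# Toy usage: an Arg1 with N=3 words and k=4 dimensions.
arg1_words = np.random.randn(3, 4)
a1 = pool_argument(arg1_words, mode='sum')

Each function operates element-wise over the word positions, matching the definitions of f_max, f_sum, and f_mean above.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Bag-of-words Feedforward Model",

"sec_num": "2.1"

},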
|
{ |
|
"text": "Inter-argument interaction is modeled directly by the hidden layers that take argument vectors as features. Discourse relations cannot be determined based on the two arguments individually. Instead, the sense of the relation can only be determined when the arguments in a discourse relation are analyzed jointly. The first hidden layer h 1 is the non-linear transformation of the weighted linear combination of the argument vectors:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bag-of-words Feedforward Model", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "h 1 = tanh(W 1 \u2022 a 1 + W 2 \u2022 a 2 + b h 1 )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bag-of-words Feedforward Model", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where W 1 and W 2 are d \u00d7 k weight matrices and b h 1 is a d-dimensional bias vector. Further hidden layers h t and the output layer o follow the standard feedforward neural network model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bag-of-words Feedforward Model", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "h t = tanh(W ht \u2022 h t\u22121 + b ht ) o = softmax(W o \u2022 h T + b o )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bag-of-words Feedforward Model", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where W ht is a d \u00d7 d weight matrix, b ht is a ddimensional bias vector, and T is the number of hidden layers in the network.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Bag-of-words Feedforward Model", |
|
"sec_num": "2.1" |
|
}, |
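
{

"text": "As an illustration of the full forward pass (a sketch under our own parameter naming, not the authors' Theano implementation), the inter-argument combination, hidden layers, and softmax output can be written as:

import numpy as np

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

def feedforward_discourse(a1, a2, params):
    # First hidden layer: h1 = tanh(W1 . a1 + W2 . a2 + b1),
    # where a1 and a2 are the pooled argument vectors.
    h = np.tanh(params['W1'] @ a1 + params['W2'] @ a2 + params['b1'])
    # Further hidden layers: h_t = tanh(W_ht . h_{t-1} + b_ht).
    for W, b in params['hidden']:
        h = np.tanh(W @ h + b)
    # Output layer: distribution over the sense labels.
    return softmax(params['Wo'] @ h + params['bo'])

Here params['W1'] and params['W2'] are d x k matrices, params['hidden'] is a (possibly empty) list of (d x d matrix, bias) pairs, and params['Wo'] maps the last hidden layer to the number of senses; these names are ours and merely mirror the equations above.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Bag-of-words Feedforward Model",

"sec_num": "2.1"

},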
|
{ |
|
"text": "A sequential Long Short-Term Memory Recurrent Neural Network (LSTM-RNN) models the semantics of a sequence of words through the use of hidden state vectors. Therefore, the word ordering does affect the resulting hidden state vectors, unlike the bag-of-word model. For each word vector at word position t, we compute the corresponding hidden state vector s t and the memory cell vector from the previous step, using standard formula for LSTM. The argument vectors are the results of applying a pooling function over the hidden state vectors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequential Long Short-Term Memory (LSTM)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "a 1 i = f (s 1 1:N 1 ,i ) a 2 i = f (s 2 1:N 2 ,i )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequential Long Short-Term Memory (LSTM)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In addition to the three pooling functions that we describe in the previous subsection, we also consider using only the last hidden state vector, which should theoretically be able to encode the semantics of the entire word sequence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequential Long Short-Term Memory (LSTM)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "f last (s 1:N,i ) = s N,i", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequential Long Short-Term Memory (LSTM)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Inter-argument interaction and the output layer are modeled in the same fashion as the bag-of-words model once the argument vector is computed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sequential Long Short-Term Memory (LSTM)", |
|
"sec_num": "2.2" |
|
}, |
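
{

"text": "A compact way to express this architecture is sketched below in PyTorch (an illustration under our assumptions, not the authors' Theano code; in particular, having the two arguments share LSTM weights and combining the pooled vectors with a single linear layer over their concatenation, which is equivalent to W1 . a1 + W2 . a2, are our simplifications):

import torch
import torch.nn as nn

class SequentialLSTMDiscourse(nn.Module):
    def __init__(self, word_dim, hidden_dim, num_senses, pooling='sum'):
        super().__init__()
        self.lstm = nn.LSTM(word_dim, hidden_dim, batch_first=True)
        self.pooling = pooling
        # Linear inter-argument combination of the two pooled argument vectors.
        self.combine = nn.Linear(2 * hidden_dim, hidden_dim)
        self.out = nn.Linear(hidden_dim, num_senses)

    def pool(self, states):
        # states: (1, N, hidden_dim) hidden state vectors of one argument.
        if self.pooling == 'max':
            return states.max(dim=1).values
        if self.pooling == 'mean':
            return states.mean(dim=1)
        if self.pooling == 'last':
            return states[:, -1, :]
        return states.sum(dim=1)

    def forward(self, arg1_vecs, arg2_vecs):
        # arg*_vecs: (1, N_i, word_dim) pre-trained, fixed word vectors.
        s1, _ = self.lstm(arg1_vecs)
        s2, _ = self.lstm(arg2_vecs)
        a = torch.cat([self.pool(s1), self.pool(s2)], dim=-1)
        h = torch.tanh(self.combine(a))
        return self.out(h)  # unnormalized scores; apply softmax for sense probabilities
",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Sequential Long Short-Term Memory (LSTM)",

"sec_num": "2.2"

},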
|
{ |
|
"text": "The principle of compositionality leads us to believe that the semantics of the argument vector should be determined by the syntactic structures and the meanings of the constituents. For a fair comparison with the sequential model, we apply the same formulation of LSTM on the binarized constituent parse tree. The hidden state vector now corresponds to a constituent in the tree. These hidden state vectors are then used in the same fashion as the sequential LSTM. The mathematical formulation is the same as Tai et al. (2015) . This model is similar to the recursive neural networks proposed by Ji and Eisenstein (2015) . Our model differs from their model in several ways. We use the LSTM networks instead of the \"vanilla\" RNN formula and expect better results due to less complication with vanishing and exploding gradients during training. Furthermore, our purpose is to compare the influence of the model structures. Therefore, we must use LSTM cells in both sequential and tree LSTM models for a fair and meaningful comparison. The more indepth comparison of our work and recursive neural network model by Ji and Eisenstein (2015) is provided in the discussion section.", |
|
"cite_spans": [ |
|
{ |
|
"start": 510, |
|
"end": 527, |
|
"text": "Tai et al. (2015)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 597, |
|
"end": 621, |
|
"text": "Ji and Eisenstein (2015)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1113, |
|
"end": 1137, |
|
"text": "Ji and Eisenstein (2015)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tree LSTM", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "The Penn Discourse Treebank (PDTB) We use the PDTB due to its theoretical simplicity in discourse analysis and its reasonably large size. The annotation is done as another layer on the Penn Treebank on Wall Street Journal sections. Each relation consists of two spans of text that are minimally required to infer the relation, and the sense is organized hierarchically. The classification problem can be formulated in various ways based on the hierarchy. Previous work in this task has been done over three schemes of evaluation: top-level 4-way classification (Pitler et al., 2009) , second-level 11-way classification (Lin et al., 2009; Ji and Eisenstein, 2015) , and modified second-level classification introduced in the CoNLL 2015 Shared Task . We focus on the second-level 11-way classification because the labels are fine-grained enough to be useful for downstream tasks and also because the strongest neural network systems are tuned to this formulation. If an instance is annotated with two labels (\u223c3% of the data), we only use the first label. Partial labels, which constitute \u223c2% of the data, are excluded. Table 3 shows the distribution of labels in the training set (sections 2-21), development set (section 22), and test set (section 23).", |
|
"cite_spans": [ |
|
{ |
|
"start": 561, |
|
"end": 582, |
|
"text": "(Pitler et al., 2009)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 620, |
|
"end": 638, |
|
"text": "(Lin et al., 2009;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 639, |
|
"end": 663, |
|
"text": "Ji and Eisenstein, 2015)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1119, |
|
"end": 1126, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Corpora and Implementation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Training Weight initialization is uniform random, following the formula recommended by Bengio (2012) . The cost function is the standard crossentropy loss function, as the hinge loss function (large-margin framework) yields consistently inferior results. We use Adagrad as the optimization algorithm of choice. The learning rates are tuned over a grid search. We monitor the accuracy on the development set to determine convergence and prevent overfitting. L2 regularization and/or dropout do not make a big impact on performance in our case, so we do not use them in the final re-sults. Implementation All of the models are implemented in Theano (Bergstra et al., 2010; Bastien et al., 2012 4 Experiment on the Second-level Sense in the PDTB", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 100, |
|
"text": "Bengio (2012)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 647, |
|
"end": 670, |
|
"text": "(Bergstra et al., 2010;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 671, |
|
"end": 691, |
|
"text": "Bastien et al., 2012", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpora and Implementation", |
|
"sec_num": "3" |
|
}, |
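
{

"text": "The training recipe described above (cross-entropy loss, Adagrad, a grid search over learning rates, and early stopping on development-set accuracy) can be sketched as follows; model, train_batches, and dev_batches are assumed to exist, and the code is illustrative rather than the authors' implementation:

import torch

def train(model, train_batches, dev_batches, lr=0.01, max_epochs=50, patience=5):
    optimizer = torch.optim.Adagrad(model.parameters(), lr=lr)
    loss_fn = torch.nn.CrossEntropyLoss()
    best_acc, stale = 0.0, 0
    for epoch in range(max_epochs):
        model.train()
        for arg1, arg2, label in train_batches:
            optimizer.zero_grad()
            loss_fn(model(arg1, arg2), label).backward()
            optimizer.step()
        # Monitor development-set accuracy to decide convergence.
        model.eval()
        with torch.no_grad():
            correct = sum((model(a1, a2).argmax(dim=-1) == y).sum().item()
                          for a1, a2, y in dev_batches)
            total = sum(y.numel() for _, _, y in dev_batches)
        acc = correct / total
        if acc > best_acc:
            best_acc, stale = acc, 0
        else:
            stale += 1
            if stale >= patience:
                break
    return best_acc

The learning-rate grid and the decision not to use L2 regularization or dropout follow the description above.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Corpora and Implementation",

"sec_num": "3"

},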
|
{ |
|
"text": "We want to test the effectiveness of the interargument interaction and the three models described above on the fine-grained discourse relations in English. The data split and the label set are exactly the same as previous works that use this label set (Lin et al., 2009; Ji and Eisenstein, 2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 252, |
|
"end": 270, |
|
"text": "(Lin et al., 2009;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 271, |
|
"end": 295, |
|
"text": "Ji and Eisenstein, 2015)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpora and Implementation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Preprocessing All tokenization is taken from the gold standard tokenization in the PTB (Marcus et al., 1993) . We use the Berkeley parser to parse all of the data (Petrov et al., 2006) . We test the effects of word vector sizes. 50-dimensional and 100dimensional word vectors are trained on the training sections of WSJ data, which is the same text as the PDTB annotation. Although this seems like too little data, 50-dimensional WSJ-trained word vectors have previously been shown to be the most effective in this task (Ji and Eisenstein, 2015) . Additionally, we also test the off-the-shelf word vectors trained on billions of tokens from Google News data freely available with the word2vec tool. All word vectors are trained on the Skipgram architecture (Mikolov et al., 2013b; Mikolov et al., 2013a) . Other models such as GloVe and continuous bag-of-words seem to yield broadly similar results (Pennington et al., 2014) . We keep the word vectors fixed, instead of fine-tuning during training.", |
|
"cite_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 108, |
|
"text": "(Marcus et al., 1993)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 163, |
|
"end": 184, |
|
"text": "(Petrov et al., 2006)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 520, |
|
"end": 545, |
|
"text": "(Ji and Eisenstein, 2015)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 757, |
|
"end": 780, |
|
"text": "(Mikolov et al., 2013b;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 781, |
|
"end": 803, |
|
"text": "Mikolov et al., 2013a)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 899, |
|
"end": 924, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Corpora and Implementation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The feedforward model performs best overall among all of the neural architectures we explore (Table 2) . It outperforms the recursive neural network with bilinear output layer introduced by Ji and Eisenstein (2015) (p < 0.05; bootstrap test) and performs comparably with the surface feature baseline (Lin et al., 2009) , which uses var- Lin et al., (2009) 40.20 Figure 3 : Inter-argument interaction can be modeled effectively with hidden layers. The results are shown for the feedforward models with summation pooling, but this effect can be observed robustly in all architectures we consider.", |
|
"cite_spans": [ |
|
{ |
|
"start": 300, |
|
"end": 318, |
|
"text": "(Lin et al., 2009)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 355, |
|
"text": "Lin et al., (2009)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 102, |
|
"text": "(Table 2)", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 362, |
|
"end": 370, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "pus (Figure 4 ). Moving from 50 units to 100 units trained on the same dataset, we do not observe much of a difference in performance in both architectures, but the sequential LSTM model beats the feedforward model in both settings (Table 3) . This suggests that only 50 dimensions are needed for the WSJ corpus. However, the trend reverses when we move to 300-dimensional word vectors trained on a much larger corpus. These results suggest an interaction between the lexical information encoded by word vectors and the structural information encoded by the model itself. Hidden layers, especially the first one, make a substantial impact on performance. This effect is observed across all architectures (Figure 3) . Strikingly, the improvement can be as high as 8% absolute when used with the feedforward model with small word vectors. We tried up to four hidden layers and found that the additional hidden layers yield diminishing-if not negative-returns. These effects are not an artifact of the training process as we have tuned the models quite extensively, although it might be the case that we do not have sufficient data to fit those extra parameters. Summation pooling is effective for both feedforward and LSTM models (Figure 2) . The word vectors we use have been claimed to have some additive properties (Mikolov et al., 2013b) , so summation pooling in this experiment supports this claim. Max pooling is only effective for LSTM, probably because the values in the word vector encode the abstract features of each word relative to each other. It can be trivially shown that if all of the vectors are multiplied by -1, then the results from max pooling will be totally different, but the word similarities remain the same. The memory cells and the state vectors in the LSTM models transform the original word vectors to work well the max pooling operation, but the feedforward net cannot transform the word vectors to work well with max pooling as it is not allowed to change the word vectors themselves.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1316, |
|
"end": 1339, |
|
"text": "(Mikolov et al., 2013b)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 4, |
|
"end": 13, |
|
"text": "(Figure 4", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 232, |
|
"end": 241, |
|
"text": "(Table 3)", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 704, |
|
"end": 714, |
|
"text": "(Figure 3)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1228, |
|
"end": 1238, |
|
"text": "(Figure 2)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Summing up vectors indeed works better than recurrent models. We provide further evidence for this claim in Section 5. Sequential and tree LSTM models might work better if we are given larger amount of data. We observe that LSTM models outperform the feedforward model when word vectors are smaller, so it is unlikely that we train the LSTMs incorrectly. It is more likely that we do not have enough annotated data to train a more powerful model such as LSTM. In previous work, LSTMs are applied to tasks with a lot of labeled data compared to mere 12,930 instances that we have (Vinyals et al., 2015; Chiu and Nichols, 2015; \u0130rsoy and Cardie, 2014) . Another explanation comes from the fact that the contextual information encoded in the word vectors can compen-sate for the lack of structure in the model in this task. Word vectors are already trained to encode the words in their linguistic context especially information from word order. Our discussion would not be complete without explaining our results in relation to the recursive neural network model proposed by Ji and Eisenstein (2015) . Why do sequential LSTM models outperform recursive neural networks or tree LSTM models? Although this first comes as a surprise to us, the results are consistent with recent works that use sequential LSTM to encode syntactic information. For example, Vinyals et al. (2015) use sequential LSTM to encode the features for syntactic parse output. Tree LSTM seems to show improvement when there is a need to model longdistance dependency in the data (Tai et al., 2015; Li et al., 2015) . Furthermore, the benefits of tree LSTM are not readily apparent for a model that discards the syntactic categories in the intermediate nodes and makes no distinction between heads and their dependents, which are at the core of syntactic representations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 579, |
|
"end": 601, |
|
"text": "(Vinyals et al., 2015;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 602, |
|
"end": 625, |
|
"text": "Chiu and Nichols, 2015;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 626, |
|
"end": 649, |
|
"text": "\u0130rsoy and Cardie, 2014)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1072, |
|
"end": 1096, |
|
"text": "Ji and Eisenstein (2015)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1350, |
|
"end": 1371, |
|
"text": "Vinyals et al. (2015)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 1545, |
|
"end": 1563, |
|
"text": "(Tai et al., 2015;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 1564, |
|
"end": 1580, |
|
"text": "Li et al., 2015)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Why does the feedforward model outperform the LSTM models?", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Another point of contrast between our work and Ji and Eisenstein's (2015) is the modeling choice for inter-argument interaction. Our experimental results show that the hidden layers are an important contributor to the performance for all of our models. We choose linear inter-argument interaction instead of bilinear interaction, and this decision gives us at least two advantages. Linear interaction allows us to stack up hidden layers without the exponential growth in the number of parameters. Secondly, using linear interaction allows us to use high dimensional word vectors, which we found to be another important component for the performance. The recursive model by Ji and Eisenstein (2015) is limited to 50 units due to the bilinear layer. Our choice of linear interargument interaction and high-dimensional word vectors turns out to be crucial to building a competitive neural network model for classifying implicit discourse relations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Why does the feedforward model outperform the LSTM models?", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We want to provide further evidence that feedforward models perform well without surface features or without sophisticated recurrent or convolutional structures across different label sets and languages as well. Toward that goal, we evaluate our models on non-explicit discourse relation data used in English and Chinese CoNLL 2016 Shared Task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extending the results across neural architectures, label sets, and languages", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We follow the experimental setting used in CoNLL 2015-2016 Shared Task. To compare our results against previous systems, we compile all of the official system outputs, and make them publicly available. The label set is modified by the shared task organizers into 15 different senses including EntRel as another sense Xue et al., 2016) . We use the 300-dimensional word vector used in the previous experiment and tune the number of hidden layers and hidden units on the development set. We consider the following models: Bidirectional-LSTM (Akanksha and Eisenstein, 2016) , two flavors of convolutional networks (Qin et al., 2016; Wang and Lan, 2016) , two variations of simple argument pooling (Mihaylov and Frank, 2016; Schenk et al., 2016) , and the best system using surface features alone (Wang and Lan, 2015) . The comparison results and brief system descriptions are shown in Table 4 . Our model presents the state-of-the-art system on the blind test set in English. We once again confirm that manual features are not necessary for this task and that our feedforward network outperforms the best available LSTM and convolutional networks in many settings despite its simplicity. While performing well in-domain, convolutional networks degrade sharply when tested on the blind slightly out-of-domain dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 317, |
|
"end": 334, |
|
"text": "Xue et al., 2016)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 539, |
|
"end": 570, |
|
"text": "(Akanksha and Eisenstein, 2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 611, |
|
"end": 629, |
|
"text": "(Qin et al., 2016;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 630, |
|
"end": 649, |
|
"text": "Wang and Lan, 2016)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 694, |
|
"end": 720, |
|
"text": "(Mihaylov and Frank, 2016;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 721, |
|
"end": 741, |
|
"text": "Schenk et al., 2016)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 793, |
|
"end": 813, |
|
"text": "(Wang and Lan, 2015)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 882, |
|
"end": 889, |
|
"text": "Table 4", |
|
"ref_id": "TABREF10" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "English discourse relations", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We evaluate our model on the Chinese Discourse Treebank (CDTB) because its annotation is the most comparable to the PDTB (Zhou and Xue, 2015) . The sense set consists of 10 different senses, which are not organized in a hierarchy, unlike the PDTB. We use the version of the data provided to the CoNLL 2016 Shared Task participants. This version has 16,946 instances of discourse relations total in the combined training and development sets. The test set is not yet available at the time of submission, so the system is evaluated based on the average accuracy over 7-fold cross-validation on the combined set of training and development sets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 141, |
|
"text": "(Zhou and Xue, 2015)", |
|
"ref_id": "BIBREF44" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Chinese discourse relations", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "To establish baseline comparison, we use Max-Ent models loaded with the feature sets previously shown to be effective for English, namely dependency rule pairs, production rule pairs (Lin et al., 2009) , Brown cluster pairs (Rutherford and Xue, 2014) , and word pairs (Marcu and Echihabi, 2002) . We use information gain criteria to select the best subset of each feature set, which is crucial in feature-based discourse parsing. Chinese word vectors are induced through CBOW and Skipgram architecture in word2vec (Mikolov et al., 2013a) on Chinese Gigaword corpus (Graff and Chen, 2005) using default settings. The number of dimensions that we try are 50, 100, 150, 200, 250, and 300. We induce 1,000 and 3,000 Brown clusters on the Gigaword corpus. Table 5 shows the results for the models which are best tuned on the number of hidden units, hidden layers, and the types of word vectors. The feedforward variant of our model significantly outperforms the strong baselines in both English and Chinese (p < 0.05 bootstrap test). This suggests that our approach is robust against different label sets, and our findings are valid across languages. Our Chinese model outperforms all of the feature sets known to work well in English despite using only word vectors. The choice of neural architecture used for inducing Chinese word vectors turns out to be crucial. Chinese word vectors from Skipgram model perform consistently better than the ones from CBOW model ( Figure 5 ). These two types of word vectors do not show much difference in the English tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 183, |
|
"end": 201, |
|
"text": "(Lin et al., 2009)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 224, |
|
"end": 250, |
|
"text": "(Rutherford and Xue, 2014)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 268, |
|
"end": 294, |
|
"text": "(Marcu and Echihabi, 2002)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 514, |
|
"end": 537, |
|
"text": "(Mikolov et al., 2013a)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 565, |
|
"end": 587, |
|
"text": "(Graff and Chen, 2005)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 751, |
|
"end": 758, |
|
"text": "Table 5", |
|
"ref_id": "TABREF8" |
|
}, |
|
{ |
|
"start": 1462, |
|
"end": 1470, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Chinese discourse relations", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The prevailing approach for this task is to use surface features derived from various semantic lexicons (Pitler et al., 2009) , reducing the number of parameters by mapping raw word tokens in the arguments of discourse relations to a limited number of entries in a semantic lexicon such as polarity and verb classes. Along the same vein, Brown cluster assignments have also been used as a general purpose lexicon that requires no human manual annotation (Rutherford and Xue, 2014) . However, these solutions still suffer from the data sparsity problem and almost always require extensive feature selection to work well (Park and Cardie, 2012; Lin et al., 2009; Ji and Eisenstein, 2015) . The work we report here explores the use of the expressive power of distributed representations to overcome the data sparsity problem found in the traditional feature engineering paradigm. Neural network modeling has been explored to some extent in the context of this task. Recently, Braud and Denis (2015) tested various word vectors as features for implicit discourse relation classification and show that distributed features achieve the same level of accuracy as onehot representations in some experimental settings. Ji et al. (2015; advance the state of the art for this task by using recursive and recurrent neural networks. In the work we report here, we systematically explore the use of different neural network architectures and show that when highdimensional word vectors are used as input, a simple feed-forward architecture can outperform more sophisticated architectures such as sequential and tree-based LSTM networks, given the small amount of data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 125, |
|
"text": "(Pitler et al., 2009)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 454, |
|
"end": 480, |
|
"text": "(Rutherford and Xue, 2014)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 619, |
|
"end": 642, |
|
"text": "(Park and Cardie, 2012;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 643, |
|
"end": 660, |
|
"text": "Lin et al., 2009;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 661, |
|
"end": 685, |
|
"text": "Ji and Eisenstein, 2015)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1210, |
|
"end": 1226, |
|
"text": "Ji et al. (2015;", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Recurrent neural networks, especially LSTM networks, have changed the paradigm of deriving distributed features from a sentence (Hochreiter and Schmidhuber, 1997) , but they have not been much explored in the realm of discourse parsing. LSTM models have been notably used to encode the meaning of source language sentence in neural machine translation (Cho et al., 2014; Devlin et al., 2014) and recently used to encode the meaning of an entire sentence to be used as features (Kiros et al., 2015) . Many neural architectures have been explored and evaluated, but there is no single technique that is decidedly better across all tasks. The LSTM-based models such as Kiros et al. (2015) perform well across tasks but do not outperform some other strong neural baselines. Ji et al. (2016) uses a joint discourse language model to improve the performance on the coarse-grained label in the PDTB, but in our case, we would like to deduce how well LSTM fares in fine-grained implicit discourse relation classification, which is more practical for application.", |
|
"cite_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 162, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 352, |
|
"end": 370, |
|
"text": "(Cho et al., 2014;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 371, |
|
"end": 391, |
|
"text": "Devlin et al., 2014)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 477, |
|
"end": 497, |
|
"text": "(Kiros et al., 2015)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 666, |
|
"end": 685, |
|
"text": "Kiros et al. (2015)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 770, |
|
"end": 786, |
|
"text": "Ji et al. (2016)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We report a series of experiments that systematically probe the effectiveness of various neural network architectures for the task of implicit discourse relation classification. We found that a feedforward variant of our model combined with hidden layers and high dimensional word vectors outperforms more complicated LSTM and convolutional models. We also establish that manually crafted surface features are not necessary for this task. These results hold for different settings and different languages. In addition, we collect and compile the system outputs from all competitive systems and make it available for the research community to conduct further analysis. We encourage that researchers who work on this task to evaluate their systems under the CoNLL Shared Task 2015-2016 scheme to allow for easy comparison and progress tracking.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and future work", |
|
"sec_num": "7" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The first author was funded by the German Research Foundation (DFG) as part of SFB 1102: Information Density and Linguistic Encoding", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Shallow discourse parsing using distributed argument representations and bayesian optimization", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Akanksha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Eisenstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Akanksha and Jacob Eisenstein. 2016. Shallow discourse parsing using distributed argument rep- resentations and bayesian optimization. CoRR, abs/1606.04503.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Arnaud Bergeron, Nicolas Bouchard, and Yoshua Bengio. 2012. Theano: new features and speed improvements. Deep Learning and Unsupervised Feature Learning NIPS 2012 Workshop", |
|
"authors": [ |
|
{ |
|
"first": "Fr\u00e9d\u00e9ric", |
|
"middle": [], |
|
"last": "Bastien", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascal", |
|
"middle": [], |
|
"last": "Lamblin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Razvan", |
|
"middle": [], |
|
"last": "Pascanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Bergstra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Goodfellow", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fr\u00e9d\u00e9ric Bastien, Pascal Lamblin, Razvan Pascanu, James Bergstra, Ian J. Goodfellow, Arnaud Berg- eron, Nicolas Bouchard, and Yoshua Bengio. 2012. Theano: new features and speed improvements. Deep Learning and Unsupervised Feature Learning NIPS 2012 Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Practical recommendations for gradient-based training of deep architectures", |
|
"authors": [ |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Neural Networks: Tricks of the Trade", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "437--478", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoshua Bengio. 2012. Practical recommendations for gradient-based training of deep architectures. In Neural Networks: Tricks of the Trade, pages 437- 478. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Theano: a CPU and GPU math expression compiler", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Bergstra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olivier", |
|
"middle": [], |
|
"last": "Breuleux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fr\u00e9d\u00e9ric", |
|
"middle": [], |
|
"last": "Bastien", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascal", |
|
"middle": [], |
|
"last": "Lamblin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Razvan", |
|
"middle": [], |
|
"last": "Pascanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Desjardins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Turian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Warde-Farley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the Python for Scientific Computing Conference (SciPy)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Bergstra, Olivier Breuleux, Fr\u00e9d\u00e9ric Bastien, Pascal Lamblin, Razvan Pascanu, Guillaume Des- jardins, Joseph Turian, David Warde-Farley, and Yoshua Bengio. 2010. Theano: a CPU and GPU math expression compiler. In Proceedings of the Python for Scientific Computing Conference (SciPy), June. Oral Presentation.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "A comparison of vector-based representations for semantic composition", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Blacoe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "546--556", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William Blacoe and Mirella Lapata. 2012. A com- parison of vector-based representations for semantic composition. In Proceedings of the 2012 Joint Con- ference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pages 546-556. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Comparing word representations for implicit discourse relation classification", |
|
"authors": [ |
|
{ |
|
"first": "Chlo\u00e9", |
|
"middle": [], |
|
"last": "Braud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascal", |
|
"middle": [], |
|
"last": "Denis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chlo\u00e9 Braud and Pascal Denis. 2015. Comparing word representations for implicit discourse relation classi- fication. In Empirical Methods in Natural Language Processing (EMNLP 2015).", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Named entity recognition with bidirectional lstm-cnns", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Jason", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Chiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nichols", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1511.08308" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason P.C. Chiu and Eric Nichols. 2015. Named en- tity recognition with bidirectional lstm-cnns. arXiv preprint arXiv:1511.08308.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Learning phrase representations using rnn encoder-decoder for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bart", |
|
"middle": [], |
|
"last": "Van Merrienboer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caglar", |
|
"middle": [], |
|
"last": "Gulcehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fethi", |
|
"middle": [], |
|
"last": "Bougares", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1724--1734", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kyunghyun Cho, Bart van Merrienboer, Caglar Gul- cehre, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, and Yoshua Bengio. 2014. Learning phrase representations using rnn encoder-decoder for statistical machine translation. In Proceedings of the 2014 Conference on Empirical Methods in Nat- ural Language Processing (EMNLP), pages 1724- 1734, Doha, Qatar, October. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Fast and robust neural network joint models for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rabih", |
|
"middle": [], |
|
"last": "Zbib", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhongqiang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Lamar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Makhoul", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1370--1380", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Rabih Zbib, Zhongqiang Huang, Thomas Lamar, Richard Schwartz, and John Makhoul. 2014. Fast and robust neural network joint models for sta- tistical machine translation. In Proceedings of the 52nd Annual Meeting of the Association for Compu- tational Linguistics, volume 1, pages 1370-1380.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Chinese gigaword. LDC Catalog No.: LDC2003T09, ISBN", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Graff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ke", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "58563--58230", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Graff and Ke Chen. 2005. Chinese gigaword. LDC Catalog No.: LDC2003T09, ISBN, 1:58563- 58230.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Opinion mining with deep recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Ozanirsoy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Cardie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "720--728", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ozan\u0130rsoy and Claire Cardie. 2014. Opinion mining with deep recurrent neural networks. In Proceedings of the Conference on Empirical Methods in Natural Language Processing, pages 720-728.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "One vector is not enough: Entity-augmented distributed semantics for discourse relations", |
|
"authors": [ |
|
{ |
|
"first": "Yangfeng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Eisenstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "329--344", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yangfeng Ji and Jacob Eisenstein. 2015. One vector is not enough: Entity-augmented distributed semantics for discourse relations. Transactions of the Associa- tion for Computational Linguistics, 3:329-344.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A latent variable recurrent neural network for discourse relation language models", |
|
"authors": [ |
|
{ |
|
"first": "Yangfeng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gholamreza", |
|
"middle": [], |
|
"last": "Haffari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Eisenstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yangfeng Ji, Gholamreza Haffari, and Jacob Eisen- stein. 2016. A latent variable recurrent neural net- work for discourse relation language models. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Skip-thought vectors", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yukun", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zemel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raquel", |
|
"middle": [], |
|
"last": "Urtasun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Torralba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanja", |
|
"middle": [], |
|
"last": "Fidler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3276--3284", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan Kiros, Yukun Zhu, Ruslan R. Salakhutdinov, Richard Zemel, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. 2015. Skip-thought vectors. In Advances in Neural Information Processing Sys- tems, pages 3276-3284.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Imagenet classification with deep convolutional neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Krizhevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1097--1105", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Krizhevsky, Ilya Sutskever, and Geoffrey E. Hin- ton. 2012. Imagenet classification with deep con- volutional neural networks. In Advances in neural information processing systems, pages 1097-1105.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Segmented discourse representation theory: Dynamic semantics with discourse structure", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Lascarides", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Asher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Computing meaning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "87--124", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Lascarides and Nicholas Asher. 2007. Seg- mented discourse representation theory: Dynamic semantics with discourse structure. In Computing meaning, pages 87-124. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Distributed representations of sentences and documents", |
|
"authors": [ |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1405.4053" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Quoc V. Le and Tomas Mikolov. 2014. Distributed representations of sentences and documents. arXiv preprint arXiv:1405.4053.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Convolutional networks and applications in vision", |
|
"authors": [ |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Koray", |
|
"middle": [], |
|
"last": "Kavukcuoglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cl\u00e9ment", |
|
"middle": [], |
|
"last": "Farabet", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Circuits and Systems (ISCAS), Proceedings of 2010 IEEE International Symposium on", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "253--256", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yann LeCun, Koray Kavukcuoglu, and Cl\u00e9ment Fara- bet. 2010. Convolutional networks and applications in vision. In Circuits and Systems (ISCAS), Pro- ceedings of 2010 IEEE International Symposium on, pages 253-256. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "When are tree structures necessary for deep learning of representations?", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2304--2314", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Thang Luong, Dan Jurafsky, and Eduard Hovy. 2015. When are tree structures necessary for deep learning of representations? In Proceed- ings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 2304-2314, Lisbon, Portugal, September. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Recognizing implicit discourse relations in the penn discourse treebank", |
|
"authors": [ |
|
{ |
|
"first": "Ziheng", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min-Yen", |
|
"middle": [], |
|
"last": "Kan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hwee Tou", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "343--351", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ziheng Lin, Min-Yen Kan, and Hwee Tou Ng. 2009. Recognizing implicit discourse relations in the penn discourse treebank. In Proceedings of the 2009 Con- ference on Empirical Methods in Natural Language Processing: Volume 1-Volume 1, pages 343-351. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Rhetorical structure theory: Toward a functional theory of text organization", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Mann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Thompson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1988, |
|
"venue": "Text", |
|
"volume": "8", |
|
"issue": "3", |
|
"pages": "243--281", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William C. Mann and Sandra A. Thompson. 1988. Rhetorical structure theory: Toward a functional the- ory of text organization. Text, 8(3):243-281.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "An unsupervised approach to recognizing discourse relations", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdessamad", |
|
"middle": [], |
|
"last": "Echihabi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting on Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "368--375", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Marcu and Abdessamad Echihabi. 2002. An unsupervised approach to recognizing discourse re- lations. In Proceedings of the 40th Annual Meet- ing on Association for Computational Linguistics, pages 368-375. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Building a large annotated corpus of english: The penn treebank", |
|
"authors": [ |
|
{ |
|
"first": "Mitchell", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Marcus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mary", |
|
"middle": [ |
|
"Ann" |
|
], |
|
"last": "Marcinkiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beatrice", |
|
"middle": [], |
|
"last": "Santorini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Computational linguistics", |
|
"volume": "19", |
|
"issue": "2", |
|
"pages": "313--330", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mitchell P. Marcus, Mary Ann Marcinkiewicz, and Beatrice Santorini. 1993. Building a large anno- tated corpus of english: The penn treebank. Compu- tational linguistics, 19(2):313-330.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Discourse relation sense classification using cross-argument semantic similarity based on word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Todor", |
|
"middle": [], |
|
"last": "Mihaylov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anette", |
|
"middle": [], |
|
"last": "Frank", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Twentieth Conference on Computational Natural Language Learning -Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Todor Mihaylov and Anette Frank. 2016. Discourse relation sense classification using cross-argument semantic similarity based on word embeddings. In Proceedings of the Twentieth Conference on Compu- tational Natural Language Learning -Shared Task, page 100.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Efficient estimation of word representations in vector space", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "CoRR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013a. Efficient estimation of word represen- tations in vector space. CoRR, abs/1301.3781.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Distributed representations of words and phrases and their compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3111--3119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S. Cor- rado, and Jeff Dean. 2013b. Distributed representa- tions of words and phrases and their compositional- ity. In Advances in Neural Information Processing Systems, pages 3111-3119.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Improving implicit discourse relation recognition through feature set optimization", |
|
"authors": [ |
|
{ |
|
"first": "Joonsuk", |
|
"middle": [], |
|
"last": "Park", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Cardie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 13th Annual Meeting of the Special Interest Group on Discourse and Dialogue", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "108--112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joonsuk Park and Claire Cardie. 2012. Improving im- plicit discourse relation recognition through feature set optimization. In Proceedings of the 13th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 108-112. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Empirical Methods in Natural Language Processing", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christo- pher D. Manning. 2014. Glove: Global vectors for word representation. Proceedings of the Empirical Methods in Natural Language Processing (EMNLP 2014), 12:1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Learning accurate, compact, and interpretable tree annotation", |
|
"authors": [ |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leon", |
|
"middle": [], |
|
"last": "Barrett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Romain", |
|
"middle": [], |
|
"last": "Thibaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 21st International Conference on Computational Linguistics and the 44th annual meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "433--440", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Slav Petrov, Leon Barrett, Romain Thibaux, and Dan Klein. 2006. Learning accurate, compact, and interpretable tree annotation. In Proceedings of the 21st International Conference on Computational Linguistics and the 44th annual meeting of the Asso- ciation for Computational Linguistics, pages 433- 440. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Easily identifiable discourse relations", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Pitler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mridhula", |
|
"middle": [], |
|
"last": "Raghupathy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hena", |
|
"middle": [], |
|
"last": "Mehta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ani", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aravind K", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Pitler, Mridhula Raghupathy, Hena Mehta, Ani Nenkova, Alan Lee, and Aravind K Joshi. 2008. Easily identifiable discourse relations. Technical Reports (CIS), page 884.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Automatic sense prediction for implicit discourse relations in text", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Pitler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Annie", |
|
"middle": [], |
|
"last": "Louis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ani", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "683--691", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Pitler, Annie Louis, and Ani Nenkova. 2009. Automatic sense prediction for implicit discourse re- lations in text. In Proceedings of the Joint Confer- ence of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Lan- guage Processing of the AFNLP: Volume 2-Volume 2, pages 683-691. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "The penn discourse treebank 2.0", |
|
"authors": [ |
|
{ |
|
"first": "Rashmi", |
|
"middle": [], |
|
"last": "Prasad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikhil", |
|
"middle": [], |
|
"last": "Dinesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eleni", |
|
"middle": [], |
|
"last": "Miltsakaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Livio", |
|
"middle": [], |
|
"last": "Robaldo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aravind", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Webber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rashmi Prasad, Nikhil Dinesh, Alan Lee, Eleni Milt- sakaki, Livio Robaldo, Aravind K. Joshi, and Bon- nie L. Webber. 2008. The penn discourse treebank 2.0. In LREC.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Shallow discourse parsing using convolutional neural network", |
|
"authors": [ |
|
{ |
|
"first": "Lianhui", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhisong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Twentieth Conference on Computational Natural Language Learning -Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lianhui Qin, Zhisong Zhang, and Hai Zhao. 2016. Shallow discourse parsing using convolutional neu- ral network. In Proceedings of the Twentieth Con- ference on Computational Natural Language Learn- ing -Shared Task, page 70.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Discovering implicit discourse relations through brown cluster pair representation and coreference patterns", |
|
"authors": [ |
|
{ |
|
"first": "Attapol", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Rutherford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics (EACL 2014)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Attapol T. Rutherford and Nianwen Xue. 2014. Dis- covering implicit discourse relations through brown cluster pair representation and coreference patterns. In Proceedings of the 14th Conference of the Eu- ropean Chapter of the Association for Computa- tional Linguistics (EACL 2014), Gothenburg, Swe- den, April.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Improving the inference of implicit discourse relations via classifying explicit discourse connectives", |
|
"authors": [ |
|
{ |
|
"first": "Attapol", |
|
"middle": [], |
|
"last": "Rutherford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "799--808", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Attapol Rutherford and Nianwen Xue. 2015. Improv- ing the inference of implicit discourse relations via classifying explicit discourse connectives. In Pro- ceedings of the 2015 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 799-808, Denver, Colorado, May-June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Do we really need all those rich linguistic features? a neural networkbased approach to implicit sense labeling", |
|
"authors": [ |
|
{ |
|
"first": "Niko", |
|
"middle": [], |
|
"last": "Schenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Chiarcos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathrin", |
|
"middle": [], |
|
"last": "Donandt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "R\u00f6nnqvist", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evgeny", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Stepanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giuseppe", |
|
"middle": [], |
|
"last": "Riccardi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Twentieth Conference on Computational Natural Language Learning -Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Niko Schenk, Christian Chiarcos, Kathrin Donandt, Samuel R\u00f6nnqvist, Evgeny A. Stepanov, and Giuseppe Riccardi. 2016. Do we really need all those rich linguistic features? a neural network- based approach to implicit sense labeling. In Pro- ceedings of the Twentieth Conference on Computa- tional Natural Language Learning -Shared Task, page 41.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Improved semantic representations from tree-structured long short-term memory networks", |
|
"authors": [ |
|
{ |
|
"first": "Kai Sheng", |
|
"middle": [], |
|
"last": "Tai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1556--1566", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kai Sheng Tai, Richard Socher, and Christopher D. Manning. 2015. Improved semantic representa- tions from tree-structured long short-term memory networks. In Proceedings of the 53rd Annual Meet- ing of the Association for Computational Linguistics and the 7th International Joint Conference on Natu- ral Language Processing (Volume 1: Long Papers), pages 1556-1566, Beijing, China, July. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Grammar as a foreign language", |
|
"authors": [ |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Terry", |
|
"middle": [], |
|
"last": "Koo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "2755--2763", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oriol Vinyals, \u0141ukasz Kaiser, Terry Koo, Slav Petrov, Ilya Sutskever, and Geoffrey Hinton. 2015. Gram- mar as a foreign language. In C. Cortes, N.D. Lawrence, D.D. Lee, M. Sugiyama, R. Garnett, and R. Garnett, editors, Advances in Neural Information Processing Systems 28, pages 2755-2763. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "A refined endto-end discourse parser", |
|
"authors": [ |
|
{ |
|
"first": "Jianxiang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Man", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Nineteenth Conference on Computational Natural Language Learning -Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "17--24", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jianxiang Wang and Man Lan. 2015. A refined end- to-end discourse parser. In Proceedings of the Nine- teenth Conference on Computational Natural Lan- guage Learning -Shared Task, pages 17-24, Bei- jing, China, July. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Two end-toend shallow discourse parsers for english and chinese in conll-2016 shared task", |
|
"authors": [ |
|
{ |
|
"first": "Jianxiang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Man", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Twentieth Conference on Computational Natural Language Learning -Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jianxiang Wang and Man Lan. 2016. Two end-to- end shallow discourse parsers for english and chi- nese in conll-2016 shared task. In Proceedings of the Twentieth Conference on Computational Natural Language Learning -Shared Task, page 33.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "The conll-2015 shared task on shallow discourse parsing", |
|
"authors": [ |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hwee Tou", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rashmi", |
|
"middle": [], |
|
"last": "Prasad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Bryant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Attapol", |
|
"middle": [], |
|
"last": "Rutherford", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Nineteenth Conference on Computational Natural Language Learning -Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--16", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nianwen Xue, Hwee Tou Ng, Sameer Pradhan, Rashmi Prasad, Christopher Bryant, and Attapol Ruther- ford. 2015. The conll-2015 shared task on shal- low discourse parsing. In Proceedings of the Nine- teenth Conference on Computational Natural Lan- guage Learning -Shared Task, pages 1-16, Beijing, China, July. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "The conll-2016 shared task on multilingual shallow discourse parsing", |
|
"authors": [ |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hwee Tou", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Pradhan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [], |
|
"last": "Webber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Attapol", |
|
"middle": [], |
|
"last": "Rutherford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chuan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongmin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Twentieth Conference on Computational Natural Language Learning -Shared Task", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nianwen Xue, Hwee Tou Ng, Sameer Pradhan, Bon- nie Webber, Attapol Rutherford, Chuan Wang, and Hongmin Wang. 2016. The conll-2016 shared task on multilingual shallow discourse parsing. In Pro- ceedings of the Twentieth Conference on Computa- tional Natural Language Learning -Shared Task, Berlin, Germany, August. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Pdtb-style discourse annotation of chinese text", |
|
"authors": [ |
|
{ |
|
"first": "Yuping", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics: Long Papers", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "69--77", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuping Zhou and Nianwen Xue. 2012. Pdtb-style dis- course annotation of chinese text. In Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics: Long Papers-Volume 1, pages 69-77. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "The chinese discourse treebank: A chinese corpus annotated with discourse relations. Language Resources and Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Yuping", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "49", |
|
"issue": "", |
|
"pages": "397--431", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuping Zhou and Nianwen Xue. 2015. The chinese discourse treebank: A chinese corpus annotated with discourse relations. Language Resources and Eval- uation, 49(2):397-431.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Top) Feedforward architecture. (Bottom) Sequential Long Short-Term Memory architecture." |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Comparison between feedforward and sequential LSTM when using summation pooling function." |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Comparing the accuracies across Chinese word vectors for feedforward model." |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"text": "The distribution of the level 2 sense labels in the Penn Discourse Treebank. The instances annotated with two labels are not double-counted, and partial labels are excluded.", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"text": "). The gradient computation is done with symbolic differentiation, a functionality provided by Theano. Feedforward models and sequential LSTM models are trained on CPUs on Intel Xeon X5690 3.47GHz, using only a single core per model. A tree LSTM model is trained on a GPU on Intel Xeon CPU E5-2660. All models converge within hours.", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"text": "32.11 34.46 31.85 34.07 33.15 36.16 34.34 36.16 35.11 37.2 35.24 Tree LSTM 50 28.59 28.32 30.93 28.72 29.89 30.15 32.5 31.59 32.11 31.2 32.5 29.63 Feedforward 100 33.29 32.77 28.72 -36.55 35.64 37.21 -36.55 36.29 37.47 -LSTM 100 30.54 33.81 35.9 33.02 36.81 34.98 37.33 35.11 37.46 36.68 37.2 35.77 Tree LSTM 100 29.76 28.72 31.72 31.98 31.33 26.89 33.02 33.68 32.63 31.07 32.24 33.02 Feedforward 300 32.51 34.46 35.12 -35.77 38.25 39.56 -35.25 38.51 39.03 -LSTM 300 28.72 34.59 35.24 34.64 38.25 36.42 37.07 35.5 38.38 37.72 37.2 36.29 Tree LSTM 300 28.45 31.59 32.76 26.76 33.81 32.89 33.94 32.63 32.11 32.76 34.07 32.50", |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td/><td/><td>No hidden layer</td><td/><td/><td>1 hidden layer</td><td/><td/><td colspan=\"2\">2 hidden layers</td></tr><tr><td colspan=\"2\">Architecture k</td><td>max</td><td>mean sum</td><td>last</td><td>max</td><td>mean sum</td><td>last</td><td>max</td><td colspan=\"2\">mean sum</td><td>last</td></tr><tr><td colspan=\"2\">Feedforward 50</td><td colspan=\"3\">31.85 31.98 29.24 -</td><td colspan=\"3\">33.28 34.98 37.85 -</td><td colspan=\"2\">34.85 35.5</td><td>38.51 -</td></tr><tr><td>LSTM</td><td>50</td><td>31.85</td><td/><td/><td/><td/><td/><td/><td/></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"text": "Compilation of all experimental configurations for 11-way classification on the PDTB test set. k is the word vector size. Bold-faced numbers indicate the best performance for each architecture, which is also shown inTable 2.", |
|
"type_str": "table", |
|
"content": "<table><tr><td>Model</td><td>Accuracy</td></tr><tr><td>PDTB Second-level senses</td><td/></tr><tr><td>Most frequent tag baseline</td><td>25.71</td></tr><tr><td>Our best tree LSTM</td><td>34.07</td></tr><tr><td>Ji & Eisenstein, (2015)</td><td>36.98</td></tr><tr><td>Our best sequential LSTM variant</td><td>38.38</td></tr><tr><td>Our best feedforward variant</td><td>39.56</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"text": "Performance comparison across different models for second-level senses.", |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>Number of Hidden Layers</td><td>0</td><td>1</td><td>2</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>Accuracy</td><td>40 30 35</td><td>q 29.24</td><td>q 37.85 38.51 q</td><td>q 28.72</td><td>q 37.21 37.47 q</td><td>q 35.12</td><td>q 39.56 39.03 q</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>25</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>50</td><td>100</td><td>300</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>Embedding Size</td></tr><tr><td/><td/><td colspan=\"3\">Pooling q</td><td>Last</td><td>q</td><td>Mean</td><td>q</td><td colspan=\"2\">Max</td><td>q</td><td>Summation</td></tr><tr><td/><td>40</td><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>Accuracy</td><td>30 35</td><td>q 29.37</td><td>q 35.64</td><td colspan=\"2\">q 36.55</td><td colspan=\"2\">q 37.21</td><td colspan=\"2\">q 35.11</td><td>q 34.98</td><td>q 36.81</td><td>q 37.33</td></tr><tr><td/><td>25</td><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td/><td colspan=\"5\">Feedforward</td><td/><td/><td/><td>LSTM</td></tr><tr><td/><td/><td/><td/><td/><td/><td colspan=\"4\">Architecture</td></tr><tr><td colspan=\"11\">Figure 2: Summation pooling gives the best per-</td></tr><tr><td colspan=\"11\">formance in general. The results are shown for the</td></tr><tr><td colspan=\"11\">systems using 100-dimensional word vectors and</td></tr><tr><td colspan=\"4\">one hidden layer.</td><td/><td/><td/><td/><td/><td/></tr><tr><td colspan=\"11\">ious lexical and syntactic features and extensive</td></tr><tr><td colspan=\"11\">feature selection. Tree LSTM achieves inferior</td></tr><tr><td colspan=\"11\">accuracy than our best feedforward model. The</td></tr><tr><td colspan=\"11\">best configuration of the feedforward model uses</td></tr><tr><td colspan=\"11\">300-dimensional word vectors, one hidden layer,</td></tr><tr><td colspan=\"11\">and the summation pooling function to derive ar-</td></tr><tr><td colspan=\"11\">gument feature vectors. The model behaves well</td></tr><tr><td colspan=\"11\">during training and converges in less than an hour</td></tr><tr><td colspan=\"2\">on a CPU.</td><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td colspan=\"11\">The sequential LSTM model outperforms the</td></tr><tr><td colspan=\"11\">feedforward model when word vectors are not</td></tr><tr><td colspan=\"11\">high-dimensional and not trained on a large cor-</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td colspan=\"6\">: Our best feedforward variant signifi-</td></tr><tr><td colspan=\"7\">cantly outperforms the systems with surface fea-</td></tr><tr><td colspan=\"7\">tures (p < 0.05). ME=Maximum Entropy model</td></tr><tr><td/><td colspan=\"4\">Number of Hidden Layers</td><td>0</td><td>1</td><td>2</td></tr><tr><td/><td/><td>CBOW</td><td/><td/><td colspan=\"2\">Skipgram</td></tr><tr><td/><td>0.84</td><td/><td/><td/><td/></tr><tr><td>Accuracy</td><td>0.82</td><td/><td/><td/><td/></tr><tr><td/><td>0.80</td><td/><td/><td/><td/></tr><tr><td/><td>0.78</td><td/><td/><td/><td/></tr><tr><td/><td>100</td><td>200</td><td>300</td><td>100</td><td/><td>200</td><td>300</td></tr><tr><td/><td/><td colspan=\"5\">Dimensionality of word vectors</td></tr></table>", |
|
"html": null |
|
}, |
|
"TABREF10": { |
|
"num": null, |
|
"text": "Comparing various systems on the CoNLL 2016 Shared Task standard datasets. Manual features are no longer needed for a competitive system. While performing well in-domain, convolutional networks degrade sharply when tested on the blind slightly out-of-domain dataset.", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |