|
{ |
|
"paper_id": "D16-1037", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:37:02.676835Z" |
|
}, |
|
"title": "Variational Neural Discourse Relation Recognizer", |
|
"authors": [ |
|
{ |
|
"first": "Biao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Provincial Key Laboratory for Computer Information Processing Technology", |
|
"institution": "Soochow University", |
|
"location": { |
|
"postCode": "215006", |
|
"settlement": "Suzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Deyi", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Xiamen University", |
|
"location": { |
|
"postCode": "361005", |
|
"settlement": "Xiamen", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Jinsong", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Provincial Key Laboratory for Computer Information Processing Technology", |
|
"institution": "Soochow University", |
|
"location": { |
|
"postCode": "215006", |
|
"settlement": "Suzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Qun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Key Laboratory of Intelligent Information Processing", |
|
"institution": "Chinese Academy of Sciences", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Rongrong", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Provincial Key Laboratory for Computer Information Processing Technology", |
|
"institution": "Soochow University", |
|
"location": { |
|
"postCode": "215006", |
|
"settlement": "Suzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Provincial Key Laboratory for Computer Information Processing Technology", |
|
"institution": "Soochow University", |
|
"location": { |
|
"postCode": "215006", |
|
"settlement": "Suzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Implicit discourse relation recognition is a crucial component for automatic discourselevel analysis and nature language understanding. Previous studies exploit discriminative models that are built on either powerful manual features or deep discourse representations. In this paper, instead, we explore generative models and propose a variational neural discourse relation recognizer. We refer to this model as VarNDRR. VarNDRR establishes a directed probabilistic model with a latent continuous variable that generates both a discourse and the relation between the two arguments of the discourse. In order to perform efficient inference and learning, we introduce neural discourse relation models to approximate the prior and posterior distributions of the latent variable, and employ these approximated distributions to optimize a reparameterized variational lower bound. This allows VarNDRR to be trained with standard stochastic gradient methods. Experiments on the benchmark data set show that VarNDRR can achieve comparable results against stateof-the-art baselines without using any manual features.", |
|
"pdf_parse": { |
|
"paper_id": "D16-1037", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Implicit discourse relation recognition is a crucial component for automatic discourselevel analysis and nature language understanding. Previous studies exploit discriminative models that are built on either powerful manual features or deep discourse representations. In this paper, instead, we explore generative models and propose a variational neural discourse relation recognizer. We refer to this model as VarNDRR. VarNDRR establishes a directed probabilistic model with a latent continuous variable that generates both a discourse and the relation between the two arguments of the discourse. In order to perform efficient inference and learning, we introduce neural discourse relation models to approximate the prior and posterior distributions of the latent variable, and employ these approximated distributions to optimize a reparameterized variational lower bound. This allows VarNDRR to be trained with standard stochastic gradient methods. Experiments on the benchmark data set show that VarNDRR can achieve comparable results against stateof-the-art baselines without using any manual features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Discourse relation characterizes the internal structure and logical relation of a coherent text. Automatically identifying these relations not only plays an important role in discourse comprehension and generation, but also obtains wide applications in many other relevant natural language processing tasks, such as text summarization (Yoshida et al., 2014) , conversation (Higashinaka et al., 2014) , question answering (Verberne et al., 2007) and information extraction (Cimiano et al., 2005) . Generally, discourse relations can be divided into two categories: explicit and implicit, which can be illustrated in the following example:", |
|
"cite_spans": [ |
|
{ |
|
"start": 335, |
|
"end": 357, |
|
"text": "(Yoshida et al., 2014)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 373, |
|
"end": 399, |
|
"text": "(Higashinaka et al., 2014)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 421, |
|
"end": 444, |
|
"text": "(Verberne et al., 2007)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 472, |
|
"end": 494, |
|
"text": "(Cimiano et al., 2005)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The company was disappointed by the ruling. because The obligation is totally unwarranted. (adapted from wsj 0294)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "With the discourse connective because, these two sentences display an explicit discourse relation CONTINGENCY which can be inferred easily. Once this discourse connective is removed, however, the discourse relation becomes implicit and difficult to be recognized. This is because almost no surface information in these two sentences can signal this relation. For successful recognition of this relation, in the contrary, we need to understand the deep semantic correlation between disappointed and obligation in the two sentences above. Although explicit discourse relation recognition (DRR) has made great progress (Miltsakaki et al., 2005; Pitler et al., 2008) , implicit DRR still remains a serious challenge due to the difficulty in semantic analysis.", |
|
"cite_spans": [ |
|
{ |
|
"start": 616, |
|
"end": 641, |
|
"text": "(Miltsakaki et al., 2005;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 642, |
|
"end": 662, |
|
"text": "Pitler et al., 2008)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Conventional approaches to implicit DRR often treat the relation recognition as a classification problem, where discourse arguments and relations are regarded as the inputs and outputs respectively. Generally, these methods first generate a representation for a discourse, denoted as x 1 (e.g., manual fea- tures in SVM-based recognition (Pitler et al., 2009; Lin et al., 2009) or sentence embeddings in neural networks-based recognition (Ji and Eisenstein, 2015; Zhang et al., 2015)) , and then directly model the conditional probability of the corresponding discourse relation y given x, i.e. p(y|x). In spite of their success, these discriminative approaches rely heavily on the goodness of discourse representation x. Sophisticated and good representations of a discourse, however, may make models suffer from overfitting as we have no large-scale balanced data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 338, |
|
"end": 359, |
|
"text": "(Pitler et al., 2009;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 377, |
|
"text": "Lin et al., 2009)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 438, |
|
"end": 463, |
|
"text": "(Ji and Eisenstein, 2015;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 464, |
|
"end": 484, |
|
"text": "Zhang et al., 2015))", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Instead, we assume that there is a latent continuous variable z from an underlying semantic space. It is this latent variable that generates both discourse arguments and the corresponding relation, i.e. p(x, y|z). The latent variable enables us to jointly model discourse arguments and their relations, rather than conditionally model y on x. However, the incorporation of the latent variable makes the modeling difficult due to the intractable computation with respect to the posterior distribution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Inspired by Kingma and Welling (2014) as well as Rezende et al. (2014) who introduce a variational neural inference model to the intractable posterior via optimizing a reparameterized variational lower bound, we propose a variational neural discourse relation recognizer (VarNDRR) with a latent continuous variable for implicit DRR in this paper. The key idea behind VarNDRR is that although the posterior distribution is intractable, we can approximate it via a deep neural network. Figure 1 illustrates the treat them as univariate variables in most cases. Additionally, we use bold symbols to denote variables, and plain symbols to denote values. graph structure of VarNDRR. Specifically, there are two essential components:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 484, |
|
"end": 492, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 neural discourse recognizer As a discourse x and its corresponding relation y are independent with each other given the latent variable z (as shown by the solid lines), we can formulate the generation of x and y from z in the equation p \u03b8 (x, y|z) = p \u03b8 (x|z)p \u03b8 (y|z). These two conditional probabilities on the right hand side are modeled via deep neural networks (see section 3.1). \u2022 neural latent approximator VarNDRR assumes that the latent variable can be inferred from discourse arguments x and relations y (as shown by the dash lines). In order to infer the latent variable, we employ a deep neural network to approximate the posterior q \u03c6 (z|x, y) as well as the prior q \u03c6 (z|x) (see section 3.2), which makes the inference procedure efficient. We further employ a reparameterization technique to sample z from q \u03c6 (z|x, y) that not only bridges the gap between the recognizer and the approximator but also allows us to use the standard stochastic gradient ascent techniques for optimization (see section 3.3).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The main contributions of our work lie in two aspects. 1) We exploit a generative graphic model for implicit DRR. To the best of our knowledge, this has never been investigated before. 2) We develop a neural recognizer and two neural approximators specifically for implicit DRR, which enables both the recognition and inference to be efficient. We conduct a series of experiments for English implicit DRR on the PDTB-style corpus to evaluate the effectiveness of our proposed VarNDRR model. Experiment results show that our variational model achieves comparable results against several strong baselines in term of F1 score. Extensive analysis on the variational lower bound further reveals that our model can indeed fit the data set with respect to discourse arguments and relations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The variational autoencoder (VAE) , which forms the basis of our model, is a generative model that can be regarded as a regularized version of the standard autoencoder. With a latent random variable z, VAE significantly changes the autoencoder architecture to be able to capture the variations in the observed variable x. The joint distribution of (x, z) is formulated as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background: Variational Autoencoder", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p \u03b8 (x, z) = p \u03b8 (x|z)p \u03b8 (z)", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Background: Variational Autoencoder", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "where p \u03b8 (z) is the prior over the latent variable, usually equipped with a simple Gaussian distribution. p \u03b8 (x|z) is the conditional distribution that models the probability of x given the latent variable z. Typically, VAE parameterizes p \u03b8 (x|z) with a highly nonlinear but flexible function approximator such as a neural network. The objective of VAE is to maximize a variational lower bound as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background: Variational Autoencoder", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L V AE (\u03b8, \u03c6; x) = \u2212KL(q \u03c6 (z|x)||p \u03b8 (z)) +E q \u03c6 (z|x) [log p \u03b8 (x|z)] \u2264 log p \u03b8 (x)", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Background: Variational Autoencoder", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "where KL(Q||P ) is Kullback-Leibler divergence between two distributions Q and P . q \u03c6 (z|x) is an approximation of the posterior p(z|x) and usually follows a diagonal Gaussian N (\u00b5, diag(\u03c3 2 )) whose mean \u00b5 and variance \u03c3 2 are parameterized by again, neural networks, conditioned on x. To optimize Eq. (2) stochastically with respect to both \u03b8 and \u03c6, VAE introduces a reparameterization trick that parameterizes the latent variable z with the Gaussian parameters \u00b5 and \u03c3 in q \u03c6 (z|x):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background: Variational Autoencoder", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "z = \u00b5 + \u03c3 (3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background: Variational Autoencoder", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "where is a standard Gaussian variable, and denotes an element-wise product. Intuitively, VAE learns the representation of the latent variable not as single points, but as soft ellipsoidal regions in latent space, forcing the representation to fill the space rather than memorizing the training data as isolated representations. With this trick, the VAE model can be trained through standard backpropagation technique with stochastic gradient ascent.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background: Variational Autoencoder", |
|
"sec_num": "2" |
|
}, |
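
{

"text": "As a minimal illustrative sketch of this reparameterized sampling step (a NumPy illustration with a hypothetical helper name, not code from the model itself):\n\nimport numpy as np\n\ndef reparameterize(mu, log_sigma2, rng=np.random.default_rng(0)):\n    # z = mu + sigma * eps with eps ~ N(0, I), as in Eq. (3); sampling becomes a\n    # deterministic, differentiable function of (mu, sigma) plus external noise\n    sigma = np.exp(0.5 * log_sigma2)\n    eps = rng.standard_normal(mu.shape)\n    return mu + sigma * eps\n\n# usage: one 20-dimensional latent sample\nz = reparameterize(np.zeros(20), np.zeros(20))\n\nBecause the randomness is isolated in the external noise eps, gradients with respect to mu and sigma can flow through the sampled z during backpropagation.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Background: Variational Autoencoder",

"sec_num": "2"

},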
|
{ |
|
"text": "This section introduces our proposed VarNDRR model. Formally, in VarNDRR, there are two observed variables, x for a discourse and y for the corresponding relation, and one latent variable z. As", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The VarNDRR Model", |
|
"sec_num": "3" |
|
}, |
|
|
{ |
|
"text": "Figure 2: Neural networks for conditional probabilities p \u03b8 (x|z) and p \u03b8 (y|z). The gray color denotes real-valued representations while the white and black color 0-1 representations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The VarNDRR Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "illustrated in Figure 1 , the joint distribution of the three variables is formulated as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 23, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The VarNDRR Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p \u03b8 (x, y, z) = p \u03b8 (x, y|z)p(z)", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "The VarNDRR Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We begin with this distribution to elaborate the major components of VarNDRR.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The VarNDRR Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The conditional distribution p(x, y|z) in Eq. 4shows that both discourse arguments and the corresponding relation are generated from the latent variable. As shown in Figure 1 , x is d-separated from y by z. Therefore the discourse x and the corresponding relation y is independent given the latent variable z. The joint probability can be therefore formulated as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 166, |
|
"end": 174, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "p \u03b8 (x, y, z) = p \u03b8 (x|z)p \u03b8 (y|z)p(z)", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We use a neural model q \u03c6 (z|x) to approximate the prior p(z) conditioned on the discourse x (see the following section). With respect to the other two conditional distributions, we parameterize them via neural networks as shown in Figure 2 . Before we describe these neural networks, it is necessary to briefly introduce how discourse relations are annotated in our training data. The PDTB corpus, used as our training data, annotates implicit discourse relations between two neighboring arguments, namely Arg1 and Arg2. In VarNDRR, we represent the two arguments with bag-of-word representations, and denote them as x 1 and x 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 232, |
|
"end": 240, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "To model p \u03b8 (x|z) (the bottom part in Figure 2 ), we project the representation of the latent variable z \u2208 R dz onto a hidden layer:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 47, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "h 1 = f (W h 1 z + b h 1 ) h 2 = f (W h 2 z + b h 1 ) (6)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "h 1 \u2208 R d h 1 , h 2 \u2208 R d h 2 , W *", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "is the transformation matrix, b * is the bias term, d u denotes the dimensionality of vector representations of u and f (\u2022) is an element-wise activation function, such as tanh(\u2022), which is used throughout our model. Upon this hidden layer, we further stack a Sigmoid layer to predict the probabilities of corresponding discourse arguments:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "x 1 = Sigmoid(W x 1 h 1 + b x 1 ) x 2 = Sigmoid(W x 2 h 2 + b x 2 )", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "here, x 1 \u2208 R dx 1 and x 2 \u2208 R dx 2 are the realvalued representations of the reconstructed x 1 and x 2 respectively. 2 We assume that p \u03b8 (x|z) is a multivariate Bernoulli distribution because of the bagof-word representation. Therefore the logarithm of p(x|z) is calculated as the sum of probabilities of words in discourse arguments as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "log p(x|z) = i x 1,i log x 1,i + (1 \u2212 x 1,i ) log(1 \u2212 x 1,i ) + j x 2,j log x 2,j + (1 \u2212 x 2,j ) log(1 \u2212 x 2,j )", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where u i,j is the jth element in u i . In order to estimate p \u03b8 (y|z) (the top part in Figure 2), we stack a softmax layer over the multilayerperceptron-transformed representation of the latent variable z:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 94, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "y = SoftMax(W y MLP(z) + b y )", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "y \u2208 R dy , and d y denotes the number of discourse relations. MLP projects the representation of latent variable z into a d m -dimensional space through four internal layers, each of which has dimension d m .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Suppose that the true relation is y \u2208 R dy , the logarithm of p(y|z) is defined as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "log p(y|z) = dy i=1 y i log y i (10) \u00b5 x 1 h 1 log \u03c3 2 h 2 x 2 y h y q \u2032 \u03c6 (z|x) q \u03c6 (z|x, y)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Figure 3: Neural networks for Gaussian parameters \u00b5 and log \u03c3 in the approximated posterior q \u03c6 (z|x, y) and prior q \u03c6 (z|x).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In order to precisely estimate these conditional probabilities, our model will force the representation z of the latent variable to encode semantic information for both the reconstructed discourse x (Eq. 8)and predicted discourse relation y (Eq. (10)), which is exactly what we want.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Discourse Recognizer", |
|
"sec_num": "3.1" |
|
}, |
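
{

"text": "As a concrete, hedged illustration of the recognizer above, the following NumPy sketch computes log p(x|z) + log p(y|z) for a single discourse instance (our simplification with hypothetical parameter names; the four-layer MLP of Eq. (9) is collapsed into one hidden layer):\n\nimport numpy as np\n\ndef sigmoid(a):\n    return 1.0 / (1.0 + np.exp(-a))\n\ndef softmax(a):\n    e = np.exp(a - a.max())\n    return e / e.sum()\n\ndef recognizer_log_likelihood(z, x1, x2, y, p, f=np.tanh, eps=1e-7):\n    # hidden layers h'_1, h'_2 computed from the latent variable z (Eq. 6)\n    h1 = f(p['W_h1'] @ z + p['b_h1'])\n    h2 = f(p['W_h2'] @ z + p['b_h2'])\n    # Bernoulli parameters of the reconstructed bag-of-words arguments (Eq. 7)\n    x1_hat = np.clip(sigmoid(p['W_x1'] @ h1 + p['b_x1']), eps, 1 - eps)\n    x2_hat = np.clip(sigmoid(p['W_x2'] @ h2 + p['b_x2']), eps, 1 - eps)\n    # log p(x|z): sum of per-word Bernoulli log-probabilities (Eq. 8)\n    log_px = np.sum(x1 * np.log(x1_hat) + (1 - x1) * np.log(1 - x1_hat))\n    log_px += np.sum(x2 * np.log(x2_hat) + (1 - x2) * np.log(1 - x2_hat))\n    # log p(y|z): softmax over relations applied to a transformation of z (Eqs. 9-10)\n    y_hat = softmax(p['W_y'] @ f(p['W_m'] @ z + p['b_m']) + p['b_y'])\n    log_py = np.sum(y * np.log(np.clip(y_hat, eps, 1.0)))\n    return log_px + log_py",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Neural Discourse Recognizer",

"sec_num": "3.1"

},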
|
{ |
|
"text": "For the joint distribution in Eq. 5, we can define a variational lower bound that is similar to Eq. (2). The difference lies in two aspects: the approximate prior q \u03c6 (z|x) and posterior q \u03c6 (z|x, y). We model both distributions as a multivariate Gaussian distribution with a diagonal covariance structure:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Latent Approximator", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "N (z; \u00b5, \u03c3 2 I)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Latent Approximator", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The mean \u00b5 and s.d. \u03c3 of the approximate distribution are the outputs of the neural network as shown in Figure 3 , where the prior and posterior have different conditions and independent parameters.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 112, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Neural Latent Approximator", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Approximate Posterior q \u03c6 (z|x, y) is modeled to condition on both observed variables: the discourse arguments x and relations y. Similar to the calculation of p \u03b8 (x|z), we first transform the input x and y into a hidden representation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Latent Approximator", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "h 1 = f (W h 1 x 1 + b h 1 ) h 2 = f (W h 2 x 2 + b h 2 ) h y = f (W hy y + b hy ) (11) where h 1 \u2208 R d h 1 , h 2 \u2208 R d h 2 and h y \u2208 R d hy . 3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Latent Approximator", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We then obtain the Gaussian parameters of the posterior \u00b5 and log \u03c3 2 through linear regression:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Latent Approximator", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u00b5 = W \u00b5 1 h 1 + W \u00b5 2 h 2 + W \u00b5y h y + b \u00b5 log \u03c3 2 = W \u03c3 1 h 1 + W \u03c3 2 h 2 + W \u03c3y h y + b \u03c3 (12)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Latent Approximator", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where \u00b5, \u03c3 \u2208 R dz . In this way, this posterior approximator can be efficiently computed. Approximate Prior q \u03c6 (z|x) is modeled to condition on discourse arguments x alone. This is based on the observation that discriminative models are able to obtain promising results using only x. Therefore, assuming the discourse arguments encode the prior information for discourse relation recognition is meaningful.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Latent Approximator", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The neural model for prior q \u03c6 (z|x) is the same as that (i.e. Eq (11) and (12)) for posterior q \u03c6 (z|x, y) (see Figure 3) , except for the absence of discourse relation y. For clarity , we use \u00b5 and \u03c3 to denote the mean and s.d. of the approximate prior.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 122, |
|
"text": "Figure 3)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Neural Latent Approximator", |
|
"sec_num": "3.2" |
|
}, |
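
{

"text": "As a hedged NumPy sketch of the posterior approximator in Eqs. (11)-(12) (an illustration with hypothetical parameter names; the prior approximator is an analogous network with its own parameters and without the relation input y):\n\nimport numpy as np\n\ndef gaussian_approximator(x1, x2, y, p, f=np.tanh):\n    # hidden representations of the two arguments and the relation (Eq. 11)\n    h1 = f(p['W_h1'] @ x1 + p['b_h1'])\n    h2 = f(p['W_h2'] @ x2 + p['b_h2'])\n    hy = f(p['W_hy'] @ y + p['b_hy'])\n    # linear regression to the Gaussian parameters mu and log sigma^2 (Eq. 12)\n    mu = p['W_mu1'] @ h1 + p['W_mu2'] @ h2 + p['W_muy'] @ hy + p['b_mu']\n    log_sigma2 = p['W_s1'] @ h1 + p['W_s2'] @ h2 + p['W_sy'] @ hy + p['b_s']\n    return mu, log_sigma2",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Neural Latent Approximator",

"sec_num": "3.2"

},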
|
{ |
|
"text": "With the parameters of Gaussian distribution, we can access the representation z using different sampling strategies. However, traditional sampling approaches often breaks off the connection between recognizer and approximator, making the optimization difficult. Instead, we employ the reparameterization trick (Kingma and Welling, 2014; Rezende et al., 2014) as in Eq. (3). During training, we sample the latent variable usingz = \u00b5 + \u03c3 ; during testing, however, we employ the expectation of z in the approximate prior distribution, i.e. setz = \u00b5 to avoid uncertainty.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Neural Latent Approximator", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We employ the Monte Carlo method to estimate the expectation over the approximate posterior, that is", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Learning", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "E q \u03c6 (z|x,y) [log p \u03b8 (x, y|z)]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Learning", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": ". Given a training instance (x (t) , y (t) ), the joint training objective is defined:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Learning", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "L(\u03b8, \u03c6) \u2212KL(q \u03c6 (z|x (t) , y (t) )||q \u03c6 (z|x (t) )) + 1 L L l=1 log p \u03b8 (x (t) , y (t) |z (t,l) ) (13) wherez (t,l) = \u00b5 (t) + \u03c3 (t) (l) and (l) \u223c N (0, I)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Learning", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "L is the number of samples. The first term is the KL divergence of two Gaussian distributions which can be computed and differentiated without estimation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Learning", |
|
"sec_num": "3.3" |
|
}, |
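
{

"text": "For completeness, the closed form used for this term is the standard KL divergence between two diagonal Gaussians (stated here explicitly for clarity): KL(N(\u00b5_q, diag(\u03c3\u00b2_q)) || N(\u00b5_p, diag(\u03c3\u00b2_p))) = (1/2) \u2211_i [ log(\u03c3\u00b2_{p,i}/\u03c3\u00b2_{q,i}) + (\u03c3\u00b2_{q,i} + (\u00b5_{q,i} \u2212 \u00b5_{p,i})\u00b2)/\u03c3\u00b2_{p,i} \u2212 1 ], where q denotes the approximate posterior and p the approximate prior.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Parameter Learning",

"sec_num": "3.3"

},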
|
{ |
|
"text": "Algorithm 1 Parameter Learning Algorithm of VarNDRR.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Learning", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Inputs: A, the maximum number of iterations; M , the number of instances in one batch; L, the number of samples; \u03b8, \u03c6 \u2190 Initialize parameters repeat Maximizing this objective will minimize the difference between the approximate posterior and prior, thus making the settingz = \u00b5 during testing reasonable. The second term is the approximate expectation of E q \u03c6 (z|x,y) [log p \u03b8 (x, y|z)], which is also differentiable.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Learning", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "D \u2190 getRandomMiniBatch(M) \u2190 getRandomNoiseFromStandardGaussian() g \u2190 \u2207 \u03b8,\u03c6 L(\u03b8, \u03c6; D, ) \u03b8, \u03c6 \u2190 parameterUpdater(\u03b8, \u03c6; g) until", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Learning", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "As the objective function in Eq. (13) is differentiable, we can optimize both the model parameters \u03b8 and variational parameters \u03c6 jointly using standard gradient ascent techniques. The training procedure for VarNDRR is summarized in Algorithm 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Learning", |
|
"sec_num": "3.3" |
|
}, |
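
{

"text": "To make the estimate in Eq. (13) concrete, the following hedged NumPy sketch assembles a single-instance lower bound (an illustration only: posterior, prior and log_joint are user-supplied callables standing in for the networks of Sections 3.1-3.2, and an optimizer such as Adam would then follow the gradient of this quantity):\n\nimport numpy as np\n\ndef varndrr_lower_bound(x1, x2, y, posterior, prior, log_joint, rng, L=1):\n    # posterior(x1, x2, y) and prior(x1, x2) return (mu, log_sigma2);\n    # log_joint(x1, x2, y, z) returns log p(x, y | z) from the recognizer\n    mu_q, logv_q = posterior(x1, x2, y)\n    mu_p, logv_p = prior(x1, x2)\n    # closed-form KL between the two diagonal Gaussians (first term of Eq. 13)\n    kl = 0.5 * np.sum(logv_p - logv_q + (np.exp(logv_q) + (mu_q - mu_p) ** 2) / np.exp(logv_p) - 1.0)\n    # Monte Carlo estimate of the reconstruction term with L reparameterized samples\n    rec = 0.0\n    for _ in range(L):\n        eps = rng.standard_normal(mu_q.shape)\n        z = mu_q + np.exp(0.5 * logv_q) * eps\n        rec += log_joint(x1, x2, y, z)\n    return -kl + rec / L",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Parameter Learning",

"sec_num": "3.3"

},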
|
{ |
|
"text": "We conducted experiments on English implicit DRR task to validate the effectiveness of VarNDRR. 4", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We used the largest hand-annotated discourse corpus PDTB 2.0 5 (Prasad et al., 2008 ) (PDTB hereafter). This corpus contains discourse annotations over 2,312 Wall Street Journal articles, and is organized in different sections. Following previous work (Pitler et al., 2009; Zhou et al., al., 2013; Zhang et al., 2015), we used sections 2-20 as our training set, sections 21-22 as the test set. Sections 0-1 were used as the development set for hyperparameter optimization. In PDTB, discourse relations are annotated in a predicate-argument view. Each discourse connective is treated as a predicate that takes two text spans as its arguments. The discourse relation tags in PDTB are arranged in a three-level hierarchy, where the top level consists of four major semantic classes: TEMPORAL (TEM), CONTINGENCY (CON), EX-PANSION (EXP) and COMPARISON (COM). Because the top-level relations are general enough to be annotated with a high inter-annotator agreement and are common to most theories of discourse, in our experiments we only use this level of annotations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 63, |
|
"end": 83, |
|
"text": "(Prasad et al., 2008", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 252, |
|
"end": 273, |
|
"text": "(Pitler et al., 2009;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 274, |
|
"end": 286, |
|
"text": "Zhou et al.,", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We formulated the task as four separate oneagainst-all binary classification problems: each top level class vs. the other three discourse relation classes. We also balanced the training set by resampling training instances in each class until the number of positive and negative instances are equal. In contrast, all instances in the test and development set are kept in nature. The statistics of various data sets is listed in Table 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "4.1" |
|
}, |
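
{

"text": "A small, hedged sketch of this balancing step (our illustration; the helper name balance_by_oversampling is hypothetical):\n\nimport numpy as np\n\ndef balance_by_oversampling(pos, neg, rng=np.random.default_rng(0)):\n    # oversample the smaller class (with replacement) until the numbers of positive\n    # and negative training instances are equal; applied to the training set only\n    def grow(cls, target):\n        extra = rng.choice(len(cls), size=target - len(cls), replace=True)\n        return list(cls) + [cls[i] for i in extra]\n    target = max(len(pos), len(neg))\n    return grow(pos, target), grow(neg, target)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dataset",

"sec_num": "4.1"

},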
|
{ |
|
"text": "We tokenized all datasets using Stanford NLP Toolkit 6 . For optimization, we employed the Adam ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "L = 1, d z = 20, d x 1 = d x 2 = 10001, d h 1 = d h 2 = d h 1 = d h 2 = d m = d hy = 400, d y = 2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "for all experiments. 7 . All parameters of VarNDRR are initialized by a Gaussian distribution (\u00b5 = 0, \u03c3 = 0.01). For Adam, we set \u03b2 1 = 0.9, \u03b2 2 = 0.999 with a learning rate 0.001. Additionally, we tied the following parameters in practice:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "W h 1 and W h 2 , W x 1 and W x 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We compared VarNDRR against the following two different baseline methods:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 SVM: a support vector machine (SVM) classifier 8 trained with several manual features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 SCNN: a shallow convolutional neural network proposed by Zhang et al. (2015) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 78, |
|
"text": "Zhang et al. (2015)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We also provide results from two state-of-the-art systems:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Rutherford and Xue (2015) convert explicit discourse relations into implicit instances.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 Ji and Eisenstein (2015) augment discourse representations via entity connections. Features used in SVM are taken from the stateof-the-art implicit discourse relation recognition model, including Bag of Words, Cross-Argument Word Pairs, Polarity, First-Last, First3, Production Rules, Dependency Rules and Brown cluster pair (Rutherford and Xue, 2014) . In order to collect bag of words, production rules, dependency rules, and cross-argument word pairs, we used a frequency cutoff of 5 to remove rare features, following Lin et al. (2009) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 327, |
|
"end": 353, |
|
"text": "(Rutherford and Xue, 2014)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 524, |
|
"end": 541, |
|
"text": "Lin et al. (2009)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Setup", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Because the development and test sets are imbalanced in terms of the ratio of positive and negative instances, we chose the widely-used F1 score as our major evaluation metric. In addition, we also provide the precision, recall and accuracy for further analysis. Table 2 summarizes the classification results. From Table 2 , we observe that the proposed VarN-DRR outperforms SVM on COM/EXP/TEM and SCNN on EXP/COM according to their F1 scores. Although it fails on CON, VarNDRR achieves the best result on EXP/COM among these three models. Overall, VarNDRR is competitive in comparison with these two baselines. With respect to the accuracy, our model does not yield substantial im-provements over the two baselines. This may be because that we used the F1 score rather than the accuracy, as our selection criterion on the development set. With respect to the precision and recall, our model tends to produce relatively lower precisions but higher recalls. This suggests that the improvements of VarNDRR in terms of F1 scores mostly benefits from the recall values.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 263, |
|
"end": 270, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 315, |
|
"end": 322, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Classification Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Comparing with the state-of-the-art results of previous work (Ji and Eisenstein, 2015; Rutherford and Xue, 2015) , VarNDRR achieves comparable results in term of the F1 scores. Specifically, VarNDRR outperforms Rutherford and Xue (2015) on EXP, and Ji and Eisenstein (2015) on TEM. However, the accuracy of our model fails to surpass these models. We argue that this is because both baselines use many manual features designed with prior human knowledge, but our model is purely neural-based.", |
|
"cite_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 86, |
|
"text": "(Ji and Eisenstein, 2015;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 87, |
|
"end": 112, |
|
"text": "Rutherford and Xue, 2015)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 211, |
|
"end": 236, |
|
"text": "Rutherford and Xue (2015)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Classification Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Additionally, we find that the performance of our model is proportional to the number of training instances. This suggests that collecting more training instances (in spite of the noises) may be beneficial to our model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Classification Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "In addition to the classification performance, the efficiency in learning and inference is another concern for variational methods. Figure 4 shows the training procedure for four tasks in terms of the variational lower bound on the training set. We also provide F1 scores on the development set to investigate the relations between the variational lower bound and recognition performance.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 132, |
|
"end": 140, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Variational Lower Bound Analysis", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "We find that our model converges toward the variational lower bound considerably fast in all experiments (within 100 epochs), which resonates with the previous findings . However, the change trend of the F1 score does not follow that of the lower bound which takes more time to converge. Particularly to the four discourse relations, we further observe that the change paths of the F1 score are completely different. This may suggest that the four discourse relations have different properties and distributions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Variational Lower Bound Analysis", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "In particular, the number of epochs when the best F1 score reaches is also different for the four discourse relations. This indicates that dividing the implicit DRR into four different tasks according to the type of discourse relations is reasonable and better than performing DRR on the mixtures of the four relations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Variational Lower Bound Analysis", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "There are two lines of research related to our work: implicit discourse relation recognition and variational neural model, which we describe in succession.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Implicit Discourse Relation Recognition Due to the release of Penn Discourse Treebank (Prasad et al., 2008) corpus, constantly increasing efforts are made for implicit DRR. Upon this corpus, Pilter et al. (2009) exploit several linguistically informed features, such as polarity tags, modality and lexical features. Lin et al. (2009) further incorporate context words, word pairs as well as discourse parse information into their classifier. Following this direction, several more powerful features have been exploited: entities (Louis et al., 2010) , word embeddings (Braud and Denis, 2015) , Brown cluster pairs and co-reference patterns (Rutherford and Xue, 2014) . With these features, Park and Cardie (2012) perform feature set optimization for better feature combination.", |
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 107, |
|
"text": "(Prasad et al., 2008)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 316, |
|
"end": 333, |
|
"text": "Lin et al. (2009)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 529, |
|
"end": 549, |
|
"text": "(Louis et al., 2010)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 568, |
|
"end": 591, |
|
"text": "(Braud and Denis, 2015)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 640, |
|
"end": 666, |
|
"text": "(Rutherford and Xue, 2014)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 690, |
|
"end": 712, |
|
"text": "Park and Cardie (2012)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Different from feature engineering, predicting discourse connectives can indirectly help the relation classification (Zhou et al., 2010; Patterson and Kehler, 2013) . In addition, selecting explicit discourse instances that are similar to the implicit ones can enrich the training corpus for implicit DRR and gains improvement (Wang et al., 2012; Lan et al., 2013; Braud and Denis, 2014; Fisher and Simmons, 2015; Rutherford and Xue, 2015) . Very recently, neural network models have been also used for implicit DRR due to its capability for representation learning (Ji and Eisenstein, 2015; Zhang et al., 2015) . Despite their successes, most of them focus on the discriminative models, leaving the field of generative models for implicit DRR a relatively uninvestigated area. In this respect, the most related work to ours is the latent variable recurrent neural network recently proposed by Ji et al. (2016) . However, our work differs from theirs significantly, which can be summarized in the following three aspects: 1) they employ the recurrent neural network to represent the discourse arguments, while we use the simple feedforward neural network; 2) they treat the discourse relations directly as latent variables, rather than the underlying semantic representation of discourses; 3) their model is optimized in terms of the data likelihood, since the discourse relations are observed during training. However, VarNDRR is optimized under the variational theory.", |
|
"cite_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 136, |
|
"text": "(Zhou et al., 2010;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 137, |
|
"end": 164, |
|
"text": "Patterson and Kehler, 2013)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 327, |
|
"end": 346, |
|
"text": "(Wang et al., 2012;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 347, |
|
"end": 364, |
|
"text": "Lan et al., 2013;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 365, |
|
"end": 387, |
|
"text": "Braud and Denis, 2014;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 388, |
|
"end": 413, |
|
"text": "Fisher and Simmons, 2015;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 414, |
|
"end": 439, |
|
"text": "Rutherford and Xue, 2015)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 566, |
|
"end": 591, |
|
"text": "(Ji and Eisenstein, 2015;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 592, |
|
"end": 611, |
|
"text": "Zhang et al., 2015)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 894, |
|
"end": 910, |
|
"text": "Ji et al. (2016)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Variational Neural Model In the presence of continuous latent variables with intractable posterior distributions, efficient inference and learning in directed probabilistic models is required. Kingma and Welling (2014) as well as introduce variational neural networks that employ an approximate inference model for intractable posterior and reparameterized variational lower bound for stochastic gradient optimization. revisit the approach to semi-supervised learning with generative models and further develop new models that allow effective generalization from a small labeled dataset to a large unlabeled dataset. Chung et al. (2015) incorporate latent variables into the hidden state of a recurrent neural network, while Gregor et al. (2015) combine a novel spatial attention mechanism that mimics the foveation of human eyes, with a sequential variational auto-encoding framework that allows the iterative construction of complex images.", |
|
"cite_spans": [ |
|
{ |
|
"start": 617, |
|
"end": 636, |
|
"text": "Chung et al. (2015)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 725, |
|
"end": 745, |
|
"text": "Gregor et al. (2015)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We follow the spirit of these variational models, but focus on the adaptation and utilization of them onto implicit DRR, which, to the best of our knowledge, is the first attempt in this respect.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this paper, we have presented a variational neural discourse relation recognizer for implicit DRR. Different from conventional discriminative models that directly calculate the conditional probability of the relation y given discourse arguments x, our model assumes that it is a latent variable from an underlying semantic space that generates both x and y. In order to make the inference and learning efficient, we introduce a neural discourse recognizer and two neural latent approximators as our generative and inference model respectively. Using the reparameterization technique, we are able to optimize the whole model via standard stochastic gradient ascent algorithm. Experiment results in terms of classification and variational lower bound verify the effectiveness of our model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In the future, we would like to exploit the utilization of discourse instances with explicit relations for implicit DRR. For this we can start from two directions: 1) converting explicit instances into pseudo implicit instances and retraining our model; 2) developing a semi-supervised model to leverage semantic information inside discourse arguments. Furthermore, we are also interested in adapting our model to other similar tasks, such as nature language inference.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Unless otherwise specified, all variables in the paper, e.g.,x, y, z are multivariate. But for notational convenience, we", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Notice that the equality of dx 1 = dx 2 , d h 1 = d h 2 is not necessary though we assume so in our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Notice that d h 1 /d h 2 are not necessarily equal to d h 1 /d h 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "There is one dimension in dx 1 and dx 2 for unknown words. 8 http://svmlight.joachims.org/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The authors were supported by National Natural Science Foundation of China (Grant Nos 61303082 (Grant No. IIP2015-4). We also thank the anonymous reviewers for their insightful comments.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 75, |
|
"end": 94, |
|
"text": "(Grant Nos 61303082", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Combining natural and artificial examples to improve implicit discourse relation identification", |
|
"authors": [ |
|
{ |
|
"first": "Chlo\u00e9", |
|
"middle": [], |
|
"last": "Braud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascal", |
|
"middle": [], |
|
"last": "Denis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proc. of COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1694--1705", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chlo\u00e9 Braud and Pascal Denis. 2014. Combining nat- ural and artificial examples to improve implicit dis- course relation identification. In Proc. of COLING, pages 1694-1705, August.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Comparing word representations for implicit discourse relation classification", |
|
"authors": [ |
|
{ |
|
"first": "Chlo\u00e9", |
|
"middle": [], |
|
"last": "Braud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascal", |
|
"middle": [], |
|
"last": "Denis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proc. of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2201--2211", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chlo\u00e9 Braud and Pascal Denis. 2015. Comparing word representations for implicit discourse relation classifi- cation. In Proc. of EMNLP, pages 2201-2211.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "A recurrent latent variable model for sequential data", |
|
"authors": [ |
|
{ |
|
"first": "Junyoung", |
|
"middle": [], |
|
"last": "Chung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Kastner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Dinh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kratarth", |
|
"middle": [], |
|
"last": "Goel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aaron", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Courville", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proc. of NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junyoung Chung, Kyle Kastner, Laurent Dinh, Kratarth Goel, Aaron C. Courville, and Yoshua Bengio. 2015. A recurrent latent variable model for sequential data. In Proc. of NIPS.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Ontology-driven discourse analysis for information extraction", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Cimiano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Uwe", |
|
"middle": [], |
|
"last": "Reyle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jasmin\u0161ari\u0107", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Data & Knowledge Engineering", |
|
"volume": "55", |
|
"issue": "", |
|
"pages": "59--83", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Cimiano, Uwe Reyle, and Jasmin\u0160ari\u0107. 2005. Ontology-driven discourse analysis for information extraction. Data & Knowledge Engineering, 55:59- 83.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Spectral semisupervised discourse relation classification", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Fisher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Reid", |
|
"middle": [], |
|
"last": "Simmons", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proc. of ACL-IJCNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "89--93", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert Fisher and Reid Simmons. 2015. Spectral semi- supervised discourse relation classification. In Proc. of ACL-IJCNLP, pages 89-93, July.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "DRAW: A recurrent neural network for image generation", |
|
"authors": [ |
|
{ |
|
"first": "Karol", |
|
"middle": [], |
|
"last": "Gregor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivo", |
|
"middle": [], |
|
"last": "Danihelka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Graves", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daan", |
|
"middle": [], |
|
"last": "Wierstra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karol Gregor, Ivo Danihelka, Alex Graves, and Daan Wierstra. 2015. DRAW: A recurrent neural network for image generation. CoRR, abs/1502.04623.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Towards an open-domain conversational system fully based on natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Ryuichiro", |
|
"middle": [], |
|
"last": "Higashinaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenji", |
|
"middle": [], |
|
"last": "Imamura", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toyomi", |
|
"middle": [], |
|
"last": "Meguro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chiaki", |
|
"middle": [], |
|
"last": "Miyazaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nozomi", |
|
"middle": [], |
|
"last": "Kobayashi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroaki", |
|
"middle": [], |
|
"last": "Sugiyama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toru", |
|
"middle": [], |
|
"last": "Hirano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Toshiro", |
|
"middle": [], |
|
"last": "Makino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshihiro", |
|
"middle": [], |
|
"last": "Matsuo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proc. of COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "928--939", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryuichiro Higashinaka, Kenji Imamura, Toyomi Me- guro, Chiaki Miyazaki, Nozomi Kobayashi, Hiroaki Sugiyama, Toru Hirano, Toshiro Makino, and Yoshi- hiro Matsuo. 2014. Towards an open-domain conver- sational system fully based on natural language pro- cessing. In Proc. of COLING, pages 928-939.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "One vector is not enough: Entity-augmented distributed semantics for discourse relations. TACL", |
|
"authors": [ |
|
{ |
|
"first": "Yangfeng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Eisenstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "329--344", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yangfeng Ji and Jacob Eisenstein. 2015. One vector is not enough: Entity-augmented distributed semantics for discourse relations. TACL, pages 329-344.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A latent variable recurrent neural network for discourse-driven language models", |
|
"authors": [ |
|
{ |
|
"first": "Yangfeng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gholamreza", |
|
"middle": [], |
|
"last": "Haffari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Eisenstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proc. of NAACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "332--342", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yangfeng Ji, Gholamreza Haffari, and Jacob Eisenstein. 2016. A latent variable recurrent neural network for discourse-driven language models. In Proc. of NAACL, pages 332-342, June.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. CoRR, abs/1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Auto-Encoding Variational Bayes", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Welling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proc. of ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P Kingma and Max Welling. 2014. Auto- Encoding Variational Bayes. In Proc. of ICLR.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Semi-supervised learning with deep generative models", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shakir", |
|
"middle": [], |
|
"last": "Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danilo", |
|
"middle": [], |
|
"last": "Jimenez Rezende", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Welling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proc. of NIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3581--3589", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma, Shakir Mohamed, Danilo Jimenez Rezende, and Max Welling. 2014. Semi-supervised learning with deep generative models. In Proc. of NIPS, pages 3581-3589.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Leveraging Synthetic Discourse Data via Multi-task Learning for Implicit Discourse Relation Recognition", |
|
"authors": [ |
|
{ |
|
"first": "Man", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengyu", |
|
"middle": [], |
|
"last": "Niu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "476--485", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Man Lan, Yu Xu, and Zhengyu Niu. 2013. Leveraging Synthetic Discourse Data via Multi-task Learning for Implicit Discourse Relation Recognition. In Proc. of ACL, pages 476-485, Sofia, Bulgaria, August.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Recognizing implicit discourse relations in the Penn Discourse Treebank", |
|
"authors": [ |
|
{ |
|
"first": "Ziheng", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min-Yen", |
|
"middle": [], |
|
"last": "Kan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hwee Tou", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "343--351", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ziheng Lin, Min-Yen Kan, and Hwee Tou Ng. 2009. Recognizing implicit discourse relations in the Penn Discourse Treebank. In Proc. of EMNLP, pages 343- 351.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Using entity features to classify implicit discourse relations", |
|
"authors": [ |
|
{ |
|
"first": "Annie", |
|
"middle": [], |
|
"last": "Louis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aravind", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rashmi", |
|
"middle": [], |
|
"last": "Prasad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ani", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proc. of SIGDIAL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "59--62", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Annie Louis, Aravind Joshi, Rashmi Prasad, and Ani Nenkova. 2010. Using entity features to classify im- plicit discourse relations. In Proc. of SIGDIAL, pages 59-62, Tokyo, Japan, September.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Experiments on sense annotations and sense disambiguation of discourse connectives", |
|
"authors": [ |
|
{ |
|
"first": "Eleni", |
|
"middle": [], |
|
"last": "Miltsakaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikhil", |
|
"middle": [], |
|
"last": "Dinesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rashmi", |
|
"middle": [], |
|
"last": "Prasad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aravind", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [], |
|
"last": "Webber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proc. of TLT2005", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eleni Miltsakaki, Nikhil Dinesh, Rashmi Prasad, Aravind Joshi, and Bonnie Webber. 2005. Experiments on sense annotations and sense disambiguation of dis- course connectives. In Proc. of TLT2005.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Improving Implicit Discourse Relation Recognition Through Feature Set Optimization", |
|
"authors": [ |
|
{ |
|
"first": "Joonsuk", |
|
"middle": [], |
|
"last": "Park", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Cardie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. of SIGDIAL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "108--112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joonsuk Park and Claire Cardie. 2012. Improving Im- plicit Discourse Relation Recognition Through Fea- ture Set Optimization. In Proc. of SIGDIAL, pages 108-112, Seoul, South Korea, July.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Predicting the presence of discourse connectives", |
|
"authors": [ |
|
{ |
|
"first": "Gary", |
|
"middle": [], |
|
"last": "Patterson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Kehler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "914--923", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gary Patterson and Andrew Kehler. 2013. Predicting the presence of discourse connectives. In Proc. of EMNLP, pages 914-923.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Easily identifiable discourse relations", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Pitler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mridhula", |
|
"middle": [], |
|
"last": "Raghupathy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hena", |
|
"middle": [], |
|
"last": "Mehta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ani", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aravind K", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Pitler, Mridhula Raghupathy, Hena Mehta, Ani Nenkova, Alan Lee, and Aravind K Joshi. 2008. Eas- ily identifiable discourse relations. Technical Reports (CIS), page 884.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Automatic sense prediction for implicit discourse relations in text", |
|
"authors": [ |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Pitler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Annie", |
|
"middle": [], |
|
"last": "Louis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ani", |
|
"middle": [], |
|
"last": "Nenkova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of ACL-AFNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "683--691", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily Pitler, Annie Louis, and Ani Nenkova. 2009. Au- tomatic sense prediction for implicit discourse rela- tions in text. In Proc. of ACL-AFNLP, pages 683-691, August.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "The penn discourse treebank 2.0", |
|
"authors": [ |
|
{ |
|
"first": "Rashmi", |
|
"middle": [], |
|
"last": "Prasad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikhil", |
|
"middle": [], |
|
"last": "Dinesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eleni", |
|
"middle": [], |
|
"last": "Miltsakaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Livio", |
|
"middle": [], |
|
"last": "Robaldo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aravind", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Webber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rashmi Prasad, Nikhil Dinesh, Alan Lee, Eleni Milt- sakaki, Livio Robaldo, Aravind K Joshi, and Bonnie L Webber. 2008. The penn discourse treebank 2.0. In LREC. Citeseer.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Stochastic backpropagation and approximate inference in deep generative models", |
|
"authors": [ |
|
{ |
|
"first": "Danilo", |
|
"middle": [], |
|
"last": "Jimenez Rezende", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shakir", |
|
"middle": [], |
|
"last": "Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daan", |
|
"middle": [], |
|
"last": "Wierstra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proc. of ICML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1278--1286", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danilo Jimenez Rezende, Shakir Mohamed, and Daan Wierstra. 2014. Stochastic backpropagation and ap- proximate inference in deep generative models. In Proc. of ICML, pages 1278-1286.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Discovering implicit discourse relations through brown cluster pair representation and coreference patterns", |
|
"authors": [ |
|
{ |
|
"first": "Attapol", |
|
"middle": [], |
|
"last": "Rutherford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proc. of EACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "645--654", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Attapol Rutherford and Nianwen Xue. 2014. Discover- ing implicit discourse relations through brown cluster pair representation and coreference patterns. In Proc. of EACL, pages 645-654, April.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Improving the inference of implicit discourse relations via classifying explicit discourse connectives", |
|
"authors": [ |
|
{ |
|
"first": "Attapol", |
|
"middle": [], |
|
"last": "Rutherford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proc. of NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "799--808", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Attapol Rutherford and Nianwen Xue. 2015. Improv- ing the inference of implicit discourse relations via classifying explicit discourse connectives. In Proc. of NAACL-HLT, pages 799-808, May-June.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Evaluating discourse-based answer extraction for why-question answering", |
|
"authors": [ |
|
{ |
|
"first": "Suzan", |
|
"middle": [], |
|
"last": "Verberne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lou", |
|
"middle": [], |
|
"last": "Boves", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nelleke", |
|
"middle": [], |
|
"last": "Oostdijk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter-Arno", |
|
"middle": [], |
|
"last": "Coppen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of SIGIR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "735--736", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suzan Verberne, Lou Boves, Nelleke Oostdijk, and Peter- Arno Coppen. 2007. Evaluating discourse-based an- swer extraction for why-question answering. In Proc. of SIGIR, pages 735-736.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Implicit discourse relation recognition by selecting typical training examples", |
|
"authors": [ |
|
{ |
|
"first": "Xun", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sujian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenjie", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. of COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2757--2772", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xun Wang, Sujian Li, Jiwei Li, and Wenjie Li. 2012. Im- plicit discourse relation recognition by selecting typ- ical training examples. In Proc. of COLING, pages 2757-2772.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Dependency-based discourse parser for single-document summarization", |
|
"authors": [ |
|
{ |
|
"first": "Yasuhisa", |
|
"middle": [], |
|
"last": "Yoshida", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Suzuki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tsutomu", |
|
"middle": [], |
|
"last": "Hirao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masaaki", |
|
"middle": [], |
|
"last": "Nagata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proc. of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1834--1839", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yasuhisa Yoshida, Jun Suzuki, Tsutomu Hirao, and Masaaki Nagata. 2014. Dependency-based discourse parser for single-document summarization. In Proc. of EMNLP, pages 1834-1839, October.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Shallow convolutional neural network for implicit discourse relation recognition", |
|
"authors": [ |
|
{ |
|
"first": "Biao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinsong", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deyi", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaojie", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junfeng", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proc. of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Biao Zhang, Jinsong Su, Deyi Xiong, Yaojie Lu, Hong Duan, and Junfeng Yao. 2015. Shallow convolutional neural network for implicit discourse relation recogni- tion. In Proc. of EMNLP, September.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Predicting discourse connectives for implicit discourse relation recognition", |
|
"authors": [ |
|
{ |
|
"first": "Zhi-Min", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zheng-Yu", |
|
"middle": [], |
|
"last": "Niu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Man", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chew Lim", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proc. of COLING", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1507--1514", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhi-Min Zhou, Yu Xu, Zheng-Yu Niu, Man Lan, Jian Su, and Chew Lim Tan. 2010. Predicting discourse con- nectives for implicit discourse relation recognition. In Proc. of COLING, pages 1507-1514.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "Graphical illustration for VarNDRR. Solid lines denote the generative model p \u03b8 (x|z)p \u03b8 (y|z), dashed lines denote the variational approximation q \u03c6 (z|x, y) to the posterior p(z|x, y) and q \u03c6 (z|x) to the prior p(z) for inference. The variational parameters \u03c6 are learned jointly with the generative model parameters \u03b8.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"text": "6 http://nlp.stanford.edu/software/corenlp.shtml algorithm (Kingma and Ba, 2014) to update parameters. With respect to the hyperparameters M, L, A and the dimensionality of all vector representations, we set them according to previous work and preliminary experiments on the development set. Finally, we set M = 16, A = 1000,", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"text": "Illustration of the variational lower bound (blue color) on the training set and F-score (brown color) on the development set. Horizontal axis: the epoch numbers; Vertical axis: the F1 score for relation classification (left) and the estimated average variational lower bound per datapoint (right).", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"num": null, |
|
"text": "SVM 63.10 22.79 64.47 33.68 SCNN 60.42 22.00 67.76 33.22 VarNDRR 63.30 24.00 71.05 35.88 (a) COM vs Other", |
|
"type_str": "table", |
|
"content": "<table><tr><td>Model</td><td>Acc</td><td>P</td><td>R</td><td>F1</td><td>Model</td><td>Acc</td><td>P</td><td>R</td><td>F1</td></tr><tr><td colspan=\"2\">R & X (2015) J & E (2015) 70.27 -</td><td>--</td><td>--</td><td>41.00 35.93</td><td colspan=\"2\">(R & X (2015)) (J & E (2015)) 76.95 -</td><td>--</td><td>--</td><td>53.80 52.78</td></tr><tr><td/><td/><td/><td/><td/><td>SVM SCNN</td><td colspan=\"4\">62.62 39.14 72.40 50.82 63.00 39.80 75.29 52.04</td></tr><tr><td/><td/><td/><td/><td/><td>VarNDRR</td><td colspan=\"4\">53.82 35.39 88.53 50.56</td></tr><tr><td/><td/><td/><td/><td/><td/><td colspan=\"2\">(b) CON vs Other</td><td/><td/></tr><tr><td>Model</td><td>Acc</td><td>P</td><td>R</td><td>F1</td><td>Model</td><td>Acc</td><td>P</td><td>R</td><td>F1</td></tr><tr><td colspan=\"2\">(R & X (2015)) (J & E (2015)) 69.80 -</td><td>--</td><td>--</td><td>69.40 80.02</td><td colspan=\"2\">(R & X (2015)) (J & E (2015)) 87.11 -</td><td>--</td><td>--</td><td>33.30 27.63</td></tr><tr><td>SVM SCNN</td><td colspan=\"4\">60.71 65.89 58.89 62.19 63.00 56.29 91.11 69.59</td><td>SVM SCNN</td><td colspan=\"4\">66.25 15.10 68.24 24.73 76.95 20.22 62.35 30.54</td></tr><tr><td>VarNDRR</td><td colspan=\"4\">57.36 56.46 97.39 71.48</td><td>VarNDRR</td><td colspan=\"4\">62.14 17.40 97.65 29.54</td></tr><tr><td/><td colspan=\"2\">(c) EXP vs Other</td><td/><td/><td/><td colspan=\"2\">(d) TEM vs Other</td><td/><td/></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"2\">2010; Lan et</td></tr><tr><td/><td/><td/><td/><td/><td>4 Source</td><td>code</td><td>is</td><td>available</td><td>at</td></tr><tr><td/><td/><td/><td/><td/><td colspan=\"4\">https://github.com/DeepLearnXMU/VarNDRR.</td><td/></tr></table>" |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"num": null, |
|
"text": "Classification results of different models on the implicit DRR task. Acc=Accuracy, P=Precision, R=Recall, and F1=F1 score.", |
|
"type_str": "table", |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |