|
{ |
|
"paper_id": "S18-1027", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:43:22.655561Z" |
|
}, |
|
"title": "uOttawa at SemEval-2018 Task 1: Self-Attentive Hybrid GRU-Based Network", |
|
"authors": [ |
|
{ |
|
"first": "Ahmed", |
|
"middle": [ |
|
"Husseini" |
|
], |
|
"last": "Orabi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Ottawa", |
|
"location": { |
|
"addrLine": "800 King Edward Avenue", |
|
"settlement": "Ottawa", |
|
"country": "Canada" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Mahmoud", |
|
"middle": [ |
|
"Husseini" |
|
], |
|
"last": "Orabi", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Diana", |
|
"middle": [], |
|
"last": "Inkpen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Ottawa", |
|
"location": { |
|
"addrLine": "800 King Edward Avenue", |
|
"settlement": "Ottawa", |
|
"country": "Canada" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Van Bruwaene", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Ottawa", |
|
"location": { |
|
"addrLine": "800 King Edward Avenue", |
|
"settlement": "Ottawa", |
|
"country": "Canada" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We propose a novel attentive hybrid GRUbased network (SAHGN), which we used at SemEval-2018 Task 1: Affect in Tweets. Our network has two main characteristics, 1) has the ability to internally optimize its feature representation using attention mechanisms, and 2) provides a hybrid representation using a character-level Convolutional Neural Network (CNN), as well as a self-attentive word-level encoder. The key advantage of our model is its ability to signify the relevant and important information that enables self-optimization. Results are reported on the valence intensity regression task.", |
|
"pdf_parse": { |
|
"paper_id": "S18-1027", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We propose a novel attentive hybrid GRUbased network (SAHGN), which we used at SemEval-2018 Task 1: Affect in Tweets. Our network has two main characteristics, 1) has the ability to internally optimize its feature representation using attention mechanisms, and 2) provides a hybrid representation using a character-level Convolutional Neural Network (CNN), as well as a self-attentive word-level encoder. The key advantage of our model is its ability to signify the relevant and important information that enables self-optimization. Results are reported on the valence intensity regression task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Affect analysis is one of the main topics of natural language processing (NLP). It involves many sub-tasks such as sentiment and valence analyses expressed in text. We focus on the task of determining valence intensity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Hand-crafted features and/or sentiment lexicons are commonly used for affect analysis (Mohammad, Kiritchenko, & Zhu, 2013; Taboada, Brooke, Tofiloski, Voll, & Stede, 2011) with classifiers such as random forest and support vector machines (SVM).", |
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 122, |
|
"text": "(Mohammad, Kiritchenko, & Zhu, 2013;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 123, |
|
"end": 171, |
|
"text": "Taboada, Brooke, Tofiloski, Voll, & Stede, 2011)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Affect in tweets (AIT) is a challenging task as it requires handling an informal writing style, which typically has many grammar mistakes, slangs, and misspellings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we present a self-attentive hybrid GRU-based network (SAHGN) that competed at SemEval-2018 Task 1 (Mohammad, Bravo-Marquez, Salameh, & Kiritchenko, 2018; .", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 168, |
|
"text": "(Mohammad, Bravo-Marquez, Salameh, & Kiritchenko, 2018;", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our contributions can be summarized as below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 The implementation of a social media text processor: A library to help process social media text such as short-forms, emoticons, emojis, misspellings, hash tags, and slangs, as well as tokenization, word normalization, and sentence encoding. \u2022 The implementation of a self-attentive deep learning system: This system can predict valence and intensity with limited corpora and vocabulary, and yet can have acceptable performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our goal is to provide a system that can predict valence and intensity for short text. Figure 1 shows a high-level description of our solution, which consists of two main components, social media text processor (Section 3) and self-attentive hybrid GRU-based network (Section 4.2). ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 87, |
|
"end": 95, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "High-Level Description of Our System", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The social media text processor aims to provide a reliable and fast tokenization. It involves the following preprocessing steps:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Social Media Text Processor", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Use a named entity recognizer (NER) (Finkel, Grenager, & Manning, 2005) to identify entities such as persons, names, and places, and then replace them accordingly. \u2022 Build a vocabulary using an NGram tokenizer.", |
|
"cite_spans": [ |
|
{ |
|
"start": 38, |
|
"end": 73, |
|
"text": "(Finkel, Grenager, & Manning, 2005)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Social Media Text Processor", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Tokenize sentences into a set of tokens, and then use them to encode text into a sequence of indices ( ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Social Media Text Processor", |
|
"sec_num": "3" |
|
}, |
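
{

"text": "The paper does not release this processor, but as a rough illustration of the replacement steps above, the following minimal Python sketch (hypothetical regular expressions; tag names taken from Table 1) rewrites user mentions, hashtags, and one sample emoticon:\n\nimport re\n\ndef preprocess(text):\n    # Hypothetical sketch of the normalization above; the actual processor\n    # also handles NER, emojis, misspellings, and slang.\n    text = re.sub(r':\\)', ' <happy> ', text)        # emoticon -> tag\n    text = re.sub(r'@\\w+', ' <reference> ', text)   # user mention -> tag\n    text = re.sub(r'#(\\w+)', r' <hashtag_start> \\1 <hashtag_end> ', text)\n    text = re.sub(r'\\s+', ' ', text).strip()\n    return '<SOS> ' + text + ' <EOS>'\n\nprint(preprocess('@name I am feeling sick today #sick :)'))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Social Media Text Processor",

"sec_num": "3"

},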
|
{ |
|
"text": "The overall architecture of our SAHGN model is shown in Figure 2 . The main components include 1) a word sequence encoder, 2) a bidirectional GRU-based layer that applies a self-attentive mechanism on the word level, 3) a character-level CNN feature extractor, and 4) an attention with context-aware mechanism.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 56, |
|
"end": 64, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model Description", |
|
"sec_num": "4" |
|
}, |
|
{

"text": "A network input is described as a sequence of tokens (such as words), $S = [x_1, x_2, \\ldots, x_T]$, where $t$ denotes the timestep. Each $x_t$ is a one-hot input vector, and the sequence has a fixed length of $T$ tokens. A sequence that exceeds this length is truncated.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word Sequence Encoder",

"sec_num": "4.1"

},
|
{

"text": "Word encoding. We use a word vocabulary $V$ to encode a sequence. $V$ has fixed terms to mark the start and end of the sequence, as well as out-of-vocabulary (OOV) words. We handle the variable length through padding for short sequences and truncating for long sequences.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word Sequence Encoder",

"sec_num": "4.1"

},
|
{ |
|
"text": "Embedding layer. We apply a pretrained GloVe word embedding (Pennington, Socher, & Manning, 2014) on . GloVe projects these words into a lowdimensional vector representation ( ), where \u2208 and is the word weight embedding matrix. is used to initialize the word embedding layer. We used the official training and development corpora to train the GloVe word embedding with a dimension of 100. The vocabulary size of this model is 8145 words, which is small and poses a major challenge to training, as well as to performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 60, |
|
"end": 97, |
|
"text": "(Pennington, Socher, & Manning, 2014)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Sequence Encoder", |
|
"sec_num": "4.1" |
|
}, |
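
{

"text": "The paper does not name its deep learning framework. Assuming Keras, a minimal sketch of initializing the word embedding layer from the pretrained GloVe matrix described above could look as follows (E and max_len are hypothetical variables holding the 100-dimensional GloVe weight matrix and the fixed sequence length):\n\nfrom tensorflow.keras.layers import Embedding\n\n# E: (vocab_size, 100) GloVe matrix trained on the task corpora (assumed).\nembedding = Embedding(input_dim=E.shape[0], output_dim=E.shape[1],\n                      weights=[E], input_length=max_len)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word Sequence Encoder",

"sec_num": "4.1"

},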
|
{ |
|
"text": "Recurrent neural network (RNN) is commonly used for NLP problems (Yin, Kann, Yu, & Sch\u00fctze, 2017; Young, Hazarika, Poria, & Cambria, 2017) , as it enables remembering values over arbitrary time durations. RNN processes every element of an input embedding ( ) sequentially, such that \u210e = tanh ( + \u210e \u22121 ). is the weight matrix between an input and hidden states, while \u210e is the hidden state of the recurrent connection at timestep ( ). The design of the RNN enables variable length processing while preserving the sequence order. However, RNN has many limitations with long sequences, in particular the exponentially growing or decaying gradients. A common way to resolve these issues is by using gating mechanisms, such as LSTM and GRU (Gers, Schmidhuber, & Cummins, 2000; Hochreiter & Schmidhuber, 1997) . We use GRU as it is faster to converge, in addition to being memory efficient.", |
|
"cite_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 97, |
|
"text": "(Yin, Kann, Yu, & Sch\u00fctze, 2017;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 98, |
|
"end": 138, |
|
"text": "Young, Hazarika, Poria, & Cambria, 2017)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 735, |
|
"end": 771, |
|
"text": "(Gers, Schmidhuber, & Cummins, 2000;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 772, |
|
"end": 803, |
|
"text": "Hochreiter & Schmidhuber, 1997)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attentive GRU-based Mechanism", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Bidirectional GRU layer. In our model, we use bidirectional GRU layers. GRU receives a sequence of tokens as inputs, and then projects word information = (\u210e 1 , \u210e 2 , \u2026 . , \u210e ), where \u210e denotes the hidden state of GRU at a timestep ( ). It captures the temporal and abstract information of sequences in a forward (\u210e ) or reverse (\u210e ) manner. After that, we concatenate forward and backward representations; e.g. \u210e = \u210e || \u210e .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attentive GRU-based Mechanism", |
|
"sec_num": "4.2" |
|
}, |
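
{

"text": "As a minimal sketch (again assuming Keras; embedded_words is a hypothetical tensor of shape (batch, T, d) coming from the embedding layer, and the dropout values are those listed in Section 4.5), the bidirectional GRU described above could be written as:\n\nfrom tensorflow.keras.layers import GRU, Bidirectional\n\n# Forward and backward hidden states (150 units each) are concatenated per\n# timestep, giving the 300-dimensional h_t used in the text.\nencoder = Bidirectional(GRU(150, return_sequences=True,\n                            dropout=0.4, recurrent_dropout=0.2))\nword_states = encoder(embedded_words)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Self-attentive GRU-based Mechanism",

"sec_num": "4.2"

},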
|
{ |
|
"text": "Attention mechanism. Words do not have equal valence weights in sentences. Towards that, we use an attention mechanism to signify the relatively important words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attentive GRU-based Mechanism", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Attention is used to compute the compatibility between a given source ( ) and query ( ). It uses an alignment function ( , ) to measure the level of dependency of to . This function produces an attention weight = ( , ) =1 . Then, a softmax function is applied to produce a probability distribution ( | , ) for each word ( ) of an input ( ). Hence, a bigger weight of indicates a higher importance than other words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attentive GRU-based Mechanism", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The attention alignment approaches have the same implementation, but they mainly differ on how they compute weights. This can be either in an additive manner ( , ) = tanh ( ( + )) (Bahdanau, Cho, & Bengio, 2014) , or a multiplicative manner ( , ) = tanh (\ufffd . \ufffd) (Vaswani et al., 2017) . In our model training, we use an additive attention mechanism, as it helped improve the prediction performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 180, |
|
"end": 211, |
|
"text": "(Bahdanau, Cho, & Bengio, 2014)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 262, |
|
"end": 284, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attentive GRU-based Mechanism", |
|
"sec_num": "4.2" |
|
}, |
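
{

"text": "A minimal NumPy sketch of the additive scoring above (S holds the source states, q the query; the learned matrix W and the context vector v that collapses each score vector to a scalar are assumptions, as is standard in additive attention):\n\nimport numpy as np\n\ndef additive_attention(S, q, W, v):\n    # f(s_i, q) = tanh(W (s_i + q)), reduced to a scalar by v, then softmax.\n    scores = np.tanh((S + q) @ W) @ v\n    weights = np.exp(scores - scores.max())\n    return weights / weights.sum()   # a_i, a distribution over the T tokens\n\nS, q = np.random.randn(5, 8), np.random.randn(8)   # T=5 tokens, 8-dim states\nW, v = np.random.randn(8, 8), np.random.randn(8)\nprint(additive_attention(S, q, W, v))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Self-attentive GRU-based Mechanism",

"sec_num": "4.2"

},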
|
{ |
|
"text": "Self-Attention mechanism. In our model training, we have a small number of corpora, which are not sufficient to train an efficient word embedding or alleviate well-known problems such as polysemy. In an effort to overcome such limitations, we use a self-attention mechanism. This approach measures the dependency of different tokens in the same input embedding ( ). It mainly computes attention for each word by replacing and with a set of token pairs ( , ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Self-attentive GRU-based Mechanism", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The CNN encoding layer (Figure 3) takes an input of a sequence ( ) of characters, where = [ 1 , 2 , \u2026 . , ] such that denotes the timestep. is a one-hot input ( ) vector of a fixed length ( ) of characters. CNN usually uses temporal convolutions (timestep-based) rather than spatial convolutions with text analysis.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 23, |
|
"end": 33, |
|
"text": "(Figure 3)", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Character-level CNN", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We mainly use convolutions to extract low-level character information such as misspellings, slangs, and so on.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Character-level CNN", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Character encoding. We define a charset of the size 95, including the upper and lower cases of the English alphabet, special characters, padding, and the start and end of a given input sequence. We need this charset to build a vocabulary, which is used to encode a character sequence. Similarly to the word embedding, we handle the variable length through padding and truncating (Section 4.1).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Character-level CNN", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Character embedding layer. We build a character embedding of 32 dimensions. We use a uniform distribution scheme of a range (-0.5 to +0.5) to initialize its weight matrix.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Character-level CNN", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We apply 3 convolutions of 100 features, as well as different filter lengths 2, 3, and 4. Each one-dimensional operation is used, where = 1 ( ), and is the filter length. After that, a max-pooling layer is applied on the feature map to extract abstract information, \ufffd = max( ). Then, we concatenate these feature representations into one output.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Character-level CNN", |
|
"sec_num": "4.3" |
|
}, |
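
{

"text": "A minimal Keras-style sketch of this character-level feature extractor (char_embeddings is a hypothetical tensor of character embeddings; the ReLU activation is an assumption, as the paper does not state one):\n\nfrom tensorflow.keras.layers import Conv1D, GlobalMaxPooling1D, Concatenate\n\n# Three temporal convolutions (100 filters each, widths 2, 3, and 4), each\n# followed by max-pooling over time, then concatenated into one vector.\npooled = []\nfor k in (2, 3, 4):\n    f = Conv1D(filters=100, kernel_size=k, activation='relu')(char_embeddings)\n    pooled.append(GlobalMaxPooling1D()(f))\nchar_features = Concatenate()(pooled)   # shape (batch, 300)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Character-level CNN",

"sec_num": "4.3"

},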
|
{ |
|
"text": "As opposed to recurrent layers (Section 4.2), convolutional operations with max-pooling are helpful to extract word features without paying attention to their sequence order (Kalchbrenner, Grefenstette, & Blunsom, 2014) . These features are combined with recurrent features to improve the performance of our model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 174, |
|
"end": 219, |
|
"text": "(Kalchbrenner, Grefenstette, & Blunsom, 2014)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Character-level CNN", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Output vectors received from previous steps are concatenated, and then fed into an attention with context.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention with Context", |
|
"sec_num": "4.4" |
|
}, |
|
{

"text": "We use a context-aware attention mechanism (Yang et al., 2016) to compute a fixed representation $v = \\sum_{t=1}^{T} a_t h_t$ of a sequence as the weighted sum of all tokens in that sequence. This representation is used as a classification feature vector to be fed to the final fully-connected sigmoid layer. This layer outputs a continuous value representing the valence of a given sentence.",

"cite_spans": [

{

"start": 43,

"end": 62,

"text": "(Yang et al., 2016)",

"ref_id": "BIBREF17"

}

],

"ref_spans": [],

"eq_spans": [],

"section": "Attention with Context",

"sec_num": "4.4"

},
|
{ |
|
"text": "In our training, we use mini batch stochastic gradient of the size 32, to minimize the mean-squared error using back-propagation. We use Adam optimizer with a learning rate of 0.001 (Kingma & Ba, 2014) . For training, we use 80% of the training set and 20% for validation. We test and report our results on both development and test sets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 182, |
|
"end": 201, |
|
"text": "(Kingma & Ba, 2014)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.5" |
|
}, |
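
{

"text": "Assuming Keras, the optimization setup described above might be sketched as follows (model, x_train, and y_train are hypothetical, and the number of epochs is not stated in the paper):\n\nfrom tensorflow.keras.optimizers import Adam\n\n# Mean-squared error minimized with Adam (learning rate 0.001), mini-batches\n# of 32, and 20% of the training data held out for validation.\nmodel.compile(optimizer=Adam(learning_rate=0.001), loss='mean_squared_error')\nmodel.fit(x_train, y_train, batch_size=32, epochs=10, validation_split=0.2)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training",

"sec_num": "4.5"

},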
|
{ |
|
"text": "Regularization. We use dropout to randomly drop neurons off the network, which helps preventing co-adaptation of neurons (Srivastava, Hinton, Krizhevsky, Sutskever, & Salakhutdinov, 2014) . Dropout is also applied on the recurrent connection of our GRU-based layers. Additionally, we apply a weight decay approach through setting an L2 regularization penalty (Cortes, Mohri, & Rostamizadeh, 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 187, |
|
"text": "(Srivastava, Hinton, Krizhevsky, Sutskever, & Salakhutdinov, 2014)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 359, |
|
"end": 396, |
|
"text": "(Cortes, Mohri, & Rostamizadeh, 2012)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "Hyperparameters. The size of the embedding layer is 200, and of the GRU layers is 150, which becomes 300 for bidirectional GRU. We apply a dropout of 0.4, and a dropout of 0.2 on the recurrent connections. Finally, an L2 regularization of 0.00001 is applied at the loss function.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "We report our results using the Pearson correlation between the prediction and gold rating sets on the test set (all instances). The other one (gold in 0.5-1 shown in Table 2 ) differs in including tweets only with intensity greater than or equal to 0.5.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 167, |
|
"end": 174, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Our model performed well on the development set scoring 0.869, while on the testing set, the performance degraded to 0.752. This degradation could be related to the size of the corpus we used to train our word embedding. We also trained only on 80% of the training set. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this paper, we presented a self-attentive hybrid GRU-based network for predicting valence intensity for short text. We used a hybrid approach combining low-character-level features with self-attentive word embedding. Our network uses two different attention mechanisms to signify the relevant and important words, and hence optimize feature representation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "With limited corpora and vocabulary of the size 8152, our model still managed to achieve an optimized feature representation, which achieved excellent results on the development set. However, our model failed to maintain the same performance on the testing set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "For future work, we will explore the performance of our model with larger corpora against the testing set. It would also be interesting to see if the model performs well on other long-text NLP tasks such as topic classification.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This research is funded by Natural Sciences and Engineering Research Council of Canada (NSERC), Ontario Centres of Excellence (OCE) and VISR.co.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Neural Machine Translation by Jointly Learning to Align and Translate", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bahdanau, D., Cho, K., & Bengio, Y. (2014). Neural Machine Translation by Jointly Learning to Align and Translate. Retrieved from http://arxiv.org/abs/1409.0473", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "L2 Regularization for Learning Kernels", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "L2 Regularization for Learning Kernels. Retrieved from http://arxiv.org/abs/1205.2653", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Incorporating non-local information into information extraction systems by Gibbs sampling", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Finkel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Grenager", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 43rd Annual Meeting on Association for Computational Linguistics -ACL '05", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "363--370", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1219840.1219885" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Finkel, J. R., Grenager, T., & Manning, C. (2005). Incorporating non-local information into information extraction systems by Gibbs sampling. In Proceedings of the 43rd Annual Meeting on Association for Computational Linguistics -ACL '05 (pp. 363-370). Morristown, NJ, USA: Association for Computational Linguistics. https://doi.org/10.3115/1219840.1219885", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Learning to Forget: Continual Prediction with LSTM", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Gers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Cummins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Neural Computation", |
|
"volume": "12", |
|
"issue": "10", |
|
"pages": "2451--2471", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/089976600300015015" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gers, F. A., Schmidhuber, J., & Cummins, F. (2000). Learning to Forget: Continual Prediction with LSTM. Neural Computation, 12(10), 2451-2471. https://doi.org/10.1162/089976600300015015", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Long Short-Term Memory", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural Computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hochreiter, S., & Schmidhuber, J. (1997). Long Short-Term Memory. Neural Computation, 9(8), 1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A Convolutional Neural Network for Modelling Sentences", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Kalchbrenner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Grefenstette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "655--665", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/P14-1062" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kalchbrenner, N., Grefenstette, E., & Blunsom, P. (2014). A Convolutional Neural Network for Modelling Sentences. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers) (pp. 655-665). Stroudsburg, PA, USA: Association for Computational Linguistics. https://doi.org/10.3115/v1/P14-1062", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Adam: A Method for Stochastic Optimization", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kingma, D. P., & Ba, J. (2014). Adam: A Method for Stochastic Optimization. Retrieved from http://arxiv.org/abs/1412.6980", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Semeval-2018 Task 1: Affect in tweets", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Bravo-Marquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Salameh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Workshop on Semantic Evaluation (SemEval-2018)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad, S. M., Bravo-Marquez, F., Salameh, M., & Kiritchenko, S. (2018). Semeval-2018 Task 1: Affect in tweets. In International Workshop on Semantic Evaluation (SemEval-2018), New Orleans, LA, USA.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Understanding Emotions: A Dataset of Tweets to Study Interactions between Affect Categories", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 11th Edition of the Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad, S. M., & Kiritchenko, S. (2018). Understanding Emotions: A Dataset of Tweets to Study Interactions between Affect Categories. In Proceedings of the 11th Edition of the Language Resources and Evaluation Conference (LREC-2018).", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "NRC-Canada: Building the State-of-the-Art in Sentiment Analysis of Tweets", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad, S. M., Kiritchenko, S., & Zhu, X. (2013). NRC-Canada: Building the State-of-the-Art in Sentiment Analysis of Tweets. Retrieved from http://arxiv.org/abs/1308.6242", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Glove: Global Vectors for Word Representation", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/D14-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pennington, J., Socher, R., & Manning, C. (2014). Glove: Global Vectors for Word Representation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP) (pp. 1532-1543). Stroudsburg, PA, USA: Association for Computational Linguistics. https://doi.org/10.3115/v1/D14-1162", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Dropout: a simple way to prevent neural networks from overfitting", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Krizhevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "The Journal of Machine Learning Research", |
|
"volume": "15", |
|
"issue": "1", |
|
"pages": "1929--1958", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Srivastava, N., Hinton, G., Krizhevsky, A., Sutskever, I., & Salakhutdinov, R. (2014). Dropout: a simple way to prevent neural networks from overfitting. The Journal of Machine Learning Research, 15(1), 1929- 1958.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Lexicon-Based Methods for Sentiment Analysis", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Taboada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Brooke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Tofiloski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Voll", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Stede", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Computational Linguistics", |
|
"volume": "37", |
|
"issue": "2", |
|
"pages": "267--307", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Taboada, M., Brooke, J., Tofiloski, M., Voll, K., & Stede, M. (2011). Lexicon-Based Methods for Sentiment Analysis. Computational Linguistics, 37(2), 267-307.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Attention is All you Need", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems 30 (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., \u2026 Polosukhin, I. (2017). Attention is All you Need. In Advances in Neural Information Processing Systems 30 (NIPS).", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Hierarchical Attention Networks for Document Classification", |
|
"authors": [ |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Smola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1480--1489", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang, Z., Yang, D., Dyer, C., He, X., Smola, A., & Hovy, E. (2016). Hierarchical Attention Networks for Document Classification. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (pp. 1480-1489).", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Comparative Study of CNN and RNN for Natural Language Processing", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Comparative Study of CNN and RNN for Natural Language Processing. Retrieved from http://arxiv.org/abs/1702.01923", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Recent Trends in Deep Learning Based Natural Language Processing", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Hazarika", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Young, T., Hazarika, D., Poria, S., & Cambria, E. (2017). Recent Trends in Deep Learning Based Natural Language Processing. Retrieved from http://arxiv.org/abs/1708.02709", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "System architecture.", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "The architecture of Self-Attentive Hybrid GRU-Based Network.", |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Character-level CNN.", |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"content": "<table><tr><td/><td>), which are fed into the net-</td></tr><tr><td>work.</td><td/></tr><tr><td colspan=\"2\">\u2022 Clean text from accents, punctuations, and</td></tr><tr><td colspan=\"2\">non-Latin characters.</td></tr><tr><td colspan=\"2\">\u2022 Identify emoticons and emojis, and then re-</td></tr><tr><td colspan=\"2\">place them with meaningful text; e.g., replace</td></tr><tr><td colspan=\"2\">the happy face emoticon :) with <happy>.</td></tr><tr><td colspan=\"2\">\u2022 Recognize hashtags, URLs, and then briefly</td></tr><tr><td colspan=\"2\">describe them; e.g. replace #depressed by</td></tr><tr><td colspan=\"2\"><hashtag_start>depressed<hashtag_end>.</td></tr><tr><td colspan=\"2\">\u2022 Identify user reference mentions, and then re-</td></tr><tr><td colspan=\"2\">place them with a person entity; e.g. <person>.</td></tr><tr><td>Text</td><td>@name I am feeling under the weather af-</td></tr><tr><td/><td>ter I met with Carl :'( #sick \\\\u0001F600</td></tr><tr><td>Pro-</td><td><SOS> <reference> I am feeling under the</td></tr><tr><td>cessed</td><td>weather after I met with <person> <cry-</td></tr><tr><td/><td>ing> <hashtag_start> sick <hashtag_end></td></tr><tr><td/><td><grinning_face> <EOS></td></tr><tr><td colspan=\"2\">Table 1: Example of processed text.</td></tr></table>", |
|
"type_str": "table", |
|
"text": "", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF1": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Results of valence intensity regression (English).", |
|
"num": null, |
|
"html": null |
|
} |
|
} |
|
} |
|
} |