|
{ |
|
"paper_id": "S19-2006", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:47:43.345411Z" |
|
}, |
|
"title": "ANA at SemEval-2019 Task 3: Contextual Emotion detection in Conversations through hierarchical LSTMs and BERT", |
|
"authors": [ |
|
{ |
|
"first": "Chenyang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Alberta", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Amine", |
|
"middle": [], |
|
"last": "Trabelsi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Alberta", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Osmar", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Za\u00efane", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Alberta", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Elmo", |
|
"middle": [], |
|
"last": "Glove", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Utterance Encoder Utterance Encoder Utterance Encoder", |
|
"institution": "", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Deepmoji", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Utterance Encoder Utterance Encoder Utterance Encoder", |
|
"institution": "", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper describes the system submitted by ANA Team for the SemEval-2019 Task 3: EmoContext. We propose a novel Hierarchical LSTMs for Contextual Emotion Detection (HRLCE) model. It classifies the emotion of an utterance given its conversational context. The results show that, in this task, our HRCLE outperforms the most recent state-ofthe-art text classification framework: BERT. We combine the results generated by BERT and HRCLE to achieve an overall score of 0.7709 which ranked 5th on the final leader board of the competition among 165 Teams.", |
|
"pdf_parse": { |
|
"paper_id": "S19-2006", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper describes the system submitted by ANA Team for the SemEval-2019 Task 3: EmoContext. We propose a novel Hierarchical LSTMs for Contextual Emotion Detection (HRLCE) model. It classifies the emotion of an utterance given its conversational context. The results show that, in this task, our HRCLE outperforms the most recent state-ofthe-art text classification framework: BERT. We combine the results generated by BERT and HRCLE to achieve an overall score of 0.7709 which ranked 5th on the final leader board of the competition among 165 Teams.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Social media has been a fertile environment for the expression of opinion and emotions via text. The manifestation of this expression differs from traditional or conventional opinion communication in text (e.g., essays). It is usually short (e.g. Twitter), containing new forms of constructs, including emojis, hashtags or slang words, etc. This constitutes a new challenge for the NLP community. Most of the studies in the literature focused on the detection of sentiments (i.e. positive, negative or neutral) (Mohammad and Turney, 2013) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 511, |
|
"end": 538, |
|
"text": "(Mohammad and Turney, 2013)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Recently, emotion classification from social media text started receiving more attention (Yaddolahi et al., 2017; Mohammad et al., 2018) . Emotions have been extensively studied in psychology (Ekman, 1992; Plutchik, 2001) . Their automatic detection may reveal important information in social online environments, like online customer service. In such cases, a user is conversing with an automatic chatbot. Empowering the chatbot with the ability to detect the user's emotion is a step forward towards the construction of an emotionally intelligence agent. Giving the detected emotion, an emotionally intelligent agent would generate an empathetic response. Although its potential convenience, detecting emotion in textual conversation has seen limited attention so far. One of the main challenges is that one users utterance may be insufficient to recognize the emotion (Huang et al., 2018) . The need to consider the context of the conversion is essential in this case, even for human, specifically given the lack of voice modulation and facial expressions. The usage of figurative language, like sarcasm, and the class size's imbalance adds up to this problematic (Chatterjee et al., 2019a ). In this paper, we describe our model, which was proposed for the SemEval 2019-Task 3 competition: Contextual Emotion Detection in Text (Emo-Context). The competition consists in classifying the emotion of an utterance given its conversational context. More formally, given a textual user utterance along with 2 turns of context in a conversation, the task is to classify the emotion of user utterance as Happy, Sad, Angry or Others (Chatterjee et al., 2019b) . The conversations are extracted from Twitter.", |
|
"cite_spans": [ |
|
{ |
|
"start": 89, |
|
"end": 113, |
|
"text": "(Yaddolahi et al., 2017;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 114, |
|
"end": 136, |
|
"text": "Mohammad et al., 2018)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 192, |
|
"end": 205, |
|
"text": "(Ekman, 1992;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 206, |
|
"end": 221, |
|
"text": "Plutchik, 2001)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 871, |
|
"end": 891, |
|
"text": "(Huang et al., 2018)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1167, |
|
"end": 1192, |
|
"text": "(Chatterjee et al., 2019a", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1628, |
|
"end": 1654, |
|
"text": "(Chatterjee et al., 2019b)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We propose an ensemble approach composed of two deep learning models, the Hierarchical LSTMs for Contextual Emotion Detection (HRLCE) model and the BERT model (Devlin et al., 2018) . The BERT is a pre-trained language model that has shown great success in many NLP classification tasks. Our main contribution consists in devising the HRLCE model. Figure 1 illustrates the main components of the HRLCE model. We examine a transfer learning approach with several pre-trained models in order to encode each user utterance semantically and emotionally at the word-level. The proposed model uses Hierarchical LSTMs (Sordoni et al., 2015) followed by a multi-head self attention mechanism (Vaswani et al., 2017 ) for a contextual encoding at the utterances level.", |
|
"cite_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 180, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 610, |
|
"end": 632, |
|
"text": "(Sordoni et al., 2015)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 683, |
|
"end": 704, |
|
"text": "(Vaswani et al., 2017", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 347, |
|
"end": 355, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The model evaluation on the competition's test set resulted in a 0.7709 harmonic mean of the macro-F1 scores across the categories Happy, Angry, and Sad. This result ranked 5th in the final leader board of the competition among 142 teams with a score above the organizers' baseline.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We use different kinds of embeddings that have been deemed effective in the literature in capturing not only the syntactic or semantic information of the words, but also their emotional content. We breifly describe them in this section.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Embeddings for semantics and emotion", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "GloVe, (Pennington et al., 2014) is a widely used pre-trained vector representation that captures fine-grained syntactic and semantic regularities. It has shown great success in word similarity tasks and Named Entity Recognition benchmarks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 32, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Embeddings for semantics and emotion", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "ELMo, or Embeddings from Language Models, (Peters et al., 2018) are deep contextualized word representations. These representations enclose a polysemy encoding, i.e., they capture the variation in the meaning of a word depending on its context. The representations are learned functions of the input, pre-trained with deep bi-directional LSTM model. It has been shown to work well in practice on multiple language understanding tasks like question answering, entailment and sentiment analysis. In this work, our objective is to detect emotion accurately giving the context. Hence, employing such contextual embedding can be crucial.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Embeddings for semantics and emotion", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "DeepMoji (Felbo et al., 2017 ) is a pre-trained model containing rich representations of emotional content. It has been pre-trained on the task of predicting the emoji contained in the text using Bi-directional LSTM layers combined with an attention layer. A distant supervision approach was deployed to collect a massive (1.2 billion Tweets) dataset with diverse set of noisy emoji labels on which DeepMoji is pre-trained. This led to stateof-the art performance when fine-tuning Deep-Moji on a range of target tasks related to sentiment, emotion and sarcasm.", |
|
"cite_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 28, |
|
"text": "(Felbo et al., 2017", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Embeddings for semantics and emotion", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "One of the building component of our proposed model (see Figure 1) is the Hierarchical or Context recurrent encoder-decoder (HRED) (Sordoni et al., 2015) . HRED architecture is used for encoding dialogue context in the task of multi-turn dialogue generation task (Serban et al., 2016) . It has been proven to be effective in capturing the context information of dialogue exchanges. It contains two types of recurrent neural net (RNN) units: encoder RNN which maps each utterance to an utterance vector; context RNN which further processes the utterance vectors. HRED is expected to produce a better representation of the context in dialogues because the context RNN allows the model to represent the information exchanges between the two speakers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 131, |
|
"end": 153, |
|
"text": "(Sordoni et al., 2015)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 263, |
|
"end": 284, |
|
"text": "(Serban et al., 2016)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 66, |
|
"text": "Figure 1)", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hierarchical RNN for context", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "BERT, the Bidirectional Encoder Representations for Transformers, (Devlin et al., 2018 ) is a pretrained model producing context representations that can be very convenient and effective. BERT representations can be fine-tuned to many downstream NLP tasks by adding just one additional output layer for the target task, eliminating the need for engineering a specific architecture for a task. Using this setting, it has advanced the stateof-the-art performances in 11 NLP tasks. Using BERT in this work has slightly improved the final result, when we combine it with our HRLCE in an ensemble setting.", |
|
"cite_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 86, |
|
"text": "(Devlin et al., 2018", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BERT", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Importance Weighting (Sugiyama and Kawanabe, 2012) is used when label distributions between the training and test sets are generally different, which is the case of the competition datasets (Table 2) . It corresponds to weighting the samples according to their importance when calculating the loss.", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 50, |
|
"text": "(Sugiyama and Kawanabe, 2012)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 190, |
|
"end": 199, |
|
"text": "(Table 2)", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Importance Weighting", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "A supervised deep learning model can be regarded as a parameterized function f (x; \u03b8). The backpropagation learning algorithm through a differentiable loss is a method of empirical risk minimization (ERM). Denote (", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Importance Weighting", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "x tr i , y tr i ), i \u2208 [1 . . . n tr ]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Importance Weighting", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "are pairs of training samples, testing samples are", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Importance Weighting", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "(x te , y te ), i \u2208 [1 . . . n te ].", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Importance Weighting", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "The ratio P (x) te /P (x) tr is referred as the importance of a sample x. When the label distribution of training data and testing data are different: P (x te ) = P (x tr ), the training of the model f \u03b8 is then called under covariate shift. In such situation, the parameter\u03b8 should be estimated through importance-weighted ERM:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Importance Weighting", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "arg min \u03b8 1 n tr ntr i=1 P (x te ) P (x tr ) loss(y tr i , f (x tr i ; \u03b8) .", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Importance Weighting", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "3 Models", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Importance Weighting", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Denote the input x = [u 1 , u 2 , u 3 ],", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Importance Weighting", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "where u i is the ith penultimate utterance in the dialogue. y is the emotion expressed in u 3 while giving u 1 and u 2 as context.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Importance Weighting", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "To justify the effectiveness of the modules in HRLCE, we propose two baseline models: SA-LSTM (SL) and SA-LSTM-DeepMoji (SLD). The SL model is part of the SLD model, while the later one composes the utterance encoder of our HRLCE. Therefore, we illustrate the models consecutively in Sections 3.1, 3.2, and 3.3.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Importance Weighting", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Let x be the concatenation of u 1 ,u 2 , and u 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SA-LSTM (SL)", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Hereby, x = [x 1 , x 2 , \u2022 \u2022 \u2022 , x n ]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SA-LSTM (SL)", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": ", where x i is the ith word in the combined sequence. Denote the pre-trained GloVe model as G. As GloVe model can be directly used by looking up the word x i , we can use G(x i ) to represent its output. On the contrary, ELMo embedding is not just dependent on the word x i , but on all the words of the input sequence. When taking as input the entire sequence x, n vectors can be extracted from the pre-trained ElMo model. Denote the vectors as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SA-LSTM (SL)", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "E = [E 1 , E 2 , \u2022 \u2022 \u2022 , E n ]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SA-LSTM (SL)", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": ". E i contains both contextual and semantic information of word x i . We use a two-layer bidirectional LSTM as the encoder of the sequence x. For simplicity, we denote it as LST M e . In order to better represent the information of x i , we use the concatenation of G(x i ) and E i as the feature embedding of x i . Therefore, we have the following recurrent progress:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SA-LSTM (SL)", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h e t = LST M e ([G(x t ); E t ], h e t\u22121 ).", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "SA-LSTM (SL)", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "h e t is the hidden state of encoder LSTM at time step t, and h e 0 = 0. the n hidden states of encoder given the input x. Self-attention mechanism has been proven to be effective in helping RNN dealing with dependency problems (Lin et al., 2017) . We use the multi-head version of the self-attention (Vaswani et al., 2017) and set the number of channels for each head as 1. Denote the self-attention module as SA, it takes as input all the hidden states of the LSTM and summarizes them into a single vector. This process is represented as h sa x = SA(h e x ). To predict the model, we append a fully connected (FC) layer to project h sa", |
|
"cite_spans": [ |
|
{ |
|
"start": 228, |
|
"end": 246, |
|
"text": "(Lin et al., 2017)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 301, |
|
"end": 323, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SA-LSTM (SL)", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "x on to the space of emotions. Denote the FC layer as output. Let o SL", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SA-LSTM (SL)", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "x = output(h sa x ), then the estimated label of x is the arg max i (o SL", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SA-LSTM (SL)", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "x ), where i is ith value in the vector o SL", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SA-LSTM (SL)", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "x .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SA-LSTM (SL)", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "SLD is the combination of SA and DeepMoji. An SLD model without the output layer is in fact the utterance encoder of the proposed HRLCE, which is illustrated in the right side of Figure 1 . Denote the DeepMoji model as D, when taking as input x, the output is represented as h d x = D(x). We concatenate h d", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 179, |
|
"end": 187, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "SA-LSTM-DeepMoji (SLD)", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "x and h sa x as the feature representation of sequence of x. Same as SL, an FC layer is added in order to predict the label:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SA-LSTM-DeepMoji (SLD)", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "o SLD x = output([h sa x ; h d x ]).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SA-LSTM-DeepMoji (SLD)", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Unlike SL and SLD, the input of HRLCE is not the concatenation of u 1 , u 2 , and u 3 . Following the annotation in Section 3.1 and 3.2, an utterance u i is firstly encoded as h sa u i and h d u i . We use another two layer bidirectional LSTM as the context RNN, denoted as LST M c . Its hidden states are iterated through:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "HRLCE", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "h c t = LST M c ([h sa ut ; h d ut ], h c t\u22121 ),", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "HRLCE", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where h c 0 = 0. The three hidden states", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "HRLCE", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "h c = [h c 1 , h c 2 , h c 3 ]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "HRLCE", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": ", are fed as the input to a self-attention layer. The resulting vector SA(h c ) is also projected to the label space by an FC layer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "HRLCE", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "BERT (Section 2.3) can take as input either a single sentence or a pair of sentences. A \"sentence\" here corresponds to any arbitrary span of contiguous words. In this work, in order to fine-tune BERT, we concatenate utterances u 1 and u 2 to constitute the first sentence of the pair. u 3 is the second sentence of the pair. The reason behind such setting is that we assume that the target emotion y is directly related to u 3 , while u 1 and u 2 are providing additional context information. This forces the model to consider u 3 differently.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BERT", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "From the training data we notice that emojis are playing an important role in expressing emotions. We first use ekphrasis package (Baziotis et al., 2017) to clean up the utterances. ekphrasis corrects misspellings, handles textual emotions (e.g. ':)))'), and normalizes tokens (hashtags, numbers, user mentions etc.). In order to keep the semantic meanings of the emojis, we use the emojis package 1 to first convert them into their textual aliases and then replace the \":\" and \" \" with spaces.", |
|
"cite_spans": [ |
|
{ |
|
"start": 130, |
|
"end": 153, |
|
"text": "(Baziotis et al., 2017)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiment 4.1 Data preprocessing", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We use PyTorch 1.0 for the deep learning framework, and our code in Python 3.6 can be accessed in GitHub 2 . For fair comparisons, we use the same parameter settings for the common modules that are shared by the SL, SLD, and HRLCE. The dimension of encoder LSTM is set to 1500 per direction; the dimension of context LSTM is set to 800 per direction. We use Adam optimizer with initial learning rate as 5e-4 and a decay ratio of 0.2 after each epoch. The parameters of DeepMoji are set to trainable. We use BERT-Large pre-trained model which contains 24 layers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Environment and hyper-parameters", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "happy angry sad others size Train 14.07% 18.26% 18.11% 49.56% 30160 Dev 5.15% 5.44% 4.54% 84.86% 2755 Test 4.28% 5.57% 4.45% 85.70% 5509 According to the description in (CodaLab, 2019), the label distribution for dev and test sets are roughly 4% for each of the emotions. However, from the dev set ( Table 2) we know that the proportions of each of the emotion categories are better described as %5 each, thereby we use %5 as the empirical estimation of distribution P (x te ). We did not use the exact proportion of dev set as the estimation to prevent the overfitting towards dev set. The sample distribution of the train set is used as P (x tr ). We use Cross Entropy loss for all the aforementioned models, and the loss of the training samples are weighted according to Eq. 1.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 300, |
|
"end": 308, |
|
"text": "Table 2)", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Environment and hyper-parameters", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We run 9-fold cross validation on the training set. Each iteration, 1 fold is used to prevent the models from overfitting while the remaining folds are used for training. Therefore, every model is trained 9 times to ensure stability. The inferences over dev and test sets are performed on each iteration. We use the majority voting strategy to merge the results from the 9 iterations. The results are shown in Table 1 . It shows that the proposed HRLCE model performs the best. The performance of SLD and SL are very close to each other, on the dev set, SLD performs better than SL but they have almost the same overall scores on the test set. The Macro-F1 scores of each emotion category are very different from each other: the classification accuracy for emotion Sad is the highest in most of the cases, while the emotion Happy is the least accurately classified by all the models. We also noticed that the performance on the dev set is generally slightly better than that on the test set.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 410, |
|
"end": 417, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and analysis", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Considering the competitive results generated by BERT, we combined BERT and our proposed model in an ensemble and obtained 0.7709 on the final test leaderboard. From a confusion matrix of our final submission, we notice that there are barely miss-classifications among the three categories (Angry, Sad, and Happy). For example, the emotion Sad is rarely miss-classified as \"Happy\" or \"Angry\". Most of the errors correspond to classifying the emotional utterances in the Others category. We think, as future improvement, the models need to first focus on the binary classification \"Others\" versus \"Not-Others\", then the \"Not-Others\" are classified in their respective emotion.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "https://pypi.org/project/emoji/ 2 https://github.com/chenyangh/SemEval2019Task3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Datastories at semeval-2017 task 4: Deep lstm with attention for message-level and topic-based sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Christos", |
|
"middle": [], |
|
"last": "Baziotis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikos", |
|
"middle": [], |
|
"last": "Pelekis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christos", |
|
"middle": [], |
|
"last": "Doulkeridis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "747--754", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christos Baziotis, Nikos Pelekis, and Christos Doulk- eridis. 2017. Datastories at semeval-2017 task 4: Deep lstm with attention for message-level and topic-based sentiment analysis. In Proceedings of the 11th International Workshop on Semantic Eval- uation (SemEval-2017), pages 747-754, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Understanding emotions in text using deep learning and big data", |
|
"authors": [ |
|
{ |
|
"first": "Ankush", |
|
"middle": [], |
|
"last": "Chatterjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Umang", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manoj", |
|
"middle": [], |
|
"last": "Kumar Chinnakotla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radhakrishnan", |
|
"middle": [], |
|
"last": "Srikanth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Michel Galley, and Puneet Agrawal", |
|
"volume": "93", |
|
"issue": "", |
|
"pages": "309--317", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ankush Chatterjee, Umang Gupta, Manoj Kumar Chinnakotla, Radhakrishnan Srikanth, Michel Gal- ley, and Puneet Agrawal. 2019a. Understanding emotions in text using deep learning and big data. Computers in Human Behavior, 93:309-317.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Semeval-2019 task 3: Emocontext: Contextual emotion detection in text", |
|
"authors": [ |
|
{ |
|
"first": "Ankush", |
|
"middle": [], |
|
"last": "Chatterjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kedhar", |
|
"middle": [], |
|
"last": "Nath Narahari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meghana", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Puneet", |
|
"middle": [], |
|
"last": "Agrawal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of The 13th International Workshop on Semantic Evaluation (SemEval-2019)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ankush Chatterjee, Kedhar Nath Narahari, Meghana Joshi, and Puneet Agrawal. 2019b. Semeval-2019 task 3: Emocontext: Contextual emotion detection in text. In Proceedings of The 13th International Workshop on Semantic Evaluation (SemEval-2019), Minneapolis, Minnesota.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Semeval19 task 3: Emocontext", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Codalab", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "CodaLab. 2019. Semeval19 task 3: Emocon- text. https://competitions.codalab.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "org/competitions/19790#learn_the_ details-data-set-format", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "org/competitions/19790#learn_the_ details-data-set-format.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "An argument for basic emotions", |
|
"authors": [ |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Ekman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "Cognition & emotion", |
|
"volume": "6", |
|
"issue": "3-4", |
|
"pages": "169--200", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul Ekman. 1992. An argument for basic emotions. Cognition & emotion, 6(3-4):169-200.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Using millions of emoji occurrences to learn any-domain representations for detecting sentiment, emotion and sarcasm", |
|
"authors": [ |
|
{ |
|
"first": "Bjarke", |
|
"middle": [], |
|
"last": "Felbo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Mislove", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iyad", |
|
"middle": [], |
|
"last": "Rahwan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sune", |
|
"middle": [], |
|
"last": "Lehmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bjarke Felbo, Alan Mislove, Anders S\u00f8gaard, Iyad Rahwan, and Sune Lehmann. 2017. Using millions of emoji occurrences to learn any-domain represen- tations for detecting sentiment, emotion and sar- casm. In Conference on Empirical Methods in Nat- ural Language Processing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Automatic dialogue generation with expressed emotions", |
|
"authors": [ |
|
{ |
|
"first": "Chenyang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{

"first": "Osmar",

"middle": [

"R"

],

"last": "Zaiane",

"suffix": ""

},

{

"first": "Amine",

"middle": [],

"last": "Trabelsi",

"suffix": ""

},

{

"first": "Nouha",

"middle": [],

"last": "Dziri",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "16th Annual Conference of the North American Chapter of the Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chenyang Huang, Osmar R. Zaiane, Amine Trabelsi, and Nouha Dziri. 2018. Automatic dialogue genera- tion with expressed emotions. In 16th Annual Con- ference of the North American Chapter of the As- sociation for Computational Linguistics (NAACL), New Orleans, USA.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "A structured self-attentive sentence embedding", |
|
"authors": [ |
|
{ |
|
"first": "Zhouhan", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minwei", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{

"first": "Cicero",

"middle": [

"Nogueira"

],

"last": "dos Santos",

"suffix": ""

},

{

"first": "Mo",

"middle": [],

"last": "Yu",

"suffix": ""

},

{

"first": "Bing",

"middle": [],

"last": "Xiang",

"suffix": ""

},

{

"first": "Bowen",

"middle": [],

"last": "Zhou",

"suffix": ""

},

{

"first": "Yoshua",

"middle": [],

"last": "Bengio",

"suffix": ""

}
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1703.03130" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhouhan Lin, Minwei Feng, Cicero Nogueira dos San- tos, Mo Yu, Bing Xiang, Bowen Zhou, and Yoshua Bengio. 2017. A structured self-attentive sentence embedding. arXiv preprint arXiv:1703.03130.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Semeval-2018 task 1: Affect in tweets", |
|
"authors": [ |
|
{ |
|
"first": "Saif", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felipe", |
|
"middle": [], |
|
"last": "Bravo-Marquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Salameh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of The 12th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--17", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/S18-1001" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saif Mohammad, Felipe Bravo-Marquez, Moham- mad Salameh, and Svetlana Kiritchenko. 2018. Semeval-2018 task 1: Affect in tweets. In Proceed- ings of The 12th International Workshop on Seman- tic Evaluation, pages 1-17. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Crowdsourcing a word-emotion association lexicon", |
|
"authors": [ |
|
{

"first": "Saif",

"middle": [

"M"

],

"last": "Mohammad",

"suffix": ""

},

{

"first": "Peter",

"middle": [

"D"

],

"last": "Turney",

"suffix": ""

}
|
], |
|
"year": 2013, |
|
"venue": "Computational Intelligence", |
|
"volume": "29", |
|
"issue": "3", |
|
"pages": "436--465", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saif M Mohammad and Peter D Turney. 2013. Crowd- sourcing a word-emotion association lexicon. Com- putational Intelligence, 29(3):436-465.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christo- pher D. Manning. 2014. Glove: Global vectors for word representation. In Empirical Methods in Nat- ural Language Processing (EMNLP), pages 1532- 1543.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Deep contextualized word representations", |
|
"authors": [ |
|
{

"first": "Matthew",

"middle": [

"E"

],

"last": "Peters",

"suffix": ""

},

{

"first": "Mark",

"middle": [],

"last": "Neumann",

"suffix": ""

},

{

"first": "Mohit",

"middle": [],

"last": "Iyyer",

"suffix": ""

},

{

"first": "Matt",

"middle": [],

"last": "Gardner",

"suffix": ""

},

{

"first": "Christopher",

"middle": [],

"last": "Clark",

"suffix": ""

},

{

"first": "Kenton",

"middle": [],

"last": "Lee",

"suffix": ""

},

{

"first": "Luke",

"middle": [],

"last": "Zettlemoyer",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1802.05365" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew E Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. arXiv preprint arXiv:1802.05365.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "The nature of emotions: Human emotions have deep evolutionary roots, a fact that may explain their complexity and provide tools for clinical practice", |
|
"authors": [ |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Plutchik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "American Scientist", |
|
"volume": "89", |
|
"issue": "4", |
|
"pages": "344--350", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert Plutchik. 2001. The nature of emotions: Hu- man emotions have deep evolutionary roots, a fact that may explain their complexity and provide tools for clinical practice. American Scientist, 89(4):344- 350.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Building end-to-end dialogue systems using generative hierarchical neural network models", |
|
"authors": [ |
|
{

"first": "Iulian",

"middle": [

"V"

],

"last": "Serban",

"suffix": ""

},

{

"first": "Alessandro",

"middle": [],

"last": "Sordoni",

"suffix": ""

},

{

"first": "Yoshua",

"middle": [],

"last": "Bengio",

"suffix": ""

},

{

"first": "Aaron",

"middle": [],

"last": "Courville",

"suffix": ""

},

{

"first": "Joelle",

"middle": [],

"last": "Pineau",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "Thirtieth AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iulian V Serban, Alessandro Sordoni, Yoshua Bengio, Aaron Courville, and Joelle Pineau. 2016. Building end-to-end dialogue systems using generative hier- archical neural network models. In Thirtieth AAAI Conference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "A hierarchical recurrent encoderdecoder for generative context-aware query suggestion", |
|
"authors": [ |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Sordoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hossein", |
|
"middle": [], |
|
"last": "Vahabi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christina", |
|
"middle": [], |
|
"last": "Lioma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [ |
|
"Grue" |
|
], |
|
"last": "Simonsen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian-Yun", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 24th ACM International on Conference on Information and Knowledge Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "553--562", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alessandro Sordoni, Yoshua Bengio, Hossein Vahabi, Christina Lioma, Jakob Grue Simonsen, and Jian- Yun Nie. 2015. A hierarchical recurrent encoder- decoder for generative context-aware query sugges- tion. In Proceedings of the 24th ACM International on Conference on Information and Knowledge Man- agement, pages 553-562. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Machine learning in non-stationary environments: Introduction to covariate shift adaptation", |
|
"authors": [ |
|
{ |
|
"first": "Masashi", |
|
"middle": [], |
|
"last": "Sugiyama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Motoaki", |
|
"middle": [], |
|
"last": "Kawanabe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Masashi Sugiyama and Motoaki Kawanabe. 2012. Ma- chine learning in non-stationary environments: In- troduction to covariate shift adaptation. MIT press.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Current state of text sentiment analysis from opinion to emotion mining", |
|
"authors": [ |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Yaddolahi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ameneh", |
|
"middle": [ |
|
"Gholipour" |
|
], |
|
"last": "Shahraki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Osmar", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Zaiane", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ACM Computing Surveys", |
|
"volume": "50", |
|
"issue": "2", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ali Yaddolahi, Ameneh Gholipour Shahraki, and Os- mar R. Zaiane. 2017. Current state of text sentiment analysis from opinion to emotion mining. ACM Computing Surveys, 50(2):25:1-25:33.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "An illustration of the HRLCE model", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"text": "Macro-F1 scores and its harmonic means of the four models", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table/>" |
|
}, |
|
"TABREF2": { |
|
"text": "Label distribution of train, dev, and test set", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |