|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:04:53.519425Z" |
|
}, |
|
"title": "Multilogue-Net: A Context Aware RNN for Multi-modal Emotion Detection and Sentiment Analysis in Conversation", |
|
"authors": [ |
|
{ |
|
"first": "Aman", |
|
"middle": [], |
|
"last": "Shenoy", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Sardana", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Sentiment Analysis and Emotion Detection in conversation is key in several real-world applications, with an increase in modalities available aiding a better understanding of the underlying emotions. Multi-modal Emotion Detection and Sentiment Analysis can be particularly useful, as applications will be able to use specific subsets of available modalities, as per the available data. Current systems dealing with Multi-modal functionality fail to leverage and capture-the context of the conversation through all modalities, the dependency between the listener(s) and speaker emotional states, and the relevance and relationship between the available modalities. In this paper, we propose an end to end RNN architecture that attempts to take into account all the mentioned drawbacks. Our proposed model, at the time of writing, out-performs the state of the art on a benchmark dataset on a variety of accuracy and regression metrics. * * The following work was pursued when author was an intern at NVIDIA Graphics, Bengaluru", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Sentiment Analysis and Emotion Detection in conversation is key in several real-world applications, with an increase in modalities available aiding a better understanding of the underlying emotions. Multi-modal Emotion Detection and Sentiment Analysis can be particularly useful, as applications will be able to use specific subsets of available modalities, as per the available data. Current systems dealing with Multi-modal functionality fail to leverage and capture-the context of the conversation through all modalities, the dependency between the listener(s) and speaker emotional states, and the relevance and relationship between the available modalities. In this paper, we propose an end to end RNN architecture that attempts to take into account all the mentioned drawbacks. Our proposed model, at the time of writing, out-performs the state of the art on a benchmark dataset on a variety of accuracy and regression metrics. * * The following work was pursued when author was an intern at NVIDIA Graphics, Bengaluru", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Multi-modal Emotion Detection and Sentiment Analysis in conversation is gathering a lot of attention recently considering its potential use cases owing to the rapid growth of online social media platforms such as YouTube, Facebook, Instagram, Twitter etc. , Poria et al., 2016 , Zadeh et al., 2016b , especially knowing that information obtained from any combination of more than one of the available modalities (e.g. text, audio, video) can be used to produce meaningful results.", |
|
"cite_spans": [ |
|
{ |
|
"start": 256, |
|
"end": 276, |
|
"text": ", Poria et al., 2016", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 277, |
|
"end": 298, |
|
"text": ", Zadeh et al., 2016b", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The current state of the art systems on multimodal emotion detection and sentiment analysis do not treat the modalities in accordance to the information they are capable of holding (e.g. textual information is significantly more likely to hold contextual information then audio or video features are), lack an adequate fusion mechanism, and fail to effectively capture the context of a conversation in a multi-modal setting. In addition to the lack of proper usage of the available modalities, models also fail to effectively capture the flow of a conversation, the separation between speaker and listener states, and the emotional effect a speaker's utterance has on the listener (s) in dyadic conversations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our proposed model Multilogue-Net, attempts to embed basic domain knowledge and takes insight from Poria et al. (2019) , assuming that the sentiment or emotion governing a particular utterance predominantly depends on 4 factors -interlocutor state, interlocutor intent, the preceding and future emotions, and the context of the conversation. Interlocutor intent amongst the mentioned is particularly difficult to model due to its dependency of prior knowledge about the speaker, but modelling the other 3 separately, yet in an interrelated manner was theorized to produce meaningful results if managed to be captured effectively. The key intention was to attempt to simulate the setting in which an utterance is said, and use the actual utterance at that point to be able to gain better insights regarding emotion and sentiment of that utterance. The model uses information from all modalities learning multiple state vectors (representing interlocutor state) for a given utterance, followed by a pairwise attention mechanism inspired by Ghosal et al. (2018) , attempting to better capture the relationship between all pairs of the available modalities.", |
|
"cite_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 118, |
|
"text": "Poria et al. (2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1038, |
|
"end": 1058, |
|
"text": "Ghosal et al. (2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The model uses two gated recurrent units (GRU) (Chung et al., 2014) for each modality for modelling interlocutor state and emotion. Along with these GRU's, the model also uses an interconnected context network, consisting of the same number of GRU's as the number of available modalities, to model a different learned context representation for each modality. The incoming utterance representations and the historical GRU outputs are used at every timestamp to be able to arrive at a prediction for that timestamp.", |
|
"cite_spans": [ |
|
{ |
|
"start": 47, |
|
"end": 67, |
|
"text": "(Chung et al., 2014)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The model produces m different representations at every timestamp (Where m is the number of modalities), where each representation is the emotional state at that timestamp as conveyed by each of the modalities. These m representations are used by the fusion mechanism to incorporate information from each of the m representations to be able to arrive at the final prediction for that timestamp. We understand that the usage of the pairwise attention mechanism, along with the Emotion GRU are what make the model flexible across tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The usage of only the text representation as input to the context GRU's has been observed to be key to the results, as the context of the conversation would be better captured by textual information then it would have with audio or video information. We believe that Multilogue-net performs better than the current state of the art (Ghosal et al., 2018) on multi-modal datasets because of better context representation leveraging all available modalities. 1 The remaining sections of the paper are arranged as follows: Section 2 -discusses related work; Section 3 -discusses the model in detail; Section 4 -provides experimental results, dataset details, and analysis; Section 5 contains our ablation studies and its implications; and finally Section 6 -speaks on potential future work, and concludes our paper.", |
|
"cite_spans": [ |
|
{ |
|
"start": 332, |
|
"end": 353, |
|
"text": "(Ghosal et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 456, |
|
"end": 457, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Multi-modal Emotion recognition and Sentiment Analysis has always attracted attention in multiple fields such as natural language processing, psychology, cognitive science, and so on (Picard, 2010) . Previous works have been done studying factors of variation that have a more direct correlation with emotion, such as Ekman et al. (1992) , who found correlation between emotion and facial cues, and a lot of studies extensively focus on emotions and their relationship with one another such as Plutchik's wheel of emotions, which defines eight primary emotion types, each of which has a multitude of emotions as sub-types.", |
|
"cite_spans": [ |
|
{ |
|
"start": 183, |
|
"end": 197, |
|
"text": "(Picard, 2010)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 318, |
|
"end": 337, |
|
"text": "Ekman et al. (1992)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Early work done to leverage multi-modal information for emotion recognition includes works such as Datcu and Rothkrantz (2012) , who fused acoustic information with visual cues for emotion recognition and Eyben et al. (2010) , who used contextual information for emotion recognition in multi-modal settings. More recently, deep recurrent neural networks have been used to be able make the best of the learned representations of the modalities available to be able to give very effective and accurate emotion and sentiment predictions. successfully used RNN-based deep networks for multi-modal emotion recognition, which was followed by multiple other works Zadeh et al., 2018a; Zadeh et al., 2018c) giving results far better than what was seen before. Recent works also include works such as , who used memory networks for emotion recognition in dyadic conversations, where two distinct memory networks enabled interspeaker interaction.", |
|
"cite_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 126, |
|
"text": "Datcu and Rothkrantz (2012)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 205, |
|
"end": 224, |
|
"text": "Eyben et al. (2010)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 657, |
|
"end": 677, |
|
"text": "Zadeh et al., 2018a;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 678, |
|
"end": 698, |
|
"text": "Zadeh et al., 2018c)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Some works such as DialogueRNN (Majumder et al., 2018) , though focused on emotion recognition and sentiment analysis using a single modality (text), works very well in a multi-modal setting by just replacing the text representation with a concatenated vector of all the modality representations. DialogueRNN effectively leveraged the separation between the speakers by maintaining two independent gated recurrent units to keep track of the interlocutor states, also effectively capturing context in the conversation, yielding state-of-theart performance on uni-modal data. Even though DialogueRNN was able to give reasonably good results on multi-modal data, the lack of an adequate fusion mechanism and the lack of focus on a multi-modal representation held its multi-modal performance back.", |
|
"cite_spans": [ |
|
{ |
|
"start": 31, |
|
"end": 54, |
|
"text": "(Majumder et al., 2018)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Apart from the kind of works shown before, where a methodology or a model was proposed, works such as Poria et al. (2019) spoke extensively about the research challenges and advancements in emotion detection in conversation and gave a comprehensive overview of the problem. Most recently Ghosal et al. (2018) introduced the idea of learning the relationship between pairs of all available modalities using pairwise attention, in a multimodal setting, where similar attributes learned by multiple modalities are emphasized and differences between the modality representations are diminished. Pairwise attention proved to be incredibly effective yielding state-of-the-art performance on multi-modal data with just simple representations for each modality.", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 121, |
|
"text": "Poria et al. (2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 288, |
|
"end": 308, |
|
"text": "Ghosal et al. (2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "3 Proposed Methodology", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Let there be a P number of participants p 1 , p 2 , ..., p P in the conversation. The problem is defined such that for every utterance u 1 , u 2 , ..., u N uttered by any participant(s), a sentiment score is allotted along with a predicted emotion label (one of happy, sad, angry, surprise, disgust, and fear). Each utterance corresponds to a particular participant of the conversation, allowing this formulation of the problem to also capture the average sentiment of a participant in the conversation. Predictions over utterances also avoid problems such as classification during long moments of silence when predictions are made for a fixed time interval, and is also mostly common practice.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "For every utterance u t (p), where p is the party who uttered the utterance, there exist three independent representations , t t \u2208 R Dt , a t \u2208 R Da , and v t \u2208 R Dv , and are obtained using the feature extractors further explained in section 4.2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "This gives us our overall formulation of the problem, which is to be able to learn a function which would take as input three independent representations of a particular utterance, information regarding the previous emotional state of the participant, and a representation of the current context of the conversation -to be able to map to an output prediction of a sentiment score and emotion label.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Details regarding how these representations are updated and how the output is generated using these inputs are described in detail below.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Modelling was done under the underlying assumption that the sentiment or emotion of an utterance predominantly depends on four factors as mentioned before:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Details", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 Interlocutor State \u2022 Interlocutor Intent", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Details", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 Context of the conversation until that point", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Details", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 Previous interlocutor states and emotions of a particular participant in the conversation", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Details", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The proposed model attempts to model three out of the mentioned four explicitly, and assume that interlocutor intent will be modelled implicitly during model training. Interlocutor state is modelled using a state GRU (will be referred to as sGRU ), A context GRU is used to keep track of the context of the conversation (cGRU ), and an emotion GRU (eGRU ) is used to keep track of the emotional state of that particular participant. Finally, a pairwise attention mechanism, which uses the emotion representation of all modalities at a particular timestamp is used to leverage the important modalities and relevant combination of the modalities for emotion or sentiment prediction at that timestamp. Every utterance has three independent feature representations (text, audio, and video features), t t \u2208 R Dt , a t \u2208 R Da , and v t \u2208 R Dv . Each of these feature representations are treated and operated on independently until the pairwise attention mechanism. The model consists of two GRU's (state GRU, and emotion GRU) for every modality and participant, and a context GRU for each modality common to all participants in the conversation (If p is the number of participants and m is the number of modalities, the model would have a total of 2mp + m GRU's). The inputs at the current timestamp and the previous state, context, and emotion representations are operated on to be able to arrive at the prediction at that timestamp. Figure 1 describes the updates at a particular timestamp and the role of each GRU is further explained below.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1429, |
|
"end": 1437, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model Details", |
|
"sec_num": "3.2" |
|
}, |
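
{

"text": "To make the GRU bookkeeping above concrete, the following minimal PyTorch-style sketch (our own illustration rather than the authors' released implementation; all class names, variable names, and hidden sizes are hypothetical) instantiates the 2mp + m GRU cells described in this section -- one state GRU and one emotion GRU per participant and modality, plus one context GRU per modality shared across participants:\n\nimport torch\nimport torch.nn as nn\n\nclass MultilogueCells(nn.Module):\n    # Hypothetical sketch of the GRU bookkeeping: 2*m*p + m GRU cells in total.\n    def __init__(self, dims, n_participants, d_state, d_context, d_emotion):\n        super().__init__()\n        self.modalities = list(dims.keys())  # e.g. ['text', 'audio', 'video']\n        # One context GRU per modality, shared by all participants (m cells).\n        self.cgru = nn.ModuleDict({m: nn.GRUCell(dims[m] + d_state, d_context) for m in self.modalities})\n        # One state GRU and one emotion GRU per participant and modality (2*m*p cells).\n        self.sgru = nn.ModuleDict({f'{m}_{p}': nn.GRUCell(dims[m] + d_context, d_state) for m in self.modalities for p in range(n_participants)})\n        self.egru = nn.ModuleDict({f'{m}_{p}': nn.GRUCell(d_state, d_emotion) for m in self.modalities for p in range(n_participants)})\n\ndims = {'text': 300, 'audio': 384, 'video': 35}  # CMU-MOSEI utterance feature sizes\ncells = MultilogueCells(dims, n_participants=2, d_state=128, d_context=128, d_emotion=128)\nprint(len(cells.cgru) + len(cells.sgru) + len(cells.egru))  # 3 + 6 + 6 = 15 = 2*m*p + m",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model Details",

"sec_num": "3.2"

},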
|
{ |
|
"text": "The Context GRU (cGRU ) for each modality aims to capture the context of the conversation by jointly encoding the utterance representation of that modality (at timestamp t in the given diagram) (t t \u2208 R Dt , a t \u2208 R Da , or v t \u2208 R Dv ) and the previous timestamp speaker state GRU output of that modality. This accounts for inter-speaker and inter-utterance dependencies to produce an effective context rep- resentation. The current utterance t t , a t , or v t , changes the state of that speaker from (s t t , s a t ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context GRU (cGRU )", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "s v t ) to (s t t+1 , s a t+1 , s v t+1 ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context GRU (cGRU )", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "To capture this change in context we use GRU cell cGRU having output size", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context GRU (cGRU )", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "D c , using t t , a t , or v t and (s t t , s a t , s v t ) as: c t t+1 = cGRU (c t t , (t t \u2295 s t t )) (1) c a t+1 = cGRU (c a t , (a t \u2295 s a t )) (2) c v t+1 = cGRU (c v t , (v t \u2295 s v t ))", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Context GRU (cGRU )", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "Where D c is the size of the context vectors c t t+1 , c a t+1 , and c v t+1 .D t , D a , and D v are the sizes of utterance representations of text, audio, and video respectively.\u2295 represents the concatenation operation, D s is the size of all the state vectors s t t+1 , s a t+1 , and s v t+1 ; and all GRU weight and biases shapes are such that they produce the expected shape of outputs taking the given shape of inputs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context GRU (cGRU )", |
|
"sec_num": "3.2.1" |
|
}, |
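
{

"text": "As a concrete illustration of equations (1)-(3), the sketch below (a hypothetical fragment with arbitrary sizes, not the authors' exact implementation) performs one context update for a single modality by concatenating the current utterance representation with the previous speaker state and feeding the result to the context GRU cell:\n\nimport torch\nimport torch.nn as nn\n\nd_utt, d_state, d_context = 300, 128, 128  # assumed sizes for D_t (or D_a / D_v), D_s, and D_c\ncgru = nn.GRUCell(d_utt + d_state, d_context)  # one such cell exists per modality\n\ndef context_update(c_prev, utt, s_prev):\n    # c_{t+1} = cGRU(c_t, utt_t concatenated with s_t), as in equations (1)-(3)\n    return cgru(torch.cat([utt, s_prev], dim=-1), c_prev)\n\nc_prev = torch.zeros(1, d_context)  # context initialised to null at the first timestamp\nutt = torch.randn(1, d_utt)         # t_t, a_t, or v_t for the current utterance\ns_prev = torch.zeros(1, d_state)    # previous speaker state for this modality\nc_next = context_update(c_prev, utt, s_prev)  # shape (1, d_context)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Context GRU (cGRU )",

"sec_num": "3.2.1"

},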
|
{ |
|
"text": "The network keeps track of the participants involved in a conversation by employing a p * m number of (sGRU )'s, where p is the number participants in the conversation and m is the number of available modalities.The sGRU associated with a participant outputs fixed size vectors which serve as an encoding to represent the interlocutor state, and are directly used for both emotion and sentiment prediction, and updating the context vectors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "State GRU (sGRU )", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "All the state vectors are initialized to null at the first timestamp. For a timestamp t, the state vector of participant p and modality m \u2208 {t, a, v} is updated using the input feature representation of that modality and simple attention over all the context vectors until that timestamp. The simple attention mechanism over all the context vectors is described by the following equations:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "State GRU (sGRU )", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b1 = sof tmax(m T t W \u03b1 [c m 1 , c m 2 , ..., c m t ])", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "State GRU (sGRU )", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "att t = \u03b1[c m 1 , c m 2 , ..., c m t ] T", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "State GRU (sGRU )", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "Where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "State GRU (sGRU )", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "m T t \u2208 {t T t , a T t , v T t }, W \u03b1 \u2208 R Dt,a,v\u00d7Dc , \u03b1 T \u2208 R (t\u22121)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "State GRU (sGRU )", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": ", and att t \u2208 R Dc . In equation 4, we calculate attention scores over all previous context representations of all previous utterances, highlighting the relative importance of all the previous context vectors to m t . A softmax layer is applied to amplify this relative importance, and finally equation 5 the final output of attention over context att t is calculated by pooling the previous context vectors with \u03b1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "State GRU (sGRU )", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "We then employ sGRU t,a,v to update s t,a,v GRU cells sGRU t t , sGRU a t , and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "State GRU (sGRU )", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "sGRU v t , each of output size D s . s t t+1 = sGRU (s t t , (t t \u2295 att t t+1 )) (6) s a t+1 = sGRU (s a t , (a t \u2295 att a t+1 ))", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "State GRU (sGRU )", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "s v t+1 = sGRU (s v t , (v t \u2295 att v t+1 ))", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "State GRU (sGRU )", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "Where D s is the size of all the state vectors s t t+1 , s a t+1 , and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "State GRU (sGRU )", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "s v t+1 .D t , D a , D v", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "State GRU (sGRU )", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "are the sizes of utterance representations of text, audio, and video respectively.\u2295 represents concatenation operation, and all GRU weights shapes are such that they produce the expected shape of outputs taking the given shape of inputs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "State GRU (sGRU )", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "The intended purpose of using this as the input to sGRU t,a,v is to model the dependency of the speaker state on the context of the conversation as understood by the utterances until that point, along with the utterance representation at that point. The output of the sGRU for modality m and timestamp t serves as an encoding of the speaker state as conveyed by modality m, at time t.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "State GRU (sGRU )", |
|
"sec_num": "3.2.2" |
|
}, |
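
{

"text": "The attention over context (equations 4 and 5) followed by the state update (equations 6 to 8) can be sketched as follows for a single modality; this is an illustrative reading with hypothetical names and sizes, with batching omitted for clarity:\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nd_utt, d_context, d_state = 300, 128, 128  # assumed sizes\nW_alpha = nn.Linear(d_utt, d_context, bias=False)  # plays the role of W_alpha in equation (4)\nsgru = nn.GRUCell(d_utt + d_context, d_state)\n\ndef attend_over_context(utt, context_history):\n    # context_history: (T, d_context), all context vectors c_1, ..., c_t seen so far for this modality\n    scores = context_history @ W_alpha(utt)  # (T,), analogous to m_t^T W_alpha [c_1, ..., c_t]\n    alpha = F.softmax(scores, dim=0)         # equation (4)\n    return alpha @ context_history           # equation (5): att_t, shape (d_context,)\n\ndef state_update(s_prev, utt, context_history):\n    att = attend_over_context(utt, context_history)\n    # equations (6)-(8): s_{t+1} = sGRU(s_t, utt_t concatenated with att_t)\n    return sgru(torch.cat([utt, att]).unsqueeze(0), s_prev.unsqueeze(0)).squeeze(0)\n\ncontext_history = torch.randn(4, d_context)  # c_1, ..., c_4\ns_next = state_update(torch.zeros(d_state), torch.randn(d_utt), context_history)  # shape (d_state,)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "State GRU (sGRU )",

"sec_num": "3.2.2"

},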
|
{ |
|
"text": "The emotion GRU serves as the decoder for the encoding produced by the state GRU. The emotion GRU uses the previous timestamp eGRU output, and the encoding provided by sGRU to produce an emotion or sentiment representation which is further used by the pairwise attention mechanism to be able to produce the relevant output for prediction. At timestamp (t + 1) the emotion vectors are updated as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Emotion GRU (eGRU )", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "e t t+1 = eGRU (e t t , s t t+1 )", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "Emotion GRU (eGRU )", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "e a t+1 = eGRU (e a t , s a t+1 )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Emotion GRU (eGRU )", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "e v t+1 = eGRU (e v t , s v t+1 )", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Emotion GRU (eGRU )", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "Where D e is the size of all the emotion vectors e t t+1 , e a t+1 , and e v t+1 .D t , D a , andD v are the sizes of utterance representations of text, audio, and video respectively.D e is the size of the state vectors s t t+1 , s a t+1 , and s v t+1 ; and all GRU weights shapes are such that they produce the expected shape of outputs taking the given shape of inputs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Emotion GRU (eGRU )", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "The emotion GRU acts as a decoder to the encoding produced by the associated state GRU, producing a vector which can be used for both sentiment and emotion prediction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Emotion GRU (eGRU )", |
|
"sec_num": "3.2.3" |
|
}, |
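
{

"text": "A very small sketch of equations (9) and (10), with hypothetical names and sizes -- the emotion GRU simply decodes the new state encoding, conditioned on its own previous output:\n\nimport torch\nimport torch.nn as nn\n\nd_state, d_emotion = 128, 128          # assumed sizes for D_s and D_e\negru = nn.GRUCell(d_state, d_emotion)  # one such cell per participant and modality\n\ne_prev = torch.zeros(1, d_emotion)  # e_t for this modality\ns_next = torch.randn(1, d_state)    # s_{t+1} produced by the corresponding state GRU\ne_next = egru(s_next, e_prev)       # e_{t+1} = eGRU(e_t, s_{t+1})",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Emotion GRU (eGRU )",

"sec_num": "3.2.3"

},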
|
{ |
|
"text": "The emotion GRU for each timestamp will produce an m number of vectors (where m is the number of modalities available). Pairwise attention is then used over these m vectors to produce the final prediction output. In particular pairwise attention is calculated over the following pairs in our case -(e v , e t ), (e t , e a ), and (e a , e v ). Pairwise attention for pair (e v , e t ) would be calculated as follows: Figure 3 : Pairwise attention mechanism used as the fusion mechanism followed by the final prediction layer", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 417, |
|
"end": 425, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pairwise Attention Mechanism", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "B 1 = e v .(e t ) T , B 2 = e t .(e v ) T", |
|
"eq_num": "(12)" |
|
} |
|
], |
|
"section": "Pairwise Attention Mechanism", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "N 1 = sof tmax(B 1 ), N 2 = sof tmax(B 2 ) (13) O 1 = N 1 .e t , O 2 = N 2 .e v", |
|
"eq_num": "(14)" |
|
} |
|
], |
|
"section": "Pairwise Attention Mechanism", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "A 1 = O 1 e v , A 2 = O 2 e t", |
|
"eq_num": "(15)" |
|
} |
|
], |
|
"section": "Pairwise Attention Mechanism", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "pairwise(e v , e t ) = A 1 \u2295 A 2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pairwise Attention Mechanism", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "Where B 1 , B 2 \u2208 R De\u00d7De ; N 1 , N 2 \u2208 R De\u00d7De ; A 1 , A 2 \u2208 R De\u00d7De ; and pairwise(e v , e t ) \u2208 R De\u00d72De ; represents element-wise product; and \u2295 represents concatenation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pairwise Attention Mechanism", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "A complete analysis on the pairwise attention mechanism has been done by Ghosal et al. (2018) , where the role of each one of the intermediate variables has been described. These equations (12, 13, 14, 15, 16) calculate m C 2 pairwise fusion representations, which are further concatenated to make the final prediction as described below.", |
|
"cite_spans": [ |
|
{ |
|
"start": 73, |
|
"end": 93, |
|
"text": "Ghosal et al. (2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pairwise Attention Mechanism", |
|
"sec_num": "3.2.4" |
|
}, |
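
{

"text": "A sketch of one pairwise attention evaluation, batched over n utterances following the reading of Ghosal et al. (2018); the shapes and names here are illustrative assumptions rather than the authors' exact implementation:\n\nimport torch\nimport torch.nn.functional as F\n\ndef pairwise_attention(e_a, e_b):\n    # e_a, e_b: (n, d_e) emotion representations from two modalities\n    B1 = e_a @ e_b.T                    # equation (12): cross-modal matching scores\n    B2 = e_b @ e_a.T\n    N1 = F.softmax(B1, dim=-1)          # equation (13)\n    N2 = F.softmax(B2, dim=-1)\n    O1 = N1 @ e_b                       # equation (14): attend over the other modality\n    O2 = N2 @ e_a\n    A1 = O1 * e_a                       # equation (15): element-wise gating by the own modality\n    A2 = O2 * e_b\n    return torch.cat([A1, A2], dim=-1)  # equation (16): concatenated fused representation\n\ne_v, e_t = torch.randn(5, 128), torch.randn(5, 128)  # hypothetical d_e = 128, n = 5\nfused_vt = pairwise_attention(e_v, e_t)               # shape (5, 256)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pairwise Attention Mechanism",

"sec_num": "3.2.4"

},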
|
{ |
|
"text": "The prediction layer varies based on whether a sentiment or emotion prediction is expected. For sentiment prediction first all three pairs of pairwise attention i.e. pairwise(e v , e t ), pairwise(e a , e t ), and pairwise(e v , e a ) at that timestamp are concatenated along with the emotion GRU outputs at that timestamp (e t t , e a t , and e v t ) and the concatenated layer is passed through a fully connected layer followed by a sof tmax or tanh layer based on the nature of the expected prediction. For sentiment prediction between -1 and +1 at timestamp t the output layer would equate as follows: pw = pw(e v , e t ) \u2295 pw(e a , e t ) \u2295 pw(e v , e a ) (17)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Final Predictions", |
|
"sec_num": "3.2.5" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L t = pw \u2295 e t t \u2295 e a t \u2295 e v t", |
|
"eq_num": "(18)" |
|
} |
|
], |
|
"section": "Final Predictions", |
|
"sec_num": "3.2.5" |
|
}, |
|
{ |
|
"text": "pred", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Final Predictions", |
|
"sec_num": "3.2.5" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "sentiment(t) = tanh(W L L t )", |
|
"eq_num": "(19)" |
|
} |
|
], |
|
"section": "Final Predictions", |
|
"sec_num": "3.2.5" |
|
}, |
|
{ |
|
"text": "Where pairwise(e v , e t ) has been represented as pw(e v , e t ); and W L \u2208 R 9De\u00d71 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Final Predictions", |
|
"sec_num": "3.2.5" |
|
}, |
|
{ |
|
"text": "For emotion prediction we use a fully connected layer along with a final sof tmax layer to calculate 6 emotion class probabilities from L t .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Final Predictions", |
|
"sec_num": "3.2.5" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "l t = ReLU (W l L t + b l )", |
|
"eq_num": "(20)" |
|
} |
|
], |
|
"section": "Final Predictions", |
|
"sec_num": "3.2.5" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P t = sof tmax(W smax l t + b smax )", |
|
"eq_num": "(21)" |
|
} |
|
], |
|
"section": "Final Predictions", |
|
"sec_num": "3.2.5" |
|
}, |
|
{ |
|
"text": "pred emotion(t) = argmax(P t )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Final Predictions", |
|
"sec_num": "3.2.5" |
|
}, |
|
{ |
|
"text": "Where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Final Predictions", |
|
"sec_num": "3.2.5" |
|
}, |
|
{ |
|
"text": "W l \u2208 R D l \u00d79De ; b l =\u2208 R D l ; W smax \u2208 R c\u00d7D l ; b smax \u2208 R c and P t \u2208 R c", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Final Predictions", |
|
"sec_num": "3.2.5" |
|
}, |
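
{

"text": "A compact sketch of the prediction heads in equations (17)-(21); the sizes and module names are assumptions for illustration, not the released implementation:\n\nimport torch\nimport torch.nn as nn\n\nd_e, d_l, n_classes = 128, 64, 6  # assumed D_e, D_l, and the 6 emotion classes\n\nclass PredictionHeads(nn.Module):\n    # The shared 9*D_e input is built from the three pairwise fusion outputs (each 2*D_e)\n    # plus the three per-modality emotion vectors (each D_e).\n    def __init__(self):\n        super().__init__()\n        self.w_sent = nn.Linear(9 * d_e, 1, bias=False)  # W_L in equation (19)\n        self.w_emo = nn.Linear(9 * d_e, d_l)             # W_l, b_l in equation (20)\n        self.w_smax = nn.Linear(d_l, n_classes)          # W_smax, b_smax in equation (21)\n\n    def forward(self, pw_vt, pw_at, pw_va, e_t, e_a, e_v):\n        L = torch.cat([pw_vt, pw_at, pw_va, e_t, e_a, e_v], dim=-1)  # equations (17)-(18)\n        sentiment = torch.tanh(self.w_sent(L))                       # equation (19), in [-1, 1]\n        logits = self.w_smax(torch.relu(self.w_emo(L)))              # equations (20)-(21); softmax omitted since argmax is unchanged\n        return sentiment, logits, logits.argmax(dim=-1)              # predicted emotion class\n\nheads = PredictionHeads()\ninputs = [torch.randn(1, 2 * d_e) for _ in range(3)] + [torch.randn(1, d_e) for _ in range(3)]\nsentiment, logits, emotion = heads(*inputs)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Final Predictions",

"sec_num": "3.2.5"

},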
|
{ |
|
"text": "Fairly standard practices have been employed for the training of the model. Categorical cross-entropy has been used along with L2-regularization as the loss function during training for emotion prediction, to maximize likelihood over each of the classes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "3.2.6" |
|
}, |
|
{ |
|
"text": "Mean Square Error (MSE) along with L2 regularization has been employed as loss function during training for sentiment regression. The usage of a saturating output layer and a loss function that does not undo the saturation, leads to the model to stop training when it makes extreme predictions (close to -1 or +1) due to very small gradients. Using initialization strategies that start at smaller model weights, mini-batch gradient descent-based Adam (Kingma and Ba, 2014) optimizer, and using L2 regularization is used to avoid this failure mode.", |
|
"cite_spans": [ |
|
{ |
|
"start": 451, |
|
"end": 472, |
|
"text": "(Kingma and Ba, 2014)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "3.2.6" |
|
}, |
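
{

"text": "An illustrative training step matching the description above (the stand-in network, learning rate, weight decay, and batch handling are placeholders, not the authors' reported values); L2 regularization is applied through the weight decay term of Adam:\n\nimport torch\nimport torch.nn as nn\n\nmodel = nn.Sequential(nn.Linear(9 * 128, 64), nn.ReLU(), nn.Linear(64, 6))  # stand-in for the full model\noptimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-4)  # Adam with L2 regularization\n\nmse = nn.MSELoss()            # loss for sentiment regression\nxent = nn.CrossEntropyLoss()  # categorical cross-entropy for emotion prediction\n\ndef train_step(features, emotion_labels):\n    optimizer.zero_grad()\n    logits = model(features)\n    loss = xent(logits, emotion_labels)  # swap in mse(...) against sentiment targets for regression\n    loss.backward()\n    optimizer.step()\n    return loss.item()\n\nloss = train_step(torch.randn(8, 9 * 128), torch.randint(0, 6, (8,)))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training",

"sec_num": "3.2.6"

},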
|
{ |
|
"text": "4 Experiments, Datasets, and Results", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training", |
|
"sec_num": "3.2.6" |
|
}, |
|
{ |
|
"text": "We evaluate our model using two benchmark datasets -CMU Multi-modal Opinion-level Sentiment Intensity (CMU-MOSI) (Zadeh et al., 2016a) and the recently published CMU Multi-modal Opinion Sentiment and Emotion Intensity (CMU-MOSEI) dataset (Zadeh et al., 2018b) . ", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 134, |
|
"text": "(Zadeh et al., 2016a)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 238, |
|
"end": 259, |
|
"text": "(Zadeh et al., 2018b)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In CMU-MOSEI dataset labels are in a continuous range of -3 to +3 and are accompanied by an emotion label being one of six emotions. However, in this work we also project the instances of CMU-MOSEI in a two-class classification setup with values \u2265 0 signifies positive sentiments and values < 0 signify negative sentiments. We have called this A2 accuracy (accuracy with 2 classes).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CMU-MOSEI", |
|
"sec_num": "4.1.2" |
|
}, |
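
{

"text": "The two-class projection described above is straightforward to reproduce; a short sketch (variable names are our own) of how A2 accuracy can be derived from the continuous sentiment scores:\n\nimport torch\n\ndef a2_accuracy(pred_scores, true_scores):\n    # Project continuous sentiment in [-3, 3] onto two classes:\n    # scores >= 0 count as positive, scores < 0 as negative, then compare.\n    return ((pred_scores >= 0) == (true_scores >= 0)).float().mean().item()\n\nacc = a2_accuracy(torch.tensor([1.2, -0.4, 0.0]), torch.tensor([0.8, -1.1, -0.2]))  # 2/3",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "CMU-MOSEI",

"sec_num": "4.1.2"

},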
|
{ |
|
"text": "Along with this we have also shown results for continuous range prediction between -3 and +3, and emotion prediction with the 6 emotion labels for each utterance in CMU-MOSEI. We have used A2 as a metric to be consistent with the previous published works on CMU-MOSEI dataset (Ghosal et al., 2018; Zadeh et al., 2018b) . CMU-MOSEI has further been used for other comprehensive experiments due to its large sizer and easier feature extraction 4.2 Uni-modal Feature Extraction", |
|
"cite_spans": [ |
|
{ |
|
"start": 276, |
|
"end": 297, |
|
"text": "(Ghosal et al., 2018;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 298, |
|
"end": 318, |
|
"text": "Zadeh et al., 2018b)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CMU-MOSEI", |
|
"sec_num": "4.1.2" |
|
}, |
|
{ |
|
"text": "We use the CMU-Multi-modal Data SDK (Zadeh et al., 2018b) for feature extraction. For MOSEI dataset, sentiment label-level features were provided where text features used were GloVe embeddings (Pennington et al., 2014) , visual features extracted by Facet (St\u00f6ckli et al., 2017 ) & acoustic features by OpenSMILE (Eyben et al., 2010) . Thereafter, we compute the average of sentiment label-level features in an utterance to obtain the utterance-level features. For each sentiment labellevel feature, the dimension of the feature vector is set to 300 (text), 35 (visual) & 384 (acoustic).", |
|
"cite_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 57, |
|
"text": "(Zadeh et al., 2018b)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 193, |
|
"end": 218, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 256, |
|
"end": 277, |
|
"text": "(St\u00f6ckli et al., 2017", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 313, |
|
"end": 333, |
|
"text": "(Eyben et al., 2010)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CMU-MOSEI", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "In contrast, for MOSI dataset we use utterance level features provided in . These utterance-level features represent the outputs of a convolutional neural network (Karpathy et al., 2014) , 3D convolutional neural network (Ji et al., 2010 ) & openSMILE (Eyben et al., 2010) Table 2 : Multilogue-Net performance on CMU-MOSEI Sentiment Labels compared to previous stateof-the-art models on regression and accuracy Metrics. All metrics apart from MAE represents higher values for better results, MAE represents lower values for better results.", |
|
"cite_spans": [ |
|
{ |
|
"start": 163, |
|
"end": 186, |
|
"text": "(Karpathy et al., 2014)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 221, |
|
"end": 237, |
|
"text": "(Ji et al., 2010", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 252, |
|
"end": 272, |
|
"text": "(Eyben et al., 2010)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 273, |
|
"end": 280, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "CMU-MOSI", |
|
"sec_num": "4.2.2" |
|
}, |
|
{ |
|
"text": "We evaluate our proposed approach on CMU-MOSI (test-set) on accuracy and F1 score, and CMU-MOSEI (dev-set) on accuracy, F1 score, mean absolute error (M AE), pearson score (r), and accuracy's on the emotion labels. Due to the lack of speaker information in CMU-MOSI we were not able to use the CMU-Multi-modal Data SDK for sentiment label extraction, to be able to evaluate our approach on CMU-MOSI on mean absolute error and Pearson score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Results have also been reported for usage of two of the three available modalities. Uni-modal performance has not been reported as the focus of the paper is the effective usage of multi-modal data. In a uni-modal setting the model would not be using the fusion mechanism and the output would be equivalent to having a few dense layers after the emotion GRU to directly output the final prediction. F1 scores have not been mentioned by most previous models being used for comparison, but have been reported for Multilogue-Net for additional comparison to any future models using CMU-MOSI dataset. Net on CMU-MOSI dataset, comparing to the current state of the art (Ghosal et al., 2018) , previous state-of-the-art , and Dia-logueRNN (Majumder et al., 2018 ) (Multi-modal performance of DialogueRNN has not been reported by Majumder et al. (2018) , and we have run these experiments additionally for a better comparative study, where concatenating the input representations has been used as a fusion mechanism). Our model consistently outperforms the previous state-of-the-art but performs better only on one of the subsets of the modalities when compared to the current state-of-the-art.", |
|
"cite_spans": [ |
|
{ |
|
"start": 663, |
|
"end": 684, |
|
"text": "(Ghosal et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 732, |
|
"end": 754, |
|
"text": "(Majumder et al., 2018", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 822, |
|
"end": 844, |
|
"text": "Majumder et al. (2018)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "In comparison to MMMU-BA our model also lacks in Multi-modal performance. We theorize that the model performance is lacking because of the low number of training examples (CMU-MOSI consists only of 93 conversations out of which 62 were used for training), in contrast to our model which has a high capacity (Relative to models being compared with). Since Multilogue-Net learns a lot of intermediate representations in order to make a prediction, it would need a larger dataset with more variability to be able to learn meaningful representations. The proposition that performance lacks due to a lack of training examples is backed by the results on CMU-MOSEI (demonstrated in a comparative setting in Table 2 and 3) where the model consistently outperforms the current stateof-the-art on most metrics.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 701, |
|
"end": 708, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "On CMU-MOSEI, our model seems to perform very consistently on both sentiment and emotion labels. The model outperforms the current state of the art on all but one metric (both classification and accuracy) on sentiment labels in the tri-modal setting. Multilogue-Net also outperforms the current state of the art on the emotion labels by a considerable margin (This is also attributed to the fact that not a lot of models have presented results on these labels).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Similar observations are made in both datasets, where the tri-modal metrics show the best performance, and audio + video show the worst relative performance (suggesting the importance of text in a multi-modal setting). Textual information seems to be the guiding factor for multi-modal performance, with video and audio features simply acting as a push to the uni-modal performance on text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We theorize that the performance of Multilogue-Net is majorly attributed to its increased capacity as compared to previous models. Effective usage of this increased capacity, using representations inspired from a basic understanding of conversation, along with a larger dataset for training have been key in achieving the improved results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Until now, some architectural considerations, such as the use of eGRU and the fusion mechanism, have been briefly explained but not empirically justified. This section aims to get empirical evidence regarding the effectiveness of these modules. Since our model completely hinges around the usage of the context and state GRU's, our ablation studies and analysis have focused on the fusion mechanism and emotion GRU (eGRU ) only.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ablation Studies and Analysis", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The effectiveness of the fusion mechanism can be very easily examined by observing the results of the model on both tasks \u2212 Sentiment Regression and Emotion Recognition, with and without the fusion mechanism. Table 4 shows these results on CMU-MOSEI modality subsets.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 209, |
|
"end": 216, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Fusion Mechanism", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "The bi-modal results in table 4 involve evaluating the pairwise attention module only once (Since there is only one pair available), directly followed by the prediction layer. The tri-modal case on the other hand involves evaluating the pairwise attention module thrice (Once for each pair). In general, the number of times this module will have to be evaluated for m modalities is m C 2 , which raises Table 4 : Multilogue-Net performance on CMU-MOSEI with and without the fusion mechanism -for 'without' fusion we have concatenated all the representations and directly passed them to the prediction layer. a fair concern regarding the trade-off between the additional computational cost and performance.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 403, |
|
"end": 410, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Fusion Mechanism", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We empirically observe that the additional computational cost can be considered negligible in context of the increased performance, largely attributing to the non-parametric nature of the fusion mechanism and the relatively small number of additional parameters in the prediction layer (6D e for the sentiment regression; 36D e for emotion recognition).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fusion Mechanism", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "The fusion mechanism seems to clearly be beneficial in all of the reported cases apart from video + audio, implying that the fusion mechanism is useful only in the cases the text representation is used. This further strengthens our claim that the text representation guides tri-modal performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fusion Mechanism", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Unlike as done with the fusion mechanism, the effectiveness of the eGRU cannot be examined by evaluating metrics with and without it. Removing the Emotion GRU would clearly be detrimental to the results, and would not convey the intention of having it.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Emotion GRU (eGRU )", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The primary intention of having the eGRU can be considered to be maintaining consistency between tasks. To better understand what this means table 5 quantitatively demonstrates this effect. The model was trained separately for Emotion Detection and Sentiment Regression tasks. After both the models were trained satisfactorily, a particular sample from the test set (test sample 6) was inferred on. We then retrieved the intermediate text repre-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Emotion GRU (eGRU )", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Euclidean Distance Sample 6 with t = 4 s t 4 4.6 units c t 4 6.1 units e t 4 26.4 units sentations (e t 4 , c t 4 , and s t 4 ; superscript t indicating text modality) at a particular timestamp (t = 4) for both models on that sample. The Euclidean Distance between these two sets of representations (one for each task) was evaluated and have been shown in table 5, where we can clearly observe that the euclidean distance between the emotion representations is much larger as compared to the state and context representations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "This shows that for both tasks, interlocutor state and context representations are relatively similar to each other, whereas the emotion state representation is more varied and task dependant. This not only allows us to use the same cGRU and sGRU weights across tasks, but would also allow us to train for multiple tasks in parallel using a different eGRU for each task -giving us consistent and accurate predictions across multiple tasks. Analysis of such a network, and whether training for multiple tasks in parallel aids one another, has not been covered in this paper and is left to our future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this paper, we have presented an RNN architecture for multi-modal sentiment analysis and emotion detection in conversation. In contrast to the current state-of-the-art models, our model focuses on effectively capturing the context of a conversation and treats each modality independently, taking into account the information a particular modality is capable of holding. Our model consistently performs well on benchmark datasets such as CMU-MOSI and CMU-MOSEI in any multi-modal setting.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The model can be further extended to have better feature extractors, and increase both the number of modalities and the number of participants in the conversation. Due to the lack of availability of datasets consisting of these extensions with emotion or sentiment labels, we have left this to our future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "A basic model and training implementation of Multilogue-Net can be found at https://github.com/ amanshenoy/multilogue-net.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "t to s t,a,vt+1 on the basis of incoming utterance representations for each modality m T t \u2208 {t T t , a T t , v T t } and the context representations att t t , att a t , and att v t using", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Multimodal sentiment analysis with wordlevel fusion and reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Minghai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sen", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [ |
|
"Pu" |
|
], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tadas", |
|
"middle": [], |
|
"last": "Baltru\u0161aitis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Zadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louis-Philippe", |
|
"middle": [], |
|
"last": "Morency", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 19th ACM International Conference on Multimodal Interaction", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "163--171", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minghai Chen, Sen Wang, Paul Pu Liang, Tadas Bal- tru\u0161aitis, Amir Zadeh, and Louis-Philippe Morency. 2017. Multimodal sentiment analysis with word- level fusion and reinforcement learning. In Proceed- ings of the 19th ACM International Conference on Multimodal Interaction, pages 163-171.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Empirical evaluation of gated recurrent neural networks on sequence modeling", |
|
"authors": [ |
|
{ |
|
"first": "Junyoung", |
|
"middle": [], |
|
"last": "Chung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caglar", |
|
"middle": [], |
|
"last": "Gulcehre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junyoung Chung, Caglar Gulcehre, KyungHyun Cho, and Y. Bengio. 2014. Empirical evaluation of gated recurrent neural networks on sequence modeling.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Semantic audiovisual data fusion for automatic emotion recognition", |
|
"authors": [ |
|
{ |
|
"first": "Dragos", |
|
"middle": [], |
|
"last": "Datcu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L\u00e9on", |
|
"middle": [], |
|
"last": "Rothkrantz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1002/9781118910566.ch16" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dragos Datcu and L\u00e9on Rothkrantz. 2012. Semantic audiovisual data fusion for automatic emotion recog- nition.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Facial expressions of emotion: An old controversy and new findings: Discussion", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Ekman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edmund", |
|
"middle": [], |
|
"last": "Rolls", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Perrett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Ellis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "Royal Society of London Philosophical Transactions Series B", |
|
"volume": "335", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1098/rstb.1992.0008" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Ekman, Edmund Rolls, David Perrett, and H. Ellis. 1992. Facial expressions of emotion: An old con- troversy and new findings: Discussion. Royal Soci- ety of London Philosophical Transactions Series B, 335:69-.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "opensmile -the munich versatile and fast open-source audio feature extractor", |
|
"authors": [ |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Eyben", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "W\u00f6llmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bj\u00f6rn", |
|
"middle": [], |
|
"last": "Schuller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1459--1462", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/1873951.1874246" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Florian Eyben, Martin W\u00f6llmer, and Bj\u00f6rn Schuller. 2010. opensmile -the munich versatile and fast open-source audio feature extractor. pages 1459- 1462.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Contextual inter-modal attention for multi-modal sentiment analysis", |
|
"authors": [ |
|
{ |

"first": "Deepanway", |

"middle": [], |

"last": "Ghosal", |

"suffix": "" |

}, |

{ |

"first": "Md Shad", |

"middle": [], |

"last": "Akhtar", |

"suffix": "" |

}, |

{ |

"first": "Dushyant", |

"middle": [], |

"last": "Chauhan", |

"suffix": "" |

}, |

{ |

"first": "Soujanya", |

"middle": [], |

"last": "Poria", |

"suffix": "" |

}, |

{ |

"first": "Asif", |

"middle": [], |

"last": "Ekbal", |

"suffix": "" |

}, |

{ |

"first": "Pushpak", |

"middle": [], |

"last": "Bhattacharyya", |

"suffix": "" |

} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3454--3466", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deepanway Ghosal, Md Shad Akhtar, Dushyant Chauhan, Soujanya Poria, and Pushpak Bhat- tacharyya Asif Ekbal. 2018. Contextual inter-modal attention for multi-modal sentiment analysis. In Pro- ceedings of the 2018 Conference on Empirical Meth- ods in Natural Language Processing, pages 3454- 3466.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Conversational memory network for emotion recognition in dyadic dialogue videos", |
|
"authors": [ |
|
{ |
|
"first": "Devamanyu", |
|
"middle": [], |
|
"last": "Hazarika", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soujanya", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Zadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louis-Philippe", |
|
"middle": [], |
|
"last": "Morency", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roger", |
|
"middle": [], |
|
"last": "Zimmermann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "2018", |
|
"issue": "", |
|
"pages": "2122--2132", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1193" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Devamanyu Hazarika, Soujanya Poria, Amir Zadeh, Erik Cambria, Louis-Philippe Morency, and Roger Zimmermann. 2018. Conversational memory net- work for emotion recognition in dyadic dialogue videos. volume 2018, pages 2122-2132.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "3d convolutional neural networks for human action recognition", |
|
"authors": [ |
|
{ |
|
"first": "Shuiwang", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [ |
|
"Yu" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "35", |
|
"issue": "", |
|
"pages": "495--502", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TPAMI.2012.59" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shuiwang Ji, Wei Xu, Ming Yang, and Kai Yu. 2010. 3d convolutional neural networks for human action recognition. volume 35, pages 495-502.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Large-scale video classification with convolutional neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Andrej", |
|
"middle": [], |
|
"last": "Karpathy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Toderici", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanketh", |
|
"middle": [], |
|
"last": "Shetty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Leung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Sukthankar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Fei-Fei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1725--1732", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/CVPR.2014.223" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrej Karpathy, George Toderici, Sanketh Shetty, Thomas Leung, Rahul Sukthankar, and Li Fei-Fei. 2014. Large-scale video classification with convolu- tional neural networks. pages 1725-1732.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Dialoguernn: An attentive rnn for emotion detection in conversations", |
|
"authors": [ |
|
{ |
|
"first": "Navonil", |
|
"middle": [], |
|
"last": "Majumder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soujanya", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devamanyu", |
|
"middle": [], |
|
"last": "Hazarika", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Gelbukh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Navonil Majumder, Soujanya Poria, Devamanyu Haz- arika, Rada Mihalcea, Alexander Gelbukh, and Erik Cambria. 2018. Dialoguernn: An attentive rnn for emotion detection in conversations.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christoper", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "14", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/D14-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christoper Manning. 2014. Glove: Global vectors for word rep- resentation. volume 14, pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Affective computing: From laughter to ieee", |
|
"authors": [ |
|
{ |
|
"first": "Rosalind", |
|
"middle": [], |
|
"last": "Picard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "IEEE Transactions on Affective Computing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "11--17", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/T-AFFC.2010.10" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rosalind Picard. 2010. Affective computing: From laughter to ieee. IEEE Transactions on Affective Computing, 1:11-17.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Context-dependent sentiment analysis in user-generated videos", |
|
"authors": [ |
|
{ |
|
"first": "Soujanya", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devamanyu", |
|
"middle": [], |
|
"last": "Hazarika", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Navonil", |
|
"middle": [], |
|
"last": "Majumder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Zadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louis-Philippe", |
|
"middle": [], |
|
"last": "Morency", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "873--883", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Soujanya Poria, Erik Cambria, Devamanyu Hazarika, Navonil Majumder, Amir Zadeh, and Louis-Philippe Morency. 2017. Context-dependent sentiment anal- ysis in user-generated videos. In Proceedings of the 55th Annual Meeting of the Association for Compu- tational Linguistics, pages 873-883.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Convolutional mkl based multimodal emotion recognition and sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Soujanya", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iti", |
|
"middle": [], |
|
"last": "Chaturvedi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Hussain", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "IEEE 16th International Conference on Data Mining (ICDM)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "439--448", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Soujanya Poria, Iti Chaturvedi, Erik Cambria, and Amir Hussain. 2016. Convolutional mkl based mul- timodal emotion recognition and sentiment analysis. In IEEE 16th International Conference on Data Min- ing (ICDM), pages 439-448.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Emotion recognition in conversation: Research challenges, datasets, and recent advances", |
|
"authors": [ |
|
{ |
|
"first": "Soujanya", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Navonil", |
|
"middle": [], |
|
"last": "Majumder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1905.02947" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Soujanya Poria, Navonil Majumder, Rada Mihalcea, and Eduard Hovy. 2019. Emotion recognition in conversation: Research challenges, datasets, and re- cent advances. arXiv:1905.02947.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Facial expression analysis with affdex and facet: A validation study", |
|
"authors": [ |
|
{ |
|
"first": "Sabrina", |
|
"middle": [], |
|
"last": "St\u00f6ckli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Schulte-Mecklenbeck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Borer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Samson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Behavior Research Methods", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3758/s13428-017-0996-1" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sabrina St\u00f6ckli, Michael Schulte-Mecklenbeck, Stefan Borer, and Andrea Samson. 2017. Facial expression analysis with affdex and facet: A validation study. Behavior Research Methods, 50.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Tensor fusion network for multimodal sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Zadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minghai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soujanya", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |

{ |

"first": "Erik", |

"middle": [], |

"last": "Cambria", |

"suffix": "" |

}, |

{ |

"first": "Louis-Philippe", |

"middle": [], |

"last": "Morency", |

"suffix": "" |

} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1103--1114", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amir Zadeh, Minghai Chen, Soujanya Poria, Erik Cam- bria, and Louis-Philippe Morency. 2017. Tensor fusion network for multimodal sentiment analysis. In Proceedings of the 2017 Conference on Empiri- cal Methods in Natural Language Processing, pages 1103-1114.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Navonil Mazumder, Soujanya Poria, Erik Cambria, and Louis-Philippe Morency. 2018a. Memory fusion network for multi-view sequential learning", |
|
"authors": [ |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Zadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |

{ |

"first": "Navonil", |

"middle": [], |

"last": "Mazumder", |

"suffix": "" |

}, |

{ |

"first": "Soujanya", |

"middle": [], |

"last": "Poria", |

"suffix": "" |

}, |

{ |

"first": "Erik", |

"middle": [], |

"last": "Cambria", |

"suffix": "" |

}, |

{ |

"first": "Louis-Philippe", |

"middle": [], |

"last": "Morency", |

"suffix": "" |

} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amir Zadeh, Paul Liang, Navonil Mazumder, Soujanya Poria, Erik Cambria, and Louis-Philippe Morency. 2018a. Memory fusion network for multi-view se- quential learning.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Multimodal language analysis in the wild: Cmu-mosei dataset and interpretable dynamic fusion graph", |
|
"authors": [ |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Zadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soujanya", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |

{ |

"first": "Erik", |

"middle": [], |

"last": "Cambria", |

"suffix": "" |

}, |

{ |

"first": "Louis-Philippe", |

"middle": [], |

"last": "Morency", |

"suffix": "" |

} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2236--2246", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1208" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amir Zadeh, Paul Liang, Soujanya Poria, Erik Cam- bria, and Louis-Philippe Morency. 2018b. Multi- modal language analysis in the wild: Cmu-mosei dataset and interpretable dynamic fusion graph. pages 2236-2246.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Multi-attention recurrent network for human communication comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Zadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soujanya", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prateek", |
|
"middle": [], |
|
"last": "Vij", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louis-Philippe", |
|
"middle": [], |
|
"last": "Morency", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amir Zadeh, Paul Liang, Soujanya Poria, Prateek Vij, Erik Cambria, and Louis-Philippe Morency. 2018c. Multi-attention recurrent network for human com- munication comprehension. Proceedings of the 2018 AAAI Conference on Artificial Intelligence, 2018.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Mosi: Multimodal corpus of sentiment intensity and subjectivity analysis in online opinion videos", |
|
"authors": [ |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Zadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rowan", |
|
"middle": [], |
|
"last": "Zellers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eli", |
|
"middle": [], |
|
"last": "Pincus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louis-Philippe", |
|
"middle": [], |
|
"last": "Morency", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amir Zadeh, Rowan Zellers, Eli Pincus, and Louis- Philippe Morency. 2016a. Mosi: Multimodal cor- pus of sentiment intensity and subjectivity analysis in online opinion videos.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Multimodal sentiment intensity analysis in videos: Facial gestures and verbal messages", |
|
"authors": [ |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Zadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rowan", |
|
"middle": [], |
|
"last": "Zellers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eli", |
|
"middle": [], |
|
"last": "Pincus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louis-Philippe", |
|
"middle": [], |
|
"last": "Morency", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Intelligent Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "82--88", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amir Zadeh, Rowan Zellers, Eli Pincus, and Louis- Philippe Morency. 2016b. Multimodal sentiment in- tensity analysis in videos: Facial gestures and verbal messages. Intelligent Systems, IEEE, pages 82-88.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"text": "Description of all the state updates at timestamp t for a single participant p 1", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"text": "State updates and final prediction output in a conversation between two participants p 1 and p 2 , where the updates of each participant at a timestamp is as given infigure 1", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"text": "Multilogue-Net performance on CMU-MOSI in comparison with the current and previous state-ofthe-art on the dataset. A2 indicating accuracy with 2 classes, and F1 indicating F1 score .", |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"text": "72.8 69.1 76.6 62.0 89.9 66.3 66.3 60.4 66.9 53.7 85.5 Multilogue-Net 83.1 80.9 90.3 87.3 89.7 87.0 70.0 68.4 76.1 74.5 87.4 84.0", |
|
"content": "<table><tr><td>shows the performance of Multilogue-</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"text": "Multilogue-Net performance on MOSEI Emotion Labels compared with that of Graph-MFN on weighted accuracy and F1 score. MOSEI Emotion label results were presented by only one model, and comprehensive results have not been published for the same.", |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF7": { |
|
"text": "Euclidean Distance between the same representations for Sentiment Regression as compared to Emotion Detection. (Distances have been converted to units for convenience and easier comparison)", |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |