{
"paper_id": "2020",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T12:10:44.936999Z"
},
"title": "Does History Matter? Using Narrative Context to Predict the Trajectory of Sentence Sentiment",
"authors": [
{
"first": "Liam",
"middle": [],
"last": "Watson",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Electrical Engineering and Computer Science Queen's University Belfast",
"location": {
"country": "Northern Ireland"
}
},
"email": "[email protected]"
},
{
"first": "Anna",
"middle": [],
"last": "Jurek-Loughrey",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Electrical Engineering and Computer Science Queen's University Belfast",
"location": {
"country": "Northern Ireland"
}
},
"email": ""
},
{
"first": "Barry",
"middle": [],
"last": "Devereux",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Electrical Engineering and Computer Science Queen's University Belfast",
"location": {
"country": "Northern Ireland"
}
},
"email": "[email protected]"
},
{
"first": "Brian",
"middle": [],
"last": "Murphy",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Electrical Engineering and Computer Science Queen's University Belfast",
"location": {
"country": "Northern Ireland"
}
},
"email": "[email protected]"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "While there is a rich literature on the tracking of sentiment and emotion in texts, modelling the emotional trajectory of longer narratives, such as literary texts, poses new challenges. Previous work in the area of sentiment analysis has focused on using information from within a sentence to predict a valence value for that sentence. We propose to explore the influence of previous sentences on the sentiment of a given sentence. In particular, we investigate whether information present in a history of previous sentences can be used to predict a valence value for the following sentence. We explored both linear and non-linear models applied with a range of different feature combinations. We also looked at different context history sizes to determine what range of previous sentence context was the most informative for our models. We establish a linear relationship between sentence context history and the valence value of the current sentence and demonstrate that sentences in closer proximity to the target sentence are more informative. We show that the inclusion of semantic word embeddings further enriches our model predictions.",
"pdf_parse": {
"paper_id": "2020",
"_pdf_hash": "",
"abstract": [
{
"text": "While there is a rich literature on the tracking of sentiment and emotion in texts, modelling the emotional trajectory of longer narratives, such as literary texts, poses new challenges. Previous work in the area of sentiment analysis has focused on using information from within a sentence to predict a valence value for that sentence. We propose to explore the influence of previous sentences on the sentiment of a given sentence. In particular, we investigate whether information present in a history of previous sentences can be used to predict a valence value for the following sentence. We explored both linear and non-linear models applied with a range of different feature combinations. We also looked at different context history sizes to determine what range of previous sentence context was the most informative for our models. We establish a linear relationship between sentence context history and the valence value of the current sentence and demonstrate that sentences in closer proximity to the target sentence are more informative. We show that the inclusion of semantic word embeddings further enriches our model predictions.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "The experience of emotion plays a major role in the way people understand and engage with stories. In works of literary fiction, it is the affective trajectory of the story (the emotional journey that the reader is taken on) that propels the plot forward. People read stories because they are emotionally invested in the fates of the characters. In Natural Language Processing (NLP), there is a rich literature on using lexical, semantic and structural information to infer an emotional tag or value for sentences and short passages (Pang et al., 2008; Cambria, 2016; Mohammad, 2016; Liu, 2010) . However, modelling the emotional trajectory of narratives poses new challenges -a model must be able to account for both the long distance effects of previous discourse on the reader, and the contextually subtle ways in which the high-level information conveyed by a text can influence the reader's emotional state.",
"cite_spans": [
{
"start": 533,
"end": 552,
"text": "(Pang et al., 2008;",
"ref_id": "BIBREF13"
},
{
"start": 553,
"end": 567,
"text": "Cambria, 2016;",
"ref_id": "BIBREF0"
},
{
"start": 568,
"end": 583,
"text": "Mohammad, 2016;",
"ref_id": "BIBREF11"
},
{
"start": 584,
"end": 594,
"text": "Liu, 2010)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "The field of sentiment analysis (i.e. the task of \"automatically determining valence, emotions, and other affectual states from text\" (Mohammad, 2016) ) has begun to answer the question of how we can evaluate the emotional content of text, particularly with regard to commercial domains and social media. For example, work on sentiment analysis has focused on product or movie reviews (Mohammad, 2016; Liu, 2010; Socher et al., 2013; Tai et al., 2015) or on the analysis of twitter feeds (Liu, 2010; Zimbra et al., 2018) . Recent work using deep learning, and in particular recurrent neural networks (RNN) such as Long Short-Term Memory (LSTM) networks (Hochreiter and Schmidhuber, 1997) , and Transformer networks (Vaswani et al., 2017) has facilitated a significant increase in the performance of sentiment classification of texts and, given the ability of such networks to represent information over long sequences (Socher et al., 2013; Tai et al., 2015; Jiang et al., 2019) , they show particular promise for modelling high-level properties of natural discourse, such as literary texts. Most of the work on sentiment analysis makes use of large, readily available corpora of labelled data, which contain short samples of text (e.g. tweets or movie reviews) and associated explicit rating values (e.g. 5-star rating systems for movie and product reviews, or emoticons or hashtags used to summarise or emphasise the emotional content of a tweet (Liu, 2010; Mohammad, 2016; Socher et al., 2013; Tai et al., 2015) . However, no large dataset of literary text annotated for emotional content exists, and so in this study we start by developing a method which can learn to predict the emotional content at a particular point in a story given the preceding context and existing word-level resources (such as hand-tailored sentiment dictionaries, and corpus-derived word-embeddings). In particular, in order to determine how the sentiment of the text changes over time we must evaluate the sentiment of each new sentence as it arises within the context of the text that has come before. Our approach conceives the problem of modelling the emotional trajectory of narrative as consisting of two distinct questions:",
"cite_spans": [
{
"start": 134,
"end": 150,
"text": "(Mohammad, 2016)",
"ref_id": "BIBREF11"
},
{
"start": 385,
"end": 401,
"text": "(Mohammad, 2016;",
"ref_id": "BIBREF11"
},
{
"start": 402,
"end": 412,
"text": "Liu, 2010;",
"ref_id": "BIBREF7"
},
{
"start": 413,
"end": 433,
"text": "Socher et al., 2013;",
"ref_id": "BIBREF16"
},
{
"start": 434,
"end": 451,
"text": "Tai et al., 2015)",
"ref_id": "BIBREF17"
},
{
"start": 488,
"end": 499,
"text": "(Liu, 2010;",
"ref_id": "BIBREF7"
},
{
"start": 500,
"end": 520,
"text": "Zimbra et al., 2018)",
"ref_id": "BIBREF22"
},
{
"start": 653,
"end": 687,
"text": "(Hochreiter and Schmidhuber, 1997)",
"ref_id": "BIBREF2"
},
{
"start": 715,
"end": 737,
"text": "(Vaswani et al., 2017)",
"ref_id": "BIBREF18"
},
{
"start": 918,
"end": 939,
"text": "(Socher et al., 2013;",
"ref_id": "BIBREF16"
},
{
"start": 940,
"end": 957,
"text": "Tai et al., 2015;",
"ref_id": "BIBREF17"
},
{
"start": 958,
"end": 977,
"text": "Jiang et al., 2019)",
"ref_id": "BIBREF4"
},
{
"start": 1447,
"end": 1458,
"text": "(Liu, 2010;",
"ref_id": "BIBREF7"
},
{
"start": 1459,
"end": 1474,
"text": "Mohammad, 2016;",
"ref_id": "BIBREF11"
},
{
"start": 1475,
"end": 1495,
"text": "Socher et al., 2013;",
"ref_id": "BIBREF16"
},
{
"start": 1496,
"end": 1513,
"text": "Tai et al., 2015)",
"ref_id": "BIBREF17"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "1. Can the sentiment of a given sentence be determined by a previous history of sentences?",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "2. How much history should be included to be optimally informative?",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "We focus on modelling emotional valence at the sentence level. Explicitly, we model the valence of any given sentence in a sequence of sentences making up a narrative using the preceding context. We explore various sizes of sentence history context window and the effects of incorporating semantic information through the inclusion of pretrained word embeddings of various dimensions. To our knowledge, very little previous work has directly examined the influence of sentence history on the current sentence's valence as we do in this paper. Jockers (2015) takes a simple sum of word valences as representative of sentence valence and then employs a number of different smoothing functions to allow for the effects of history. takes a mean of all word valence values as representative of the valence value for different chunks of text (e.g. sentence, paragraph, and chapter-level chunks). In this work, we choose sentence-level sentiment as the best basic unit of measurement for emotional content. We model sentencelevel valence using a lexicon of sentiment , where the sentence-level valence is estimated as the mean of the sentence's word valences as found in the lexicon. While we are aware that a sentence valence rating based on a mean of the constituent word ratings taken from a lexicon is not state-of-the-art in sentiment analysis, the approach is validated by work in psychology Whissell, 2003; Bestgen, 1994) and offers a computationally inexpensive way to begin this exploratory work, in the absence of large labelled datasets.",
"cite_spans": [
{
"start": 1391,
"end": 1406,
"text": "Whissell, 2003;",
"ref_id": "BIBREF20"
},
{
"start": 1407,
"end": 1421,
"text": "Bestgen, 1994)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "Most work in the field of sentiment analysis has focused on product reviews, tweets, and emails, and has been focused on determining opinions towards certain targets (e.g. the new iPhone, or President Obama) (Mohammad, 2016; Liu, 2010; Mohammad et al., 2013) . Liu (2010) surveys the field of sentiment analysis with a focus on opinion mining -determining users opinions about goods or services by analyzing reviews. Mohammad et al. (2013) trained two SVM classifiers for two different sentiment tasks; the first of these was a message level sentiment prediction task and the second a term-level task. They achieved state-of-theart performance on both tasks using two lexicons generated from tweets (the first using tweets with sentiment hashtags to generate the lexicon, the second using tweets with emoticons). The use of such lexicons of affect, where each entry is annotated with a valence value, is commonplace in sentiment analysis. As well being automatically generated, as in the tweet lexicons (Mohammad, 2016) , lexicons may also be created by human annotation (usually gathered using online tools such as Mechanical Turk). There are several prominent sentiment lexicons that differ in their contents and methods of compilation. The NRC Emotion Lexicon, known as Emolex (Mohammad and Turney, 2010) , is a list of 14,182 English words and their associations with eight basic emotions (anger, fear, anticipation, trust, surprise, sadness, joy, and disgust) and two sentiments (negative and positive). The terms in EmoLex are carefully chosen to include some of the most frequent English nouns, verbs, adjectives, and adverbs. The Opinion Lexicon (Liu et al., 2005) consists of a list of 6800 positive and negative sentiment words. This lexicon only consists of words believed to be associated with either polarity and does not contain any neutral words. AFINN (Nielsen, 2011 ) is a list of English words rated for valence on a scale of -5 (negative) to +5(positive). The words were manually labeled by Finn\u00c5rup Nielsen (the author) in 2009-2011. There are two versions of this lexicon -AFINN-96 (1468 unique words and phrases) and AFINN-111 (the newest version with 2477 words and phrases). There are also lexicons available from studies on emotion in psychol-ogy, most notably the Revised Dictionary of Affect in Language (DAL) ). Whissell's DAL consists of 8742 English words which have been rated for their activation, evaluation and imagery. Each of these dimensions was rated along a three point scale: (1) Unpleasant, (2) In between, (3) Pleasant; (1) Passive, (2) In between, (3) Active;",
"cite_spans": [
{
"start": 208,
"end": 224,
"text": "(Mohammad, 2016;",
"ref_id": "BIBREF11"
},
{
"start": 225,
"end": 235,
"text": "Liu, 2010;",
"ref_id": "BIBREF7"
},
{
"start": 236,
"end": 258,
"text": "Mohammad et al., 2013)",
"ref_id": "BIBREF9"
},
{
"start": 261,
"end": 271,
"text": "Liu (2010)",
"ref_id": "BIBREF7"
},
{
"start": 417,
"end": 439,
"text": "Mohammad et al. (2013)",
"ref_id": "BIBREF9"
},
{
"start": 1003,
"end": 1019,
"text": "(Mohammad, 2016)",
"ref_id": "BIBREF11"
},
{
"start": 1280,
"end": 1307,
"text": "(Mohammad and Turney, 2010)",
"ref_id": "BIBREF8"
},
{
"start": 1654,
"end": 1672,
"text": "(Liu et al., 2005)",
"ref_id": "BIBREF6"
},
{
"start": 1868,
"end": 1882,
"text": "(Nielsen, 2011",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related work",
"sec_num": "2."
},
{
"text": "(1) Hard to imaging, (2) In between, (3) Easy to imagine. It was comprised of frequently occurring words in a number of sources including an established corpus of 1,000,000 words (Francis and Kucera, 1979) , samples of writing generated by adolescents, and juvenile literature. When tested against a corpus of 350,000 English words gathered from many different sources, the DAL demonstrated a matching rate of 90%, suggesting that we can expect 9 out of every 10 words in any given English language text to have rating data in DAL (Whissell, 2009) . There is some work to demonstrate that there is a correlation between these lexical affective word ratings and subjective passage ratings (Bestgen, 1994; Whissell, 2003; Hsu et al., 2015) . However, these studies have relied on carefully chosen text inputs and have avoided complicating issues such as negation and irony, etc., which are commonplace in natural discourse.",
"cite_spans": [
{
"start": 179,
"end": 205,
"text": "(Francis and Kucera, 1979)",
"ref_id": "BIBREF1"
},
{
"start": 531,
"end": 547,
"text": "(Whissell, 2009)",
"ref_id": null
},
{
"start": 688,
"end": 703,
"text": "(Bestgen, 1994;",
"ref_id": null
},
{
"start": 704,
"end": 719,
"text": "Whissell, 2003;",
"ref_id": "BIBREF20"
},
{
"start": 720,
"end": 737,
"text": "Hsu et al., 2015)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related work",
"sec_num": "2."
},
{
"text": "While there have been a few studies into emotion in literary texts (Bestgen, 1994; Mohammad, 2012; Whissell, 2003; Hsu et al., 2015) , these have largely focused on detecting discrete emotions (love, anger, fear etc.) and centred almost exclusively on classifying texts (or sections of text) into these discrete groups. Mohammad (2012) compared the polarity and emotional word density (defined as the number of emotion words per X-words) of novels and fairy tales in English. Using the NRC Emotion lexicon, Mohammad and Turney (2010) labelled words in novels and fairy tales with polarity and discreet emotions such as joy, sadness, and so on. They then used an emotion analyser tool to make certain inferences from the data; for example, counting the instances of words related to particular emotions, and comparing the emotional distributions of different words across different genres. However, this work focused on discreet emotions (joy, anger, etc.) using associated emotion words, which can enlighten us in terms of literary criticism or text classification, summarization, etc., but which are not sufficient to help us to effectively model the emotion of a text in a way comparable to how a person experiences it over time as a story unfolds, or how it is constructed in the brain. Reagan et al. (2016) investigated the emotional arcs of narrative fiction using a sliding window of sentences. What all of the aforementioned approaches have in common is that they consider the task of investigating valence and emotion in literature as a classification problem. The goal is to assign a given text or segment of text with a valence label which can then be used to derive some insight into the author's opinion regarding some product or issue, or to bring some quantitative insight to bear on studies in literary criticism. In this study, in contrast, we aim to model the changing experience of emotion during the course of reading a text. For this reason we frame the problem as a regression task, where we aim to predict a real number (measuring the degree of positive or negative emotion) for each sentence in the sequence of sentences making up the narrative.",
"cite_spans": [
{
"start": 67,
"end": 82,
"text": "(Bestgen, 1994;",
"ref_id": null
},
{
"start": 83,
"end": 98,
"text": "Mohammad, 2012;",
"ref_id": "BIBREF10"
},
{
"start": 99,
"end": 114,
"text": "Whissell, 2003;",
"ref_id": "BIBREF20"
},
{
"start": 115,
"end": 132,
"text": "Hsu et al., 2015)",
"ref_id": "BIBREF3"
},
{
"start": 1290,
"end": 1310,
"text": "Reagan et al. (2016)",
"ref_id": "BIBREF15"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related work",
"sec_num": "2."
},
{
"text": "We aim to predict the valence of each sentence using information extracted from the history preceding that sentence. For this purpose, we train machine learning models that assign an emotion value to each sentence given information available in the preceding context. There are three key challenges that need to be addressed. First, identifying the features of the preceding context that are relevant to this sentence-by-sentence valence assignment task. Second, identifying what size of context history is most informative. And third, determining the type of machine learning model which performs best in predicting these sentence valences. As a first step, we investigate the degree to which the relationship between current sentence valence and sentence context history information can be modelled using linear methods. We apply two models to this task -linear regression and a linear support vector regressor. In the second part of the study, we investigate whether the application of non-linear methods to the same feature sets can better model the relationship between the sentence context history and the current sentence valence. We implement these non-linear models using a random forest regressor.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Methodology",
"sec_num": "3."
},
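As a rough illustration of the modelling setup described above (not the authors' code), the following sketch fits a linear regression, a linear support vector regressor and a random forest regressor on the same feature matrix and compares their test-set R^2 scores with scikit-learn; the synthetic data and the default hyperparameters are assumptions made for the example only.

```python
# Sketch of the model comparison described above. Synthetic features and
# targets stand in for the real sentence-history features and DAL-derived
# valences; hyperparameters are illustrative assumptions, not the paper's.
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.svm import LinearSVR

rng = np.random.default_rng(0)
X = rng.normal(size=(2000, 60))                               # stand-in feature matrix
y = X[:, :10].mean(axis=1) + 0.5 * rng.normal(size=2000)      # stand-in valence targets
X_train, X_test, y_train, y_test = X[:1500], X[1500:], y[:1500], y[1500:]

models = {
    "linear_regression": LinearRegression(),
    "linear_svr": LinearSVR(max_iter=10_000),
    "random_forest": RandomForestRegressor(n_estimators=100, random_state=0),
}
for name, model in models.items():
    model.fit(X_train, y_train)
    print(name, round(r2_score(y_test, model.predict(X_test)), 3))
```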
{
"text": "To train these models we explore a number of different feature combinations, to determine which kinds of information are most important for predicting sentence-level valence.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Methodology",
"sec_num": "3."
},
{
"text": "We explore the scope of context relevant to inferring sentence valence, investigating different sizes of sentence context history and a variety of feature sets of different dimensionalities. This first stage of our study therefore focuses on the exploration of eighteen different feature sets combined in the following ways: (1) a history of sentence valence scores only (over a number of history window sizes, spanning 10, 50 and 100 sentences), and (2) a history of sentence valence combined with semantic information (i.e. pre-trained semantic word embeddings in the form of 50, 100, 200 and 300 dimension GloVe word embeddings (Pennington et al., 2014) , and 300 dimension FastText word embeddings (trained on subword information) (Bojanowski et al., 2017) again over the same number of context history window sizes (10, 50 and 100 sentences). The 18 different feature set combinations investigated correspond to the rows of the results table below (Table 1) .",
"cite_spans": [
{
"start": 631,
"end": 656,
"text": "(Pennington et al., 2014)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [
{
"start": 953,
"end": 962,
"text": "(Table 1)",
"ref_id": null
}
],
"eq_spans": [],
"section": "Methodology",
"sec_num": "3."
},
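The feature construction described above might look something like the sketch below: for each target sentence, the valences of the preceding H sentences are concatenated with the mean pre-trained word embedding of that history window. This is an assumed layout rather than the authors' implementation, and the random word_vec lookup merely stands in for real GloVe or FastText vectors.

```python
# Sketch of one plausible feature layout for the history-based feature sets.
import numpy as np

rng = np.random.default_rng(0)
DIM = 50                                    # e.g. 50-dimension GloVe
_emb_cache = {}                             # word -> vector (stand-in lookup)

def word_vec(word):
    # Placeholder for a real GloVe/FastText lookup.
    if word not in _emb_cache:
        _emb_cache[word] = rng.normal(size=DIM)
    return _emb_cache[word]

def sentence_vec(sentence):
    # Mean word embedding of one sentence.
    return np.mean([word_vec(w) for w in sentence.lower().split()], axis=0)

def build_features(sentences, valences, history=10, use_embeddings=True):
    """X[i] describes the `history` sentences preceding sentence i; y[i] is its valence."""
    X, y = [], []
    for i in range(history, len(sentences)):
        feats = list(valences[i - history:i])                 # valence history
        if use_embeddings:
            ctx = np.mean([sentence_vec(s) for s in sentences[i - history:i]], axis=0)
            feats.extend(ctx)                                 # averaged context embedding
        X.append(feats)
        y.append(valences[i])
    return np.array(X), np.array(y)
```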
{
"text": "Project Gutenberg (https://www.gutenberg.org/) provides access to thousands of public domain books (copyright expired) in plain-text format. We selected a corpus of 100 books (643,352 sentences) in total. We split these, by book, into 72 training texts (476,891 sentences, 74% of our corpus) and 28 test texts (166461 sentences, 26% of our corpus). The texts were split in this way to preserve the natural boundaries between books. These books were chosen as they represent pieces of literary fiction for children which would be well in common narrative techniques such as the use of irony, metaphors and imagery, and creative language.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Text Used",
"sec_num": "4.1."
},
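A book-level split such as the one described above can be produced by assigning whole books to either partition before gathering their sentences, so that no book straddles the train/test boundary. The sketch below assumes one book identifier per sentence and an arbitrary shuffling seed.

```python
# Sketch of a book-level train/test split that preserves book boundaries.
import random

def split_by_book(book_ids, n_train_books=72, seed=0):
    """book_ids: one book identifier per sentence, in corpus order."""
    books = sorted(set(book_ids))
    random.Random(seed).shuffle(books)
    train_books = set(books[:n_train_books])
    train_idx = [i for i, b in enumerate(book_ids) if b in train_books]
    test_idx = [i for i, b in enumerate(book_ids) if b not in train_books]
    return train_idx, test_idx
```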
{
"text": "These are important features of literary language which can prove challenging for sentiment analysis systems based on a simple literal interpretation of sentences.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Text Used",
"sec_num": "4.1."
},
{
"text": "In training our models, we used information about emotional content derived from Whissell (1989) 's Dictionary of Affect in Language (the Revised DAL) , discussed in Section 2. We generated sentence-by-sentence valence ratings for our target texts using the Whissell lexicon. The valence for each sentence is estimated by averaging over the valence values for the constituent words in the sentence. We then took these sentence-level valence ratings as the target values we hoped to predict.",
"cite_spans": [
{
"start": 81,
"end": 96,
"text": "Whissell (1989)",
"ref_id": "BIBREF19"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Lexicons and lexical embeddings",
"sec_num": "4.2."
},
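The sentence-level valence targets described above are the mean of the constituent words' lexicon valences. A minimal sketch is given below; the toy LEXICON stands in for the Revised DAL (whose ratings are not reproduced here), the tokeniser is deliberately simple, and unrated words are skipped.

```python
# Sketch of lexicon-based sentence valence: the mean rating of the
# sentence's words that appear in an affect lexicon.
import re

LEXICON = {  # hypothetical ratings for illustration only
    "happy": 2.8, "bright": 2.5, "dark": 1.3, "sad": 1.1, "storm": 1.4,
}

def sentence_valence(sentence, lexicon=LEXICON, default=None):
    """Average lexicon valence over the words of one sentence."""
    words = re.findall(r"[a-z']+", sentence.lower())
    rated = [lexicon[w] for w in words if w in lexicon]
    return sum(rated) / len(rated) if rated else default

story = [
    "The morning was bright and everyone felt happy.",
    "Then a dark storm rolled in and the town grew sad.",
]
print([sentence_valence(s) for s in story])   # [2.65, 1.2666666666666666]
```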
{
"text": "We explored three different machine learning models: Linear Regression, Linear Support Vector Regression and Random Forest Regression. The results from these models (R 2 values for predictions on the test set) are displayed in Table 1 below. We also present two figures which each illustrate different patterns observable from the data. Figure 1 illustrates the difference in performance of each of the machine learning models tested, across each of the different context windows. Figure 2 shows the difference in performance on each feature set across all of the models tested. ",
"cite_spans": [],
"ref_spans": [
{
"start": 227,
"end": 235,
"text": "Table 1",
"ref_id": null
},
{
"start": 338,
"end": 346,
"text": "Figure 1",
"ref_id": "FIGREF0"
},
{
"start": 482,
"end": 490,
"text": "Figure 2",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Results",
"sec_num": "5."
},
{
"text": "Our study has focused on two central questions -firstly, to establish whether linear or non-linear methods are best suited to modelling this type of relationship and, secondly, to determine what kind of features extracted from the historical content are the most effective in training the machine learning models. This second question of finding an optimal feature set can be sub-divided into two smaller problems: (a) assessing whether the inclusion of semantic information in the form of pre-trained word-embeddings adds more relevant information to the model training, and (b) determining if there is an optimal size of sentence history context that should be included to generate the best predictions for each model. From the results presented in Table 1 , we can see that there is a small linear relationship between sentence valence history and the valence of the current sentence. This relationship is statistically significant at p = 0.0001. While these results clearly show that we have captured a real linear effect between valence history and current sentence valence, the magnitude of explained variance is small. The application of non-linear methods does not improve performance. However, we can discern an important pattern in these results regarding the influence of sentence history context on our model predictions. We can see from Table 1 that across all models and feature sets, the best results are generated using a sentence history context of 10 sentences, which confirms our intuition that sentences closer to the sentence being predicted should bear more on its valence value than sentences further back in the history. This information is summarised in Figure 1 where we have taken an average across all feature sets for each model to illustrate this trend. Figure 2 depicts a summarisation of the relative contribution of each of the feature sets averaged across all of the models implemented and all of the context history sizes employed. We can see from this illustration that while all of the feature sets ultimately result in models which exhibit similar performance, in general, the inclusion of the semantic word embeddings does add slightly to the predictive power of the models.",
"cite_spans": [],
"ref_spans": [
{
"start": 751,
"end": 758,
"text": "Table 1",
"ref_id": null
},
{
"start": 1350,
"end": 1357,
"text": "Table 1",
"ref_id": null
},
{
"start": 1679,
"end": 1687,
"text": "Figure 1",
"ref_id": "FIGREF0"
},
{
"start": 1784,
"end": 1792,
"text": "Figure 2",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Discussion",
"sec_num": "6."
},
{
"text": "In this paper we proposed to investigate whether information present in a history of previous sentences can be used to predict a valence value for the following sentence in context. We explored both linear and non-linear methods and a range of different feature combinations. We also looked at different context history sizes to determine what range of previous sentences was most informative for our models.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions and Future Work",
"sec_num": "7."
},
{
"text": "In conclusion, we have established a linear relationship between sentence context history and the valence value of the current sentence. We have demonstrated that the sentences in closer proximity to the target sentence are more informative. We have also shown that the inclusion of semantic word embeddings does seem to enrich our model predictions. We have therefore established a firm base for further explorations of valence in literature which should be characterised by further investigations of potentially optimally informative feature sets and the application of models capable of better capturing the complex, non-linearities inherent in literary text, such as LSTM artificial neural networks.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions and Future Work",
"sec_num": "7."
},
{
"text": "Bestgen, Y. (1994) . Can emotional valence in stories be determined from words? Cognition & Emotion, 8(1):21-36. Bojanowski, P., Grave, E., Joulin, A., and Mikolov, T. (2017) . Enriching word vectors with subword information. Transactions of the Association for Computational Linguistics, 5:135-146.",
"cite_spans": [
{
"start": 12,
"end": 18,
"text": "(1994)",
"ref_id": null
},
{
"start": 129,
"end": 174,
"text": "Grave, E., Joulin, A., and Mikolov, T. (2017)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Bibliographical References",
"sec_num": "8."
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Affective computing and sentiment analysis",
"authors": [
{
"first": "E",
"middle": [],
"last": "Cambria",
"suffix": ""
}
],
"year": 2016,
"venue": "IEEE Intelligent Systems",
"volume": "31",
"issue": "2",
"pages": "102--107",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Cambria, E. (2016). Affective computing and sentiment analysis. IEEE Intelligent Systems, 31(2):102-107.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Brown Corpus Manual: Manual of Information to accompany A Standard Corpus of Present-Day Edited American English, for use with Digital Computers",
"authors": [
{
"first": "W",
"middle": [
"N"
],
"last": "Francis",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "Kucera",
"suffix": ""
}
],
"year": 1979,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Francis, W. N. and Kucera, H. (1979). Brown Corpus Manual: Manual of Information to accompany A Stan- dard Corpus of Present-Day Edited American English, for use with Digital Computers. Department of Linguis- tics, Brown University, Providence, USA.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Long shortterm memory",
"authors": [
{
"first": "S",
"middle": [],
"last": "Hochreiter",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Schmidhuber",
"suffix": ""
}
],
"year": 1997,
"venue": "Neural computation",
"volume": "9",
"issue": "8",
"pages": "1735--1780",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hochreiter, S. and Schmidhuber, J. (1997). Long short- term memory. Neural computation, 9(8):1735-1780.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "The emotion potential of words and passages in reading harry potter-an fmri study",
"authors": [
{
"first": "C.-T",
"middle": [],
"last": "Hsu",
"suffix": ""
},
{
"first": "A",
"middle": [
"M"
],
"last": "Jacobs",
"suffix": ""
},
{
"first": "F",
"middle": [
"M"
],
"last": "Citron",
"suffix": ""
},
{
"first": "Conrad",
"middle": [],
"last": "",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "",
"suffix": ""
}
],
"year": 2015,
"venue": "Brain and language",
"volume": "142",
"issue": "",
"pages": "96--114",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hsu, C.-T., Jacobs, A. M., Citron, F. M., and Conrad, M. (2015). The emotion potential of words and passages in reading harry potter-an fmri study. Brain and language, 142:96-114.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Transformer based memory network for sentiment analysis of web comments",
"authors": [
{
"first": "M",
"middle": [],
"last": "Jiang",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "X",
"middle": [],
"last": "Shi",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Zhang",
"suffix": ""
}
],
"year": 2019,
"venue": "IEEE Access",
"volume": "7",
"issue": "",
"pages": "179942--179953",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jiang, M., Wu, J., Shi, X., and Zhang, M. (2019). Trans- former based memory network for sentiment analysis of web comments. IEEE Access, 7:179942-179953.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Revealing sentiment and plot arcs with the syuzhet package. (blog)",
"authors": [
{
"first": "M",
"middle": [
"L M"
],
"last": "Jockers",
"suffix": ""
}
],
"year": 2015,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jockers, M. L. M. (2015). Revealing sentiment and plot arcs with the syuzhet package. (blog), february 2, 2015.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Opinion observer: analyzing and comparing opinions on the web",
"authors": [
{
"first": "B",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Hu",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Cheng",
"suffix": ""
}
],
"year": 2005,
"venue": "WWW '05: Proceedings of the 14th international conference on World Wide Web",
"volume": "",
"issue": "",
"pages": "342--351",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Liu, B., Hu, M., and Cheng, J. (2005). Opinion observer: analyzing and comparing opinions on the web. In WWW '05: Proceedings of the 14th international conference on World Wide Web, pages 342-351, New York, NY, USA. ACM.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Sentiment analysis and subjectivity. Handbook of natural language processing",
"authors": [
{
"first": "B",
"middle": [],
"last": "Liu",
"suffix": ""
}
],
"year": 2010,
"venue": "",
"volume": "2",
"issue": "",
"pages": "627--666",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Liu, B. (2010). Sentiment analysis and subjectivity. Hand- book of natural language processing, 2(2010):627-666.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Emotions evoked by common words and phrases: Using mechanical turk to create an emotion lexicon",
"authors": [
{
"first": "S",
"middle": [],
"last": "Mohammad",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Turney",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the NAACL HLT 2010 Workshop on Computational Approaches to Analysis and Generation of Emotion in Text",
"volume": "",
"issue": "",
"pages": "26--34",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mohammad, S. and Turney, P. (2010). Emotions evoked by common words and phrases: Using mechanical turk to create an emotion lexicon. In Proceedings of the NAACL HLT 2010 Workshop on Computational Approaches to Analysis and Generation of Emotion in Text, pages 26- 34, Los Angeles, CA, June. Association for Computa- tional Linguistics.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Nrccanada: Building the state-of-the-art in sentiment analysis of tweets",
"authors": [
{
"first": "S",
"middle": [],
"last": "Mohammad",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Kiritchenko",
"suffix": ""
},
{
"first": "X",
"middle": [],
"last": "Zhu",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the Seventh International Workshop on Semantic Evaluation",
"volume": "2",
"issue": "",
"pages": "321--327",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mohammad, S., Kiritchenko, S., and Zhu, X. (2013). Nrc- canada: Building the state-of-the-art in sentiment analy- sis of tweets. In Second Joint Conference on Lexical and Computational Semantics (* SEM), Volume 2: Proceed- ings of the Seventh International Workshop on Semantic Evaluation (SemEval 2013), pages 321-327.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "From once upon a time to happily ever after: Tracking emotions in mail and books",
"authors": [
{
"first": "S",
"middle": [
"M"
],
"last": "Mohammad",
"suffix": ""
}
],
"year": 2012,
"venue": "Decision Support Systems",
"volume": "53",
"issue": "4",
"pages": "730--741",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mohammad, S. M. (2012). From once upon a time to hap- pily ever after: Tracking emotions in mail and books. Decision Support Systems, 53(4):730-741.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Sentiment analysis: Detecting valence, emotions, and other affectual states from text",
"authors": [
{
"first": "S",
"middle": [
"M"
],
"last": "Mohammad",
"suffix": ""
}
],
"year": 2016,
"venue": "Emotion measurement",
"volume": "",
"issue": "",
"pages": "201--237",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mohammad, S. M. (2016). Sentiment analysis: Detecting valence, emotions, and other affectual states from text. In Emotion measurement, pages 201-237. Elsevier.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "A new anew: Evaluation of a word list for sentiment analysis in microblogs",
"authors": [
{
"first": "F",
"middle": [
"A"
],
"last": "Nielsen",
"suffix": ""
}
],
"year": 2011,
"venue": "CEUR Workshop Proceedings",
"volume": "718",
"issue": "",
"pages": "93--98",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Nielsen, F. A. (2011). A new anew: Evaluation of a word list for sentiment analysis in microblogs. In Matthew Rowe, et al., editors, MSM, volume 718 of CEUR Work- shop Proceedings, pages 93-98. CEUR-WS.org.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Opinion mining and sentiment analysis",
"authors": [
{
"first": "B",
"middle": [],
"last": "Pang",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Lee",
"suffix": ""
}
],
"year": 2008,
"venue": "Foundations and Trends R in Information Retrieval",
"volume": "2",
"issue": "1-2",
"pages": "1--135",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Pang, B., Lee, L., et al. (2008). Opinion mining and senti- ment analysis. Foundations and Trends R in Information Retrieval, 2(1-2):1-135.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Glove: Global vectors for word representation",
"authors": [
{
"first": "J",
"middle": [],
"last": "Pennington",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Socher",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Manning",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
"volume": "",
"issue": "",
"pages": "1532--1543",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Pennington, J., Socher, R., and Manning, C. (2014). Glove: Global vectors for word representation. In Pro- ceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532- 1543, Doha, Qatar, October. Association for Computa- tional Linguistics.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "The emotional arcs of stories are dominated by six basic shapes",
"authors": [
{
"first": "A",
"middle": [
"J"
],
"last": "Reagan",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Mitchell",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Kiley",
"suffix": ""
},
{
"first": "C",
"middle": [
"M"
],
"last": "Danforth",
"suffix": ""
},
{
"first": "P",
"middle": [
"S"
],
"last": "Dodds",
"suffix": ""
}
],
"year": 2016,
"venue": "EPJ Data Science",
"volume": "5",
"issue": "1",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Reagan, A. J., Mitchell, L., Kiley, D., Danforth, C. M., and Dodds, P. S. (2016). The emotional arcs of stories are dominated by six basic shapes. EPJ Data Science, 5(1):31.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Recursive deep models for semantic compositionality over a sentiment treebank",
"authors": [
{
"first": "R",
"middle": [],
"last": "Socher",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Perelygin",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Chuang",
"suffix": ""
},
{
"first": "C",
"middle": [
"D"
],
"last": "Manning",
"suffix": ""
},
{
"first": "A",
"middle": [
"Y"
],
"last": "Ng",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Potts",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the 2013 conference on empirical methods in natural language processing",
"volume": "",
"issue": "",
"pages": "1631--1642",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Socher, R., Perelygin, A., Wu, J., Chuang, J., Manning, C. D., Ng, A. Y., and Potts, C. (2013). Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 conference on em- pirical methods in natural language processing, pages 1631-1642.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Improved semantic representations from tree-structured long short-term memory networks",
"authors": [
{
"first": "K",
"middle": [
"S"
],
"last": "Tai",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Socher",
"suffix": ""
},
{
"first": "C",
"middle": [
"D"
],
"last": "Manning",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing",
"volume": "1",
"issue": "",
"pages": "1556--1566",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tai, K. S., Socher, R., and Manning, C. D. (2015). Improved semantic representations from tree-structured long short-term memory networks. In Proceedings of the 53rd Annual Meeting of the Association for Computa- tional Linguistics and the 7th International Joint Confer- ence on Natural Language Processing (Volume 1: Long Papers), pages 1556-1566.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Attention is all you need",
"authors": [
{
"first": "A",
"middle": [],
"last": "Vaswani",
"suffix": ""
},
{
"first": "N",
"middle": [],
"last": "Shazeer",
"suffix": ""
},
{
"first": "N",
"middle": [],
"last": "Parmar",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Uszkoreit",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Jones",
"suffix": ""
},
{
"first": "A",
"middle": [
"N"
],
"last": "Gomez",
"suffix": ""
},
{
"first": "L",
"middle": [
"U"
],
"last": "Kaiser",
"suffix": ""
},
{
"first": "I",
"middle": [],
"last": "Polosukhin",
"suffix": ""
}
],
"year": 2017,
"venue": "Advances in Neural Information Processing Systems 30",
"volume": "",
"issue": "",
"pages": "5998--6008",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L. u., and Polosukhin, I. (2017). Attention is all you need. In I. Guyon, et al., editors, Advances in Neural Information Processing Sys- tems 30, pages 5998-6008. Curran Associates, Inc.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "The dictionary of affect in language",
"authors": [
{
"first": "C",
"middle": [
"M"
],
"last": "Whissell",
"suffix": ""
}
],
"year": 1989,
"venue": "The measurement of emotions",
"volume": "",
"issue": "",
"pages": "113--131",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Whissell, C. M. (1989). The dictionary of affect in lan- guage. In The measurement of emotions, pages 113-131. Elsevier.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "Readers' opinions of romantic poetry are consistent with emotional measures based on the dictionary of affect in language",
"authors": [
{
"first": "C",
"middle": [],
"last": "Whissell",
"suffix": ""
}
],
"year": 2003,
"venue": "Perceptual and motor skills",
"volume": "96",
"issue": "3",
"pages": "990--992",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Whissell, C. (2003). Readers' opinions of romantic po- etry are consistent with emotional measures based on the dictionary of affect in language. Perceptual and motor skills, 96(3):990-992.",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "Whissell's dictionary of affect in language: Technical manual and user's guide",
"authors": [
{
"first": "C",
"middle": [],
"last": "Whissell",
"suffix": ""
}
],
"year": 2010,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Whissell, C. (2010). Whissell's dictionary of affect in lan- guage: Technical manual and user's guide. Laurentian University.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "The state-of-the-art in twitter sentiment analysis: A review and benchmark evaluation",
"authors": [
{
"first": "D",
"middle": [],
"last": "Zimbra",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Abbasi",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Zeng",
"suffix": ""
},
{
"first": "Chen",
"middle": [],
"last": "",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "",
"suffix": ""
}
],
"year": 2018,
"venue": "ACM Transactions on Management Information Systems (TMIS)",
"volume": "9",
"issue": "2",
"pages": "1--29",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Zimbra, D., Abbasi, A., Zeng, D., and Chen, H. (2018). The state-of-the-art in twitter sentiment analysis: A re- view and benchmark evaluation. ACM Transactions on Management Information Systems (TMIS), 9(2):1-29.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"uris": null,
"text": "Performance (R 2 values on the test set) of all machine learning models across all context sizes, averaged over all feature sets.",
"num": null,
"type_str": "figure"
},
"FIGREF1": {
"uris": null,
"text": "Contribution of each different feature combination to model performance; averaged over all model sets.",
"num": null,
"type_str": "figure"
}
}
}
}