|
{ |
|
"paper_id": "D08-1015", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:30:39.966755Z" |
|
}, |
|
"title": "Ranking Reader Emotions Using Pairwise Loss Minimization and Emotional Distribution Regression", |
|
"authors": [ |
|
{

"first": "Kevin",

"middle": [

"Hsin-Yih"

],

"last": "Lin",

"suffix": "",

"affiliation": {

"laboratory": "",

"institution": "National Taiwan University",

"location": {

"addrLine": "No. 1 Roosevelt Rd. Sec. 4",

"settlement": "Taipei",

"country": "Taiwan"

}

},

"email": ""

},
|
{ |
|
"first": "Hsin-Hsi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National Taiwan University No", |
|
"location": { |
|
"addrLine": "1 Roosevelt Rd. Sec. 4", |
|
"settlement": "Taipei", |
|
"country": "Taiwan" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper presents two approaches to ranking reader emotions of documents. Past studies assign a document to a single emotion category, so their methods cannot be applied directly to the emotion ranking problem. Furthermore, whereas previous research analyzes emotions from the writer's perspective, this work examines readers' emotional states. The first approach proposed in this paper minimizes pairwise ranking errors. In the second approach, regression is used to model emotional distributions. Experiment results show that the regression method is more effective at identifying the most popular emotion, but the pairwise loss minimization method produces ranked lists of emotions that have better correlations with the correct lists.", |
|
"pdf_parse": { |
|
"paper_id": "D08-1015", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper presents two approaches to ranking reader emotions of documents. Past studies assign a document to a single emotion category, so their methods cannot be applied directly to the emotion ranking problem. Furthermore, whereas previous research analyzes emotions from the writer's perspective, this work examines readers' emotional states. The first approach proposed in this paper minimizes pairwise ranking errors. In the second approach, regression is used to model emotional distributions. Experiment results show that the regression method is more effective at identifying the most popular emotion, but the pairwise loss minimization method produces ranked lists of emotions that have better correlations with the correct lists.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Emotion analysis is an increasingly popular research topic due to the emergence of large-scale emotion data on the web. Previous work primarily studies emotional contents of texts from the writer's perspective, where it is typically assumed that a writer expresses only a single emotion in a document. Unfortunately, this premise does not hold when analyzing a document from the reader's perspective, because readers rarely agree unanimously on the emotion that a document instills. Figure 1 . Emotional responses of 626 people after reading a Yahoo! News article about an Iranian refugee mother and her two children who finally reunited with their family in the March of 2007 after been stranded in a Moscow airport for 10 months due to false passports.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 483, |
|
"end": 491, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "readers' responses are distributed among different emotion categories. In fact, none of the emotions in Figure 1 has a majority (i.e., more than 50%) of the votes. Intuitively, it is better to provide a ranking of emotions according to their popularity rather than associating a single reader emotion with a document. As a result, current writer-emotion analysis techniques for classifying a document into a single emotion category are not suitable for analyzing reader emotions. New methods capable of ranking emotions are required. Reader-emotion analysis has potential applications that differ from those of writer-emotion analysis. For example, by integrating emotion ranking into information retrieval, users will be able to retrieve documents that contain relevant contents and at the same time produce desired feelings. In addition, reader-emotion analysis can assist writers in foreseeing how their work will influence readers emotionally.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 112, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we present two approaches to ranking reader emotions. The first approach is inspired by the success of the pairwise loss minimization framework used in information retrieval to rank documents. Along a similar line, we devise a novel scheme to minimize the number of incorrectly-ordered emotion pairs in a document. In the second approach, regression is used to model reader-emotion distributions directly. Experiment results show that the regression method is more effective at identifying the most popular emotion, but the pairwise loss minimization method produces ordered lists of emotions that have better correlations with the correct lists.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of this paper is organized as follows. Section 2 describes related work. In Section 3, details about the two proposed approaches are provided. Section 4 introduces the corpus and Section 5 presents how features are extracted from the corpus. Section 6 shows the experiment procedures and results. Section 7 concludes the paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Only a few studies in the past deal with the reader aspect of emotion analysis. For example, Lin et al. (2007; 2008) classify documents into readeremotion categories. Most previous work focuses on the writer's perspective. Pang et al. (2002) design an algorithm to determine whether a document's author expresses a positive or negative sentiment. They discover that using Support Vector Machines (SVM) with word unigram features results in the best performance. Since then, more work has been done to find features better than unigrams. In (Hu et al., 2005) , word sentiment information is exploited to achieve better classification accuracy.", |
|
"cite_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 110, |
|
"text": "Lin et al. (2007;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 111, |
|
"end": 116, |
|
"text": "2008)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 223, |
|
"end": 241, |
|
"text": "Pang et al. (2002)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 540, |
|
"end": 557, |
|
"text": "(Hu et al., 2005)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Experiments have been done to extract emotional information from texts at granularities finer than documents. Wiebe (2000) investigates the subjectivity of words, whereas Aman and Szpakowicz (2007) manually label phrases with emotional categories. In 2007, the SemEval-2007 workshop organized a task on the unsupervised annotation of news headlines with emotions (Strapparava and Mihalcea, 2007) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 110, |
|
"end": 122, |
|
"text": "Wiebe (2000)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 363, |
|
"end": 395, |
|
"text": "(Strapparava and Mihalcea, 2007)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "As for the task of ranking, many machinelearning algorithms have been proposed in information retrieval. These techniques generate ranking functions which predict the relevance of a document. One class of algorithms minimizes the errors resulting from ordering document pairs incorrectly. Examples include (Joachims, 2002) , (Freund et al., 2003) and (Qin et al., 2007) . In particular, the training phase of the Joachims' Ranking SVM (Joachims, 2002) is formulated as the following SVM optimization problem:", |
|
"cite_spans": [ |
|
{ |
|
"start": 306, |
|
"end": 322, |
|
"text": "(Joachims, 2002)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 325, |
|
"end": 346, |
|
"text": "(Freund et al., 2003)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 351, |
|
"end": 369, |
|
"text": "(Qin et al., 2007)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 435, |
|
"end": 451, |
|
"text": "(Joachims, 2002)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "min \u2211 + k j i C k j i , , T 2 1 , , \u03be \u03be w w w, subject to: 0 : 1 )) , ( ) , ( ( : | ) , ( ), , ( , , , , T , , \u2265 \u2200 \u2200 \u2200 \u2212 \u2265 \u03a6 \u2212 \u03a6 > \u2208 \u2200 k j i k j i j k i k j k i k j k i k k j i d q d q s s V d q d q \u03be \u03be w (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "where V is the training corpus, \u03a6(q k , d i ) is the feature vector of document d i with respect to query q k , s k,i is the relevance score of d i with respect to q k , w is a weight vector, C is the SVM cost parameter, and \u03be i,j,k are slack variables. The set of constraints at (1) means that document pairwise orders should be preserved. Unfortunately, the above scheme for exploiting pairwise order information cannot be applied directly to the emotion ranking task, because the task requires us to rank emotions within a document rather than provide a ranking of documents. In particular, the definitions of \u03a6", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "(q k ,d i ), \u03a6(q k ,d j )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": ", s k,i and s k,j do not apply to emotion ranking. In the next section, we will show how the pairwise loss minimization concept is adapted for emotion ranking.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this section, we provide the formal description of the reader-emotion ranking problem. Then we describe the pairwise loss minimization (PLM) approach and the emotional distribution regression (EDR) approach to ranking emotions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ranking Reader Emotions", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The reader emotion ranking problem is defined as follows. Let D = {d 1 , d 2 , \u2026, d N } be the document space, and E = {e 1 , e 2 , \u2026, e M } be the emotion space. Let f i : E \u2192 \u211c be the emotional probability function of d i \u2208D. That is, f i (e j ) outputs the fraction of readers who experience emotion e j after reading document d i . Our goal is to find a function r : D \u2192 E M such that r(d i ) = (e \u03c0(1) , e \u03c0(2) , \u2026, e \u03c0(M) ) where \u03c0 is Input: Set of emotion ordered pairs P 1. G \u2190 a graph with emotions as vertices and no edge 2. while (P \u2260 \u2205) 3. remove (e j ,e k ) with the highest confidence from P 4. if adding edge (e j ,e k ) to G produces a loop 5. then add (e k ,e j ) to G 6. else add (e j ,e k ) to G 7. return topological sort of G a permutation on {1, 2, \u2026, M}, and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Specification", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "f i (e \u03c0(1) ) \u2265 f i (e \u03c0(2) ) \u2265 \u2026 \u2265 f i (e \u03c0(M) ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Specification", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "As explained in Section 2, the information retrieval framework for exploiting pairwise order information cannot be applied directly to the emotion ranking problem. Hence, we introduce a novel formulation of the emotion ranking problem into an SVM optimization problem with constraints based on pairwise loss minimization. Algorithm 1. Merge Pairwise Orders.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pairwise Loss Minimization", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We now describe how we rank the emotions of a previously unseen document using the M(M -1)/2 pairwise ranking functions g jk created during the training phase. First, all of the pairwise ranking functions are applied to the unseen document, which generates the relative orders of every pair of emotions. These pairwise orders need to be combined together to produce a ranked list of all the emotions. Algorithm 1 does exactly this.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pairwise Loss Minimization", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Whereas Ranking SVM generates only a single ranking function, our method creates a pairwise ranking function g jk : D \u2192 \u211c for each pair of emotions e j and e k , aiming at satisfying the maximum number of the inequalities:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pairwise Loss Minimization", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In Algorithm 1, the confidence of an emotion ordered pair at Line 3 is the probability value returned by a LIBSVM classifier for predicting the order. LIBSVM's method for generating this probability is described in (Wu et al., 2003) . Lines 4 and 5 resolve the problem of conflicting emotion ordered pairs forming a loop in the ordering of emotions. The ordered list of emotions returned by Algorithm 1 at Line 7 is the final output of the PLM method.", |
|
"cite_spans": [ |
|
{ |
|
"start": 215, |
|
"end": 232, |
|
"text": "(Wu et al., 2003)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pairwise Loss Minimization", |
|
"sec_num": "3.2" |
|
}, |
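{

"text": "For clarity, Algorithm 1 can be written as the following minimal Python sketch. It is illustrative rather than our exact implementation: the function names are ours, the pairwise confidences are assumed to have already been computed (e.g., from the LIBSVM probability estimates described above), and the topological sort uses the standard-library graphlib.

from graphlib import TopologicalSorter

def creates_loop(edges, u, v):
    # Adding edge u -> v closes a loop iff v already reaches u.
    stack, seen = [v], set()
    while stack:
        node = stack.pop()
        if node == u:
            return True
        if node not in seen:
            seen.add(node)
            stack.extend(w for (x, w) in edges if x == node)
    return False

def merge_pairwise_orders(pairs):
    # pairs: dict mapping an ordered pair (e_j, e_k), read as 'e_j ranks
    # above e_k', to the confidence of that prediction.
    edges = set()
    for (j, k) in sorted(pairs, key=pairs.get, reverse=True):  # highest confidence first
        edges.add((k, j) if creates_loop(edges, j, k) else (j, k))
    ts = TopologicalSorter()
    for (j, k) in edges:
        ts.add(k, j)  # j is a predecessor of k, so j precedes k in the output
    return list(ts.static_order())",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pairwise Loss Minimization",

"sec_num": "3.2"

},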
|
|
{ |
|
"text": "In other words, we want to minimize the number of incorrectly-ordered emotion pairs. We further require g jk (d i ) to have the linear form", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pairwise Loss Minimization", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "w T \u2126(d i ) + b,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pairwise Loss Minimization", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where w is a weight vector, b is a constant, and \u2126(d i ) is the feature vector of d i . Details of feature extraction will be presented in Section 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pairwise Loss Minimization", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "As Joachims (2002) points out, the above type of problem is NP-Hard. However, an approximate solution to finding g ik can be obtained by solving the following SVM optimization problem:", |
|
"cite_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 18, |
|
"text": "Joachims (2002)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pairwise Loss Minimization", |
|
"sec_num": "3.2" |
|
}, |
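{

"text": "For concreteness, the training phase can be sketched in Python as follows. This is a sketch, not our exact implementation: it assumes a precomputed feature matrix X whose rows are Ω(d_i) and a matrix F with entries F[i, j] = f_i(e_j), and it substitutes scikit-learn's SVC class (itself a wrapper around LIBSVM) with a linear kernel for direct LIBSVM calls.

from itertools import combinations
from sklearn.svm import SVC

def train_plm(X, F, C=2**-10):
    # Train one probabilistic pairwise classifier g_jk per emotion pair.
    # Assumes both orderings of every pair occur in the training data.
    n_docs, M = F.shape
    rankers = {}
    for j, k in combinations(range(M), 2):  # M(M - 1)/2 pairs
        mask = F[:, j] != F[:, k]           # keep documents where the pair is ordered
        y = (F[mask, j] > F[mask, k]).astype(int)
        clf = SVC(kernel='linear', C=C, probability=True)
        rankers[(j, k)] = clf.fit(X[mask], y)
    return rankers",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pairwise Loss Minimization",

"sec_num": "3.2"

},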
|
{ |
|
"text": "In the second approach to ranking emotions, we use regression to model f i directly. A regression function h j : D \u2192 \u211c is generated for each e j \u2208E by learning from the examples (\u2126", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Emotional Distribution Regression", |
|
"sec_num": "3.3" |
|
}, |
|
|
{ |
|
"text": "The regression framework we adopt is Support Vector Regression (SVR), which is a regression analysis technique based on SVM (Sch\u00f6lkopf et al., 2000) . We require h j to have the form", |
|
"cite_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 148, |
|
"text": "(Sch\u00f6lkopf et al., 2000)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Emotional Distribution Regression", |
|
"sec_num": "3.3" |
|
}, |
|
|
{ |
|
"text": "Finding h j is equivalent to solving the following optimization problem:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Emotional Distribution Regression", |
|
"sec_num": "3.3" |
|
}, |
|
|
|
{ |
|
"text": "When formulated as an SVM optimization problem, finding g jk is equivalent to training an SVM classifier for classifying a document into the e j or e k category. Hence, we use LIBSVM, which is an SVM implementation, to obtain the solution. 1 0 , : Figure 2 . News articles in the entire corpus grouped by the percentage of votes received by the most popular emotion.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 248, |
|
"end": 256, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Emotional Distribution Regression", |
|
"sec_num": "3.3" |
|
}, |
|
|
{ |
|
"text": "where C is the cost parameter, \u03b5 is the maximum difference between the predicted and actual values we wish to maintain, \u03be i,1 and \u03be i,2 are slack variables, and Q is the training corpus. To solve the above optimization problem, we use SVM light 's SVR implementation. 2 When ranking the emotions of a previously unseen document d k , we sort the emotions e j \u2208E in descending order of h j (d k ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 268, |
|
"end": 269, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Emotional Distribution Regression", |
|
"sec_num": "3.3" |
|
}, |
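{

"text": "A minimal sketch of EDR follows. Again this is illustrative: it substitutes scikit-learn's LinearSVR for the SVMlight implementation used in our experiments, and assumes the same X and F matrices as in the PLM sketch.

import numpy as np
from sklearn.svm import LinearSVR

def train_edr(X, F, C=2**-10, epsilon=0.1):
    # Fit one linear support vector regressor h_j per emotion column of F.
    return [LinearSVR(C=C, epsilon=epsilon).fit(X, F[:, j]) for j in range(F.shape[1])]

def rank_emotions(models, x):
    # Sort emotion indices in descending order of h_j(x) for one feature vector x.
    scores = np.array([m.predict(x.reshape(1, -1))[0] for m in models])
    return list(np.argsort(-scores))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Emotional Distribution Regression",

"sec_num": "3.3"

},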
|
{ |
|
"text": "The training and test corpora used in this study comprise Chinese news articles from Yahoo! Kimo News 3 , which allows a user to cast a vote for one of eight emotions to express how a news article makes her feel. Each Yahoo! news article contains a list of eight emotions at the bottom of the webpage. A reader may select one of the emotions and click on a submit button to submit the emotion. As with many websites which collect user responses, such as the Internet Movie Database, users are not forced to submit their responses. After submitting a response, the user can view a distribution of emotions indicating how other readers feel about the same article. Figure 1 shows the voting results of a Yahoo! news article.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 663, |
|
"end": 671, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Constructing the Corpus", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The eight available emotions are happy, sad, angry, surprising, boring, heartwarming, awesome, and useful. Useful is not a true emotion. Rather, it means that a news article contains practical information. The value f i (e j ) is derived by normalizing the number of votes for emotion e j in document d i by the total number votes in d i .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Constructing the Corpus", |
|
"sec_num": "4" |
|
}, |
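{

"text": "Deriving f_i from the raw vote counts is a simple normalization, sketched below (variable names are illustrative):

def emotional_distribution(votes):
    # votes: one count per emotion for a single article.
    # Returns f_i as a list of vote fractions.
    total = sum(votes)
    return [v / total for v in votes] if total else [0.0] * len(votes)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Constructing the Corpus",

"sec_num": "4"

},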
|
{ |
|
"text": "The entire corpus consists of 37,416 news articles dating from January 24, 2007 to August 7, 2007. News articles prior to June 1, 2007 form the training corpus (25,975 articles), and the remaining ones form the test corpus (11,441 articles). We collect articles a week after their publication dates to ensure that the vote counts have stabilized.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Constructing the Corpus", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "As mentioned earlier, readers rarely agree unanimously on the emotion of a document. Figure 2 illustrates this. In 41% of all the news articles in the entire corpus, the most popular emotion receives less than 60% of the votes.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 94, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Constructing the Corpus", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "After obtaining news articles, the next step is to determine how to convert them into feature vectors for SVM and SVR. That is, we want to instantiate \u2126. For this purpose, three types of features are extracted.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extracting Features", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The first feature type consists of Chinese character bigrams, which are taken from the headline and content of each news article. The presence of a bigram is indicated by a binary feature value.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extracting Features", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Chinese words form the second type of features. Unlike English words, consecutive Chinese words in a sentence are not separated by spaces. To deal with this problem, we utilize Stanford NLP Group's Chinese word segmenter to split a sentence into words. 4 As in the case of bigrams, binary feature values are used.", |
|
"cite_spans": [ |
|
{ |
|
"start": 253, |
|
"end": 254, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extracting Features", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We use character bigram features in addition to word features to increase the coverage of Chinese words. A Chinese word is formed by one or more contiguous Chinese characters. As mentioned earlier, Chinese words in a sentence are not separated by any boundary symbol (e.g., a space), so a Chinese word segmentation tool is always required to extract words from a sentence. However, a word segmenter may identify word boundaries erroneously, resulting in the loss of correct Chinese words. This problem is particularly severe if there are a lot of out-of-vocabulary words in a dataset. In Chinese, around 70% of all Chinese words are Chinese character bigrams (Chen et al., 1997) . Thus, using Chinese character bigrams as features will allow us to identify a lot of Chinese words, which when combined with the words extracted by the word segmenter, will give us a wider coverage of Chinese words.", |
|
"cite_spans": [ |
|
{ |
|
"start": 659, |
|
"end": 678, |
|
"text": "(Chen et al., 1997)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extracting Features", |
|
"sec_num": "5" |
|
}, |
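{

"text": "The following is a minimal sketch of the binary character-bigram features (illustrative; the word and metadata features are encoded analogously, and the bigram vocabulary is collected from the training corpus):

def char_bigrams(text):
    # All contiguous character pairs in a string; works directly on Chinese text.
    return {text[i:i + 2] for i in range(len(text) - 1)}

def binary_features(text, vocabulary):
    # vocabulary: bigram -> feature index. Returns a sparse {index: 1} mapping.
    return {vocabulary[b]: 1 for b in char_bigrams(text) if b in vocabulary}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Extracting Features",

"sec_num": "5"

},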
|
{ |
|
"text": "The third feature type is extracted from news metadata. A news article's metadata are its news NDCG@k is used because ACC@k has the disadvantage of not taking emotional distributions into account. Take Figure 1 as an example. In the figure, heartwarming and happy have 31.3% and 30.7% of the votes, respectively. Since the two percentages are very close, it is reasonable to say that predicting happy as the first item in a ranked list may also be acceptable. However, doing so would be completely incorrect according to ACC@k. In contrast, NDCG@k would consider it to be partially correct, and the extent of correctness depends on how much heartwarming and happy's percentages of votes differ. To be exact, if happy is predicted as the first item, then the corresponding NDCG@1 would be 30.7% / 31.3% = 0.98. category, agency, hour of publication, reporter, and event location. Examples of news categories include sports and political. Again, we use binary feature values. News metadata are used because they may contain implicit emotional information.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 202, |
|
"end": 210, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Extracting Features", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The experiments are designed to achieve the following four goals: (i) to compare the ranking performance of different methods, (ii) to analyze the pairwise ranking quality of PLM, (iii) to analyze the distribution estimation quality of EDR, and (iv) to compare the ranking performance of different feature sets. The Yahoo! News training and test corpora presented in Section 4 are used in all experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The third metric is SACC@k, or set accuracy at k. It is a variant of ACC@k. According to SACC@k, a predicted ranked list is correct if the set of its first k items is the same as the true ranked list's set of first k items. In effect, SACC@k evaluates a ranking method's ability to place the top k most important items in the first k positions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We employ three metrics as indicators of ranking quality: ACC@k, NDCG@k and SACC@k.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics for Ranking", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "ACC@k stands for accuracy at position k. According to ACC@k, a predicted ranked list is correct if the list's first k items are identical (i.e., same items in the same order) to the true ranked list's first k items. If two emotions in a list have the same number of votes, then their positions are interchangeable. ACC@k is computed by dividing the number of correctly-predicted instances by the total number of instances.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics for Ranking", |
|
"sec_num": "6.1" |
|
}, |
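{

"text": "ACC@k and SACC@k can be sketched as follows (a simplification: emotions with tied vote counts, whose positions are interchangeable, are not given special treatment here):

def acc_at_k(predicted, truth, k):
    # Correct only if the first k items match in the same order.
    return float(predicted[:k] == truth[:k])

def sacc_at_k(predicted, truth, k):
    # Correct if the first k items match as a set, ignoring order.
    return float(set(predicted[:k]) == set(truth[:k]))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation Metrics for Ranking",

"sec_num": "6.1"

},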
|
{ |
|
"text": "SVM and SVR are employed in PLM and EDR, respectively. Both SVM and SVR have the adjustable C cost parameter, and SVR has an additional \u03b5 parameter. To estimate the optimal C value for a combination of SVM and features, we perform 4fold cross-validation on the Yahoo! News training corpus, and select the C value which results in the highest binary classification accuracy during crossvalidation. The same procedure is used to estimate the best C and \u03b5 values for a combination of SVR and features. The C-\u03b5 pair which results in the lowest mean squared error during cross-validation is chosen. The candidate C values for both SVM and SVR are 2 -10 , 2 -9 , \u2026, 2 -6 . The candidate \u03b5 values for SVR are 10 -2 and 10 -1 . All cross-validations are performed solely on the training data. The test data are not used to tune the parameters. Also, SVM and SVR allow users to specify the type of kernel to use. Linear kernel is selected for both SVM and SVR. NDCG@k, or normalized discounted cumulative gain at position k (J\u00e4rvelin and Kek\u00e4l\u00e4inen, 2002) , is a metric frequently used in information retrieval to judge the quality of a ranked list when multiple levels of relevance are considered. This metric is defined as", |
|
"cite_spans": [ |
|
{ |
|
"start": 1015, |
|
"end": 1046, |
|
"text": "(J\u00e4rvelin and Kek\u00e4l\u00e4inen, 2002)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tuning SVM and SVR Parameters", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "\u2211 = + = k i i k i rel z k 1 2 ) 1 ( log @ NDCG", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tuning SVM and SVR Parameters", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "where rel i is the relevance score of the predicted item at position i, and z k is a normalizing factor which ensures that a correct ranked list has an NDCG@k value of 1. In the emotion ranking problem, rel i is the percentage of reader votes received by the emotion at position i. Note that the log 2 (i+1) value in the denominator is a discount factor which decreases the weights of items ranked later in a list. NDCG@k has the range [0, 1], where 1 is the best. In the experiment results, NDCG@k values are averaged over all instances in the test corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Tuning SVM and SVR Parameters", |
|
"sec_num": "6.2" |
|
}, |
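{

"text": "A minimal sketch of NDCG@k under this definition (illustrative; the relevance scores are the vote percentages, and z_k is computed from the ideal, i.e. true, ordering):

import math

def ndcg_at_k(predicted_rel, ideal_rel, k):
    # predicted_rel: vote percentages in predicted order;
    # ideal_rel: the same percentages in descending (true) order.
    dcg = sum(rel / math.log2(i + 2) for i, rel in enumerate(predicted_rel[:k]))
    z_k = sum(rel / math.log2(i + 2) for i, rel in enumerate(ideal_rel[:k]))
    return dcg / z_k if z_k else 0.0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Tuning SVM and SVR Parameters",

"sec_num": "6.2"

},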
|
{ |
|
"text": "The nearest neighbor (NN) method is used as the baseline. The ranked emotion list of a news article in the test corpus is predicted as follows. First, the test news article is compared to every training news article using cosine similarity, which is defined as In Figure 3 , EDR's ACC@1 (0.751) is higher than those of PLM and NN, and the differences are statistically significant with p-value < 0.01. So, EDR is the best method at predicting the most popular emotion. However, PLM has the best ACC@k for k \u2265 2, and the differences from the other two methods are all significant with p-value < 0.01. This means that PLM's predicted ranked lists better resemble the true ranked lists. Figure 3 displays a sharp decrease in ACC@k values as k increases. This trend indicates the hardness of predicting a ranked list correctly. Looking from a different angle, the ranking task under the ACC@k metric is equivalent to the classification of news articles into one of 8!/(8 -k)! classes, where we regard each unique emotion sequence of length k as a class. In fact, computing ACC@8 for a ranking method is the same as evaluating the method's ability to classify a news article into one of 8! = 40,320 classes. So, producing a completelycorrect ranked list is a difficult task.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 264, |
|
"end": 272, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 684, |
|
"end": 692, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Nearest Neighbor Baseline", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "| | | | | | ) , ( cos i i j i j i D D D D d d \u00d7 \u2229 =", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Nearest Neighbor Baseline", |
|
"sec_num": "6.3" |
|
}, |
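{

"text": "A minimal sketch of the baseline, using the set-based cosine defined above (function names are illustrative):

def cosine(bigrams_i, bigrams_j):
    # Set-based similarity over the articles' character-bigram sets.
    if not bigrams_i or not bigrams_j:
        return 0.0
    return len(bigrams_i & bigrams_j) / (len(bigrams_i) * len(bigrams_j))

def nearest_neighbor(test_bigrams, training_set):
    # training_set: list of (bigram_set, ranked_emotion_list) pairs.
    return max(training_set, key=lambda t: cosine(test_bigrams, t[0]))[1]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Nearest Neighbor Baseline",

"sec_num": "6.3"

},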
|
{ |
|
"text": "In Figure 4 , all of PLM and EDR's NDCG@k improvements over NN are statistically significant with p-value < 0.01. For some values of k, the difference in NDCG@k between PLM and EDR is not significant. The high NDCG@k values (i.e., greater than 0.8) of PLM and EDR imply that although it is difficult for PLM and EDR to generate completely-correct ranked lists, these two methods are effective at placing highly popular emotions to the beginning of ranked lists.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Nearest Neighbor Baseline", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "In Figure 5 , PLM outperforms the other two methods for 2 \u2264 k \u2264 7, and the differences are all statistically significant with p-value < 0.01. For small values of k (e.g., 2 \u2264 k \u2264 3), PLM's higher SACC@k values mean that PLM is better at placing the highly popular emotions in the top positions of a ranked list.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Nearest Neighbor Baseline", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "To further compare PLM and EDR, we examine their performance on individual test instances. Figure 6 shows the percentage of test instances where both PLM and EDR give incorrect lists, only PLM gives correct lists, only EDR gives ranked lists, and both methods give correct lists. The \"Only PLM Correct\" and \"Only EDR Correct\" categories are nonzero, so neither PLM nor EDR is always better than the other.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 99, |
|
"text": "Figure 6", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Nearest Neighbor Baseline", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "In summary, EDR is the best at predicting the most popular emotion according to ACC@1, NDCG@1 and SACC@1. However, PLM generates ranked lists that better resemble the correct ranked lists according to ACC@k and SACC@k for k \u2265 2. Further analysis shows that neither method is always better than the other.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Nearest Neighbor Baseline", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "In this subsection, we evaluate the performance of PLM in predicting pairwise orders. We first examine the quality of ranked lists generated by PLM in terms of pairwise orders. To do this, we use Kendall's \u03c4 b correlation coefficient, which is a statistical measure for determining the correlation between two ranked lists when there may be ties between two items in a list (Liebetrau, 1983) . The value of \u03c4 b is determined based on the number of concordant pairwise orders and the number of discordant pairwise orders between two ranked lists. Therefore, this measure is appropriate for evaluating the effectiveness of PLM at predicting pairwise orders correctly. \u03c4 b has the range [-1, 1] , where 1 means a perfect positive correlation, and -1 means two lists are the reverse of each other.", |
|
"cite_spans": [ |
|
{ |
|
"start": 374, |
|
"end": 391, |
|
"text": "(Liebetrau, 1983)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 684, |
|
"end": 691, |
|
"text": "[-1, 1]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pairwise Ranking Quality of PLM", |
|
"sec_num": "6.5" |
|
}, |
|
{ |
|
"text": "When computing \u03c4 b of two ranked lists, we also calculate a p-value to indicate whether the correlation is statistically significant.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pairwise Ranking Quality of PLM", |
|
"sec_num": "6.5" |
|
}, |
|
{ |
|
"text": "We compute \u03c4 b statistics between a predicted ranked list and the corresponding true ranked list. Table 1 shows the results. In Table 1 , numbers in the \"Average \u03c4 b \" and \"Average p-value\" columns are averaged over all test instances. The statistics for EDR and NN are also included for comparison. From the table, we see that PLM has the highest average \u03c4 b value and the lowest average p-value, so PLM is better at preserving pairwise orders than EDR and NN methods. This observation verifies that PLM's minimization of pairwise loss leads to better prediction of pairwise orders.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 98, |
|
"end": 105, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 128, |
|
"end": 135, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pairwise Ranking Quality of PLM", |
|
"sec_num": "6.5" |
|
}, |
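{

"text": "In practice, τ_b and its p-value can be computed with SciPy's kendalltau, which implements the τ_b variant; a minimal sketch over two ranked emotion lists:

from scipy.stats import kendalltau

def tau_b(predicted, truth):
    # Convert the two ranked lists into rank vectors over the same emotions.
    emotions = sorted(truth)
    pred_rank = [predicted.index(e) for e in emotions]
    true_rank = [truth.index(e) for e in emotions]
    tau, p_value = kendalltau(pred_rank, true_rank)  # tau-b handles ties
    return tau, p_value",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pairwise Ranking Quality of PLM",

"sec_num": "6.5"

},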
|
{ |
|
"text": "We now look at the individual performance of the 28 pairwise emotion rankers g jk . As mentioned in Section 3.2, each pairwise emotion ranker g jk is equivalent to a binary classifier for classifying a document into the e j or e k category. So, we look at their classification accuracies in Table 2 . In the table, accuracy ranges from 0.75 for the awesomesurprising pair to 0.91 for the useful-boring pair.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 291, |
|
"end": 298, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pairwise Ranking Quality of PLM", |
|
"sec_num": "6.5" |
|
}, |
|
{ |
|
"text": "From the psychological perspective, the relatively low accuracy of the awesome-surprising pair is expected, because awesome is surprising in a positive sense. So, readers should have a hard time distinguishing between these two emotions. And the SVM classifier, which models reader responses, should also find it difficult to discern these two emotions. Based on this observation, we suspect that the pairwise classification performance actually reflects the underlying emotional ambiguity experienced by readers. To verify this, we quantify the degree of ambiguity between two emotions, and compare the result to pairwise classification accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pairwise Ranking Quality of PLM", |
|
"sec_num": "6.5" |
|
}, |
|
{ |
|
"text": "To quantify emotional ambiguity, we introduce the concept of discrimination value between two emotions e j and e k in a document d i , which is defined as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pairwise Ranking Quality of PLM", |
|
"sec_num": "6.5" |
|
}, |
|
{ |
|
"text": ") ( ) ( ) ( ) ( k i j i k i j i e f e f e f e f + \u2212", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pairwise Ranking Quality of PLM", |
|
"sec_num": "6.5" |
|
}, |
|
{ |
|
"text": "where f i is the emotional probability function defined in Section 3.1. Intuitively, the larger the discrimination value is, the smaller the degree of ambiguity between two emotions is. Figure 7 shows the relationship between pairwise classification accuracy and the average discrimination value of the corresponding emotion pair. The general pattern is that as accuracy increases, the discrimination value also increases. To provide concrete evidence, we use Pearson's product-moment correlation coefficient, which has the range of [-1, 1] , where 1 means a perfect positive correlation (Moore, 2006) . The coefficient for the data in Figure 7 is 0.726 with p-value < 0.01. Thus, pairwise emotion classification accuracy reflects the emotional ambiguity experienced by readers. In summary, PLM's pairwise loss minimization leads to better pairwise order predictions than EDR and NN. Also, the pairwise classification results reveal the inherent ambiguity between emotions.", |
|
"cite_spans": [ |
|
{ |
|
"start": 533, |
|
"end": 540, |
|
"text": "[-1, 1]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 588, |
|
"end": 601, |
|
"text": "(Moore, 2006)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 186, |
|
"end": 194, |
|
"text": "Figure 7", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 636, |
|
"end": 644, |
|
"text": "Figure 7", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pairwise Ranking Quality of PLM", |
|
"sec_num": "6.5" |
|
}, |
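{

"text": "A minimal sketch of the discrimination value and of the correlation test used for Figure 7 (illustrative; SciPy's pearsonr returns the coefficient together with its p-value):

from scipy.stats import pearsonr

def discrimination(f_i, j, k):
    # Degree to which readers separate emotions e_j and e_k in one document.
    denom = f_i[j] + f_i[k]
    return abs(f_i[j] - f_i[k]) / denom if denom else 0.0

# accuracies and avg_discriminations hold one value per emotion pair (28 pairs):
# r, p = pearsonr(accuracies, avg_discriminations)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pairwise Ranking Quality of PLM",

"sec_num": "6.5"

},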
|
{ |
|
"text": "In this subsection, we evaluate EDR's performance in estimating the emotional probability function f i .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distribution Estimation Quality of EDR", |
|
"sec_num": "6.6" |
|
}, |
|
{ |
|
"text": "With the prior knowledge that a news article's f i values sum to 1 over all emotions, and f i is between 0 and 1, we adjust EDR's f i predictions to produce proper distributions. It is done as follows. A predicted f i value greater than 1 or less than 0 is set to 1 and 0, respectively. Then the predicted f i values are normalized to sum to 1 over all emotions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distribution Estimation Quality of EDR", |
|
"sec_num": "6.6" |
|
}, |
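{

"text": "This adjustment is sketched below (illustrative; falling back to the uniform distribution for a degenerate all-zero prediction is an assumption of the sketch, not a case described above):

import numpy as np

def to_distribution(predictions):
    # Clip raw SVR outputs to [0, 1], then renormalize to sum to 1.
    clipped = np.clip(np.asarray(predictions, dtype=float), 0.0, 1.0)
    total = clipped.sum()
    return clipped / total if total > 0 else np.full_like(clipped, 1.0 / len(clipped))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Distribution Estimation Quality of EDR",

"sec_num": "6.6"

},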
|
{ |
|
"text": "NN's distribution estimation performance is included for comparison. For NN, the predicted f i values of a test article are taken from the emotional distribution of the most similar training article. Figure 8 shows the mean squared error of EDR and NN for predicting f i . In the figure, the error generated by EDR is less than those by NN, and all the differences are statistically significant with pvalue < 0.01. Thus, EDR's use of regression leads to better estimation of f i than the NN. Figure 9 shows each of the three feature type's ACC@k for predicting test instances' ranked lists when PLM is used. The feature comparison graph for EDR is not shown, because it exhibits a very similar trend as PLM. For both PLM and EDR, bigrams are better than words, which are in turn better than news metadata. In Figure 9 , the combination of all three feature sets achieves the best performance. For both PLM and EDR, the improvements in ACC@k of using all features over words and metadata are all significant with p-value < 0.01, and the improvements over bigrams are significant for k \u2264 2. Hence, in general, it is better to use all three feature types together.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 200, |
|
"end": 208, |
|
"text": "Figure 8", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 492, |
|
"end": 500, |
|
"text": "Figure 9", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 809, |
|
"end": 817, |
|
"text": "Figure 9", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Distribution Estimation Quality of EDR", |
|
"sec_num": "6.6" |
|
}, |
|
{ |
|
"text": "This paper presents two methods to ranking reader emotions. The PLM method minimizes pairwise loss, and the EDR method estimates emotional distribution through regression. Experiments with significant tests show that EDR is better at predicting the most popular emotion, but PLM produces ranked lists that have higher correlation with the correct lists. We further verify that PLM has better pairwise ranking performance than the other two methods, and EDR has better distribution estimation performance than NN.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "As for future work, there are several directions we can pursue. An observation is that PLM exploits pairwise order information, whereas EDR exploits emotional distribution information. We plan to combine these two methods together. Another research direction is to improve EDR by finding better features. We would also like to integrate emotion ranking into information retrieval.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "http://www.csie.ntu.edu.tw/~cjlin/libsvm/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://svmlight.joachims.org/ 3 http://tw.news.yahoo.com", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://nlp.stanford.edu/software/segmenter.shtml", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We are grateful to the Computer and Information Networking Center, National Taiwan University, for the support of high-performance computing facilities. The research in this paper was partially supported by National Science Council, Taiwan, under the contract NSC 96-2628-E-002-240-MY3.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Percentage of Votes Received by Most Popular Emotion Number of News Articles References Saima Aman and Stan Szpakowicz", |
|
"authors": [], |
|
"year": 2007, |
|
"venue": "Proceedings of 10th International Conference on Text, Speech and Dialogue", |
|
"volume": "4629", |
|
"issue": "", |
|
"pages": "196--205", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Percentage of Votes Received by Most Popular Emotion Number of News Articles References Saima Aman and Stan Szpakowicz. 2007. Identifying Expressions of Emotion in Text. In Proceedings of 10th International Conference on Text, Speech and Dialogue, Lecture Notes in Computer Science 4629, 196-205. Springer, Plze\u0148, CZ.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Chinese Text Retrieval wihtout using a Dictionary", |
|
"authors": [ |
|
{ |
|
"first": "Aitao", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianzhang", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liangjie", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frederic", |
|
"middle": [], |
|
"last": "Gey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Meggs", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proceedings of 20th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "42--49", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aitao Chen, Jianzhang He, Liangjie Xu, Frederic Gey, and Jason Meggs. 1997. Chinese Text Retrieval wihtout using a Dictionary. In Proceedings of 20th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, 42-49. Association for Computing Machinery, Phila- delphia, US.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "An Efficient Boosting Algorithm for Combining Preferences", |
|
"authors": [ |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Freund", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raj", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Iyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Schapire", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoram", |
|
"middle": [], |
|
"last": "Singer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "933--969", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoav Freund, Raj D. Iyer, Robert E. Schapire, and Yoram Singer. 2003. An Efficient Boosting Algorithm for Combining Preferences. Journal of Machine Learning Research, 4, 933-969.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A New Method for Sentiment Classification in Text Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianyong", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoming", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bingzhen", |
|
"middle": [], |
|
"last": "Pei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruzhan", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of 2nd International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi Hu, Jianyong Duan, Xiaoming Chen, Bingzhen Pei, and Ruzhan Lu. 2005. A New Method for Sentiment Classification in Text Retrieval. In Proceedings of 2nd International Joint Conference on Natural Lan- guage Processing, 1-9. Jeju Island, KR.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Cumulative Gain-based Evaluation of IR Techniques", |
|
"authors": [ |
|
{ |
|
"first": "Kalervo", |
|
"middle": [], |
|
"last": "J\u00e4rvelin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaana", |
|
"middle": [], |
|
"last": "Kek\u00e4l\u00e4inen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "ACM Transactions on Information Systems", |
|
"volume": "20", |
|
"issue": "4", |
|
"pages": "422--446", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kalervo J\u00e4rvelin and Jaana Kek\u00e4l\u00e4inen. Cumulative Gain-based Evaluation of IR Techniques. 2002. ACM Transactions on Information Systems, 20(4), 422-446.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Optimizing Search Engines using Clickthrough Data", |
|
"authors": [ |
|
{ |
|
"first": "Thorsten", |
|
"middle": [], |
|
"last": "Joachims", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of 8th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thorsten Joachims. 2002. Optimizing Search Engines using Clickthrough Data. In Proceedings of 8th ACM SIGKDD International Conference on Knowl- edge Discovery and Data Mining. Association for Computing Machinery, Edmonton, CA.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Measures of Association", |
|
"authors": [ |
|
{ |
|
"first": "Albert", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Liebetrau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1983, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Albert M. Liebetrau. 1983. Measures of Association. Sage Publications, Newbury Park, US.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "What Emotions do News Articles Trigger in their Readers?", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changhua", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hsin-Hsi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of 30th ACM SIGIR Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "733--734", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin H. Lin, Changhua Yang, and Hsin-Hsi Chen. 2007. What Emotions do News Articles Trigger in their Readers? In Proceedings of 30th ACM SIGIR Conference, 733-734. Association for Computing Machinery, Amsterdam, NL.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Emotion Classification of Online News Articles from the Reader's Perspective", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changhua", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hsin-Hsi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of International Conference on Web Intelligence. Institute of Electrical and Electronics Engineers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin H. Lin, Changhua Yang, and Hsin-Hsi Chen. 2008. Emotion Classification of Online News Articles from the Reader's Perspective. In Proceedings of In- ternational Conference on Web Intelligence. Institute of Electrical and Electronics Engineers, Sydney, AU.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "The Basic Practice of Statistics", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Moore", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Moore. 2006. The Basic Practice of Statistics. W.H. Freeman and Company, New York, US.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Thumbs up? Sentiment Classification Using Machine Learning Techniques", |
|
"authors": [ |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Pang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lillian", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shivakumar", |
|
"middle": [], |
|
"last": "Vaithyanathan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of 2002 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "79--86", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bo Pang, Lillian Lee, and Shivakumar Vaithyanathan. 2002. Thumbs up? Sentiment Classification Using Machine Learning Techniques. In Proceedings of 2002 Conference on Empirical Methods in Natural Language Processing, 79-86. Association for Com- putational Linguistics, Philadelphia, US.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Ranking with Multiple Hyperplanes", |
|
"authors": [ |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tie-Yan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu-Dong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "De-Sheng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of 30 th ACM SIGIR Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "279--286", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tao Qin, Tie-Yan Liu, Wei Lai, Xu-Dong Zhang, De- Sheng Wang, and Hang Li. 2007. Ranking with Mul- tiple Hyperplanes. In Proceedings of 30 th ACM SIGIR Conference, 279-286. Association for Com- puting Machinery, Amsterdam, NL.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "New Support Vector Algorithms", |
|
"authors": [ |
|
{ |
|
"first": "Bernhard", |
|
"middle": [], |
|
"last": "Sch\u00f6lkopf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Smola", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Williamson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Barlett", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Neural Computation", |
|
"volume": "12", |
|
"issue": "5", |
|
"pages": "1207--1245", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bernhard Sch\u00f6lkopf, Alex J. Smola, Robert C. William- son, and Peter L. Barlett. 2000. New Support Vector Algorithms. Neural Computation, 12(5), 1207-1245.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "SemEval-2007 Task 14: Affective Text", |
|
"authors": [ |
|
{ |
|
"first": "Carlo", |
|
"middle": [], |
|
"last": "Strapparava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of 4th International Workshop on Semantic Evaluations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Carlo Strapparava and Rada Mihalcea. 2007. SemEval- 2007 Task 14: Affective Text. In Proceedings of 4th International Workshop on Semantic Evaluations. Prague, CZ.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Learning Subjective Adjectives from Corpora", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Janyce", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Wiebe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of 17th Conference of the American Association for Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "735--740", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Janyce M. Wiebe. 2000. Learning Subjective Adjectives from Corpora. In Proceedings of 17th Conference of the American Association for Artificial Intelligence, 735-740. AAAI Press, Austin, US.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Probability Estimates for Multi-class Classification by Pairwise Coupling", |
|
"authors": [ |
|
{

"first": "Ting-Fan",

"middle": [],

"last": "Wu",

"suffix": ""

},

{

"first": "Chih-Jen",

"middle": [],

"last": "Lin",

"suffix": ""

},

{

"first": "Ruby",

"middle": [

"C."

],

"last": "Weng",

"suffix": ""

}
|
], |
|
"year": 2004, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "975--1005", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ting-Fan Wu, Chih-Jen Lin, and Ruby C. Weng. Prob- ability Estimates for Multi-class Classification by Pairwise Coupling. 2004. Journal of Machine Learn- ing Research, 5, 975-1005.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "Figure 1illustrates this phenomenon. In the figure,", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"text": "Performance of PLM and EDR.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"text": "where d i and d j are two news articles, and D i and D j are sets of Chinese character bigrams in d i and d j , respectively. The ranked emotion list of the training article having the highest cosine similarity with the test article is used as the predicted ranked list. Figures 3 to 5 show the performance of different ranking methods on the test corpus. For both PLM and EDR, all of the bigram, word, and news metadata features are used.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"text": "Accuracy of pairwise emotion classification and the corresponding average discrimination value.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF4": { |
|
"num": null, |
|
"text": "Mean squared error of NN and EDR for estimating the emotional distributions of the test corpus. PLM performance using different features.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"html": null, |
|
"text": "Kendall's \u03c4 b statistics.", |
|
"type_str": "table", |
|
"content": "<table><tr><td>Method</td><td/><td/><td/><td>Average \u03c4 b</td><td/><td/><td colspan=\"2\">Average p-value</td></tr><tr><td>PLM</td><td/><td/><td/><td>0.584</td><td/><td/><td/><td>0.068</td></tr><tr><td>EDR</td><td/><td/><td/><td>0.474</td><td/><td/><td/><td>0.114</td></tr><tr><td>NN</td><td/><td/><td/><td>0.392</td><td/><td/><td/><td>0.155</td></tr><tr><td/><td/><td>He</td><td>Su</td><td>Sa</td><td>Us</td><td>Ha</td><td>Bo</td><td>An</td></tr><tr><td>Aw</td><td colspan=\"2\">0.80</td><td>0.75</td><td>0.78</td><td>0.77</td><td>0.82</td><td>0.76</td><td>0.79</td></tr><tr><td>He</td><td/><td/><td>0.79</td><td>0.81</td><td>0.78</td><td>0.81</td><td>0.89</td><td>0.81</td></tr><tr><td>Su</td><td/><td/><td/><td>0.82</td><td>0.78</td><td>0.80</td><td>0.82</td><td>0.82</td></tr><tr><td>Sa</td><td/><td/><td/><td/><td>0.78</td><td>0.80</td><td>0.84</td><td>0.82</td></tr><tr><td>Us</td><td/><td/><td/><td/><td/><td>0.82</td><td>0.91</td><td>0.82</td></tr><tr><td>Ha</td><td/><td/><td/><td/><td/><td/><td>0.83</td><td>0.79</td></tr><tr><td>Bo</td><td/><td/><td/><td/><td/><td/><td/><td>0.80</td></tr><tr><td colspan=\"9\">Table 2. Classification accuracies of SVM pairwise</td></tr><tr><td colspan=\"9\">emotion classifiers on the test corpus. He = heartwarm-</td></tr><tr><td colspan=\"9\">ing, Su = surprising, Sa = sad, Us = useful, Ha = happy,</td></tr><tr><td colspan=\"5\">Bo = boring, and An = angry.</td><td/><td/><td/></tr><tr><td>Average Discrimination</td><td>Value of Emotion Pair</td><td>0.58 0.63 0.68 0.73</td><td/><td/><td/><td/><td/></tr><tr><td/><td/><td>0.53</td><td/><td/><td/><td/><td/></tr><tr><td/><td/><td>0.75</td><td/><td>0.8</td><td>0.85</td><td/><td>0.9</td></tr><tr><td/><td/><td/><td colspan=\"5\">Accuracy of Pairwise Emotion Classification</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |