|
{ |
|
"paper_id": "S16-1012", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:26:17.004743Z" |
|
}, |
|
"title": "aueb.twitter.sentiment at SemEval-2016 Task 4: A Weighted Ensemble of SVMs for Twitter Sentiment Analysis", |
|
"authors": [ |
|
{ |
|
"first": "Stavros", |
|
"middle": [], |
|
"last": "Giorgis", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Apostolos", |
|
"middle": [], |
|
"last": "Rousas", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Pavlopoulos", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ion", |
|
"middle": [], |
|
"last": "Androutsopoulos", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper describes the system with which we participated in SemEval-2016 Task 4 (Sentiment Analysis in Twitter) and specifically the Message Polarity Classification subtask. Our system is a weighted ensemble of two systems. The first one is based on a previous sentiment analysis system and uses manually crafted features. The second system of our ensemble uses features based on word embeddings. Our ensemble was ranked 5th among 34 teams. The source code of our system is publicly available.", |
|
"pdf_parse": { |
|
"paper_id": "S16-1012", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper describes the system with which we participated in SemEval-2016 Task 4 (Sentiment Analysis in Twitter) and specifically the Message Polarity Classification subtask. Our system is a weighted ensemble of two systems. The first one is based on a previous sentiment analysis system and uses manually crafted features. The second system of our ensemble uses features based on word embeddings. Our ensemble was ranked 5th among 34 teams. The source code of our system is publicly available.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "This paper describes the system with which we participated in SemEval-2016 Task 4 (Sentiment Analysis in Twitter) and specifically the Message Polarity Classification subtask (Nakov et al., 2016) . In this subtask, each tweet is classified as expressing a positive, negative, or no opinion (neutral). Our system is a weighted ensemble of two systems. The first one is based on a previous sentiment analysis system (Karampatsis et al., 2014) and uses manually crafted features. The second system of our ensemble uses features based on word embeddings (Mikolov et al., 2013; Pennington et al., 2014) . Our ensemble was ranked 5th among 34 teams.", |
|
"cite_spans": [ |
|
{ |
|
"start": 175, |
|
"end": 195, |
|
"text": "(Nakov et al., 2016)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 414, |
|
"end": 440, |
|
"text": "(Karampatsis et al., 2014)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 550, |
|
"end": 572, |
|
"text": "(Mikolov et al., 2013;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 573, |
|
"end": 597, |
|
"text": "Pennington et al., 2014)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Section 2 discusses the datasets we used to train and tune our ensemble. Sections 3 and 4 describe our ensemble and its performance respectively. Finally, Section 5 concludes and discusses future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "For system training and tuning we used 19,305 tweets from the 2016 datasets provided by the organisers of SemEval-2016 Task 4, as well as data from SemEval-2013 Task 2. Specifically, the datasets were: The organisers also provided 6,908 tweets from old SemEval data, to allow system evaluation during development. These data could not be used directly for training or tuning and were the following: ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "-TW train16 : train data for SemEval-2016 Task 4, -", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "-TW devtest13 : dev-test data for SemEval-2013 Task 2, -", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Avg(C SP1 , C SP2 ) SD1 SD2 SP1 SP2 Subjectivity Detection Sentiment Polarity C SD1 C SD2 C SP1 C SP2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Figure 1: Ensemble of two sentiment polarity classifiers, SP1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "and SP2, which are influenced by two subjectivity detection classifiers, SD1 and SD2, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The main objective of SemEval-2016 Task 4 is to detect sentiment polarity, i.e., to identify whether a message (tweet) expresses positive, negative or no sentiment at all. We used a weighted ensemble of two sentiment polarity classifiers, namely SP1 and SP2 ( Figure 1 ), each influenced by a subjectivity detection classifier, SD1 and SD2, respectively. A correlation analysis between the confidence scores of SP1 and SP2 (C SP 1 and C SP 2 respectively) revealed that the two systems make different mistakes, which motivated combining them in an ensemble. Given a message and the confidence scores of the two systems (i.e., C SP 1 and C SP 2 ), the ensemble computes a new confidence score for every sentiment label (C pos , C neg and C neu ) as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 260, |
|
"end": 268, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System Overview", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "C pos = C SP 1@pos \u2022 w pos + C SP 2@pos \u2022 (1 \u2212 w pos ) C neg = C SP 1@neg \u2022 w neg + C SP 2@neg \u2022 (1 \u2212 w neg ) C neu = C SP 1@neu \u2022 w neu + C SP 2@neu \u2022 (1 \u2212 w neu )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Overview", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "where w pos , w neg , w neu are weights tuned on the development data. The sentiment with the highest confidence score is assigned to each tweet. 1 Below, we describe the two Sentiment Polarity classifiers, along with the two subjectivity detection classifiers that influence them.", |
|
"cite_spans": [ |
|
{ |
|
"start": 146, |
|
"end": 147, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Overview", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "First, each message is preprocessed by a a Twitter specific tokeniser and part-of-speech (POS) tagger (Owoputi et al., 2013) to obtain the tokens and 1 Tuning led to wpos = wneg = wneu = 0.66. the corresponding POS tags, which are necessary for some features. 2 Then, we extract features, which can be categorized as follows: 3", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 124, |
|
"text": "(Owoputi et al., 2013)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SP1 and SD1", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 features based on morphology,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SP1 and SD1", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 POS based features,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SP1 and SD1", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 sentiment lexicon based features,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SP1 and SD1", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 negation based features,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SP1 and SD1", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 features based on clusters of tweets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SP1 and SD1", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We used a linear SVM classifier (Vapnik, 1998; Cristianini and Shawe-Taylor, 2000; Joachims, 2002) trained on three labels, namely, positive, negative and neutral. 4 As already mentioned, SP1 is influenced by a subjectivity detection classifier called SD1. That is, SP1 uses as a feature the confidence score of SD1. SD1 is also a linear SVM classifier, which is trained on data of two labels, neutral and subjective (i.e., positive or negative). 5 The higher the confidence score of SD1 the more likely it is for the message to express sentiment (positive or negative). Apart from the score of SD1 (which was used by SP1), SP1 and SD1 used the same features.", |
|
"cite_spans": [ |
|
{ |
|
"start": 32, |
|
"end": 46, |
|
"text": "(Vapnik, 1998;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 47, |
|
"end": 82, |
|
"text": "Cristianini and Shawe-Taylor, 2000;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 83, |
|
"end": 98, |
|
"text": "Joachims, 2002)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 164, |
|
"end": 165, |
|
"text": "4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 447, |
|
"end": 448, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SP1 and SD1", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The second system of our ensemble uses word embeddings (Mikolov et al., 2013; Pennington et al., 2014) . We use the centroid of the word embeddings of each tweet as the feature vector of the tweet. The centroid of a tweet (message) M is computed as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 77, |
|
"text": "(Mikolov et al., 2013;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 78, |
|
"end": 102, |
|
"text": "Pennington et al., 2014)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SP2 and SD2", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "M = 1 |M | |M | i=1 w i", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SP2 and SD2", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "2 No lemmatization or stemming was used and tokens could be words, emoticons, hashtags, etc.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SP2 and SD2", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "3 All the features of SP1 are described in detail in a publicly available report, accompanying the source code of the system. The code and the report are available at https://github. com/nlpaueb/aueb.twitter.sentiment. 4 We used the SVM implementation of Scikit Learn (Pedregosa et al., 2011; Fan et al., 2008) . The same implementation was used for all our SVM classifiers. The optimal C value was found to be 0.00341, by using 5-fold cross validation on where |M | is the number of tokens in M and w i is the embedding of word w i . 6 We used the 200dimensional word vectors for Twitter produced by GloVe (Pennington et al., 2014) . 7 As with SP1, SP2 incorporates the confidence score of SD2 as a feature. SD2 is a classifier trained on neutral and subjective data (positive or negative), again with centroid feature vectors. Given a message M , the confidence score of SD2 for M was added as a feature to its centroid and the resulting 201-dimension feature vector was used as input to SP2. 8 SP2 was then trained on the same three classes as SP1 (positive, negative, neutral). 9", |
|
"cite_spans": [ |
|
{ |
|
"start": 219, |
|
"end": 220, |
|
"text": "4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 268, |
|
"end": 292, |
|
"text": "(Pedregosa et al., 2011;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 293, |
|
"end": 310, |
|
"text": "Fan et al., 2008)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 535, |
|
"end": 536, |
|
"text": "6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 607, |
|
"end": 632, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 635, |
|
"end": 636, |
|
"text": "7", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SP2 and SD2", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Our system was ranked 5th among 34 teams. 10 All teams were ranked by their score on the Twitter2016 Task 4 test dataset. Table 1 shows our rankings on each dataset. Below we discuss the results of our ensemble and we show how the subjectivity detection classifiers affect our system. 6 We allow multiple word occurrences in a sentence, while we ignore words without embeddings. 7 The word vectors were pre-trained on a 2 billion tweets corpus. See http://nlp.stanford.edu/projects/ glove/.", |
|
"cite_spans": [ |
|
{ |
|
"start": 285, |
|
"end": 286, |
|
"text": "6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 379, |
|
"end": 380, |
|
"text": "7", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 122, |
|
"end": 129, |
|
"text": "Table 1", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments & Discussion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "8 The confidence scores of SD1 and SD2 were exponentially normalized (Bishop, 2006) . 9 The optimal C values were found to be 1.40688 for SD2 and 7.39618 for SP2, by using 5-fold cross validation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 69, |
|
"end": 83, |
|
"text": "(Bishop, 2006)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 86, |
|
"end": 87, |
|
"text": "9", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments & Discussion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "10 http://alt.qcri.org/semeval2016/task4/ data/uploads/semeval2016_task4_results.pdf A strict two-stage approach, like the one suggested by Karampatsis et al. (2014) , discards messages the sentiment detection (SD) classifier (first stage) decides they do not express sentiment, and classifies the rest as positive or negative. However, errors of the first stage propagate to the second, thus, playing a significant role in overall performance. We extend their approach and attempt to use the results of a subjectivity detection stage in a less rigorous manner; i.e., as a confidence factor along with various other features. Recall that our SD1 is actually the first stage of the system of Karampatsis et al. (2014) , and that we use the confidence of SD1 as feature of SP1. Table 2 shows that SP1 (with the confidence of SP1 as a feature) outperforms the strict two-stage approach by 4.57%, yielding an increase in the ranking by 12 positions. Another interesting observation is that SP2 (with the confidence of SD2 as a feature) achieves a score only 1.9% lower than SP1 (with SD1) yielding a ranking around the middle of the list. This is achieved by using only features based on word embeddings along with the confidence of SD2 and no sophisticated feature engineering at all. A final, and also very interesting observation is that when we use an ensemble of SP1 and SP2, the results improve yielding a 5th place in the ranking.", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 165, |
|
"text": "Karampatsis et al. (2014)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 691, |
|
"end": 716, |
|
"text": "Karampatsis et al. (2014)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 776, |
|
"end": 783, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments & Discussion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In this paper we presented the system with which we participated in the Message Polarity Classification subtask of SemEval-2016 Task 4. We used a weighted ensemble of two systems each operating in two stages. In a first, subjectivity detection stage, each message is assigned a confidence score representing the probability that the message expresses an opinion. This probability is then used as a feature by a classifier that detects sentiment. We used two different systems, one based on previous work by Karampatsis et al. (2014) (SP1 with the confidences of SD1 as a feature) and a second system that represents the messages by the centroids of their word embeddings (SP2 with the confidence of SD2 as a feature). The two systems are then combined with a weighted linear ensemble scheme in order to get the final sentiment label. Our experiments show that using the confidence of the subjectivity detection stage as a feature instead of using a strict two-stage ap-proach can lead to an improved performance. Also, the ensemble performs better than any of its two systems on their own.", |
|
"cite_spans": [ |
|
{ |
|
"start": 507, |
|
"end": 532, |
|
"text": "Karampatsis et al. (2014)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and future work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Despite the encouraging results of our approach (5th among 34 participating teams), there is still much room for improvement. A better continuous space vector representation of the messages might improve SD2 and SP2. Much research has been conducted recently on obtaining better continuous space vector representations of sentences (Le and Mikolov, 2014; Kiros et al., 2015; Hill et al., 2016) instead of centroid vectors. Another direction for future work would be to investigate replacing the SVM classifiers by multilayer perceptrons, possibly on top of recurrent neural nets that would compute vector representations of sentences.", |
|
"cite_spans": [ |
|
{ |
|
"start": 332, |
|
"end": 354, |
|
"text": "(Le and Mikolov, 2014;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 355, |
|
"end": 374, |
|
"text": "Kiros et al., 2015;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 375, |
|
"end": 393, |
|
"text": "Hill et al., 2016)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions and future work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The optimal C value for SD1 was found to be 0.00195, by using 5-fold cross validation on TWtrain16.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was carried out during the BSc projects of the first two authors, which were co-supervised by the other three authors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Pattern Recognition and Machine Learning (Information Science and Statistics)", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Bishop", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Bishop. 2006. Pattern Recognition and Ma- chine Learning (Information Science and Statistics). Springer-Verlag New York, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "An Introduction to Support Vector Machines and Other Kernel-based Learning Methods", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Cristianini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Shawe-Taylor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "N. Cristianini and J. Shawe-Taylor. 2000. An In- troduction to Support Vector Machines and Other Kernel-based Learning Methods. Cambridge Univer- sity Press.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Liblinear: A library for large linear classification", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Hsieh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X.-R", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "The Journal of Machine Learning Research", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "1871--1874", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. E. Fan, K. W. Chang, C. J. Hsieh, X.-R. Wang, and C. J. Lin. 2008. Liblinear: A library for large linear classification. The Journal of Machine Learning Re- search, 9:1871-1874.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Learning distributed representations of sentences from unlabelled data", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Korhonen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1602.03483" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F. Hill, K. Cho, and A. Korhonen. 2016. Learning dis- tributed representations of sentences from unlabelled data. arXiv preprint arXiv:1602.03483.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Learning to Classify Text Using Support Vector Machines: Methods, Theory, Algorithms. Kluwer", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Joachims", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Joachims. 2002. Learning to Classify Text Using Sup- port Vector Machines: Methods, Theory, Algorithms. Kluwer.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "AUEB: Two stage sentiment analysis of social network messages", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Karampatsis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Pavlopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Malakasiotis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 8th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "114--118", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Karampatsis, J. Pavlopoulos, and P. Malakasiotis. 2014. AUEB: Two stage sentiment analysis of social network messages. In Proceedings of the 8th Interna- tional Workshop on Semantic Evaluation, pages 114- 118, Dublin, Ireland.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Skip-thought vectors", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Zemel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Torralba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Urtasun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Fidler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1506.06726" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Kiros, Y. Zhu, R. Salakhutdinov, R. S. Zemel, A. Tor- ralba, R. Urtasun, and S. Fidler. 2015. Skip-thought vectors. arXiv preprint arXiv:1506.06726.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Distributed representations of words and phrases", |
|
"authors": [ |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 31th International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1188--1196", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Q. Le and T. Mikolov. 2014. Distributed representations of words and phrases. In Proceedings of the 31th In- ternational Conference on Machine Learning, pages 1188-1196, Beijing, China.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Efficient estimation of word representations in vector space", |
|
"authors": [ |
|
{

"first": "T",

"middle": [],

"last": "Mikolov",

"suffix": ""

},

{

"first": "K",

"middle": [],

"last": "Chen",

"suffix": ""

},

{

"first": "G",

"middle": [],

"last": "Corrado",

"suffix": ""

},

{

"first": "J",

"middle": [],

"last": "Dean",

"suffix": ""

}
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1301.3781" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikolov, K. Chen, G. Corrado, and J. Dean. 2013. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "SemEval-2016 task 4: Sentiment analysis in Twitter", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Ritter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Rosenthal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Sebastiani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 10th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Nakov, A. Ritter, S. Rosenthal, V. Stoyanov, and F. Se- bastiani. 2016. SemEval-2016 task 4: Sentiment anal- ysis in Twitter. In Proceedings of the 10th Interna- tional Workshop on Semantic Evaluation, San Diego, California.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Improved partof-speech tagging for online conversational text with word clusters", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Owoputi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "O'connor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Schneider", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "380--390", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "O. Owoputi, B. O'Connor, C. Dyer, K. Gimpel, N. Schneider, and N. A. Smith. 2013. Improved part- of-speech tagging for online conversational text with word clusters. In Proceedings of the 2013 Confer- ence of the North American Chapter of the Associa- tion for Computational Linguistics: Human Language Technologies, pages 380-390, Atlanta, Georgia.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Scikit-learn: Machine learning in Python", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Pedregosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Varoquaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Gramfort", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Michel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Thirion", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Grisel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Blondel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Prettenhofer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Dubourg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Vanderplas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Passos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Cournapeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Brucher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Perrot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Duchesnay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2825--2830", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duches- nay. 2011. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825- 2830.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Pennington, R. Socher, and C. D. Manning. 2014. Glove: Global vectors for word representation. In Pro- ceedings of the 2014 Conference on Empirical Meth- ods in Natural Language Processing, pages 1532- 1543, Doha, Qatar.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Statistical learning theory", |
|
"authors": [ |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Vapnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "V. Vapnik. 1998. Statistical learning theory. John Wiley.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF3": { |
|
"content": "<table><tr><td>Strict 2 stages</td><td colspan=\"2\">Train data Dev data 62.60% 58.50%</td><td>Tweet2016 54.83% (19/34)</td></tr><tr><td>SP1 (with SD1)</td><td>68.00%</td><td>64.70%</td><td>59.40% (7/34)</td></tr><tr><td>SP2 (with SD2)</td><td>60.80%</td><td>59.00%</td><td>57.50% (15/34)</td></tr><tr><td>ENS</td><td>68.40%</td><td>65.80%</td><td>60.52% (5/34)</td></tr></table>", |
|
"html": null, |
|
"text": "Rankings of our system", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Average F 1 scores of SP1, SP2, ENS (our ensemble) and a strict two-stage system.", |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |