|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T09:42:57.592619Z" |
|
}, |
|
"title": "Applying Transformers and Aspect-based Sentiment Analysis approaches on Sarcasm Detection", |
|
"authors": [ |
|
{ |
|
"first": "Taha", |
|
"middle": [], |
|
"last": "Shangipour", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Iran University of Science and Technology Tehran", |
|
"location": { |
|
"country": "Iran" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Soroush", |
|
"middle": [], |
|
"last": "Javdan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Iran University of Science and Technology Tehran", |
|
"location": { |
|
"country": "Iran" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Behrouz", |
|
"middle": [], |
|
"last": "Minaei-Bidgoli", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Iran University of Science and Technology Tehran", |
|
"location": { |
|
"country": "Iran" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Sarcasm is a type of figurative language broadly adopted in social media and daily conversations. The sarcasm can ultimately alter the meaning of the sentence, which makes the opinion analysis process error-prone. In this paper, we propose to employ bidirectional encoder representations transformers (BERT), and aspect-based sentiment analysis approaches in order to extract the relation between context dialogue sequence and response and determine whether or not the response is sarcastic. The best performing method of ours obtains an F1 score of 0.73 on the Twitter dataset and 0.734 over the Reddit dataset at the second workshop on figurative language processing Shared Task 2020.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Sarcasm is a type of figurative language broadly adopted in social media and daily conversations. The sarcasm can ultimately alter the meaning of the sentence, which makes the opinion analysis process error-prone. In this paper, we propose to employ bidirectional encoder representations transformers (BERT), and aspect-based sentiment analysis approaches in order to extract the relation between context dialogue sequence and response and determine whether or not the response is sarcastic. The best performing method of ours obtains an F1 score of 0.73 on the Twitter dataset and 0.734 over the Reddit dataset at the second workshop on figurative language processing Shared Task 2020.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "We are living in the age of social media. Many consider it as a revolution. Social media creates a variety of new possibilities; for instance, today, people can express their thought with just a tap of a finger. In the twitter platform, people are twitting around 500 million tweets per day, and it is estimated that over 2.8 million comments are posted on the Reddit every single day. This vast amount of data present an enormous opportunity for businesses, and researchers alogn with a significant number of challenges. Many companies and researchers have been interested in these data to investigate the opinion, emotions, and other aspects of them.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The usage of informal language and noisy content within social media presents many difficulties toward the opinion and emotion analysis problems. One of the main challenges in this criteria is the appearance of figurative language such as sarcasm. The sarcasm can alter the meaning of the sentence ultimately, and consequently, make the opinion analysis process error-prone. For instance, criticism may use positive words to convey a negative message. In recent years there was a growing trend to address the Sarcasm Detection problem among Natural Language Processing (NLP) researchers. Many approaches tackle the Sarcasm Detection problem by considering contextual information, instead of using utterance solely. For instance, Bamman and Smith (2015) utilized author context along with the environment and audience context, and Mishra et al. (2016) used cognitive features, Ghosh et al. (2018) made use of conversational context. The Sarcasm Detection shared task 1 is aimed to detect sarcasm based on the conversation context. Given the current utterance and conversation history, the models are expected to decide if the utterance is sarcastic or not. We test our models on the dataset from both Twitter and Reddit. Both utterance and the conversation history have been used as input. We applied the transformer-based model and adopted aspect-based sentiment analysis approaches to address the problem.", |
|
"cite_spans": [ |
|
{ |
|
"start": 830, |
|
"end": 850, |
|
"text": "Mishra et al. (2016)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 876, |
|
"end": 895, |
|
"text": "Ghosh et al. (2018)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The remnant of this paper is organized as follows: Section 2 reviews related work. Section 3 describes the datasets. Section 4 explains our methodology. Section 5 shows the results of each dataset in detail. Lastly, section 6 provides conclusions and our plans for future research.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There have been several attempts to solve the Sarcasm Detection problem with rule-based approaches. Bharti et al. (2015) presented two rulebased classifiers for two different types of tweet structure, the first one used to detect sarcasm in tweets that have a contradiction between negative sentiment and positive situation. The second classifier applied to tweets that start with interjection words. The former classifier applied parsed-based lexicon generation to identify phrases that display sentiment, and indicate the sarcastic label whenever a negative phrase occurs in a positive sentence. The latest classifier used interjections and intensifiers that occur together in order to predict sarcasm. Maynard and Greenwood (2014) suggests that hashtag sentiment is an essential symbol of sarcasm, and authors often used hashtags to emphasize sarcasm. They propose the tweet is sarcastic whenever the hashtags' sentiments do not agree with the rest of the tweet.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Besides the requirement for in-depth knowledge of the domain and much manual work, rule-based methods are usually not the best performers in terms of prediction quality. Because of the high cost of the rule-based methods, many researchers put there focus on machine learning approaches. Different types of models and features have been adopted to tackle this problem. Mukherjee and Bala (2017) addressed the problem in both supervised and unsupervised settings. They utilized Na\u00efve Bayes as a classifier and C-means clustering, which is one of the most widely used fuzzy clustering algorithms. Joshi et al. (2016) adopted a sequence labeling techniques (SVM-HMM and SEARN) and indicated that sequence labeling algorithms outperform the classification algorithms in conversational data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 368, |
|
"end": 393, |
|
"text": "Mukherjee and Bala (2017)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "With the development of computational hardware and deep learning in recent years, many deep learning methods have been proposed to address the sarcasm detection problem. Amir et al. (2016) proposed a Convolutional Neural Network-based architecture that jointly learns and exploits embeddings for the users' content and utterances. Ghosh and Veale (2016) used the complication of the Convolutional Neural Network and Recurrent Neural Network. They used two layers of Convolutional Neural Network, followed by two layers of Long Short-Term Memory(LSTM). The output of LSTM layers fed to a Fully Connected Neural Network in order to produce a higher-order feature set. Diao et al. (2020) proposed a novel multi-dimension question answering network in order to detect sarcasm. They utilized conversation context information. A deep memory network based on BiLSTM and attention mechanisms have been adopted to extract the factors of sarcasm.", |
|
"cite_spans": [ |
|
{ |
|
"start": 170, |
|
"end": 188, |
|
"text": "Amir et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 331, |
|
"end": 353, |
|
"text": "Ghosh and Veale (2016)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 666, |
|
"end": 684, |
|
"text": "Diao et al. (2020)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Two corpora were used in Sarcasm Detection shared task, which both of them are balanced. The Twitter corpus consists of 5000 data samples for the train and 1800 for the test set. On the other hand, Reddit corpus contains 4400 data samples for the train and 1800 for the test set. Training datasets have four columns:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 ID: a unique identifier for each data sample Moreover, we used a balanced dataset proposed at (Khodak et al., 2017) with 1 million data samples over Reddit comments as an additional dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 96, |
|
"end": 117, |
|
"text": "(Khodak et al., 2017)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In this section we describe models and technquies that we used to address sarcasm detection problem.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Hashtag segmentation: the hashtag is a type of metadata used on social media starting with a number sign, #, which helps users find messages with the same topic. We apply word segmentation on hashtags, for example '#BlackHistoryMonth' is segmented as 'Black History Month.'", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Misc.: We removed all of the @USER mentions and <URL> tags.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preprocessing", |
|
"sec_num": "4.1" |
|
}, |
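
{

"text": "A minimal sketch of these two preprocessing steps, assuming Python with the wordsegment package; the helper names and the example string are our own illustration rather than the exact pipeline used:\n\nimport re\nfrom wordsegment import load, segment  # pip install wordsegment\n\nload()  # load the package's unigram/bigram statistics once\n\ndef split_hashtag(match):\n    # '#BlackHistoryMonth' -> 'Black History Month'\n    return ' '.join(segment(match.group(1))).title()\n\ndef preprocess(text):\n    text = re.sub(r'#(\\w+)', split_hashtag, text)  # hashtag segmentation\n    text = re.sub(r'@USER|<URL>', '', text)  # drop mention and URL tags\n    return re.sub(r'\\s+', ' ', text).strip()\n\nprint(preprocess('@USER loving #BlackHistoryMonth <URL>'))  # -> 'loving Black History Month'",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Preprocessing",

"sec_num": "4.1"

},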
|
{ |
|
"text": "GloVe (Pennington et al., 2014) is an unsupervised method for extracting word vector representation for our raw data. We also employed Fasttext (Mikolov et al., 2018) embedding because they are derived from character n-gram and thus are useful for misspelled words and social media contents.", |
|
"cite_spans": [ |
|
{ |
|
"start": 6, |
|
"end": 31, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 144, |
|
"end": 166, |
|
"text": "(Mikolov et al., 2018)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Embedding:", |
|
"sec_num": "4.2" |
|
}, |
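
{

"text": "A minimal sketch of loading the two pretrained embeddings and concatenating them into the 600-dimensional lookup table used by our recurrent model in Section 4.3; the vector file paths are placeholders, and out-of-vocabulary words are left as zero vectors:\n\nimport numpy as np\n\ndef load_vectors(path, dim):\n    # each line holds a token followed by its vector components\n    vecs = {}\n    with open(path, encoding='utf-8') as f:\n        for line in f:\n            parts = line.rstrip().split(' ')\n            if len(parts) == dim + 1:\n                vecs[parts[0]] = np.asarray(parts[1:], dtype='float32')\n    return vecs\n\nglove = load_vectors('glove.840B.300d.txt', 300)  # placeholder path\nfasttext = load_vectors('crawl-300d-2M.vec', 300)  # placeholder path\n\ndef embedding_matrix(word_index, dim=300):\n    # rows follow the tokenizer's word index; row 0 is reserved for padding\n    mat = np.zeros((len(word_index) + 1, 2 * dim), dtype='float32')\n    for word, i in word_index.items():\n        if word in glove:\n            mat[i, :dim] = glove[word]\n        if word in fasttext:\n            mat[i, dim:] = fasttext[word]\n    return mat",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word Embedding:",

"sec_num": "4.2"

},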
|
{ |
|
"text": "NBSVM: We used the NBSVM model introduced by Wang and Manning (2012) , which is a combination of Na\u00efve Bayes and support vector machine, and is known as an excellent baseline for many NLP tasks. As input, we utilized the TF-IDF matrix with character n-gram features with n-gram range from 2 to 6. We applied this method over both datasets. Also, we tried different input data for the NBSVM model. The different combinations of 'response' column, 'context' column have been used as input.", |
|
"cite_spans": [ |
|
{ |
|
"start": 45, |
|
"end": 68, |
|
"text": "Wang and Manning (2012)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models:", |
|
"sec_num": "4.3" |
|
}, |
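
{

"text": "A minimal sketch of the NBSVM recipe of Wang and Manning (2012) as we applied it, assuming scikit-learn: TF-IDF features over character n-grams of length 2 to 6 are scaled by the Na\u00efve Bayes log-count ratio before fitting a linear SVM. Settings other than the n-gram range are illustrative defaults:\n\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.svm import LinearSVC\n\ndef fit_nbsvm(texts, labels):\n    vec = TfidfVectorizer(analyzer='char', ngram_range=(2, 6))\n    X = vec.fit_transform(texts)\n    y = np.asarray(labels)\n    # Naive Bayes log-count ratio between the two classes, with add-one smoothing\n    p = X[y == 1].sum(axis=0) + 1.0\n    q = X[y == 0].sum(axis=0) + 1.0\n    r = np.asarray(np.log((p / p.sum()) / (q / q.sum())))[0]\n    clf = LinearSVC().fit(X.multiply(r), y)\n    return vec, r, clf\n\ndef predict_nbsvm(vec, r, clf, texts):\n    return clf.predict(vec.transform(texts).multiply(r))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Models:",

"sec_num": "4.3"

},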
|
{ |
|
"text": "BERT: Bidirectional Encoder Representation from Transformer (BERT) (Devlin et al., 2018) was released by the Google research team and achieved state of the art in many NLP tasks. BERT is pretrained on a huge corpus of data sources. As input, we experiment with the 'response' column solely, 'context' column solely, and the concatenation of 'context' and 'response' column. We trained the model with three epochs, a batch size of 8, and a learning rate of 2e-5. For maximum sequence length, the 128 yield best result.", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 88, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models:", |
|
"sec_num": "4.3" |
|
}, |
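
{

"text": "A minimal fine-tuning sketch with the stated hyperparameters (three epochs, batch size 8, learning rate 2e-5, maximum sequence length 128), assuming the Hugging Face transformers library; train_texts and train_labels are assumed to hold the chosen input strings and their 0/1 labels:\n\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom transformers import BertTokenizer, BertForSequenceClassification\n\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nmodel = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)\n\nenc = tokenizer(train_texts, truncation=True, padding='max_length', max_length=128, return_tensors='pt')\ndata = TensorDataset(enc['input_ids'], enc['attention_mask'], torch.tensor(train_labels))\nloader = DataLoader(data, batch_size=8, shuffle=True)\n\noptimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)\nmodel.train()\nfor epoch in range(3):\n    for input_ids, attention_mask, labels in loader:\n        loss = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels).loss\n        loss.backward()\n        optimizer.step()\n        optimizer.zero_grad()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Models:",

"sec_num": "4.3"

},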
|
{ |
|
"text": "We used logits from the final layer of BERT as input for a support vector machine model with a linear kernel. We trained the model with three epochs, a batch size of 8, and a learning rate of 2e-5. For maximum sequence length, the 128 yield best result.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BERT-SVM:", |
|
"sec_num": null |
|
}, |
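
{

"text": "A minimal sketch of this SVM head, reusing the fine-tuned model and tokenizer from the sketch above; bert_logits is an illustrative helper, and scikit-learn's linear-kernel SVC stands in for the support vector machine:\n\nimport torch\nfrom sklearn.svm import SVC\n\[email protected]_grad()\ndef bert_logits(texts, batch_size=8):\n    # run the fine-tuned BERT and collect its final-layer logits as features\n    model.eval()\n    outs = []\n    for i in range(0, len(texts), batch_size):\n        enc = tokenizer(texts[i:i + batch_size], truncation=True, padding='max_length', max_length=128, return_tensors='pt')\n        outs.append(model(**enc).logits)\n    return torch.cat(outs).numpy()\n\nsvm = SVC(kernel='linear').fit(bert_logits(train_texts), train_labels)\npreds = svm.predict(bert_logits(test_texts))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "BERT-SVM:",

"sec_num": null

},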
|
{ |
|
"text": "We used logits from the final layer of BERT as input for a logistic regression model. We trained the model with three epochs, a batch size of 8, and a learning rate of 2e-5. For maximum sequence length, the 128 yield best result.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BERT-LR:", |
|
"sec_num": null |
|
}, |
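
{

"text": "The logistic regression head differs from the SVM head only in the classifier fitted on the logits; a sketch under the same assumptions as above:\n\nfrom sklearn.linear_model import LogisticRegression\n\n# same logit features as in the BERT-SVM sketch, with a logistic regression head\nlr = LogisticRegression(max_iter=1000).fit(bert_logits(train_texts), train_labels)\npreds = lr.predict(bert_logits(test_texts))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "BERT-LR:",

"sec_num": null

},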
|
{ |
|
"text": "XLNET: XLNet ) is a generalized autoregressive pretraining method. Since it outperforms BERT on 20 different NLP tasks, we train this method over the Reddit dataset. We trained the model with three epochs, a batch size of 8, and a learning rate of 2e-5. For maximum sequence length, the 128 yield best result.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BERT-LR:", |
|
"sec_num": null |
|
}, |
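
{

"text": "Swapping XLNet into the same training loop only requires changing the checkpoint; a sketch assuming the Hugging Face Auto classes and the 'xlnet-base-cased' checkpoint (the exact variant is an assumption):\n\nfrom transformers import AutoTokenizer, AutoModelForSequenceClassification\n\n# the fine-tuning loop from the BERT sketch is reused unchanged\ntokenizer = AutoTokenizer.from_pretrained('xlnet-base-cased')\nmodel = AutoModelForSequenceClassification.from_pretrained('xlnet-base-cased', num_labels=2)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "BERT-LR:",

"sec_num": null

},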
|
{ |
|
"text": "Bi-GRU-CNN+BiLSTM-CNN: CNN is suitable for detecting patterns, and by changing kernel sizes, it also can detect different patterns regardless of their positions. RNN is a sequence of network blocks linked to each other, and each of them passes a message to the next one, this feature enables the network to demonstrate dynamic temporal behavior for a time sequence. We employ a neural network architecture built on top of a concatenation of glove embedding and the fastText embedding, both of them with 300 dimensions. Then, the network splits into two parallel parts. The first part combines a bidirectional gated recurrent unit (GRU) with 128 hidden units and a convolutional layer with a kernel size of 2 and 64 hidden units. The second part combines a BiLSTM with 128 hidden units and a convolutional layer with a kernel size of 2 and 64 hidden units. Finally, we concatenate global max pooling and global average pooling of parallel parts and feed them to a dense layer and then through a softmax layer for the classification purpose.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BERT-LR:", |
|
"sec_num": null |
|
}, |
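
{

"text": "A minimal Keras sketch of this architecture; the sequence length and the size of the final dense layer are illustrative assumptions, and emb_matrix is the 600-dimensional GloVe+fastText concatenation from Section 4.2:\n\nfrom tensorflow.keras import layers, Model\n\ndef build_model(vocab_size, emb_matrix, max_len=100):\n    inp = layers.Input(shape=(max_len,))\n    emb = layers.Embedding(vocab_size, 600, weights=[emb_matrix], trainable=False)(inp)\n    # part 1: BiGRU (128 units) followed by a convolution (64 filters, kernel size 2)\n    a = layers.Bidirectional(layers.GRU(128, return_sequences=True))(emb)\n    a = layers.Conv1D(64, kernel_size=2)(a)\n    # part 2: BiLSTM (128 units) followed by a convolution (64 filters, kernel size 2)\n    b = layers.Bidirectional(layers.LSTM(128, return_sequences=True))(emb)\n    b = layers.Conv1D(64, kernel_size=2)(b)\n    # global max and average pooling of both parts, concatenated\n    pooled = layers.concatenate([\n        layers.GlobalMaxPooling1D()(a), layers.GlobalAveragePooling1D()(a),\n        layers.GlobalMaxPooling1D()(b), layers.GlobalAveragePooling1D()(b)])\n    out = layers.Dense(2, activation='softmax')(layers.Dense(64, activation='relu')(pooled))\n    model = Model(inp, out)\n    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n    return model",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "BERT-LR:",

"sec_num": null

},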
|
{ |
|
"text": "After reviewing some aspect-based sentiment analysis methods, we found some similarities between these models and sarcasm detection problems, so we attempted to change aspect-based sentiment analysis and adapt it sufficiently to address the Sarcasm Detection problem. For all the following models, the number of training epochs has been set to 10.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BERT-LR:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "IAN: IAN (Ma et al., 2017) has two attention layers that learn context and target interactively and make representation for both separately. We replace the context of the ABSA with the last dialogue in the 'context' column of the sarcasm datasets and target with the 'response' column. We utilized 300 hidden units for both LSTM and attention parts. We run this method on both datasets. LCF-BERT: LCF-BERT (Zeng et al., 2019 ) is a method based on multi-head self-attention, it employs context features dynamic mask (CDM) and context features dynamic weighted (CDW) layers along with a BERT-shared layer to extract longterm internal dependencies of local context and global context in aspect-based sentiment classification problem. We alter the model input so it can perform on the sarcastic dataset. As input, we used 'response' and the last dialogue in the 'context' column. The BERT-base-uncased with a maximum sequence length of 80 has been used as a BERT-shared layer.", |
|
"cite_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 26, |
|
"text": "(Ma et al., 2017)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 406, |
|
"end": 424, |
|
"text": "(Zeng et al., 2019", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BERT-LR:", |
|
"sec_num": null |
|
}, |
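
{

"text": "A minimal sketch of the input adaptation shared by IAN, LCF-BERT, and the AEN-BERT model described next: each record is mapped to an ABSA-style (context, target) pair. The helper and variable names are our own illustration of the dataset's 'context', 'response', and 'label' columns:\n\ndef to_absa_example(row):\n    # ABSA context <- last dialogue turn; ABSA target <- the response\n    return {'context': row['context'][-1],\n            'target': row['response'],\n            'label': row['label']}\n\nexamples = [to_absa_example(r) for r in train_rows]  # train_rows: parsed dataset records",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "BERT-LR:",

"sec_num": null

},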
|
{ |
|
"text": "BERT-AEN: AEN-BERT (Song et al., 2019) is another method proposed for the aspect-based sentiment classification that we borrowed for this task. This method introduces an attentional encoder network as a solution for the RNN problem with longterm pattern recognition. It also applies a BERTbase-uncased pre-trained model with a maximum sequence length of 80. We also used hidden units of 300 for the attention part. We modify the model input so it can work on the sarcastic dataset. As input, we used 'response' and the last dialogue in the 'context' column.", |
|
"cite_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 38, |
|
"text": "(Song et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BERT-LR:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "On twitter corpus, the performance of NBSVM as a simple model is quite impressive. As long as the data is from social media and might contain informal and misspelling content using character n-gram TFIDT matrix can yield excellent performance. As features, we used different combinations of 'response' column, 'context' column. However, taking the 'response' column as a feature solely produced the best result. We also test models with and without preprocessing steps. However, adding the preprocessing step did not show a significate change in the results. As expected, BERT achieved the second-best position on the scoreboard. We used a different set of features, but again using the 'response' column solely scored the best among others. Furthermore, LCF-BERT, which is an aspect-based sentiment classification method, scored the best on the Twitter dataset because aspect-based sentiment classification methods consider input data as two different sections and try to learn them interactively. The complete results with more details are shown in Table. 1. Unlike our experience on the Twitter dataset, NBSVM did not perform well on the Reddit dataset. It appears that the Reddit dataset is more complicated and challenging than twitter. However, using an additional dataset, around 1 million data points, boosted the NBSVM result around 7 percent. For NBSVM, we used the same feature as it was used for the twitter dataset. BERT performance was the best on this dataset, regardless of additional data. For BERT-SVM and BERT-LR, we only utilized the 'response' column as input. Moreover, for XL-Net, we used the 'response' column with 100,000 random data points from the additional dataset. Furthermore, for the aspect-based sentiment analysis models, we used the last dialogue 'context' column and 'response' column as our input. The complete results with more details are shown in Table. 2. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1051, |
|
"end": 1057, |
|
"text": "Table.", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1887, |
|
"end": 1893, |
|
"text": "Table.", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Our proposed methods ranked 5 out of 37 groups for the Reddit dataset and ranked 25 out of 36 for the Twitter dataset. This result shows the strength of the BERT pre-trained model on sarcasm detection and its combination with aspect-based sentiment analysis models, which take data as two separate parts and learn them interactively. Also, additional data can improve performance slightly better. It is noteworthy to mention that NBSVM performance as a simple baseline with the TFIDF matrix with character n-gram was quite impressive. For future work, a combination of contextual and character-based embedding could lead to better performance. Moreover, since social media content usually contains misspelling and informal data, more complicated preprocessing techniques like social media content normalization might be more helpful than proposed techniques.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "https://competitions.codalab.org/competitions/22247", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Modelling context with user embeddings for sarcasm detection in social media", |
|
"authors": [ |
|
{ |
|
"first": "Silvio", |
|
"middle": [], |
|
"last": "Amir", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Byron", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Wallace", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paula Carvalho M\u00e1rio J", |
|
"middle": [], |
|
"last": "Lyu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Silva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1607.00976" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Silvio Amir, Byron C Wallace, Hao Lyu, and Paula Car- valho M\u00e1rio J Silva. 2016. Modelling context with user embeddings for sarcasm detection in social me- dia. arXiv preprint arXiv:1607.00976.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Contextualized sarcasm detection on twitter", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Bamman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Noah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Ninth International AAAI Conference on Web and Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Bamman and Noah A Smith. 2015. Contextual- ized sarcasm detection on twitter. In Ninth Interna- tional AAAI Conference on Web and Social Media.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Parsing-based sarcasm sentiment recognition in twitter data", |
|
"authors": [ |
|
{ |
|
"first": "Korra", |
|
"middle": [], |
|
"last": "Santosh Kumar Bharti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjay", |
|
"middle": [], |
|
"last": "Sathya Babu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jena", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "2015 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1373--1380", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Santosh Kumar Bharti, Korra Sathya Babu, and San- jay Kumar Jena. 2015. Parsing-based sarcasm senti- ment recognition in twitter data. In 2015 IEEE/ACM International Conference on Advances in Social Net- works Analysis and Mining (ASONAM), pages 1373- 1380. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "A multidimension question answering network for sarcasm detection", |
|
"authors": [ |
|
{ |
|
"first": "Yufeng", |
|
"middle": [], |
|
"last": "Diao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongfei", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaochao", |
|
"middle": [], |
|
"last": "Fan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonghe", |
|
"middle": [], |
|
"last": "Chu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kan", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yufeng Diao, Hongfei Lin, Liang Yang, Xiaochao Fan, Yonghe Chu, Kan Xu, and Di Wu. 2020. A multi- dimension question answering network for sarcasm detection. IEEE Access.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Fracking sarcasm using neural network", |
|
"authors": [ |
|
{ |
|
"first": "Aniruddha", |
|
"middle": [], |
|
"last": "Ghosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tony", |
|
"middle": [], |
|
"last": "Veale", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 7th workshop on computational approaches to subjectivity, sentiment and social media analysis", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "161--169", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aniruddha Ghosh and Tony Veale. 2016. Fracking sar- casm using neural network. In Proceedings of the 7th workshop on computational approaches to sub- jectivity, sentiment and social media analysis, pages 161-169.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Sarcasm analysis using conversation context", |
|
"authors": [ |
|
{ |
|
"first": "Debanjan", |
|
"middle": [], |
|
"last": "Ghosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Smaranda", |
|
"middle": [], |
|
"last": "Alexander R Fabbri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Muresan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Computational Linguistics", |
|
"volume": "44", |
|
"issue": "4", |
|
"pages": "755--792", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Debanjan Ghosh, Alexander R Fabbri, and Smaranda Muresan. 2018. Sarcasm analysis using conversa- tion context. Computational Linguistics, 44(4):755- 792.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Harnessing sequence labeling for sarcasm detection in dialogue from tv series 'friends", |
|
"authors": [ |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vaibhav", |
|
"middle": [], |
|
"last": "Tripathi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Carman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "146--155", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aditya Joshi, Vaibhav Tripathi, Pushpak Bhat- tacharyya, and Mark Carman. 2016. Harnessing se- quence labeling for sarcasm detection in dialogue from tv series 'friends'. In Proceedings of The 20th SIGNLL Conference on Computational Natural Lan- guage Learning, pages 146-155.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A large self-annotated corpus for sarcasm", |
|
"authors": [ |
|
{ |
|
"first": "Mikhail", |
|
"middle": [], |
|
"last": "Khodak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikunj", |
|
"middle": [], |
|
"last": "Saunshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kiran", |
|
"middle": [], |
|
"last": "Vodrahalli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1704.05579" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikhail Khodak, Nikunj Saunshi, and Kiran Vodrahalli. 2017. A large self-annotated corpus for sarcasm. arXiv preprint arXiv:1704.05579.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Interactive attention networks for aspect-level sentiment classification", |
|
"authors": [ |
|
{ |
|
"first": "Dehong", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sujian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houfeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1709.00893" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dehong Ma, Sujian Li, Xiaodong Zhang, and Houfeng Wang. 2017. Interactive attention networks for aspect-level sentiment classification. arXiv preprint arXiv:1709.00893.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Who cares about sarcastic tweets? investigating the impact of sarcasm on sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Diana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Maynard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Mark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Greenwood", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "LREC 2014 Proceedings. ELRA", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diana G Maynard and Mark A Greenwood. 2014. Who cares about sarcastic tweets? investigating the im- pact of sarcasm on sentiment analysis. In LREC 2014 Proceedings. ELRA.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Advances in pre-training distributed word representations", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Puhrsch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Edouard Grave, Piotr Bojanowski, Christian Puhrsch, and Armand Joulin. 2018. Ad- vances in pre-training distributed word representa- tions. In Proceedings of the International Confer- ence on Language Resources and Evaluation (LREC 2018).", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Harnessing cognitive features for sarcasm detection", |
|
"authors": [ |
|
{ |
|
"first": "Abhijit", |
|
"middle": [], |
|
"last": "Mishra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diptesh", |
|
"middle": [], |
|
"last": "Kanojia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Seema", |
|
"middle": [], |
|
"last": "Nagar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kuntal", |
|
"middle": [], |
|
"last": "Dey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1095--1104", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P16-1104" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhijit Mishra, Diptesh Kanojia, Seema Nagar, Kuntal Dey, and Pushpak Bhattacharyya. 2016. Harnessing cognitive features for sarcasm detection. In Proceed- ings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers), pages 1095-1104, Berlin, Germany. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Sarcasm detection in microblogs using na\u00efve bayes and fuzzy clustering", |
|
"authors": [ |
|
{ |
|
"first": "Shubhadeep", |
|
"middle": [], |
|
"last": "Mukherjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pradip", |
|
"middle": [], |
|
"last": "Kumar Bala", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Technology in Society", |
|
"volume": "48", |
|
"issue": "", |
|
"pages": "19--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shubhadeep Mukherjee and Pradip Kumar Bala. 2017. Sarcasm detection in microblogs using na\u00efve bayes and fuzzy clustering. Technology in Society, 48:19- 27.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D Manning. 2014. Glove: Global vectors for word rep- resentation. In Proceedings of the 2014 conference on empirical methods in natural language process- ing (EMNLP), pages 1532-1543.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Attentional encoder network for targeted sentiment classification", |
|
"authors": [ |
|
{ |
|
"first": "Youwei", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiahai", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyue", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanghui", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1902.09314" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Youwei Song, Jiahai Wang, Tao Jiang, Zhiyue Liu, and Yanghui Rao. 2019. Attentional encoder network for targeted sentiment classification. arXiv preprint arXiv:1902.09314.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Baselines and bigrams: Simple, good sentiment and topic classification", |
|
"authors": [ |
|
{ |
|
"first": "Sida", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "90--94", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sida Wang and Christopher Manning. 2012. Baselines and bigrams: Simple, good sentiment and topic clas- sification. In Proceedings of the 50th Annual Meet- ing of the Association for Computational Linguistics (Volume 2: Short Papers), pages 90-94, Jeju Island, Korea. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Russ", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5754--5764", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Russ R Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. In Advances in neural in- formation processing systems, pages 5754-5764.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Lcf: A local context focus mechanism for aspect-based sentiment classification", |
|
"authors": [ |
|
{ |
|
"first": "Biqing", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruyang", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wu", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuli", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Applied Sciences", |
|
"volume": "9", |
|
"issue": "16", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Biqing Zeng, Heng Yang, Ruyang Xu, Wu Zhou, and Xuli Han. 2019. Lcf: A local context focus mecha- nism for aspect-based sentiment classification. Ap- plied Sciences, 9(16):3389.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Context: an ordered list of dialogues \u2022 Response: reply to the last post or tweet of Context dialogues \u2022 Label: indicate wheter the responce is sarcastic or not Figure 1 shows the distribution of dialogue turns in each dataset.", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "distribution of dialogues turns.", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Models performance over Reddit dataset" |
|
} |
|
} |
|
} |
|
} |