|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T09:43:00.063558Z" |
|
}, |
|
"title": "C-Net: Contextual Network for Sarcasm Detection", |
|
"authors": [ |
|
{ |
|
"first": "Amit", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology (Indian School of Mines", |
|
"location": { |
|
"settlement": "Dhanbad", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Aman", |
|
"middle": [], |
|
"last": "Sinha", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology (Indian School of Mines", |
|
"location": { |
|
"settlement": "Dhanbad", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Rohit", |
|
"middle": [], |
|
"last": "Agarwal", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology (Indian School of Mines", |
|
"location": { |
|
"settlement": "Dhanbad", |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Automatic Sarcasm Detection in conversations is a difficult and tricky task. Classifying an utterance as sarcastic or not in isolation can be futile since most of the time the sarcastic nature of a sentence heavily relies on its context. This paper presents our proposed model, C-Net, which takes contextual information of a sentence in a sequential manner to classify it as sarcastic or non-sarcastic. Our model showcases competitive performance in the Sarcasm Detection shared task organised on CodaLab and achieved 75.0% F1-score on the Twitter dataset and 66.3% F1-score on Reddit dataset.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Automatic Sarcasm Detection in conversations is a difficult and tricky task. Classifying an utterance as sarcastic or not in isolation can be futile since most of the time the sarcastic nature of a sentence heavily relies on its context. This paper presents our proposed model, C-Net, which takes contextual information of a sentence in a sequential manner to classify it as sarcastic or non-sarcastic. Our model showcases competitive performance in the Sarcasm Detection shared task organised on CodaLab and achieved 75.0% F1-score on the Twitter dataset and 66.3% F1-score on Reddit dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Sarcasm detection plays a crucial role in improving the effectiveness of chatbot systems. Sentiment classification systems can fail in the absence of a robust sarcasm detection system. A sarcastic sentence can express a negative sentiment even with the presence of positive or neutral sentiment words in that sentence. Hence, accurate detection of sarcasm can take an artificially intelligent agent closer to imitate human behaviour and enable it to better understand true intentions and emotions of humans (Joshi et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 507, |
|
"end": 527, |
|
"text": "(Joshi et al., 2018)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper represents work on the Sarcasm Detection shared task which is a part of the Second Workshop on Figurative Language Processing, colocated with ACL 2020. The shared task aims to investigate and understand how much conversation context is needed or helpful for Sarcasm Detection. Two datasets, one of Reddit and the other of Twitter, were provided for developing and testing multiple sarcasm detection systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we present our study on the effectiveness of contextual information to decide if an * Equal contribution. utterance is sarcastic or not. For this, the baseline models were first created using traditional machine learning algorithms like logistic regression, SVM etc. which were trained to classify utterances without considering their contextual information. Sequence models like vanilla RNN and LSTM were trained similarly. Then different types of word embeddings (ELMo and Glove) and sentence embedding (DeepMoji) to capture emotional states in the sentences were also experimented to detect incongruities within the text. The latest state-of-the-art transformer based models like BERT, XLNet and RoBERTa were also used for classifying sentences in isolation. Our investigations for creating systems which can use the context information effectively in a sequential manner led to the creation of our proposed model, which showed decent performances in both the test datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The evolution of various trends in Sarcasm detection research can be seen in (Joshi et al., 2017) . Sarcasm detection was initially performed using rule-based approaches. (Riloff et al., 2013) presented rule-based classifiers that look for a positive verb and a negative situation phrase in a sentence. (Maynard and Greenwood, 2014) proposed using hashtag sentiment as an indicator for sarcasm and (Liu et al., 2014) introduced POS sequences and semantic imbalance as features. Statistical featurebased approaches were also used for this task e.g. (Reyes and Rosso, 2012) introduced features related to ambiguity, unexpectedness, emotional scenario, etc. to better capture situational dependencies for the presence of sarcasm.", |
|
"cite_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 97, |
|
"text": "(Joshi et al., 2017)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 171, |
|
"end": 192, |
|
"text": "(Riloff et al., 2013)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 332, |
|
"text": "(Maynard and Greenwood, 2014)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 398, |
|
"end": 416, |
|
"text": "(Liu et al., 2014)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 548, |
|
"end": 571, |
|
"text": "(Reyes and Rosso, 2012)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Machine learning algorithms were also used for sarcasm detection. Majority of work in sarcasm detection earlier relied on SVM ( (Joshi et al., 2015) ; (Tepperman et al., 2006) ; ). (Riloff et al., 2013) compared rule-based techniques with an SVM-based classifier. (Reyes et al., 2013) used Naive Bayes and decision trees for multiple pairs of labels among irony, humor, politics and education. (Bamman and Smith, 2015) used binary logistic regression for their work. The importance of context information was first presented in (Wallace et al., 2014) which described their annotation study where annotators repeatedly asked for context information to judge a text to be sarcastic or not. Many times they changed their previously given labels to a text after being shown the context behind it. (Rajadesingan et al., 2015) and (Bamman and Smith, 2015) tried to include the author context by analysing the author's past tweets and sentiments. To consider the conversational context, (Wang et al., 2015) and (Joshi et al., 2016 ) used a sequence labeling approach. (Wang et al., 2015 ) also tried to use the topical context of a text, since some topics are more likely to generate sarcasm as compared to other topics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 148, |
|
"text": "(Joshi et al., 2015)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 151, |
|
"end": 175, |
|
"text": "(Tepperman et al., 2006)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 181, |
|
"end": 202, |
|
"text": "(Riloff et al., 2013)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 264, |
|
"end": 284, |
|
"text": "(Reyes et al., 2013)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 394, |
|
"end": 418, |
|
"text": "(Bamman and Smith, 2015)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 528, |
|
"end": 550, |
|
"text": "(Wallace et al., 2014)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 793, |
|
"end": 820, |
|
"text": "(Rajadesingan et al., 2015)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 825, |
|
"end": 849, |
|
"text": "(Bamman and Smith, 2015)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 980, |
|
"end": 999, |
|
"text": "(Wang et al., 2015)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1004, |
|
"end": 1023, |
|
"text": "(Joshi et al., 2016", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1061, |
|
"end": 1079, |
|
"text": "(Wang et al., 2015", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Recent works in this domain include deep learning methods such as (Ghosh et al., 2018) included several types of Long Short-Term Memory (LSTM) networks that can model both, the conversation context and the response. (Hazarika et al., 2018) used CNNs to incorporate various contextual information. (Potamias et al., 2019) used pre-trained RoBERTa weights combined with an RCNN to capture contextual information to detect sarcasm.", |
|
"cite_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 86, |
|
"text": "(Ghosh et al., 2018)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 216, |
|
"end": 239, |
|
"text": "(Hazarika et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The dataset used for this study was provided in the shared task on Sarcasm Detection, organized at Codalab. It included two separate datasets, Twitter and Reddit, each of them having equal no. of sarcastic and non-sarcastic responses. For each response provided in the dataset, the conversation context consists of an ordered list of previous dialogues. Table 1 shows the size of the train and test sets of both the datasets. From Figure 1 , we can see that we have variable number of sentences in the context set of responses, ranging from 2 to 20. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 354, |
|
"end": 361, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 431, |
|
"end": 439, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3" |
|
}, |
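
{

"text": "To make the data format concrete, the following minimal loader is a sketch assuming the shared task's JSON-lines release, where each record carries 'label', 'response' and 'context' fields (the field names are our assumption here, not something prescribed above):\n\nimport json\n\ndef load_split(path):\n    # One JSON record per line; 'context' is an ordered list of previous\n    # dialogue turns, oldest first (assumed format).\n    records = []\n    with open(path) as f:\n        for line in f:\n            r = json.loads(line)\n            records.append((r['context'], r['response'], r.get('label')))\n    return records",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data",

"sec_num": "3"

},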
|
{ |
|
"text": "We used three kinds of approaches to experiment in this task. First, methods that classified utterances in isolation were investigated. Then, approaches that considered partial conversation context for classifying texts were experimented. Finally, methods that can potentially utilise the complete conversation context information were looked into.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We experimented with traditional machine learning based approaches like logistic regression, Naive Bayes classifier, SVM etc. first for sarcasm detection by treating the response sentences in isolation. Sequential models like RNNs can easily model a sequential data hence, they are widely used in Natural Language Processing. Basic RNN and LSTM variants were also used in experiments for this task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline models", |
|
"sec_num": "4.1" |
|
}, |
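
{

"text": "As an illustration of these response-only baselines, the sketch below trains a logistic regression classifier on TF-IDF features of the response sentences; it is a simplified stand-in with toy data, not our exact pipeline:\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import make_pipeline\n\n# Toy data: 1 = sarcastic, 0 = non-sarcastic.\ntrain_responses = ['oh great, another meeting', 'thanks for the help!']\ntrain_labels = [1, 0]\n\n# Unigram+bigram TF-IDF features fed to a linear classifier.\nclf = make_pipeline(TfidfVectorizer(ngram_range=(1, 2)),\n                    LogisticRegression(max_iter=1000))\nclf.fit(train_responses, train_labels)\nprint(clf.predict(['oh great, it is raining again']))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Baseline models",

"sec_num": "4.1"

},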
|
{ |
|
"text": "We used Deepmoji (Felbo et al., 2017) in order to investigate the correlation between emotion and presence of sarcasm in sentences. ELMo (Peters et al., 2018) provides contextualized word representation where embeddings of each word is actually a function of the entire sentence containing that word. This may help in capturing local semantic incongruities within a sentence, which is an indicator of sarcasm. Recently introduced transformer models like BERT (Devlin et al., 2019) , XLNet (Yang et al., 2019) and RoBERTa (Liu et al., 2019) have given state-of-the-art results on various NLP tasks. Experiments were performed with these models to classify utterances as sarcastic or not-sarcastic. Figure 2 : C-Net Architecture: Here, 'n' is the maximum size of the context set. Model-1, 2, 3... n+1 are BERT (base-uncased) models which are trained separately on the response sentences, last sentence of context sets, second last sentence of context sets and so on till the first sentence of context sets respectively. Probability values generated by these n+1 models are used by the Fusion Layer to generate another probability value as output, which tells about the possibility of sarcasm presence in the response.", |
|
"cite_spans": [ |
|
{ |
|
"start": 17, |
|
"end": 37, |
|
"text": "(Felbo et al., 2017)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 137, |
|
"end": 158, |
|
"text": "(Peters et al., 2018)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 459, |
|
"end": 480, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 489, |
|
"end": 508, |
|
"text": "(Yang et al., 2019)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 521, |
|
"end": 539, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 697, |
|
"end": 705, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pretrained Networks", |
|
"sec_num": "4.2" |
|
}, |
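
{

"text": "For reference, a minimal fine-tuning step for the BERT (base-uncased) classifier can be sketched with the transformers library as below; this is an illustrative single step and omits our fastai-style gradual unfreezing and slanted triangular learning rate schedule:\n\nimport torch\nfrom transformers import BertTokenizer, BertForSequenceClassification\n\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nmodel = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)\noptimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)\n\n# One training step on a single sarcastic example (label 1).\nbatch = tokenizer(['yeah, what a great idea'], padding=True, truncation=True, return_tensors='pt')\noutputs = model(**batch, labels=torch.tensor([1]))\nloss = outputs[0]  # older transformers versions return a (loss, logits) tuple\nloss.backward()\noptimizer.step()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pretrained Networks",

"sec_num": "4.2"

},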
|
{ |
|
"text": "Sarcasm detection can be attributed to information like emotional state, the topic of the conversation, etc. which can be extracted from the conversation context of an utterance. Manually annotating a huge corpus of text data can be a tedious task. We propose our model Contextual-Network (C-Net) for Sarcasm Detection, which uses pseudo-labeling to provide labels for the context sentences by giving them the same label as the response sentence. This is followed by a fusion layer as shown in Figure 2 . Training in this way helps in including the contextual information in the model and therefore aid in detecting situations that may lead to the occurrence of a sarcastic sentence in the near future.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 494, |
|
"end": 502, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "C-Net", |
|
"sec_num": "4.3" |
|
}, |
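
{

"text": "A sketch of this pseudo-labeling step: every context sentence inherits the label of its response, and one training set is built per position (response, most recent context sentence, second most recent, and so on):\n\ndef build_pseudo_labeled_sets(records, n):\n    # records: (context, response, label) triples; n = maximum context size used.\n    # sets[0] trains Model 1 on responses; sets[k] trains Model k+1 on the\n    # k-th most recent context sentence, pseudo-labeled with the response label.\n    sets = [[] for _ in range(n + 1)]\n    for context, response, label in records:\n        sets[0].append((response, label))\n        for k in range(1, n + 1):\n            if len(context) >= k:\n                sets[k].append((context[-k], label))\n    return sets",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "C-Net",

"sec_num": "4.3"

},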
|
{ |
|
"text": "By using pseudo labels for training BERT on context sentences, we assigned a score to each context sentence. These scores told about the probability of the conversation leading to a sarcastic response, if that particular context sentence was present in the conversation. This helped in analysing how sarcasm generating situations build up during conversations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C-Net", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "For this model, we used Simple Exponential Smoothing (SES) in the fusion layer, which is a time series forecasting method for univariate data without a trend or seasonality. Forecasts produced using exponential smoothing methods are weighted averages of past observations, with the weights decaying exponentially as the observations get older.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C-Net", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The mathematical expression for Simple Exponential Smoothing (SES) is given by", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C-Net", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "y t+1 = \u03b1(y t + (1 \u2212 \u03b1)y t\u22121 + (1 \u2212 \u03b1) 2 y t\u22122 + ...)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C-Net", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "where \u03b1 \u2208 (0, 1) controls the rate at which the influence of the observations at previous time steps decays exponentially. Here y t\u22121 , y t\u22122 , and so on, are scores predicted by Model 2, Model 3 and so on till Model n+1 respectively. These scores are the probability of the response being sarcastic if these context sentences were present in the conversation anytime before the response. y t is the probability of response being sarcastic, predicted by Model 1. C-Net takes all these scores into consideration and gives the final output value y t+1 , as shown in Figure 2 . In this way the method is capable of handling the complete context set.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 564, |
|
"end": 572, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "C-Net", |
|
"sec_num": "4.3" |
|
}, |
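
{

"text": "A direct implementation of this fusion step is sketched below; the scores are the probabilities produced by the position-wise BERT models, ordered from the response backwards through the context:\n\ndef ses_fuse(scores, alpha):\n    # scores = [y_t, y_{t-1}, y_{t-2}, ...]: Model 1's score on the response\n    # first, then the context-sentence models, most recent context first.\n    # Returns y_{t+1} = alpha * sum_k (1 - alpha)**k * y_{t-k}.\n    return alpha * sum((1 - alpha) ** k * y for k, y in enumerate(scores))\n\n# Example with illustrative scores and the alpha we later found for Twitter:\nprob = ses_fuse([0.81, 0.40, 0.35], alpha=0.395)\nlabel = 'SARCASM' if prob >= 0.5 else 'NOT_SARCASM'",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "C-Net",

"sec_num": "4.3"

},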
|
{ |
|
"text": "Hence, generating probability values by giving pseudo-labels to context sentences and combining those values using simple exponential smoothing helps in making a more accurate prediction of sarcasm in conversation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "C-Net", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The training datasets were split into a 90% training set and 10% validation set. We used Fastai tokenizer for pre-processing the datasets and then applied various basic machine learning algorithms for sentence classification. We also used the torchtext library and spacy tokenizer to pre-process the dataset before using Vanilla RNN and bidirectional LSTM models. Both the models were trained for 10 epochs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods using response only", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We used transformer architectures available within the Huggingface's transformers library and trained them using Gradual Unfreezing and Slanted Triangular Learning Rates (Howard and Ruder, 2018 1e-4 and for other layers, it was 1e-5. The batch size used was 16 and the model was trained with half-precision settings on 16 GB GPU. Pre-trained torchMoji was used to generate 2304 dimensional sentence encoding for each response sentence. Using ELMo we obtained 1024 dimensional word vectors. The sentence vectors for each response was obtained by averaging the wordvectors. We also concatenated the ELMo representation of each response sentence with the DeepMoji representation of the same sentence to make the sentence representations richer. Then we applied logistic regression to classify the sentence representations obtained by the above-said approaches.", |
|
"cite_spans": [ |
|
{ |
|
"start": 170, |
|
"end": 193, |
|
"text": "(Howard and Ruder, 2018", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods using response only", |
|
"sec_num": "5.1" |
|
}, |
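
{

"text": "The embedding-fusion pipeline can be sketched as follows, assuming the ELMo word vectors and DeepMoji sentence encodings have already been extracted (the extraction calls themselves are omitted, and random arrays stand in for real embeddings):\n\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\n\ndef sentence_vector(elmo_words, deepmoji):\n    # elmo_words: (num_tokens, 1024) word vectors; deepmoji: (2304,) encoding.\n    elmo_sent = elmo_words.mean(axis=0)           # average to a (1024,) vector\n    return np.concatenate([elmo_sent, deepmoji])  # richer (3328,) representation\n\nrng = np.random.default_rng(0)\nelmo_train = [rng.normal(size=(12, 1024)) for _ in range(4)]\nmoji_train = [rng.normal(size=2304) for _ in range(4)]\ny_train = [0, 1, 0, 1]\n\nX = np.stack([sentence_vector(e, d) for e, d in zip(elmo_train, moji_train)])\nclf = LogisticRegression(max_iter=1000).fit(X, y_train)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Methods using response only",

"sec_num": "5.1"

},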
|
{ |
|
"text": "We observe that two fixed context utterances are always available for each response in both the datasets. Thus, we create a C-Net with 3 models. Model 1 uses response, Model 2 uses the latest context and Model 3 uses the second latest context. For the training of each model, the output target is the label of response. Thus, we train each model in the lieu of detecting sarcasm in response.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods using fixed context set", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "As we see in Figure 2 , the fusion layer works on the probability values generated by the BERT (baseuncased) models to give an output. The fusion layer can be either a Logistic Regression or a Simple Exponential Smoothing model. Since the sequence of dialogues in a conversation matters in deciding the polarity or emotion of future dialogues, simple exponential smoothing was used to take advantage of the sequential nature of the dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 13, |
|
"end": 21, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Methods using fixed context set", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "In order to include the complete context set for training, we used Timestamping to preserve the sequence of sentences. In this method, two bertbase-uncased models were trained separately on the response only set and all the context sentences. Also, for all the context sentences, a special marker was concatenated at the end which would make the model aware of the position of that sentence in a conversation. Output probabilities by these two models were used to get label for the response.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Method using complete context set", |
|
"sec_num": "5.3" |
|
}, |
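
{

"text": "The marker scheme can be as simple as appending a position token to each context sentence; the exact marker format below is an illustrative choice, not prescribed above:\n\ndef timestamp_context(context):\n    # context is ordered oldest -> newest; '<pos_1>' marks the most recent\n    # context sentence, '<pos_2>' the second most recent, and so on.\n    n = len(context)\n    return ['{} <pos_{}>'.format(sent, n - i) for i, sent in enumerate(context)]\n\ntimestamp_context(['How was the match?', 'We lost again.'])\n# -> ['How was the match? <pos_2>', 'We lost again. <pos_1>']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Method using complete context set",

"sec_num": "5.3"

},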
|
{ |
|
"text": "To reproduce the best results mentioned in this paper, C-Net with SES should be used, utilising the two latest context sentences associated with each response. The pre-trained BERT (base-uncased) model, provided in the transformers library by Huggingface (Wolf et al., 2019) , was trained similarly as mentioned in the implementation section using the fastai library (Howard and Gugger, 2020) . While fine-tuning BERT on response sentences, the learning rates used were ranging between 1e-5 to 1e-4. But while training on Context sentences, the learning rates used were ranging between 1e-6 to 1e-5. The optimum parameter \u03b1 for SES was found out from the training set with a grid search. In our experiments, it was found out that the value 0.395 for \u03b1 best fits the Twitter training data and 0.2 best fits the Reddit training data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 255, |
|
"end": 274, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 367, |
|
"end": 392, |
|
"text": "(Howard and Gugger, 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reproducibility", |
|
"sec_num": "6" |
|
}, |
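
{

"text": "The grid search for \u03b1 can be sketched as below; the per-example score lists are the outputs of the three models on the training set, and the grid step of 0.005 is an assumption consistent with the reported value of 0.395:\n\nimport numpy as np\nfrom sklearn.metrics import f1_score\n\ndef fuse(scores, a):\n    return a * sum((1 - a) ** k * y for k, y in enumerate(scores))\n\ndef fit_alpha(score_lists, labels, grid=np.arange(0.005, 1.0, 0.005)):\n    # Pick the alpha in (0, 1) that maximises F1 on the training set.\n    return max(grid, key=lambda a: f1_score(\n        labels, [int(fuse(s, a) >= 0.5) for s in score_lists]))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Reproducibility",

"sec_num": "6"

},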
|
{ |
|
"text": "From Table 2 , it can be seen that the BERT classifier performed the best on Twitter dataset and RoBERTa model performed the best on the Reddit dataset when compared to all methods over response only set. Overall, the C-Net model gave the best results as compared to all the approaches on the twitter test dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 12, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The results in the case of C-Net for both the fusion methods (LR and SES) are better as compared to the results of the BERT classifier trained only on response in the twitter data. However, this is not true for the Reddit dataset. The BERT and RoBERTa model trained only on response sentences in the Reddit dataset performed better as compared to the C-Net approach. This is counterintuitive as per the theory that context information helps in sarcasm detection. However, it's possible that the Reddit response-only dataset contains many flags for sarcasm, which are also present in the large dataset the models were pre-trained with. Further pre-training of the models on the target Reddit and Twitter dataset may further improve the results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "In this paper, we compared the performances of various approaches for the Sarcasm Detection task. We experimented with traditional machine learning based approaches, and the latest state-of-theart transformer architectures. The results obtained show that our proposed model, C-Net, has the potential to effectively use the conversation context of an utterance to capture the sarcastic nature of a conversation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "The variable number of context sentences for each response sentence makes it difficult to capture the long range dependency. Hence, as future work, approaches that can effectively deal with variable context set size can be investigated.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Contextualized sarcasm detection on twitter", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Bamman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "ICWSM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Bamman and Noah A. Smith. 2015. Contextual- ized sarcasm detection on twitter. In ICWSM.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Semi-supervised recognition of sarcastic sentences in twitter and amazon", |
|
"authors": [ |
|
{ |
|
"first": "Dmitry", |
|
"middle": [], |
|
"last": "Davidov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Tsur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Rappoport", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of the Fourteenth Conference on Computational Natural Language Learning, CoNLL '10", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "107--116", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dmitry Davidov, Oren Tsur, and Ari Rappoport. 2010. Semi-supervised recognition of sarcastic sentences in twitter and amazon. In Proceedings of the Four- teenth Conference on Computational Natural Lan- guage Learning, CoNLL '10, page 107-116, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understand- ing. ArXiv, abs/1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Using millions of emoji occurrences to learn any-domain representations for detecting sentiment, emotion and sarcasm", |
|
"authors": [ |
|
{ |
|
"first": "Bjarke", |
|
"middle": [], |
|
"last": "Felbo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Mislove", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iyad", |
|
"middle": [], |
|
"last": "Rahwan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sune", |
|
"middle": [], |
|
"last": "Lehmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1615--1625", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1169" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bjarke Felbo, Alan Mislove, Anders S\u00f8gaard, Iyad Rahwan, and Sune Lehmann. 2017. Using millions of emoji occurrences to learn any-domain represen- tations for detecting sentiment, emotion and sarcasm. In Proceedings of the 2017 Conference on Empiri- cal Methods in Natural Language Processing, pages 1615-1625, Copenhagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Sarcasm analysis using conversation context", |
|
"authors": [ |
|
{ |
|
"first": "Debanjan", |
|
"middle": [], |
|
"last": "Ghosh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Fabbri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Smaranda", |
|
"middle": [], |
|
"last": "Muresan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Computational Linguistics", |
|
"volume": "44", |
|
"issue": "4", |
|
"pages": "755--792", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/coli_a_00336" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Debanjan Ghosh, Alexander R. Fabbri, and Smaranda Muresan. 2018. Sarcasm analysis using conversa- tion context. Computational Linguistics, 44(4):755- 792.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Cascade: Contextual sarcasm detection in online discussion forums", |
|
"authors": [ |
|
{ |
|
"first": "Devamanyu", |
|
"middle": [], |
|
"last": "Hazarika", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soujanya", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sruthi", |
|
"middle": [], |
|
"last": "Gorantla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roger", |
|
"middle": [], |
|
"last": "Zimmermann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Devamanyu Hazarika, Soujanya Poria, Sruthi Gorantla, Erik Cambria, Roger Zimmermann, and Rada Mihalcea. 2018. Cascade: Contextual sarcasm detection in online discussion forums. ArXiv, abs/1805.06413.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "2020. fastai: A layered api for deep learning", |
|
"authors": [ |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Howard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sylvain", |
|
"middle": [], |
|
"last": "Gugger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Information", |
|
"volume": "11", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeremy Howard and Sylvain Gugger. 2020. fastai: A layered api for deep learning. Information, 11:108.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Finetuned language models for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Howard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeremy Howard and Sebastian Ruder. 2018. Fine- tuned language models for text classification. ArXiv, abs/1801.06146.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Automatic sarcasm detection: A survey", |
|
"authors": [ |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ACM Comput. Surv", |
|
"volume": "", |
|
"issue": "5", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3124420" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aditya Joshi, Pushpak Bhattacharyya, and Mark J. Car- man. 2017. Automatic sarcasm detection: A survey. ACM Comput. Surv., 50(5).", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Carman", |
|
"authors": [ |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Investigations in Computational Sarcasm", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aditya Joshi, Pushpak Bhattacharyya, and Mark J. Car- man. 2018. Investigations in Computational Sar- casm, 1st edition. Springer Publishing Company, In- corporated.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Harnessing context incongruity for sarcasm detection", |
|
"authors": [ |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vinita", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "757--762", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/P15-2124" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aditya Joshi, Vinita Sharma, and Pushpak Bhat- tacharyya. 2015. Harnessing context incongruity for sarcasm detection. In Proceedings of the 53rd An- nual Meeting of the Association for Computational Linguistics and the 7th International Joint Confer- ence on Natural Language Processing (Volume 2: Short Papers), pages 757-762, Beijing, China. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Harnessing sequence labeling for sarcasm detection in dialogue from TV series 'Friends", |
|
"authors": [ |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vaibhav", |
|
"middle": [], |
|
"last": "Tripathi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Carman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "146--155", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/K16-1015" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aditya Joshi, Vaibhav Tripathi, Pushpak Bhat- tacharyya, and Mark J. Carman. 2016. Harnessing sequence labeling for sarcasm detection in dialogue from TV series 'Friends'. In Proceedings of The 20th SIGNLL Conference on Computational Natu- ral Language Learning, pages 146-155, Berlin, Ger- many. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Lexical influences on the perception of sarcasm", |
|
"authors": [ |
|
{

"first": "Roger",

"middle": [

"J"

],

"last": "Kreuz",

"suffix": ""

},

{

"first": "Gina",

"middle": [

"M"

],

"last": "Caucci",

"suffix": ""

}
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the Workshop on Computational Approaches to Figurative Language, FigLanguages '07", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roger J. Kreuz and Gina M. Caucci. 2007. Lexical influences on the perception of sarcasm. In Proceed- ings of the Workshop on Computational Approaches to Figurative Language, FigLanguages '07, page 1-4, USA. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Sarcasm detection in social media based on imbalanced classification", |
|
"authors": [ |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gaoyan", |
|
"middle": [], |
|
"last": "Ou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tengjiao", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongqing", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Lei", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Web-Age Information Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "459--471", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peng Liu, Wei Chen, Gaoyan Ou, Tengjiao Wang, Dongqing Yang, and Kai Lei. 2014. Sarcasm detec- tion in social media based on imbalanced classifica- tion. In Web-Age Information Management, pages 459-471, Cham. Springer International Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Roberta: A robustly optimized bert pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. ArXiv, abs/1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Who cares about sarcastic tweets? investigating the impact of sarcasm on sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Diana", |
|
"middle": [], |
|
"last": "Maynard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Greenwood", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4238--4243", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diana Maynard and Mark Greenwood. 2014. Who cares about sarcastic tweets? investigating the im- pact of sarcasm on sentiment analysis. In Proceed- ings of the Ninth International Conference on Lan- guage Resources and Evaluation (LREC'14), pages 4238-4243, Reykjavik, Iceland. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Deep contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2227--2237", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1202" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. In Proceedings of the 2018 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long Papers), pages 2227-2237, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "A transformer-based approach to irony and sarcasm detection", |
|
"authors": [ |
|
{

"first": "Rolandos Alexandros",

"middle": [],

"last": "Potamias",

"suffix": ""

},

{

"first": "Georgios",

"middle": [],

"last": "Siolas",

"suffix": ""

},

{

"first": "A",

"middle": [],

"last": "Stafylopatis",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rolandos Alexandros Potamias, Georgios Siolas, and A. Stafylopatis. 2019. A transformer-based ap- proach to irony and sarcasm detection. ArXiv, abs/1911.10401.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Sarcasm detection on twitter: A behavioral modeling approach", |
|
"authors": [ |
|
{ |
|
"first": "Ashwin", |
|
"middle": [], |
|
"last": "Rajadesingan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Reza", |
|
"middle": [], |
|
"last": "Zafarani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Eighth ACM International Conference on Web Search and Data Mining, WSDM '15", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "97--106", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2684822.2685316" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashwin Rajadesingan, Reza Zafarani, and Huan Liu. 2015. Sarcasm detection on twitter: A behavioral modeling approach. In Proceedings of the Eighth ACM International Conference on Web Search and Data Mining, WSDM '15, page 97-106, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Making objective decisions from subjective data: Detecting irony in customer reviews. Decision Support Systems", |
|
"authors": [ |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Reyes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Rosso", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "53", |
|
"issue": "", |
|
"pages": "754--760", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.dss.2012.05.027" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antonio Reyes and Paolo Rosso. 2012. Making ob- jective decisions from subjective data: Detecting irony in customer reviews. Decision Support Sys- tems, 53:754-760.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A multidimensional approach for detecting irony in twitter", |
|
"authors": [ |
|
{ |
|
"first": "Antonio", |
|
"middle": [], |
|
"last": "Reyes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Rosso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tony", |
|
"middle": [], |
|
"last": "Veale", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Lang. Resour. Eval", |
|
"volume": "47", |
|
"issue": "1", |
|
"pages": "239--268", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s10579-012-9196-x" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antonio Reyes, Paolo Rosso, and Tony Veale. 2013. A multidimensional approach for detecting irony in twitter. Lang. Resour. Eval., 47(1):239-268.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Sarcasm as contrast between a positive sentiment and negative situation", |
|
"authors": [ |
|
{ |
|
"first": "Ellen", |
|
"middle": [], |
|
"last": "Riloff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashequl", |
|
"middle": [], |
|
"last": "Qadir", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prafulla", |
|
"middle": [], |
|
"last": "Surve", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lalindra De", |
|
"middle": [], |
|
"last": "Silva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Gilbert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruihong", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "704--714", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ellen Riloff, Ashequl Qadir, Prafulla Surve, Lalindra De Silva, Nathan Gilbert, and Ruihong Huang. 2013. Sarcasm as contrast between a positive sentiment and negative situation. In Proceedings of the 2013 Conference on Empirical Methods in Natural Lan- guage Processing, pages 704-714, Seattle, Washing- ton, USA. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Yeah right: Sarcasm recognition for spoken dialogue systems", |
|
"authors": [ |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Tepperman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Traum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shrikanth", |
|
"middle": [], |
|
"last": "Narayanan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joseph Tepperman, David Traum, and Shrikanth Narayanan. 2006. Yeah right: Sarcasm recognition for spoken dialogue systems.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Icwsm -a great catchy name: Semi-supervised recognition of sarcastic sentences in online product reviews", |
|
"authors": [ |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Tsur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dmitry", |
|
"middle": [], |
|
"last": "Davidov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Rappoport", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "ICWSM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oren Tsur, Dmitry Davidov, and Ari Rappoport. 2010. Icwsm -a great catchy name: Semi-supervised recognition of sarcastic sentences in online product reviews. In ICWSM.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Humans require context to infer ironic intent", |
|
"authors": [ |
|
{ |
|
"first": "Byron", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Wallace", |
|
"suffix": "" |
|
}, |
|
{

"first": "Do Kook",

"middle": [],

"last": "Choe",

"suffix": ""

},

{

"first": "Laura",

"middle": [],

"last": "Kertz",

"suffix": ""

},

{

"first": "Eugene",

"middle": [],

"last": "Charniak",

"suffix": ""

}
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/P14-2084" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Byron C. Wallace, Do Kook Choe, Laura Kertz, and Eugene Charniak. 2014. Humans require context to infer ironic intent (so computers probably do, too).", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "512--516", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 512-516, Baltimore, Mary- land. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Twitter sarcasm detection exploiting a context-based model", |
|
"authors": [ |
|
{ |
|
"first": "Zelin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhijian", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruimin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yafeng", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings, Part I, of the 16th International Conference on Web Information Systems Engineering -WISE 2015", |
|
"volume": "9418", |
|
"issue": "", |
|
"pages": "77--91", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-319-26190-4_6" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zelin Wang, Zhijian Wu, Ruimin Wang, and Yafeng Ren. 2015. Twitter sarcasm detection exploiting a context-based model. In Proceedings, Part I, of the 16th International Conference on Web Information Systems Engineering -WISE 2015 -Volume 9418, page 77-91, Berlin, Heidelberg. Springer-Verlag.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Huggingface's transformers: State-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R'emi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Brew", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R'emi Louf, Morgan Funtow- icz, and Jamie Brew. 2019. Huggingface's trans- formers: State-of-the-art natural language process- ing. ArXiv, abs/1910.03771.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Quoc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NeurIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime G. Car- bonell, Ruslan Salakhutdinov, and Quoc V. Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. In NeurIPS.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Context set size distribution. The x-axis shows the size of context sets in both the training datasets. The y-axis shows the percentage of data containing that much context size.", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"text": "Dataset statistics.", |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"text": "). The learning rate for the last layer was", |
|
"content": "<table><tr><td>Method</td><td>Twitter Reddit</td></tr><tr><td>Response Only Set</td><td/></tr><tr><td>Logistic Regression</td><td>0.685 0.622</td></tr><tr><td>Naive Bayes</td><td>0.673 0.626</td></tr><tr><td>SGD Classifier</td><td>0.668 0.626</td></tr><tr><td>XGBoost</td><td>0.672 0.617</td></tr><tr><td>SVM</td><td>0.632 0.334</td></tr><tr><td>Vanilla RNN</td><td>0.478 0.463</td></tr><tr><td>Bi-LSTM</td><td>0.497 0.481</td></tr><tr><td>DeepMoji</td><td>0.679 0.633</td></tr><tr><td>ELMo</td><td>0.684 0.544</td></tr><tr><td>ELMo+DeepMoji</td><td>0.681 0.518</td></tr><tr><td>XLNet (base-cased)</td><td>0.712 0.598</td></tr><tr><td>BERT (base-uncased)</td><td>0.733 0.671</td></tr><tr><td>RoBERTa (base)</td><td>0.680 0.678</td></tr><tr><td>Fixed Context Set</td><td/></tr><tr><td>C-Net+LR</td><td>0.747 0.650</td></tr><tr><td>C-Net+SES</td><td>0.750 0.663</td></tr><tr><td>Complete Context Set</td><td/></tr><tr><td>Time-stamping</td><td>0.710 0.500</td></tr></table>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"text": "", |
|
"content": "<table/>", |
|
"html": null, |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |