|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T02:10:40.995164Z" |
|
}, |
|
"title": "Automatically Predicting Judgement Dimensions of Human Behaviour", |
|
"authors": [ |
|
{ |
|
"first": "Segun", |
|
"middle": [], |
|
"last": "Taofeek", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Instituto Polit\u00e9cnico Nacional Mexico City", |
|
"location": { |
|
"country": "Mexico" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Gelbukh", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Instituto Polit\u00e9cnico Nacional Mexico City", |
|
"location": { |
|
"country": "Mexico" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Instituto", |
|
"middle": [ |
|
"Polit\u00e9cnico" |
|
], |
|
"last": "Nacional", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper describes our submission to the ALTA-2020 shared task on assessing behaviour from short text. We evaluate the effectiveness of traditional machine learning and recent transformers pre-trained models. Our submission with the Roberta-large model and prediction threshold achieved first place on the private leaderboard.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper describes our submission to the ALTA-2020 shared task on assessing behaviour from short text. We evaluate the effectiveness of traditional machine learning and recent transformers pre-trained models. Our submission with the Roberta-large model and prediction threshold achieved first place on the private leaderboard.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Language enables us to express evaluation of people, action, event, and things. This manifests as emotion and assessment of human behaviour and artefacts. The study of evaluative language has benefited from efforts in several disciplines such as linguistics, philosophy, psychology, cognitive science and computer science (Benamara et al., 2017) . In linguistics, the appraisal framework of Martin and White (2003) provides a detailed classification scheme for understanding how evaluation is expressed and implied in language. In computer science, affective computing studies evaluative language under the umbrella term of sentiment analysis with common tasks involving detection and classification of polarity and emotion, and aspect-based sentiment analysis, among others. Sentiment analysis has benefited from the availability of user-generated content on online platforms.", |
|
"cite_spans": [ |
|
{ |
|
"start": 322, |
|
"end": 345, |
|
"text": "(Benamara et al., 2017)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 391, |
|
"end": 414, |
|
"text": "Martin and White (2003)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The theory of appraisal proposed by Martin and White (2003) has three categories of evaluative text: affect, judgement, and appreciation. These categories respectively model opinions in terms of emotions, norms, and aesthetics. Utterances are viewed as indicating positive (\"praising\") or negative (\"blaming\") disposition towards some object (person, thing, action, situation, or event) . The judgement dimensions are normality, capacity, tenacity, veracity, and propriety. Each of the dimensions represents an answer to the following corresponding questions: The corpus used in this paper is annotated with the above judgement dimensions. Taboada and Grieve (2004) automatically categorized appraisal into affect, judgement, and appreciation using a lexical approach that groups adjectives according to their semantic orientation. Benamara et al. (2017) surveyed linguistic and computational approaches to the study of evaluative text. Their analysis suggested that appraisal is a richer and more detailed task amenable to computational approaches subject to availability of data. They envision that appraisal analysis can contribute to the advances in affective computing. Recently, Hofmann et al. (2020) showed that dimensions of appraisal can improve emotion detection in text. A similar observation was made by Whitelaw et al. (2005) who found appraisal phrases as useful features for sentiment analysis.", |
|
"cite_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 59, |
|
"text": "Martin and White (2003)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 386, |
|
"text": "(person, thing, action, situation, or event)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 640, |
|
"end": 665, |
|
"text": "Taboada and Grieve (2004)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 832, |
|
"end": 854, |
|
"text": "Benamara et al. (2017)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1185, |
|
"end": 1206, |
|
"text": "Hofmann et al. (2020)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1316, |
|
"end": 1338, |
|
"text": "Whitelaw et al. (2005)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper investigates the capabilities of machine learning models in predicting the dimensions of human judgement expressed in short texts (tweets) as part of the ALTA-2020 shared task on assessing human behaviour (Moll\u00e1, 2020) . The task aims to advance computational techniques for analysing evaluative language.", |
|
"cite_spans": [ |
|
{ |
|
"start": 216, |
|
"end": 229, |
|
"text": "(Moll\u00e1, 2020)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The use of neural networks has led to significant performance improvements in NLP tasks. However, neural networks require a large amount of labeled data. On the contrary, the traditional machine learning models such as NBSVM are competitive in low-data regimes (Wang and Manning, 2012; Aroyehun Label Normality Capacity Tenacity Veracity Propriety Proportion 0.11 0.16 0.11 0.015 0.18 Table 1 : Frequency of each label in the training set as a fraction of the total number of examples.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 385, |
|
"end": 392, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "and Gelbukh, 2018). The recently introduced contextual representation learning models (Peters et al., 2018; Devlin et al., 2019) are pre-trained with language modeling objective on a large and diverse collection of text. The learned representation can be transferred to downstream tasks via fine tuning (Howard and Ruder, 2018) . We examine the effectiveness of using NBSVM and fine tuning a Roberta-large model (Liu et al., 2019) for predicting dimensions of judgement expressed in short text.", |
|
"cite_spans": [ |
|
{ |
|
"start": 86, |
|
"end": 107, |
|
"text": "(Peters et al., 2018;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 108, |
|
"end": 128, |
|
"text": "Devlin et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 303, |
|
"end": 327, |
|
"text": "(Howard and Ruder, 2018)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 412, |
|
"end": 430, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Task. Given a short text predict one or more judgement dimensions expressed in the given text. This is a multilabel classification problem where the labels consist of the five judgement dimensions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Data. We employed the data provided by the organizers of the ALTA-2020 shared task (Moll\u00e1, 2020) . The training set has 198 tweets. Each example is annotated with the presence or absence of each of the judgement dimensions as outlined in Section 1. Table 1 shows the proportion of each label in the training set. The proportion ranges from 2% to 18%. The test set consists of 100 examples. About 50% each is used for the public and private leaderboards for the competition on Kaggle 1 In-class platform. The private leaderboard is used for the final ranking, the scores are available after the completion of the competition while the public leaderboard is used by the competition participants to evaluate their models during the competition. In our experiment using the Roberta-large model, we created a validation set by randomly sampling 10% of the training set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 96, |
|
"text": "(Moll\u00e1, 2020)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 249, |
|
"end": 256, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Data Pre-processing. We clean the text of each tweet by removing punctuation marks, digits, and repeated characters. We normalize URLS and usernames (tokens that starts with the @ symbol).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Hashtags are converted to their constituent word(s) after removing the # symbol.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "1 https://www.kaggle.com/ NBSVM. Wang and Manning (2012) proposed a support vector machine (SVM) model that uses the Naive Bayes log-count ratio as features. NBSVM is a strong linear model for text classification. In our implementation we use the logistic regression classifier in place of the SVM. The features are based on word n-grams (unigrams and bigrams). We experiment with and without the data pre-processing step. In the multi-label classification setting, we train a binary classifier per label with the same classifier settings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Roberta-large. An optimized BERT (Devlin et al., 2019) model trained for longer and on larger and more diverse text collection totalling 160GB. In addition, the pre-training tasks did not include next sentence prediction and the tokenizer is based on BPE (Liu et al., 2019) . We fine tune the model on the data provided by the task organizers without the data pre-processing step. We used the simpletransformers library 2 for our experiment. The classifier is a linear layer with sigmoid activation function. The hyperparameters are: maximum learning rate of 4e \u2212 5, number of epochs is 20 with early stopping on the validation loss using a patience of 3, batch size of 64, the model parameters are optimized using AdamW with a linear schedule and a warm up steps of 4 and the maximum sequence length is 128.", |
|
"cite_spans": [ |
|
{ |
|
"start": 33, |
|
"end": 54, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 255, |
|
"end": 273, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Prediction threshold. Lipton et al. (2014) studied the difficulty of relating the maximum achievable F1 score with the decision thresholds on predicted conditional probabilities. They observed that selecting predictions that maximize the F1 score is a function of the conditional probability assigned to an example and the distribution of conditional probabilities for other examples. Following this observation, we choose decision threshold for each label to track the distribution of conditional probabilities on the validation set without reference to the gold labels, to avoid overfitting. The default decision threshold is 0.5 and we find that the conditional probabilities are significantly less. We apply this heuristic to the model outputs of the Roberta-large model. Specifically, we set 0.2 as the decision threshold for the capacity label and 0.1 for the remaining labels.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Table 2 shows the results obtained on the test set split into two equal halves as the public and private leaderboards. With the NBSVM model, we achieved the best score of 0.16 on the public leaderboard. The application of data pre-processing step did not impact the performance of the NBSVM model, probably because the tokens removed are not relevant lexical units for the task. Following this observation, we did not apply the pre-processing step to our experiments with the Roberta-large model. The Roberta-large model obtained a relatively lower score on the public leaderboard and appears to generalize better on the other half of the test set as shown by the scores on the private leaderboard. There is a significant performance improvement due to the decision thresholding on the Roberta-large model outputs. With this strategy, we achieved the best overall score on the ALTA-2020 competition.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 7, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We address the task of automatically predicting judgement dimensions in the context of the ALTA-2020 shared task. We evaluated the performance of a strong linear classifier, NBSVM with n-grams as features and a recent pre-trained language model, Roberta-large. We observed that the NBSVM achieves our best score on the public leaderboard but it did not generalize to the private test set. The Roberta-large model with decision thresholding strategy showed consistent performance on both the public and private leaderboards. With this model, we achieved the best overall score on the competition. While we achieved better performance with the Roberta-large model, we think that the statistical power (Card et al., 2020) of the test set is limited due to the small sample size (100 examples).", |
|
"cite_spans": [ |
|
{ |
|
"start": 699, |
|
"end": 718, |
|
"text": "(Card et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "As such, it is difficult to differentiate performance improvement by chance from substantial model advantage. We hope to test our approaches on a larger test set in order to examine the robustness of our approaches.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "https://simpletransformers.ai/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The authors thank CONACYT for the computer resources provided through the INAOE Supercomputing Laboratory's Deep Learning Platform for Language Technologies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Aggression detection in social media: Using deep neural networks, data augmentation, and pseudo labeling", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Segun Taofeek Aroyehun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gelbukh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the First Workshop on Trolling, Aggression and Cyberbullying (TRAC-2018)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "90--97", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Segun Taofeek Aroyehun and Alexander Gelbukh. 2018. Aggression detection in social media: Us- ing deep neural networks, data augmentation, and pseudo labeling. In Proceedings of the First Work- shop on Trolling, Aggression and Cyberbullying (TRAC-2018), pages 90-97.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Evaluative language beyond bags of words: Linguistic insights and computational applications", |
|
"authors": [ |
|
{ |
|
"first": "Farah", |
|
"middle": [], |
|
"last": "Benamara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maite", |
|
"middle": [], |
|
"last": "Taboada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yannick", |
|
"middle": [], |
|
"last": "Mathieu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Computational Linguistics", |
|
"volume": "43", |
|
"issue": "1", |
|
"pages": "201--264", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Farah Benamara, Maite Taboada, and Yannick Mathieu. 2017. Evaluative language beyond bags of words: Linguistic insights and computational applications. Computational Linguistics, 43(1):201-264.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "With little power comes great responsibility", |
|
"authors": [ |
|
{ |
|
"first": "Dallas", |
|
"middle": [], |
|
"last": "Card", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Henderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Urvashi", |
|
"middle": [], |
|
"last": "Khandelwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Mahowald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9263--9274", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dallas Card, Peter Henderson, Urvashi Khandelwal, Robin Jia, Kyle Mahowald, and Dan Jurafsky. 2020. With little power comes great responsibility. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 9263-9274.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Appraisal theories for emotion classification in text", |
|
"authors": [ |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Hofmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Enrica", |
|
"middle": [], |
|
"last": "Troiano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Sassenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Klinger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2003.14155" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jan Hofmann, Enrica Troiano, Kai Sassenberg, and Roman Klinger. 2020. Appraisal theories for emotion classification in text. arXiv preprint arXiv:2003.14155.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Universal language model fine-tuning for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Howard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "328--339", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1031" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeremy Howard and Sebastian Ruder. 2018. Universal language model fine-tuning for text classification. In Proceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 328-339, Melbourne, Australia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Optimal thresholding of classifiers to maximize f1 measure", |
|
"authors": [ |
|
{ |
|
"first": "Zachary", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Lipton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Charles", |
|
"middle": [], |
|
"last": "Elkan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Balakrishnan", |
|
"middle": [], |
|
"last": "Naryanaswamy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Machine Learning and Knowledge Discovery in Databases", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "225--239", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zachary C. Lipton, Charles Elkan, and Balakrishnan Naryanaswamy. 2014. Optimal thresholding of clas- sifiers to maximize f1 measure. In Machine Learn- ing and Knowledge Discovery in Databases, pages 225-239, Berlin, Heidelberg. Springer Berlin Hei- delberg.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Roberta: A robustly optimized bert pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "The language of evaluation", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Martin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "White", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James R. Martin and Peter R. White. 2003. The lan- guage of evaluation, volume 2. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Overview of the 2020 ALTA Shared Task: Assess Human Behaviour", |
|
"authors": [ |
|
{ |
|
"first": "Diego", |
|
"middle": [], |
|
"last": "Moll\u00e1", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 18th Annual Workshop of the Australasian Language Technology Association", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diego Moll\u00e1. 2020. Overview of the 2020 ALTA Shared Task: Assess Human Behaviour. In Pro- ceedings of the 18th Annual Workshop of the Aus- tralasian Language Technology Association.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Deep contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2227--2237", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1202" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. In Proceedings of the 2018 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long Papers), pages 2227-2237, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Analyzing appraisal automatically", |
|
"authors": [ |
|
{ |
|
"first": "Maite", |
|
"middle": [], |
|
"last": "Taboada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jack", |
|
"middle": [], |
|
"last": "Grieve", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the AAAI Spring Symposium on Exploring Attitude and Affect in Text: Theories and Applications", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maite Taboada and Jack Grieve. 2004. Analyzing ap- praisal automatically. In In Proceedings of the AAAI Spring Symposium on Exploring Attitude and Affect in Text: Theories and Applications.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Baselines and bigrams: Simple, good sentiment and topic classification", |
|
"authors": [ |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Sida", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "90--94", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sida I. Wang and Christopher D. Manning. 2012. Base- lines and bigrams: Simple, good sentiment and topic classification. In Proceedings of the 50th Annual Meeting of the Association for Computational Lin- guistics (Volume 2: Short Papers), pages 90-94.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Using appraisal groups for sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Casey", |
|
"middle": [], |
|
"last": "Whitelaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Navendu", |
|
"middle": [], |
|
"last": "Garg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shlomo", |
|
"middle": [], |
|
"last": "Argamon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 14th ACM International Conference on Information and knowledge management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "625--631", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Casey Whitelaw, Navendu Garg, and Shlomo Argamon. 2005. Using appraisal groups for sentiment analysis. In Proceedings of the 14th ACM International Con- ference on Information and knowledge management, pages 625-631.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF2": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Mean F1 score on the public and private test sets. Average is the unweighted mean of the scores on the private and public leaderboards as they are approximately 50% each of the test set.", |
|
"html": null, |
|
"num": null |
|
} |
|
} |
|
} |
|
} |