|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:12:02.027460Z" |
|
}, |
|
"title": "CURAJ_IIITDWD@LT-EDI-ACL2022: Hope Speech Detection in English YouTube Comments using Deep Learning Techniques", |
|
"authors": [ |
|
{ |
|
"first": "Vanshita", |
|
"middle": [], |
|
"last": "Jha", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Central University of Rajasthan", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ankit", |
|
"middle": [ |
|
"Kumar" |
|
], |
|
"last": "Mishra", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Goa University", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Sunil", |
|
"middle": [], |
|
"last": "Saumya", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Hope Speech are positive terms that help to promote or criticise a point of view without hurting the user's or community's feelings. Non-Hope Speech, on the other side, includes expressions that are harsh, ridiculing, or demotivating. The goal of this article is to find the hope speech comments in a YouTube dataset. The datasets were created as part of the \"LT-EDI-ACL 2022: Hope Speech Detection for Equality, Diversity, and Inclusion\" shared task. The shared task dataset was proposed in Malayalam, Tamil, English, Spanish, and Kannada languages. In this paper, we worked at English-language YouTube comments. We employed several deep learning based models such as DNN (dense or fully connected neural network), CNN (Convolutional Neural Network), Bi-LSTM (Bidirectional Long Short Term Memory Network), and GRU(Gated Recurrent Unit) to identify the hopeful comments. We also used Stacked LSTM-CNN and Stacked LSTM-LSTM network to train the model. The best macro avg F1-score 0.67 for development dataset was obtained using the DNN model.The macro avg F1-score of 0.67 was achieved for the classification done on the test data as well.", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Hope Speech are positive terms that help to promote or criticise a point of view without hurting the user's or community's feelings. Non-Hope Speech, on the other side, includes expressions that are harsh, ridiculing, or demotivating. The goal of this article is to find the hope speech comments in a YouTube dataset. The datasets were created as part of the \"LT-EDI-ACL 2022: Hope Speech Detection for Equality, Diversity, and Inclusion\" shared task. The shared task dataset was proposed in Malayalam, Tamil, English, Spanish, and Kannada languages. In this paper, we worked at English-language YouTube comments. We employed several deep learning based models such as DNN (dense or fully connected neural network), CNN (Convolutional Neural Network), Bi-LSTM (Bidirectional Long Short Term Memory Network), and GRU(Gated Recurrent Unit) to identify the hopeful comments. We also used Stacked LSTM-CNN and Stacked LSTM-LSTM network to train the model. The best macro avg F1-score 0.67 for development dataset was obtained using the DNN model.The macro avg F1-score of 0.67 was achieved for the classification done on the test data as well.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In recent years, the majority of the world's population has access to social media. The social media's posts, comments, articles, and other content have a significant impact on everyone's lives. People tend to believe that their lives on social media are the same as their real lives, therefore the influence of others' opinions or expressions is enormous . People submit their posts to social networking platform and receive both positive and negative expressions from their peer users.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "People in a multilingual world use a variety of languages to express themselves, including English, Hindi, Malayalam, French, and others (Chakravarthi et al., 2021, 2020) . While the most effective expression in real life is face or visual expression, which frequently delivers a much more efficient message than linguistic words, expressions in virtual life, such as social media, are frequently expressed through linguistic texts (or words) and emoticons. These words have a significant impact on one's life (Sampath et al., 2022; Ravikiran et al., 2022; Chakravarthi et al., 2022b; Bharathi et al., 2022; Priyadharshini et al., 2022) . For example, if we respond to someone's social media post with \"Well Done!\", \"Very Good\", \"Must do it again\", \"need a little more practise\", and so on, it may instil confidence in the author. On the other hand, negative statements such as \"You should not try it\", \"You don't deserve it\", \"You are from a different religion\", and others demotivate the person. The comments that fall into the first group are referred to as \"Hope Speech\" while those that fall into the second category are referred to as \"Nonhope speech\" or \"Hate Speech\" (Kumar et al., 2020; Biradar et al., 2021) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 129, |
|
"text": "English, Hindi, Malayalam, French, and", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 130, |
|
"end": 170, |
|
"text": "others (Chakravarthi et al., 2021, 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 510, |
|
"end": 532, |
|
"text": "(Sampath et al., 2022;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 533, |
|
"end": 556, |
|
"text": "Ravikiran et al., 2022;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 557, |
|
"end": 584, |
|
"text": "Chakravarthi et al., 2022b;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 585, |
|
"end": 607, |
|
"text": "Bharathi et al., 2022;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 608, |
|
"end": 636, |
|
"text": "Priyadharshini et al., 2022)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1175, |
|
"end": 1195, |
|
"text": "(Kumar et al., 2020;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1196, |
|
"end": 1217, |
|
"text": "Biradar et al., 2021)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In the previous decade, researchers have worked heavily on hate speech identification in order to maintain social media clean and healthy. However, in order to improve the user experience, it is also necessary to emphasise the message of hope on these sites. To our knowledge, the shared task \"LT-EDI-EACL 2021: Hope Speech Detection for Equality, Diversity, and Inclusion 1 \" was the first attempt to recognise hope speech in YouTube comments. The organizers proposed the shared task in three different languages that is English, Tamil and Malayalam. Many research teams from all over the world took part in the shared task and contributed their working notes to describe how to identify the hope speech comments. (Saumya and Mishra, 2021 ) used a parallel network of CNN and LSTM with GloVe and Word2Vec embeddings and obtained a weighted F1-score of 0.91 for En-glish Dataset. Similarly, for Tamil and Malayalam they trained parallel Bidirectional LSTM model and obtained F1-score of 0.56 and 0.78 respectively. (Puranik et al., 2021) trained several fine tuned transformer models and identified that ULM-FiT is best for English with F1-score 0.9356. They also found that mBERT obtained 0.85 F1-score on Malayalam dataset and distilmBERT obtained 0.59 F1-score on Tamil dataset. For the same task a fine tuned ALBERT model was used by (Chen and Kong, 2021) and they obtained a F1-score of 0.91. Similarly, (Zhao and Tao, 2021; Huang and Bai, 2021; Ziehe et al., 2021; Mahajan et al., 2021) employed XLM-RoBERTa-Based Model with Attention for Hope Speech Detection. (Dave et al., 2021) experimented the conventional classifiers like logistic regression and support vector machine with TF_IDF character N-gram fearures for hope speech classification.", |
|
"cite_spans": [ |
|
{ |
|
"start": 715, |
|
"end": 739, |
|
"text": "(Saumya and Mishra, 2021", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 1015, |
|
"end": 1037, |
|
"text": "(Puranik et al., 2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1338, |
|
"end": 1359, |
|
"text": "(Chen and Kong, 2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1409, |
|
"end": 1429, |
|
"text": "(Zhao and Tao, 2021;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 1430, |
|
"end": 1450, |
|
"text": "Huang and Bai, 2021;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1451, |
|
"end": 1470, |
|
"text": "Ziehe et al., 2021;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1471, |
|
"end": 1492, |
|
"text": "Mahajan et al., 2021)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1568, |
|
"end": 1587, |
|
"text": "(Dave et al., 2021)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "ACL 2022 will see the introduction of a new edition of the shared task \"Hope Speech Detection for Equality, Diversity, and Inclusion.\" In contrast to LT-EDI-EACL 2021, this time the shared task LT-EDI-ACL 2022 has been proposed in five different languages: English, Malayalam, Tamil, Kannada, and Spanish (Chakravarthi, 2020; Chakravarthi and Muralidaran, 2021; Chakravarthi et al., 2022a). The data was extracted via the YouTube platform. We took part in the competition and worked on the dataset of English hope speech comments. The experiments were carried out on several neural network based models such as a dense or multilayer neural network (DNN), one layer CNN network (CNN), one layer Bi-LSTM network (Bi-LSTM), and one layer GRU network (GRU), among deep learning networks. The stack connections of LSTM-CNN and LSTM-LSTM were also trained for hope speech detection. After all experimentation, it was found that DNN produced the best results with macro average F1-score of 0.67 on development as well as on test dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of the article is oragnized as follows. The next section 2 give the details of the given task and dataset statistics. This is followed by the description of methodology used for experimentation in Section 3. The results are explained in the Section 4. At the end, Section 5 talks about future scope of the research. At LT-EDI-ACL 2022, the shared task on Hope Speech Detection for Equality, Diversity, and Inclusion (provided in English, Tamil, Spanish, Kannada, and Malayalam) intended to determine whether the given comment was Hope speech or not. The dataset was gathered from the YouTube platform. In the given dataset, there were two fields for each language: comment and label. We only submitted the system for the English dataset. In the English training dataset, there were approximately 22740 comments, with 1962 labeled as hope speech and 20778 labeled as non-hope speech. There were 2841 comments in the development dataset, with 272 hope speech and 2569 non-hope speech comments.The test dataset contained 250 hope speech and 2593 non-hope speech comments. The English dataset statistics is shown in Table 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1121, |
|
"end": 1128, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Several deep learning models were developed to identify the hope speech from supplied English YouTube comments. The architecture of our best model DNN, as depicted in Figure 1 , will be explained in this section. We also explain the architecture of stacked network LSTM-LSTM as shown in Figure 2 . Figure 2 : A stacked LSTM network for hope speech detection", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 167, |
|
"end": 175, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 287, |
|
"end": 295, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 298, |
|
"end": 306, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We used a few early procedures to convert the raw input comments into readable input vectors. We started with data cleaning and then moved on to data preprocessing. Every comment was changed to lower case during data cleaning. Numbers, punctuation, symbols, emojis, and links have all been removed. The nltk library was used to eliminate stopwords like the, an, and so on. Finally, the extra spaces were removed, resulting in a clean text. During data preprocessing, we first tokenized each comment in the dataset and created a bag of words with an index number for each unique word. The comments were then turned into an index sequence. The length of the encoded vectors was varied. After that, the encoded indices vector was padded to form an equal-length vector. In our case we kept the length of each vector as ten.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data cleaning and pre-processing", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Several deep learning classification models were developed. We started with a multilayer dense neural network (DNN) as shown in Figure 1 . After that, a single layer CNN model and a single layer Bi-LSTM model were constructed. Finally, we built stacked LSTM-CNN and stacked LSTM-LSTM models shown in Figure 2 . In Section 4, the results of each model are discussed. Regardless of the model, the feeding input and collecting output were the same in all instances. The whole process flow from input to output is depicted in Figures 1 and 2 . As can be seen, there were three stages to the process: data preparation, feature extraction, and classification. The biggest distinction among the models was in the feature extraction criterion.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 136, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 300, |
|
"end": 308, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 522, |
|
"end": 538, |
|
"text": "Figures 1 and 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Classification Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To demonstrate the model flow, a representative example from the English Dataset is used. The representative example \"Sasha Dumse God Accepts Everyone.\" was first changed to lower case as \"sasha dumse god accepts everyone.\". During the data cleaning process, the dot(.) is eliminated. The lowercase text was then encoded and padded into a sequence list as \" [3005, 4871, 466, 48, 25] \". The index \"3005\" refers to the word \"sasha\", the index \"4871\" to the word \"dumse,\" and so on. The sentence was padded into the length of ten.", |
|
"cite_spans": [ |
|
{ |
|
"start": 358, |
|
"end": 364, |
|
"text": "[3005,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 365, |
|
"end": 370, |
|
"text": "4871,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 371, |
|
"end": 375, |
|
"text": "466,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 376, |
|
"end": 379, |
|
"text": "48,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 380, |
|
"end": 383, |
|
"text": "25]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Classification Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "After preprocessing the data, each index is turned into a one-hot vector ( of 1x20255 dimension) with a size equal to the vocabulary. The resultant one-hot vector was sparse and high dimensional, and it was then passed through an embedding layer, yielding a low dimensional dense embedding vector (of 1x 300 dimension). Between the input and embedding layers, many sets of weights were used. We experimented with random weights as well as pre-trained Word2Vec and GloVe weights, but found that random weights initialization at the embedding layer performed better. As a result, we've only covered the usage of random weights at the embedding layer in this article. For abstract level feature extraction, the embedded vector was provided as an input to a stacked DNN or LSTM layer as shown in Figures 1 and 2 respectively. Finally, the collected features were classified into hope and non-hope categories using a dense (or an output) layer. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 792, |
|
"end": 807, |
|
"text": "Figures 1 and 2", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Classification Models", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "All of the experiments were carried out in the Keras and sklearn environment. We used the pandas library to read the datasets. Keras preprocessing classes and the nltk library were used to prepare the dataset. All the results shown in Table 2 is on English development dataset. The initial experiment was with dense neural network (DNN). The three layers of dense network with relu activation (in the internal layer) and sigmoid activation at the output layer were trained with English comment dataset. Similarly, the experiments were performed with CNN, Bi-LSTM, GRU, LSTM-CNN, and LSTM-LSTM. The best result was obtained by DNN with macro average F1-score 0.67. The results of other models are shown in Table 2 . Later, once the labels for test dataset was released by the organizers, we also collected the model performance on test dataset. The macro average F1-score achieved after categorising the test data from the DNN model was 0.67, which was the same as in the case of development data. Table 3 lists the test dataset results produced from the DNN model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 235, |
|
"end": 242, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 705, |
|
"end": 712, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 997, |
|
"end": 1035, |
|
"text": "Table 3 lists the test dataset results", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "As part of the joint task LT-EDI-ACL2022, we presented a model provided by team CURJ_IIITDWD for detecting hope speech on an English dataset obtained from the YouTube platform. We used many deep learning algorithms in the paper and found that DNN with three hidden layers performed best on the development and test dataset with a macro average F1-score of 0.67. In the future, we can improve classification performance by training transfer learning models like BERT and ULMFiT ans so on.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "https://sites.google.com/view/lt-edi-2021/home", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Arunaggiri Pandian, and Swetha Valli. 2022. Findings of the shared task on Speech Recognition for Vulnerable Individuals in Tamil", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Bharathi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Subalalitha Chinnaudayar Navaneethakrishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sripriya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the Second Workshop on Language Technology for", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "B Bharathi, Bharathi Raja Chakravarthi, Subalalitha Chinnaudayar Navaneethakrishnan, N Sripriya, Arunaggiri Pandian, and Swetha Valli. 2022. Find- ings of the shared task on Speech Recognition for Vulnerable Individuals in Tamil. In Proceedings of the Second Workshop on Language Technology for", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Diversity and Inclusion", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Equality", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Equality, Diversity and Inclusion. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Hate or non-hate: Translation based hate speech identification in code-mixed hinglish data set", |
|
"authors": [ |
|
{ |
|
"first": "Shankar", |
|
"middle": [], |
|
"last": "Biradar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunil", |
|
"middle": [], |
|
"last": "Saumya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arun", |
|
"middle": [], |
|
"last": "Chauhan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "2021 IEEE International Conference on Big Data (Big Data)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2470--2475", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shankar Biradar, Sunil Saumya, and Arun Chauhan. 2021. Hate or non-hate: Translation based hate speech identification in code-mixed hinglish data set. In 2021 IEEE International Conference on Big Data (Big Data), pages 2470-2475. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "HopeEDI: A multilingual hope speech detection dataset for equality, diversity, and inclusion", |
|
"authors": [ |
|
{ |
|
"first": "Chakravarthi", |
|
"middle": [], |
|
"last": "Bharathi Raja", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--53", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi. 2020. HopeEDI: A mul- tilingual hope speech detection dataset for equality, diversity, and inclusion. In Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Me- dia, pages 41-53, Barcelona, Spain (Online). Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Findings of the shared task on hope speech detection for equality, diversity, and inclusion", |
|
"authors": [ |
|
{ |
|
"first": "Vigneshwaran", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Muralidaran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "61--72", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi and Vigneshwaran Mural- idaran. 2021. Findings of the shared task on hope speech detection for equality, diversity, and inclu- sion. In Proceedings of the First Workshop on Lan- guage Technology for Equality, Diversity and Inclu- sion, pages 61-72, Kyiv. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Findings of the shared task on Hope Speech Detection for Equality, Diversity, and Inclusion", |
|
"authors": [ |
|
{ |
|
"first": "Vigneshwaran", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Muralidaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Subalalitha Chinnaudayar Navaneethakrishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miguel", |
|
"middle": [ |
|
"\u00c1ngel" |
|
], |
|
"last": "Phillip Mccrae", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salud", |
|
"middle": [], |
|
"last": "Garc\u00eda-Cumbreras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rafael", |
|
"middle": [], |
|
"last": "Mar\u00eda Jim\u00e9nez-Zafra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prasanna", |
|
"middle": [], |
|
"last": "Valencia-Garc\u00eda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Kumar Kumaresan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Ponnusamy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jos\u00e9 Antonio Garc\u00eda-D\u00edaz", |
|
"middle": [], |
|
"last": "Garc\u00eda-Baena", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Proceedings of the Second Workshop on Language Technology for Equality, Diversity and Inclusion. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, Vigneshwaran Muralidaran, Ruba Priyadharshini, Subalalitha Chinnaudayar Na- vaneethakrishnan, John Phillip McCrae, Miguel \u00c1n- gel Garc\u00eda-Cumbreras, Salud Mar\u00eda Jim\u00e9nez-Zafra, Rafael Valencia-Garc\u00eda, Prasanna Kumar Kumare- san, Rahul Ponnusamy, Daniel Garc\u00eda-Baena, and Jos\u00e9 Antonio Garc\u00eda-D\u00edaz. 2022a. Findings of the shared task on Hope Speech Detection for Equality, Diversity, and Inclusion. In Proceedings of the Sec- ond Workshop on Language Technology for Equality, Diversity and Inclusion. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Corpus creation for sentiment analysis in code-mixed Tamil-English text", |
|
"authors": [ |
|
{ |
|
"first": "Vigneshwaran", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Muralidaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"Philip" |
|
], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mccrae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "202--210", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, Vigneshwaran Muralidaran, Ruba Priyadharshini, and John Philip McCrae. 2020. Corpus creation for sentiment analysis in code-mixed Tamil-English text. In Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collabora- tion and Computing for Under-Resourced Languages (CCURL), pages 202-210, Marseille, France. Euro- pean Language Resources association.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Prasanna Kumar Kumaresan, and Rahul Ponnusamy. 2022b. Findings of the shared task on Homophobia Transphobia Detection in Social Media Comments", |
|
"authors": [ |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thenmozhi", |
|
"middle": [], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"Phillip" |
|
], |
|
"last": "Durairaj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Mccrae", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Buitaleer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the Second Workshop on Language Technology for Equality, Diversity and Inclusion. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, Ruba Priyadharshini, Then- mozhi Durairaj, John Phillip McCrae, Paul Buitaleer, Prasanna Kumar Kumaresan, and Rahul Ponnusamy. 2022b. Findings of the shared task on Homophobia Transphobia Detection in Social Media Comments. In Proceedings of the Second Workshop on Language Technology for Equality, Diversity and Inclusion. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Dataset for identification of homophobia and transophobia in multilingual YouTube comments", |
|
"authors": [ |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prasanna", |
|
"middle": [], |
|
"last": "Ponnusamy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kayalvizhi", |
|
"middle": [], |
|
"last": "Kumar Kumaresan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Durairaj", |
|
"middle": [], |
|
"last": "Sampath", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sathiyaraj", |
|
"middle": [], |
|
"last": "Thenmozhi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajendran", |
|
"middle": [], |
|
"last": "Thangasamy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"Phillip" |
|
], |
|
"last": "Nallathambi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mccrae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2109.00227" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, Ruba Priyadharshini, Rahul Ponnusamy, Prasanna Kumar Kumaresan, Kayalvizhi Sampath, Durairaj Thenmozhi, Sathi- yaraj Thangasamy, Rajendran Nallathambi, and John Phillip McCrae. 2021. Dataset for identi- fication of homophobia and transophobia in mul- tilingual YouTube comments. arXiv preprint arXiv:2109.00227.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "2021. cs_english@ LT-EDI-EACL2021: Hope Speech Detection Based On Finetuning ALBERT Model", |
|
"authors": [ |
|
{ |
|
"first": "Shi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "128--131", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shi Chen and Bing Kong. 2021. cs_english@ LT-EDI- EACL2021: Hope Speech Detection Based On Fine- tuning ALBERT Model. In Proceedings of the First Workshop on Language Technology for Equality, Di- versity and Inclusion, pages 128-131.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "IRNLP_DAIICT@ LT-EDI-EACL2021: hope speech detection in code mixed text using TF-IDF char n-grams and MuRIL", |
|
"authors": [ |
|
{ |
|
"first": "Shripad", |
|
"middle": [], |
|
"last": "Bhargav Dave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prasenjit", |
|
"middle": [], |
|
"last": "Bhat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Majumder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "114--117", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bhargav Dave, Shripad Bhat, and Prasenjit Majumder. 2021. IRNLP_DAIICT@ LT-EDI-EACL2021: hope speech detection in code mixed text using TF-IDF char n-grams and MuRIL. In Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion, pages 114-117.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "TEAM HUB@ LT-EDI-EACL2021: hope speech detection based on pre-trained language model", |
|
"authors": [ |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Bai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "122--127", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bo Huang and Yang Bai. 2021. TEAM HUB@ LT- EDI-EACL2021: hope speech detection based on pre-trained language model. In Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion, pages 122-127.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "NITP-AI-NLP@ HASOC-Dravidian-CodeMix-FIRE2020: A Machine Learning Approach to Identify Offensive Languages from Dravidian Code-Mixed Text", |
|
"authors": [ |
|
{ |
|
"first": "Abhinav", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunil", |
|
"middle": [], |
|
"last": "Saumya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jyoti", |
|
"middle": [ |
|
"Prakash" |
|
], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "FIRE (Working Notes)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "384--390", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhinav Kumar, Sunil Saumya, and Jyoti Prakash Singh. 2020. NITP-AI-NLP@ HASOC-Dravidian- CodeMix-FIRE2020: A Machine Learning Approach to Identify Offensive Languages from Dravidian Code-Mixed Text. In FIRE (Working Notes), pages 384-390.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Findings of shared task on offensive language identification in Tamil and Malayalam", |
|
"authors": [ |
|
{ |
|
"first": "Prasanna", |
|
"middle": [], |
|
"last": "Kumar Kumaresan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ratnasingam", |
|
"middle": [], |
|
"last": "Sakuntharaj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sajeetha", |
|
"middle": [], |
|
"last": "Thavareesan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Subalalitha", |
|
"middle": [], |
|
"last": "Navaneethakrishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anand", |
|
"middle": [], |
|
"last": "Kumar Madasamy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "McCrae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Forum for Information Retrieval Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "16--18", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Prasanna Kumar Kumaresan, Ratnasingam Sakuntharaj, Sajeetha Thavareesan, Subalalitha Navaneethakr- ishnan, Anand Kumar Madasamy, Bharathi Raja Chakravarthi, and John P McCrae. 2021. Findings of shared task on offensive language identification in Tamil and Malayalam. In Forum for Information Retrieval Evaluation, pages 16-18.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Teamuncc@ lt-edi-eacl2021: Hope speech detection using transfer learning with transformers", |
|
"authors": [ |
|
{ |
|
"first": "Khyati", |
|
"middle": [], |
|
"last": "Mahajan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erfan", |
|
"middle": [], |
|
"last": "Al-Hossami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samira", |
|
"middle": [], |
|
"last": "Shaikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "136--142", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Khyati Mahajan, Erfan Al-Hossami, and Samira Shaikh. 2021. Teamuncc@ lt-edi-eacl2021: Hope speech de- tection using transfer learning with transformers. In Proceedings of the First Workshop on Language Tech- nology for Equality, Diversity and Inclusion, pages 136-142.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Findings of the shared task on Abusive Comment Detection in Tamil", |
|
"authors": [ |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thenmozhi", |
|
"middle": [], |
|
"last": "Subalalitha Chinnaudayar Navaneethakrishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Malliga", |
|
"middle": [], |
|
"last": "Durairaj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kogilavani", |
|
"middle": [], |
|
"last": "Subramanian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Shanmugavadivel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "U", |
|
"middle": [], |
|
"last": "Siddhanth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prasanna", |
|
"middle": [], |
|
"last": "Hegde", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kumar Kumaresan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Proceedings of the Second Workshop on Speech and Language Technologies for Dravidian Languages. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruba Priyadharshini, Bharathi Raja Chakravarthi, Sub- alalitha Chinnaudayar Navaneethakrishnan, Then- mozhi Durairaj, Malliga Subramanian, Kogila- vani Shanmugavadivel, Siddhanth U Hegde, and Prasanna Kumar Kumaresan. 2022. Findings of the shared task on Abusive Comment Detection in Tamil. In Proceedings of the Second Workshop on Speech and Language Technologies for Dravidian Languages. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Overview of the DravidianCodeMix 2021 shared task on sentiment detection in Tamil, Malayalam, and Kannada", |
|
"authors": [ |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sajeetha", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhivya", |
|
"middle": [], |
|
"last": "Thavareesan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Durairaj", |
|
"middle": [], |
|
"last": "Chinnappa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Thenmozhi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ponnusamy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Forum for Information Retrieval Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4--6", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruba Priyadharshini, Bharathi Raja Chakravarthi, Sajeetha Thavareesan, Dhivya Chinnappa, Durairaj Thenmozhi, and Rahul Ponnusamy. 2021. Overview of the DravidianCodeMix 2021 shared task on senti- ment detection in Tamil, Malayalam, and Kannada. In Forum for Information Retrieval Evaluation, pages 4-6.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Ruba Priyadharshini, Sajeetha Thavareesan, and Bharathi Raja Chakravarthi. 2021. IIITT@ LT-EDI-EACL2021-hope speech detection: there is always hope in transformers", |
|
"authors": [ |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Puranik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adeep", |
|
"middle": [], |
|
"last": "Hande", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2104.09066" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karthik Puranik, Adeep Hande, Ruba Priyad- harshini, Sajeetha Thavareesan, and Bharathi Raja Chakravarthi. 2021. IIITT@ LT-EDI-EACL2021- hope speech detection: there is always hope in trans- formers. arXiv preprint arXiv:2104.09066.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Ratnavel Rajalakshmi, Sajeetha Thavareesan, Rahul Ponnusamy, and Shankar Mahadevan. 2022. Findings of the shared task on Offensive Span Identification in code-mixed Tamil-English comments", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manikandan Ravikiran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anand", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sangeetha", |
|
"middle": [], |
|
"last": "Kumar Madasamy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sivanesan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Proceedings of the Second Workshop on Speech and Language Technologies for Dravidian Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manikandan Ravikiran, Bharathi Raja Chakravarthi, Anand Kumar Madasamy, Sangeetha Sivanesan, Rat- navel Rajalakshmi, Sajeetha Thavareesan, Rahul Pon- nusamy, and Shankar Mahadevan. 2022. Findings of the shared task on Offensive Span Identification in code-mixed Tamil-English comments. In Pro- ceedings of the Second Workshop on Speech and Language Technologies for Dravidian Languages. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Kishor Kumar Ponnusamy, and Santhiya Pandiyan. 2022. Findings of the shared task on Emotion Analysis in Tamil", |
|
"authors": [ |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kogilavani", |
|
"middle": [], |
|
"last": "Subalalitha Chinnaudayar Navaneethakrishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sajeetha", |
|
"middle": [], |
|
"last": "Shanmugavadivel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sathiyaraj", |
|
"middle": [], |
|
"last": "Thavareesan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Parameswari", |
|
"middle": [], |
|
"last": "Thangasamy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adeep", |
|
"middle": [], |
|
"last": "Krishnamurthy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sean", |
|
"middle": [], |
|
"last": "Hande", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Benhur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Proceedings of the Second Workshop on Speech and Language Technologies for Dravidian Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, Ruba Priyadharshini, Subalalitha Chinnaudayar Navaneethakrishnan, Kogilavani Shanmugavadivel, Sajeetha Thavareesan, Sathiyaraj Thangasamy, Parameswari Krishna- murthy, Adeep Hande, Sean Benhur, Kishor Kumar Ponnusamy, and Santhiya Pandiyan. 2022. Findings of the shared task on Emotion Analysis in Tamil. In Proceedings of the Second Workshop on Speech and Language Technologies for Dravidian Languages. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Offensive language identification in dravidian code mixed social media text", |
|
"authors": [ |
|
{ |
|
"first": "Sunil", |
|
"middle": [], |
|
"last": "Saumya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhinav", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jyoti", |
|
"middle": [ |
|
"Prakash" |
|
], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "36--45", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sunil Saumya, Abhinav Kumar, and Jyoti Prakash Singh. 2021. Offensive language identification in dravidian code mixed social media text. In Proceedings of the First Workshop on Speech and Language Technolo- gies for Dravidian Languages, pages 36-45.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "IIIT_DWD@ LT-EDI-EACL2021: hope speech detection in YouTube multilingual comments", |
|
"authors": [ |
|
{ |
|
"first": "Sunil", |
|
"middle": [], |
|
"last": "Saumya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankit", |
|
"middle": [], |
|
"last": "Kumar Mishra", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "107--113", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sunil Saumya and Ankit Kumar Mishra. 2021. IIIT_DWD@ LT-EDI-EACL2021: hope speech de- tection in YouTube multilingual comments. In Pro- ceedings of the First Workshop on Language Tech- nology for Equality, Diversity and Inclusion, pages 107-113.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "ZYJ@ LT-EDI-EACL2021: XLM-RoBERTa-based model with attention for hope speech detection", |
|
"authors": [ |
|
{ |
|
"first": "Yingjia", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Tao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "118--121", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yingjia Zhao and Xin Tao. 2021. ZYJ@ LT-EDI- EACL2021: XLM-RoBERTa-based model with at- tention for hope speech detection. In Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion, pages 118-121.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "GCDH@ LT-EDI-EACL2021: XLM-RoBERTa for hope speech detection in English, Malayalam, and Tamil", |
|
"authors": [ |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Ziehe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franziska", |
|
"middle": [], |
|
"last": "Pannach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aravind", |
|
"middle": [], |
|
"last": "Krishnan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "132--135", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stefan Ziehe, Franziska Pannach, and Aravind Krish- nan. 2021. GCDH@ LT-EDI-EACL2021: XLM- RoBERTa for hope speech detection in English, Malayalam, and Tamil. In Proceedings of the First Workshop on Language Technology for Equality, Di- versity and Inclusion, pages 132-135.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "sasha dumse god accepts everyone Data cleaning: converting lowercase, removing number, punctuation, space, links and removing stopwords (Raw Example Sentence from training set) (Cleaned text) Data preprocessing: Tokenization and bag of words creation, providing indices to every unique word, encoding, and padding (A DNN network for hope speech detection 2 Task and data description" |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"text": "English Dataset statistics", |
|
"num": null, |
|
"content": "<table><tr><td/><td/><td/><td/><td/><td/><td>Hope</td><td>Non-Hope</td></tr><tr><td/><td>Feature extraction using</td><td>stacked LSTM</td><td>LSTM (256) LSTM (256)</td><td>LSTM (256) LSTM (256)</td><td>LSTM (256) LSTM (256)</td><td>LSTM (256) LSTM (256)</td><td>LSTM (256) LSTM (256)</td></tr><tr><td>Data cleaning and Pre-processing</td><td>(Cleaned text) (Preprocessed text)</td><td colspan=\"6\">sasha dumse god accepts everyone Data cleaning: converting lowercase, removing number, punctuation, space, links and removing stopwords Data preprocessing: Tokenization and bag of words creation, providing indices to every unique word, encoding, and padding Here indices are taken randomly. [ 466, 25 ] 4871, 3005, 48,</td></tr><tr><td/><td/><td/><td/><td colspan=\"3\">(Raw Example Sentence from training set)</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"text": "Results of English Development dataset", |
|
"num": null, |
|
"content": "<table><tr><td>Methods</td><td>Metrics</td><td colspan=\"4\">Non-Hope Hope Macro Avg Weighted Avg</td></tr><tr><td/><td colspan=\"2\">Precision 0.43</td><td>0.93</td><td>0.68</td><td>0.89</td></tr><tr><td>DNN</td><td colspan=\"2\">Recall F1-score 0.40 0.38</td><td>0.95 0.94</td><td>0.66 0.67</td><td>0.89 0.89</td></tr><tr><td/><td colspan=\"2\">Precision 0.39</td><td>0.93</td><td>0.66</td><td>0.88</td></tr><tr><td>CNN</td><td colspan=\"2\">Recall F1-score 0.38 0.37</td><td>0.94 0.94</td><td>0.65 0.66</td><td>0.88 0.88</td></tr><tr><td/><td colspan=\"2\">Precision 0.39</td><td>0.94</td><td>0.66</td><td>0.88</td></tr><tr><td>Bi-LSTM</td><td colspan=\"2\">Recall F1-score 0.40 0.40</td><td>0.93 0.94</td><td>0.67 0.67</td><td>0.88 0.88</td></tr><tr><td/><td colspan=\"2\">Precision 0.39</td><td>0.94</td><td>0.66</td><td>0.88</td></tr><tr><td>GRU</td><td colspan=\"2\">Recall F1-score 0.38 0.38</td><td>0.94 0.94</td><td>0.66 0.66</td><td>0.88 0.88</td></tr><tr><td/><td colspan=\"2\">Precision 0.41</td><td>0.93</td><td>0.67</td><td>0.88</td></tr><tr><td>LSTM-CNN</td><td colspan=\"2\">Recall F1-score 0.38 0.35</td><td>0.95 0.94</td><td>0.65 0.67</td><td>0.89 0.87</td></tr><tr><td/><td colspan=\"2\">Precision 0.34</td><td>0.94</td><td>0.64</td><td>0.88</td></tr><tr><td>LSTM-LSTM</td><td colspan=\"2\">Recall F1-score 0.38 0.43</td><td>0.91 0.92</td><td>0.67 0.65</td><td>0.86 0.89</td></tr></table>", |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"text": "Results of English test dataset", |
|
"num": null, |
|
"content": "<table><tr><td>Metrics Non-Hope Hope Macro Avg Weighted Avg 0.94 0.67 0.89 Precision 0.40 0.38 0.94 0.66 0.90 Recall F1 score 0.39 0.94 0.67 0.89</td></tr></table>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |