|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:12:24.106516Z" |
|
}, |
|
"title": "CFILT IIT Bombay@LT-EDI-EACL2021: Hope Speech Detection for Equality, Diversity, and Inclusion using Multilingual Representation from Transformers", |
|
"authors": [ |
|
{ |
|
"first": "Pankaj", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology Bombay Mumbai", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology Bombay Mumbai", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "With the internet becoming part and parcel of our lives, engagement in social media has increased a lot. Identifying and eliminating offensive content from social media has become of utmost priority to prevent any kind of violence. However, detecting encouraging, supportive and positive content is equally important to prevent misuse of censorship targeted to attack freedom of speech. This paper presents our system for the shared task Hope Speech Detection for Equality, Diversity, and Inclusion at LT-EDI, EACL 2021. The data for this shared task is provided in English, Tamil, and Malayalam which was collected from YouTube comments. It is a multiclass classification problem where each data instance is categorized into one of the three classes: 'Hope speech', 'Not hope speech', and 'Not in intended language'. We propose a system that employs multilingual transformer models to obtain the representation of text and classifies it into one of the three classes. We explored the use of multilingual models trained specifically for Indian languages along with generic multilingual models. Our system was ranked 2 nd for English, 2 n d for Malayalam, and 7 th for the Tamil language in the final leader board published by organizers and obtained a weighted F1-score of 0.92, 0.84, 0.55 respectively on the hidden test dataset used for the competition. We have made our system publicly available at GitHub.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "With the internet becoming part and parcel of our lives, engagement in social media has increased a lot. Identifying and eliminating offensive content from social media has become of utmost priority to prevent any kind of violence. However, detecting encouraging, supportive and positive content is equally important to prevent misuse of censorship targeted to attack freedom of speech. This paper presents our system for the shared task Hope Speech Detection for Equality, Diversity, and Inclusion at LT-EDI, EACL 2021. The data for this shared task is provided in English, Tamil, and Malayalam which was collected from YouTube comments. It is a multiclass classification problem where each data instance is categorized into one of the three classes: 'Hope speech', 'Not hope speech', and 'Not in intended language'. We propose a system that employs multilingual transformer models to obtain the representation of text and classifies it into one of the three classes. We explored the use of multilingual models trained specifically for Indian languages along with generic multilingual models. Our system was ranked 2 nd for English, 2 n d for Malayalam, and 7 th for the Tamil language in the final leader board published by organizers and obtained a weighted F1-score of 0.92, 0.84, 0.55 respectively on the hidden test dataset used for the competition. We have made our system publicly available at GitHub.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The prominence of web-based media is expanding quickly because it is being used to create and share content, even by those who are ignorant of online media. Several web platforms allow users to add textual feedback on non-textual content, such as images, photos, animations, etc. With millions of videos posted by its users and billions of comments on all these videos, YouTube is undoubtedly the most famous of them.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Online social media comments/posts have been examined to identify and avoid the propagation of negativity using strategies such as detecting abusive language detection (Lee et al., 2018) and hate speech (Schmidt and Wiegand, 2017) . There is a lot of work that is being done to remove the negativity from the web but Hope speech detection focuses to spread positivity by detecting content that is encouraging, positive, and supportive.", |
|
"cite_spans": [ |
|
{ |
|
"start": 168, |
|
"end": 186, |
|
"text": "(Lee et al., 2018)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 203, |
|
"end": 230, |
|
"text": "(Schmidt and Wiegand, 2017)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "When it comes to hope speech detection there has not been much work done but recently the NLP community has started showing interest in this area. In a work by Palakodety et al. (2019) , they have analyzed YouTube comments and performed the task of hope speech detection to identify hostilitydiffusing content. Here the authors have not taken other aspects like equality, diversity, and inclusion into account.", |
|
"cite_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 184, |
|
"text": "Palakodety et al. (2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Chakravarthi et al. (2020) did work in Indian languages where they manually annotated the YouTube comments for Tamil and Malayalam languages for performing sentiment analysis. In a similar work, Chakravarthi (2020) released the dataset consisting of YouTube comments with hope and not-hope speech annotation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Hate speech is a well-researched area related to Hope speech. According to the survey done by Schmidt and Wiegand (2017) , automatic hate detection was needed due to a large number of people using the net and the massive scale with which the web is growing. Zhang and Luo (2018) used deep neural networks for the task and they were able to outperform the best performing method by up to 5 percentage points in macro-average F1-score.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 120, |
|
"text": "Schmidt and Wiegand (2017)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 258, |
|
"end": 278, |
|
"text": "Zhang and Luo (2018)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Multilingual BERT (Pires et al., 2019 ) is a variant of BERT (Devlin et al., 2019) that has been heavily used by the NLP community. Pires et al. (2019) in their work showed that multilingual rep-resentation by multilingual BERT handles cross linguality without being explicitly trained for it. It also handles transfer across scripts and to codeswitching fairly well. Conneau et al. (2020) proposed another variant of the BERT model called XLM-RoBERTa by pre-training multilingual models at scale. There have been various attempts to tackle problems related to Indian languages by training transformer models specifically for Indian languages. Indic BERT (Kakwani et al., 2020) and MuRIL (https://tfhub.dev/google/ MuRIL/1) are two such transformer-based language models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 37, |
|
"text": "(Pires et al., 2019", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 61, |
|
"end": 82, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 132, |
|
"end": 151, |
|
"text": "Pires et al. (2019)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 368, |
|
"end": 389, |
|
"text": "Conneau et al. (2020)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 655, |
|
"end": 677, |
|
"text": "(Kakwani et al., 2020)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
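
{

"text": "As an illustration of how such encoders can be used, the following minimal sketch loads the four multilingual models discussed in this paper with the Hugging Face Transformers library. The model identifiers are our assumptions for illustration (the paper itself references MuRIL via TF Hub, not Hugging Face):\n\nfrom transformers import AutoModel, AutoTokenizer\n\n# Assumed model identifiers for the four encoders discussed above.\nMODEL_IDS = {\n    'mbert': 'bert-base-multilingual-cased',\n    'xlmr': 'xlm-roberta-base',\n    'indicbert': 'ai4bharat/indic-bert',\n    'muril': 'google/muril-base-cased',\n}\n\ndef load_encoder(name):\n    # Return a (tokenizer, model) pair for the chosen multilingual encoder.\n    model_id = MODEL_IDS[name]\n    return AutoTokenizer.from_pretrained(model_id), AutoModel.from_pretrained(model_id)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},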
|
{ |
|
"text": "We plan to tackle the Hope speech discovery for the English, Tamil, and Malayalam language by obtaining representation from multilingual transformer models. Tamil and Malayalam are Dravidian languages locally spoken in the states of Tamil Nadu and Kerala respectively on Indian territory. For a country like India, where people speak many languages, code-mixing is fairly common (Barman et al., 2014; Bali et al., 2014; Gupta et al., 2018) . The dataset for this task is code-mixed such as tag, inter-sentential, and intra-sentential (Chakravarthi, 2020).", |
|
"cite_spans": [ |
|
{ |
|
"start": 379, |
|
"end": 400, |
|
"text": "(Barman et al., 2014;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 401, |
|
"end": 419, |
|
"text": "Bali et al., 2014;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 420, |
|
"end": 439, |
|
"text": "Gupta et al., 2018)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The shared task was launched as a part of the first workshop on Language Technology for Equality, Diversity, Inclusion (LT-EDI-EACL-2021) (Chakravarthi and Muralidaran, 2021). It was conducted for English, Tamil, and Malayalam language categories. The task was to classify a given piece of text scraped from YouTube comments into one of the three possible categories. These three possible categories or labels were: The rest of the paper is paper is organized as follows. In Section 2, we provide the dataset details and statistics. Section 3 consists of our system description and architecture details. In Section 4, we describe our experimental setups and report our results. We conclude this paper in Section 5 and briefly discuss our future plans for tackling this problem.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
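
{

"text": "For the code sketches in later sections, we assume the following simple label encoding; the label names follow the class names given above, while the integer ids are an arbitrary choice of ours:\n\n# The three classes of the shared task, mapped to integer ids for training.\nLABELS = ['Hope speech', 'Not hope speech', 'Not in intended language']\nLABEL2ID = {label: i for i, label in enumerate(LABELS)}\nID2LABEL = {i: label for label, i in LABEL2ID.items()}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},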
|
{ |
|
"text": "The organizers of LT-EDI 2021 have provided the dataset for training, validation, and testing of the Language Label 1 Label 2 Label 3 English 20,778 1962 22 Tamil 7,872 6,327 1,961 Malayalam 6,205 1,668 691 Table 1 : Distribution of training data samples across three classes systems for the shared tasks. The dataset consists of pairs of text and their corresponding label. Training and validation sets were provided with labels for developing the systems for the given shared task and a test set was supplied without ground truth labels for a fair evaluation and publishing final results and team ranking among participants.However, the labels for test data were also made available once competition concluded and final results were declared. However, the labels for test data were also made available once the competition concluded and the final results were declared. The dataset was unevenly distributed across three possible classes for all three languages. Table 1 shows the class distribution of the dataset for English, Tamil, and Malayalam.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
|
{ |
|
"start": 978, |
|
"end": 985, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "2" |
|
}, |
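
{

"text": "A minimal sketch for reproducing the class counts in Table 1 is shown below; the file name and the two-column tab-separated (text, label) format are assumptions about the organizers' data release:\n\nimport csv\nfrom collections import Counter\n\ndef label_distribution(path):\n    # Count training examples per class from an assumed (text, label) TSV file.\n    counts = Counter()\n    with open(path, encoding='utf-8') as f:\n        for text, label in csv.reader(f, delimiter='\\t'):\n            counts[label] += 1\n    return counts\n\n# Hypothetical usage:\n# print(label_distribution('english_hope_train.tsv'))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dataset",

"sec_num": "2"

},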
|
{ |
|
"text": "The core deep learning model of our system consists of BERT-based transformer model in a multilingual setting. We did not perform any kind of pre-processing of the text data to avoid computational overhead at run time and to evaluate efficacy of multilingual transformers on raw text. We obtained a pooled 768-dimensional vector representation for the entire raw text of each instance. This vector is then fed to a softmax layer which gives the probability distribution of the sentence being from given three possible classes. This setup of trained end-to-end with cross-entropy loss function and F1-score as an evaluation metric.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Description", |
|
"sec_num": "3" |
|
}, |
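
{

"text": "The following PyTorch sketch mirrors the architecture described above: a multilingual encoder produces a pooled 768-dimensional representation of the raw text, and a linear layer maps it to three class logits, with softmax and cross-entropy applied through the loss. This is a minimal illustration under these assumptions, not the exact implementation released on GitHub:\n\nimport torch.nn as nn\nfrom transformers import AutoModel\n\nclass HopeSpeechClassifier(nn.Module):\n    def __init__(self, model_id='bert-base-multilingual-cased', num_classes=3):\n        super().__init__()\n        self.encoder = AutoModel.from_pretrained(model_id)\n        # hidden_size is 768 for the BERT-base style encoders used here.\n        self.classifier = nn.Linear(self.encoder.config.hidden_size, num_classes)\n\n    def forward(self, input_ids, attention_mask):\n        out = self.encoder(input_ids=input_ids, attention_mask=attention_mask)\n        # Pooled vector representation of the whole input text.\n        return self.classifier(out.pooler_output)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "System Description",

"sec_num": "3"

},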
|
{ |
|
"text": "We have experimented with various multilingual transformer-based models that were trained on multiple languages together. Multilingual models pretrained specifically for Indian languages were also employed for the given shared tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments and Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We report the performances of our system for the four multilingual transformer models. These four models are multilingual BERT, XLM-RoBERTa, IndicBERT, and MuRIL. Among these fours, the last two models were pre-trained specifically for Indian languages. IndicBERT is an ALBERT based model that was pre-trained on 12 major Indian languages and MuRIL (Multilingual Representations for Indian Languages) is a BERT-based model pretrained on 17 Indian languages and their transliterated counterparts. After extensive experimentation, we finalized hyper-parameters which worked well for all four transformer models. To have a fair comparison between all models, it was also important to not have a large variation in hyper-parameters and training methodology since all the models have a common base transformer architecture as BERT. Following hyper-parameters were finally used to train the system in all four experimental setups:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setups", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Loss function: Cross-Entropy ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Setups", |
|
"sec_num": "4.1" |
|
}, |
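
{

"text": "The sketch below shows the end-to-end training setup with the cross-entropy loss named in the list above. Only the loss function is taken from the paper; the optimizer, learning rate, batch size, and epoch count here are illustrative placeholders, not the tuned values:\n\nimport torch\nfrom torch.utils.data import DataLoader\n\ndef train(model, dataset, epochs=3, lr=2e-5, batch_size=16):\n    # Placeholder hyper-parameters; see the caveat in the lead-in above.\n    loss_fn = torch.nn.CrossEntropyLoss()\n    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)\n    loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n    model.train()\n    for _ in range(epochs):\n        for input_ids, attention_mask, labels in loader:\n            optimizer.zero_grad()\n            loss = loss_fn(model(input_ids, attention_mask), labels)\n            loss.backward()\n            optimizer.step()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Setups",

"sec_num": "4.1"

},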
|
{ |
|
"text": "In this section, we present our results on the test set for the four experimental setups discussed above. In line with shared task organizers, we also used weighted F1-score as an evaluation metric. In Table 2 , the performance of our system using above mentioned four transformer model as the base is reported. This performance is on the test set. XLM-RoBERTa was the best performing multilingual model for all the three language categories. Multilingual models trained specifically for Indian languages performed at par if not better than generic multilingual models. Among the three language categories, all of our systems achieved the best scores for English and worst scores for Tamil. The high F1-score for the English language category can be attributed to the heavily skewed distribution of the dataset samples across three classes. Since Label 1 was very dominant, it increased the weighted F1-score for the English language.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 202, |
|
"end": 209, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.2" |
|
}, |
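
{

"text": "The weighted F1-score used above can be computed with scikit-learn, as in this small sketch; the toy labels illustrate how a dominant class can inflate the weighted score, as discussed for English:\n\nfrom sklearn.metrics import f1_score\n\n# Toy example: class 0 dominates, and the classifier always predicts it.\ny_true = [0] * 90 + [1] * 9 + [2] * 1\ny_pred = [0] * 100\n\n# Per-class F1 is weighted by class support, so the score stays high\n# (about 0.85 here) even though classes 1 and 2 are never predicted.\nprint(f1_score(y_true, y_pred, average='weighted'))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Results",

"sec_num": "4.2"

},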
|
{ |
|
"text": "We submitted our best performing system to the competition and obtained encouraging results. In the final leader board published by organizers, our team was placed at 2 nd , 2 nd , and 7 th rank for English, Malayalam, and Tamil language categories. After extensive experimentation and hyperparameter tuning we were able to improve our scores published on the competition leader-board for Tamil and Malayalam languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In this paper, we have described our system submitted for the shared task in Hope Speech detection and reported its performance. We have extensively experimented with the possibility of employing multilingual models for given tasks. We report the performance of four different multilingual models which include transformer models specifically trained for Indian languages. Our experiments showed that transformer models pre-trained on a smaller set of languages have the potential to perform at par or better than models trained on hundreds of languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In the future, we plan to introduce some linguistic-based features and combined them with multilingual transformer representation to improve the overall effectiveness of the system. We also plan to systematically study the effect of abolishing the 'Not in intended language' class from the dataset as it opens up the opportunity to have a single system or deep learning model for all three", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "5" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank the entire organizing team of LT-EDI 2021-EACL 2021 for providing us the platform and opportunity to work on such a problem of potentially high social impact.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
}
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "i am borrowing ya mixing?\" an analysis of english-hindi code mixing in facebook", |
|
"authors": [ |
|
{ |
|
"first": "Kalika", |
|
"middle": [], |
|
"last": "Bali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jatin", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Monojit", |
|
"middle": [], |
|
"last": "Choudhury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yogarshi", |
|
"middle": [], |
|
"last": "Vyas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the First Workshop on Computational Approaches to Code Switching", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "116--126", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kalika Bali, Jatin Sharma, Monojit Choudhury, and Yo- garshi Vyas. 2014. \"i am borrowing ya mixing?\" an analysis of english-hindi code mixing in facebook. In Proceedings of the First Workshop on Computa- tional Approaches to Code Switching, pages 116- 126.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "DCU-UVT: Word-level language classification with code-mixed data", |
|
"authors": [ |
|
{ |
|
"first": "Utsab", |
|
"middle": [], |
|
"last": "Barman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joachim", |
|
"middle": [], |
|
"last": "Wagner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Grzegorz", |
|
"middle": [], |
|
"last": "Chrupa\u0142a", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennifer", |
|
"middle": [], |
|
"last": "Foster", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the First Workshop on Computational Approaches to Code Switching", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "127--132", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/W14-3915" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Utsab Barman, Joachim Wagner, Grzegorz Chrupa\u0142a, and Jennifer Foster. 2014. DCU-UVT: Word-level language classification with code-mixed data. In Proceedings of the First Workshop on Computa- tional Approaches to Code Switching, pages 127- 132, Doha, Qatar. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "HopeEDI: A multilingual hope speech detection dataset for equality, diversity, and inclusion", |
|
"authors": [ |
|
{ |
|
"first": "Chakravarthi", |
|
"middle": [], |
|
"last": "Bharathi Raja", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--53", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi. 2020. HopeEDI: A mul- tilingual hope speech detection dataset for equality, diversity, and inclusion. In Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Me- dia, pages 41-53, Barcelona, Spain (Online). Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A sentiment analysis dataset for codemixed Malayalam-English", |
|
"authors": [ |
|
{

"first": "Bharathi Raja",

"middle": [],

"last": "Chakravarthi",

"suffix": ""

},

{

"first": "Navya",

"middle": [],

"last": "Jose",

"suffix": ""

},

{

"first": "Shardul",

"middle": [],

"last": "Suryawanshi",

"suffix": ""

},

{

"first": "Elizabeth",

"middle": [],

"last": "Sherly",

"suffix": ""

},

{

"first": "John",

"middle": [

"Philip"

],

"last": "McCrae",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--184", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, Navya Jose, Shardul Suryawanshi, Elizabeth Sherly, and John Philip Mc- Crae. 2020. A sentiment analysis dataset for code- mixed Malayalam-English. In Proceedings of the 1st Joint Workshop on Spoken Language Technolo- gies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL), pages 177-184, Marseille, France. European Language Resources association.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Findings of the shared task on Hope Speech Detection for Equality, Diversity, and Inclusion", |
|
"authors": [ |
|
{

"first": "Bharathi Raja",

"middle": [],

"last": "Chakravarthi",

"suffix": ""

},

{

"first": "Vigneshwaran",

"middle": [],

"last": "Muralidaran",

"suffix": ""

}
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi and Vigneshwaran Mural- idaran. 2021. Findings of the shared task on Hope Speech Detection for Equality, Diversity, and Inclu- sion. In Proceedings of the First Workshop on Lan- guage Technology for Equality, Diversity and Inclu- sion. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Unsupervised cross-lingual representation learning at scale", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kartikay", |
|
"middle": [], |
|
"last": "Khandelwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishrav", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Wenzek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Guzm\u00e1n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8440--8451", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.747" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, Edouard Grave, Myle Ott, Luke Zettle- moyer, and Veselin Stoyanov. 2020. Unsupervised cross-lingual representation learning at scale. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 8440- 8451, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Uncovering code-mixed challenges: A framework for linguistically driven question generation and neural based question answering", |
|
"authors": [ |
|
{ |
|
"first": "Deepak", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pabitra", |
|
"middle": [], |
|
"last": "Lenka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asif", |
|
"middle": [], |
|
"last": "Ekbal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 22nd Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "119--130", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deepak Gupta, Pabitra Lenka, Asif Ekbal, and Pushpak Bhattacharyya. 2018. Uncovering code-mixed chal- lenges: A framework for linguistically driven ques- tion generation and neural based question answering. In Proceedings of the 22nd Conference on Computa- tional Natural Language Learning, pages 119-130.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "IndicNLPSuite: Monolingual corpora, evaluation benchmarks and pre-trained multilingual language models for Indian languages", |
|
"authors": [ |
|
{ |
|
"first": "Divyanshu", |
|
"middle": [], |
|
"last": "Kakwani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anoop", |
|
"middle": [], |
|
"last": "Kunchukuttan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Satish", |
|
"middle": [], |
|
"last": "Golla", |
|
"suffix": "" |
|
}, |
|
{

"first": "Gokul",

"middle": [],

"last": "N.C.",

"suffix": ""

},

{

"first": "Avik",

"middle": [],

"last": "Bhattacharyya",

"suffix": ""

},

{

"first": "Mitesh",

"middle": [

"M."

],

"last": "Khapra",

"suffix": ""

},

{

"first": "Pratyush",

"middle": [],

"last": "Kumar",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4948--4961", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.findings-emnlp.445" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Divyanshu Kakwani, Anoop Kunchukuttan, Satish Golla, Gokul N.C., Avik Bhattacharyya, Mitesh M. Khapra, and Pratyush Kumar. 2020. IndicNLPSuite: Monolingual corpora, evaluation benchmarks and pre-trained multilingual language models for Indian languages. In Findings of the Association for Com- putational Linguistics: EMNLP 2020, pages 4948- 4961, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Comparative studies of detecting abusive language on twitter", |
|
"authors": [ |
|
{ |
|
"first": "Younghun", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Seunghyun", |
|
"middle": [], |
|
"last": "Yoon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyomin", |
|
"middle": [], |
|
"last": "Jung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2nd Workshop on Abusive Language Online (ALW2)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "101--106", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-5113" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Younghun Lee, Seunghyun Yoon, and Kyomin Jung. 2018. Comparative studies of detecting abusive lan- guage on twitter. In Proceedings of the 2nd Work- shop on Abusive Language Online (ALW2), pages 101-106, Brussels, Belgium. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Kashmir: A computational analysis of the voice of peace", |
|
"authors": [ |
|
{ |
|
"first": "Shriphani", |
|
"middle": [], |
|
"last": "Palakodety", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashiqur", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Khudabukhsh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shriphani Palakodety, Ashiqur R. KhudaBukhsh, and Jaime G. Carbonell. 2019. Kashmir: A compu- tational analysis of the voice of peace. CoRR, abs/1909.12940.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "How multilingual is multilingual BERT?", |
|
"authors": [ |
|
{ |
|
"first": "Telmo", |
|
"middle": [], |
|
"last": "Pires", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eva", |
|
"middle": [], |
|
"last": "Schlinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Garrette", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4996--5001", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1493" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Telmo Pires, Eva Schlinger, and Dan Garrette. 2019. How multilingual is multilingual BERT? In Pro- ceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 4996- 5001, Florence, Italy. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "A survey on hate speech detection using natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Schmidt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Wiegand", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Fifth International Workshop on Natural Language Processing for Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--10", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-1101" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anna Schmidt and Michael Wiegand. 2017. A survey on hate speech detection using natural language pro- cessing. In Proceedings of the Fifth International Workshop on Natural Language Processing for So- cial Media, pages 1-10, Valencia, Spain. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Hate speech detection: A solved problem? the challenging case of long tail on twitter", |
|
"authors": [ |
|
{ |
|
"first": "Ziqi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ziqi Zhang and Lei Luo. 2018. Hate speech detection: A solved problem? the challenging case of long tail on twitter. CoRR, abs/1803.03662.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "Label 1: Hope Speech \u2022 Label 2: Not hope speech \u2022 Label 3: Not in intended language", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"text": "Architectural flowgraph of the our system", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"text": "Performance (Weighted F1-score) of our system on test set for various multilingual transformer models", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |