|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:12:06.285508Z" |
|
}, |
|
"title": "IIITT@LT-EDI-EACL2021-Hope Speech Detection: There is always Hope in Transformers", |
|
"authors": [ |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Puranik", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Information Technology Tiruchirappalli", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Adeep", |
|
"middle": [], |
|
"last": "Hande", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Information Technology Tiruchirappalli", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Priyadarshini", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "ULTRA Arts and Science College", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sajeetha", |
|
"middle": [], |
|
"last": "Thavareesan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Eastern University", |
|
"location": { |
|
"country": "Sri Lanka" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Bharathi", |
|
"middle": [ |
|
"Raja" |
|
], |
|
"last": "Chakravarthi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National University of Ireland", |
|
"location": { |
|
"settlement": "Galway" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "In a world filled with serious challenges like climate change, religious and political conflicts, global pandemics, terrorism, and racial discrimination, an internet full of hate speech, abusive and offensive content is the last thing we desire for. In this paper, we work to identify and promote positive and supportive content on these platforms. We work with several transformer-based models to classify social media comments as hope speech or nothope speech in English, Malayalam and Tamil languages. This paper portrays our work for the Shared Task on Hope Speech Detection for Equality, Diversity, and Inclusion at LT-EDI 2021-EACL 2021. The codes for our best submission can be viewed 1 .", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "In a world filled with serious challenges like climate change, religious and political conflicts, global pandemics, terrorism, and racial discrimination, an internet full of hate speech, abusive and offensive content is the last thing we desire for. In this paper, we work to identify and promote positive and supportive content on these platforms. We work with several transformer-based models to classify social media comments as hope speech or nothope speech in English, Malayalam and Tamil languages. This paper portrays our work for the Shared Task on Hope Speech Detection for Equality, Diversity, and Inclusion at LT-EDI 2021-EACL 2021. The codes for our best submission can be viewed 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Social Media has inherently changed the way people interact and carry on with their everyday lives as people using the internet (Jose et al., 2020; . Due to the vast amount of data being available on social media applications such as YouTube, Facebook, an Twitter it has resulted in people stating their opinions in the form of comments that could imply hate or negative sentiment towards an individual or a community (Chakravarthi et al., 2020c; Mandl et al., 2020) . This results in people feeling hostile about certain posts and thus feeling very hurt (Bhardwaj et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 147, |
|
"text": "(Jose et al., 2020;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 418, |
|
"end": 446, |
|
"text": "(Chakravarthi et al., 2020c;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 447, |
|
"end": 466, |
|
"text": "Mandl et al., 2020)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 555, |
|
"end": 578, |
|
"text": "(Bhardwaj et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Being a free platform, social media runs on user-generated content. With people from multifarious backgrounds present, it creates a rich social structure (Kapoor et al., 2018) and has become an exceptional source of information. It has laid it's roots so deeply into the lives of people that they count on it for their every need. Regardless, this tends to mislead people in search of credible information. Certain individuals or ethnic groups also fall prey to people utilizing these platforms to foster destructive or harmful behaviour which is a common scenario in cyberbullying (Abaido, 2020).", |
|
"cite_spans": [ |
|
{ |
|
"start": 154, |
|
"end": 175, |
|
"text": "(Kapoor et al., 2018)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The earliest inscription in India dated from 580 BCE was the Tamil inscription in pottery and then, the Asoka inscription in Prakrit, Greek and Aramaic dating from 260 BCE. Thunchaththu Ramanujan Ezhuthachan split Malayalam from Tamil after the 15th century CE by using Pallava Grantha script to write religious texts. Pallava Grantha was used in South India to write Sanskrit and foreign words in Tamil literature. Modern Tamil and Malayalam have their own script. However, people use the Latin script to write on social media (Chakravarthi et al., 2018 (Chakravarthi et al., , 2019 Chakravarthi, 2020b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 528, |
|
"end": 554, |
|
"text": "(Chakravarthi et al., 2018", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 555, |
|
"end": 583, |
|
"text": "(Chakravarthi et al., , 2019", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 584, |
|
"end": 604, |
|
"text": "Chakravarthi, 2020b)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The automatic detection of hateful, offensive, and unwanted language related to events and subjects on gender, religion, race or ethnicity in social media posts is very much necessary (Rizwan et al., 2020; Ghanghor et al., 2021a,b) . Such harmful content could spread, stimulate, and vindicate hatred, outrage, and prejudice against the targeted users. Removing such comments was never an option as it suppresses the freedom of speech of the user and it is highly unlikely to stop the person from posting more. In fact, he/she/they would be prompted to post more of such comments 2 (Yasaswini et al., 2021; Hegde et al., 2021) . This brings us to our goal to spread positivism and hope and identify such posts to strengthen an openminded, tolerant, and unprejudiced society. Badjatiya et al. (2017) . Hope is support, reassurance or any kind of positive reinforcement at the time of crisis (Chakravarthi, 2020a). Palakodety et al. (2020) identifies the need for the automatic detection of content that can eliminate hostility and bring about a sense of hope during times of wrangling and brink of a war between nations. There have also been works to identify hate speech in multilingual (Aluru et al., 2020) and code-mixed data in Tamil, Malayalam, and Kannada language (Chakravarthi et al., 2020b,a; Hande et al., 2020) . However, there have been very fewer works in Hope speech detection for Indian languages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 184, |
|
"end": 205, |
|
"text": "(Rizwan et al., 2020;", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 206, |
|
"end": 231, |
|
"text": "Ghanghor et al., 2021a,b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 582, |
|
"end": 606, |
|
"text": "(Yasaswini et al., 2021;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 607, |
|
"end": 626, |
|
"text": "Hegde et al., 2021)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 775, |
|
"end": 798, |
|
"text": "Badjatiya et al. (2017)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 913, |
|
"end": 937, |
|
"text": "Palakodety et al. (2020)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 1270, |
|
"end": 1300, |
|
"text": "(Chakravarthi et al., 2020b,a;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1301, |
|
"end": 1320, |
|
"text": "Hande et al., 2020)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The dataset is provided by (Chakravarthi, 2020a) (Chakravarthi and Muralidaran, 2021) and contains 59,354 comments from the famous online video sharing platform YouTube out of which 28,451 are in English, 20,198 in Tamil, and 10,705 comments are in Malayalam (Table 2 ) which can be classified as Hope speech, not hope speech and other languages. This dataset is split into train (80%), development (10%) and test (10%) dataset (Table 3) .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 259, |
|
"end": 267, |
|
"text": "(Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 428, |
|
"end": 437, |
|
"text": "(Table 3)", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Subjects like hope speech might raise confusions and disagreements between annotators belonging to different groups. The dataset was annotated by a minimum of three annotators and the inter-annotator agreement was determined using Krippendorff's alpha (krippendorff, 2011 ", |
|
"cite_spans": [ |
|
{ |
|
"start": 252, |
|
"end": 271, |
|
"text": "(krippendorff, 2011", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "3" |
|
}, |
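As a side illustration (not part of the original paper), the Krippendorff's alpha statistic mentioned above can be computed with the krippendorff Python package; the small annotation matrix below is a made-up toy example, not the actual annotation data.

```python
import numpy as np
import krippendorff

# rows = annotators, columns = items; values are label ids, np.nan marks a missing rating
ratings = np.array([
    [0, 1, 0, 2, 1],
    [0, 1, 0, 2, 0],
    [0, 1, np.nan, 2, 1],
], dtype=float)

alpha = krippendorff.alpha(reliability_data=ratings, level_of_measurement="nominal")
print(f"Krippendorff's alpha: {alpha:.3f}")
```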
|
{ |
|
"text": "In this section, we give a detailed explanation of the experimental conditions upon which the models are developed. 4.1 Architecture", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiment Setup", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The dense layers used in CNN (convolutional neural networks) connects all layers in the next layer with each other in a feed-forward fashion (Huang et al., 2018) . Though they have the same formulae as the linear layers i.e. wx+b, the output is passed through an activation function which is a non-linear function. We implemented our models with 2 dense layers, rectified linear units (ReLU) (Agarap, 2019) as the activation function and dropout of 0.4.", |
|
"cite_spans": [ |
|
{ |
|
"start": 141, |
|
"end": 161, |
|
"text": "(Huang et al., 2018)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dense", |
|
"sec_num": "4.1.1" |
|
}, |
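A minimal PyTorch sketch of a classification head along the lines described above; the two dense layers, ReLU activation, and 0.4 dropout come from the text, while the hidden size and number of classes are illustrative assumptions.

```python
import torch
import torch.nn as nn

class DenseHead(nn.Module):
    """Two dense layers with ReLU and dropout 0.4, as in Section 4.1.1."""

    def __init__(self, hidden_size: int = 768, num_classes: int = 3, dropout: float = 0.4):
        super().__init__()
        self.fc1 = nn.Linear(hidden_size, hidden_size)  # linear map wx + b
        self.act = nn.ReLU()                            # non-linear activation
        self.drop = nn.Dropout(dropout)
        self.fc2 = nn.Linear(hidden_size, num_classes)  # second dense layer -> class logits

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.fc2(self.drop(self.act(self.fc1(x))))

# usage: logits = DenseHead()(torch.randn(8, 768))  # batch of 8 pooled encoder outputs
```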
|
{ |
|
"text": "Bidirectional LSTM or biLSTM is a sequence processing model . It uses both the future and past input features at a time as it contains two LSTM's, one taking input in the forward direction and another in the backward direction . The backward and forward pass through the unfolded network just like any regular network. However, BiLSTM requires us to unfold the hidden states for every time step. It produces a drastic increase in the size of information being fed thus, improving the context available (Huang et al., 2015) . Refer Table 4 for the parameters used in the BiLSTM model. The pretrained BERT Multilingual model bertbase-multilingual-uncased (Pires et al., 2019) from Huggingface 4 (Wolf et al., 2020) is executed in PyTorch (Paszke et al., 2019) . It consists of 12layers, 768 hidden, 12 attention heads and 110M parameters which are fine-tuned by concatenating with bidirectional LSTM layers. The BiLSTM layers take the embeddings from the transformer encoder as the input which increases the information being fed, which in turn betters the context and accuracy. Adam algorithm with weight decay fix is used as an optimizer. We train our models with the default learning rate of 2e \u2212 5. We use the cross-entropy loss as it is a multilabel classification task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 502, |
|
"end": 522, |
|
"text": "(Huang et al., 2015)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 653, |
|
"end": 673, |
|
"text": "(Pires et al., 2019)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 693, |
|
"end": 712, |
|
"text": "(Wolf et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 736, |
|
"end": 757, |
|
"text": "(Paszke et al., 2019)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 531, |
|
"end": 538, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Bidirectional LSTM", |
|
"sec_num": "4.1.2" |
|
}, |
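A minimal sketch of the mBERT + BiLSTM setup described above, assuming the Hugging Face transformers and PyTorch APIs; bert-base-multilingual-uncased, the AdamW optimizer, the 2e-5 learning rate, and the cross-entropy loss come from the text, while the BiLSTM hidden size and the pooling choice are illustrative assumptions.

```python
import torch
import torch.nn as nn
from transformers import AutoModel, AutoTokenizer

class BertBiLSTMClassifier(nn.Module):
    """bert-base-multilingual-uncased encoder followed by a BiLSTM and a linear classifier."""

    def __init__(self, num_classes: int = 3, lstm_hidden: int = 256):
        super().__init__()
        self.encoder = AutoModel.from_pretrained("bert-base-multilingual-uncased")
        self.bilstm = nn.LSTM(input_size=768, hidden_size=lstm_hidden,
                              batch_first=True, bidirectional=True)
        self.classifier = nn.Linear(2 * lstm_hidden, num_classes)

    def forward(self, input_ids, attention_mask):
        # token embeddings from the transformer encoder are fed to the BiLSTM
        hidden = self.encoder(input_ids=input_ids,
                              attention_mask=attention_mask).last_hidden_state
        lstm_out, _ = self.bilstm(hidden)
        return self.classifier(lstm_out[:, 0, :])  # representation at the [CLS] position

tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-uncased")
model = BertBiLSTMClassifier()
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)  # Adam with weight decay fix
loss_fn = nn.CrossEntropyLoss()
```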
|
{ |
|
"text": "It has a similar architecture as that of BERT but due to memory limitations and longer training periods, ALBERT or A Lite BERT introduces two parameter reduction techniques (Chiang et al., 2020) . ALBERT distinguishes itself from BERT with features like factorization of the embedding matrix, cross-layer parameter sharing and intersentence coherence prediction. We implemented albert-base-v2 pretrained model with 12 repeating layers, 768 hidden, 12 attention heads, and 12M parameters for the English dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 173, |
|
"end": 194, |
|
"text": "(Chiang et al., 2020)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ALBERT", |
|
"sec_num": "4.2.2" |
|
}, |
|
{ |
|
"text": "DistilBERT is a distilled version of BERT to make it smaller, cheaper, faster, and lighter (Sanh et al., 2019) . With up to 40% less number of parameters than bert-base-uncased, it promises to run 60% faster while preserving 97% of it's performance. We employ distilbert-baseuncased for the English dataset and distilbertbase-multilingual-cased for the Tamil and Malayalam datasets. Both models have 6-layers, 768hidden, 12-heads and while the former has 66M parameters, the latter has 134M parameters.", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 110, |
|
"text": "(Sanh et al., 2019)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DistilBERT", |
|
"sec_num": "4.2.3" |
|
}, |
|
{ |
|
"text": "A Robustly optimized BERT Pretraining Approach (RoBERTa) is a modification of BERT (Liu et al., 2020) . RoBERTa is trained for longer, with larger batches on 1000% more data than BERT. The Next Sentence Prediction (NSP) task employed in BERT's pre-training is removed and dynamic masking during training is introduced. It's additionally trained on a 76 GB large new dataset (CC-NEWS). roberta-base follows the BERT architecture but has 125M parameters and is used for the English dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 101, |
|
"text": "(Liu et al., 2020)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "RoBERTa", |
|
"sec_num": "4.2.4" |
|
}, |
|
{ |
|
"text": "CharacterBERT (CharBERT) (El Boukkouri et al., 2020) is a variant of BERT (Devlin et al., 2019) which uses CharacterCNN (Zhang et al., 2015) like ELMo (Peters et al., 2018) , instead of relying on WordPieces (Wu et al., 2016) . CharacterBERT is highly desired as it produces a single embedding for any input token which is more suitable than having an inconstant number of WordPiece vectors for each token. It furthermore replaces BERT from domain-specific wordpiece vocabulary and enables it to be more robust to noisy inputs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 95, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 120, |
|
"end": 140, |
|
"text": "(Zhang et al., 2015)", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 151, |
|
"end": 172, |
|
"text": "(Peters et al., 2018)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 208, |
|
"end": 225, |
|
"text": "(Wu et al., 2016)", |
|
"ref_id": "BIBREF46" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CharacterBERT", |
|
"sec_num": "4.2.5" |
|
}, |
|
{ |
|
"text": "We use the pretrained model generalcharacter-bert 5 which was pretrained on the same corpus of that of BERT, but with a different tokenization approach. A CharacterCNN module is used that produces word-level contextual representations and it can be re-adapted to any domain without needing to worry about the suitability of any wordpieces (Figure 1 ). This approach helps for superior robustness by approaching the character of the inputs.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 339, |
|
"end": 348, |
|
"text": "(Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "CharacterBERT", |
|
"sec_num": "4.2.5" |
|
}, |
|
{ |
|
"text": "Universal Language Model Fine-tuning, or ULM-FiT, was a transfer learning method introduced to perform various NLP tasks (Howard and Ruder, 2018) . Training of ULMFiT involves pretraining the general language model on a Wikipedia-based corpus, fine-tuning the language model on a target text, and finally, fine-tuning the classifier on the target task. Discriminative fine-tuning is applied to fine-tune the model as different layers capture the different extent of information. It is then trained using the learning rate scheduling strategy, Slanted triangular learning rates (STLR), where the learning rate increases initially and then drops. Gradual unfreezing is used to fine-tune the target classifier rather than training all layers at once, which might lead to catastrophic forgetting.", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 145, |
|
"text": "(Howard and Ruder, 2018)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ULMFiT", |
|
"sec_num": "4.2.6" |
|
}, |
|
{ |
|
"text": "Pretrained model, AWD-LSTM (Merity et al., 2017) with 3 layers and 1150 hidden activation per layer and an embedding size of 400 is used as the language model for the English dataset. Adam optimizer with \u03b2 1 = 0.9 and \u03b2 2 = 0.99 is implemented. Later, the start and end learning rates are set to 1e-8 and 1e-2 respectively and fine-tuned by gradually unfreezing the layers to produce better results. Dropouts with a multiplier of 0.5 were applied.", |
|
"cite_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 48, |
|
"text": "(Merity et al., 2017)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ULMFiT", |
|
"sec_num": "4.2.6" |
|
}, |
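A rough fastai-style sketch of the ULMFiT recipe described above; AWD-LSTM, the 0.5 dropout multiplier, the 1e-8 to 1e-2 learning-rate range, and gradual unfreezing come from the text, while the file name, column names, and cycle lengths are hypothetical.

```python
import pandas as pd
from fastai.text.all import *

df = pd.read_csv("english_hope_train.csv")  # hypothetical file with "text" and "label" columns
dls = TextDataLoaders.from_df(df, text_col="text", label_col="label", valid_pct=0.1)

# AWD-LSTM backbone with a dropout multiplier of 0.5
learn = text_classifier_learner(dls, AWD_LSTM, drop_mult=0.5,
                                metrics=F1Score(average="weighted"))

# gradual unfreezing with discriminative learning rates between 1e-8 and 1e-2
learn.fit_one_cycle(1, 1e-2)
learn.freeze_to(-2)
learn.fit_one_cycle(1, slice(1e-4, 1e-2))
learn.unfreeze()
learn.fit_one_cycle(2, slice(1e-8, 1e-2))
```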
|
{ |
|
"text": "XLM-RoBERTa (Ruder et al., 2019 ) is a pretrained multilingual language model to execute diverse NLP transfer tasks. It's trained on over 2TB of filtered CommonCrawl data in 100 different languages. It was an update to the XLM-100 model (Lample and Conneau, 2019) but with increased training data. As it shares the same train-ing routine with the RoBERTa model, \"RoBERTa\" was included in the name.", |
|
"cite_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 31, |
|
"text": "(Ruder et al., 2019", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 237, |
|
"end": 263, |
|
"text": "(Lample and Conneau, 2019)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "XLM-RoBERTa", |
|
"sec_num": "4.2.7" |
|
}, |
|
{ |
|
"text": "xlm-roberta-base with 12 layers, 768 hidden, 12 heads, and 270M parameters were used. It is fine-tuned for classifying code-mixed Tamil and Malayalam datasets. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "XLM-RoBERTa", |
|
"sec_num": "4.2.7" |
|
}, |
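A minimal sketch of loading xlm-roberta-base for three-way classification with the Hugging Face transformers API; the sample comment and the label mapping are made up for illustration.

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = AutoModelForSequenceClassification.from_pretrained("xlm-roberta-base", num_labels=3)

# a made-up code-mixed comment written in the Latin script
batch = tokenizer(["super video bro, nalla irukku, hope for the best"],
                  return_tensors="pt", truncation=True, padding=True)
with torch.no_grad():
    logits = model(**batch).logits
predicted_class = logits.argmax(dim=-1).item()  # assumed mapping: 0 hope, 1 not hope, 2 other language
```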
|
{ |
|
"text": "IndicBERT (Kakwani et al., 2020 ) is an ALBERT model pretrained on 12 major Indian languages with a corpus of over 9 billion tokens. It performs as well as other multilingual models with considerably fewer parameters for various NLP tasks. It's trained by choosing a single model for all languages to learn the relationship between languages and understand code-mixed data. ai4bharat/indic-bert model was employed for the Tamil and Malayalam task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 31, |
|
"text": "(Kakwani et al., 2020", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "IndicBERT", |
|
"sec_num": "4.2.9" |
|
}, |
|
{ |
|
"text": "In this section, we have compared the F1-scores of our transformer-based models to successfully classify social media comments/posts into hope speech or not hope speech and detect the usage of other languages if any. We have tabulated the weighted average F1-scores of our various models for validation and test dataset for English, Malayalam and Tamil languages in tables 5, 6 and 7 respectively. Table 5 demonstrates that the character-aware model CharacterBERT performed exceptionally well for the validation dataset. It beat ULMFiT (Howard and Ruder, 2018 ) by a mere difference of 0.0012, but other BERT-based models like BERT (Devlin et al., 2019) with dense and BiLSTM architecture, ALBERT (Chiang et al., 2020) , Distil-BERT (Sanh et al., 2019) and RoBERTa (Liu et al., 2020) by about a percent. This promising result shown by character-bert for the validation dataset made it our best model. Unfortunately, few models managed to perform better than it for the test dataset. The considerable class imbalance of about 2,484 hope to 25,940 not hope comments and the interference of comments in other languages have significantly affected the results.", |
|
"cite_spans": [ |
|
{ |
|
"start": 536, |
|
"end": 559, |
|
"text": "(Howard and Ruder, 2018", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 632, |
|
"end": 653, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 697, |
|
"end": 718, |
|
"text": "(Chiang et al., 2020)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 733, |
|
"end": 752, |
|
"text": "(Sanh et al., 2019)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 765, |
|
"end": 783, |
|
"text": "(Liu et al., 2020)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 398, |
|
"end": 405, |
|
"text": "Table 5", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
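The weighted average F1-scores reported here can be computed with scikit-learn as sketched below; the label arrays are placeholders, not actual predictions.

```python
from sklearn.metrics import classification_report, f1_score

# placeholders for the gold and predicted labels of a test split
y_true = ["hope", "not_hope", "not_hope", "other_lang", "hope"]
y_pred = ["hope", "not_hope", "hope", "other_lang", "not_hope"]

print(f1_score(y_true, y_pred, average="weighted"))
print(classification_report(y_true, y_pred, digits=4))
```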
|
{ |
|
"text": "Similar transformer-based model trained on multilingual data was used to classify Malayalam and Tamil datasets. Models like multilingual BERT, XLM-RoBERTa (Ruder et al., 2019) , MuRIL, IndicBERT 10 and DistilBERT multilingual with both BiLSTM and Dense architectures. mBERT (Multilingual BERT) uncased with BiL-STM concatenated to it outperformed the other models for the Malayalam validation dataset and continued its dominance for the test data as well.", |
|
"cite_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 175, |
|
"text": "(Ruder et al., 2019)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The data distribution for the Tamil dataset seemed a bit balanced with an approximate ratio of 4:5 between hope and not-hope. mBERT cased with BiLSTM architecture appeared to be the best model with an F1-score of 0.6183 for validation but dropped drastically by 8% for the test data. We witnessed a considerable fall in the scores of other models like mBERT and XLM-RoBERTa with linear layers of up to 15%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Multilingual comments experience an enor-mous variety of text as people tend to write in code-mixed data and other non-native scripts which are inclined to be mispredicted. A variation in the concentration of such comments between train, validation and test can result in a fluctuation in the test results. The precision, recall and F1-scores of CharacterBERT, mBERTuncased, and mBERT-cased are tabulated under English, Malayalam, and Tamil respectively, as shown in Table 8 . They were the best performing models on the validation set.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 467, |
|
"end": 474, |
|
"text": "Table 8", |
|
"ref_id": "TABREF11" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "During these unprecedented times, there is a need to detect positive, enjoyable content on social media in order to help people who are combating depression, anxiety, melancholy, etc. This paper presents several methodologies that can detect hope in social media comments. We have traversed through transfer learning of several stateof-the-art transformer models for languages such as English, Tamil, and Malayalam. Due to its superior fine-tuning method, ULMFiT achieves an F1-score of 0.9356 on English data. We observe that mBERT achieves 0.8545 on Malayalam test set and distilmBERT achieves 0.5926 weighted F1-score on Tamil test set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "https://github.com/karthikpuranik11/ Hope-Speech-Detection-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.qs.com/negative-comments-on-socialmedia/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.ala.org/advocacy/ intfreedom/hate(Accessed January 16, 2021)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://huggingface.co/transformers/ pretrained_models.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/helboukkouri/ character-bert", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://tfhub.dev/google/MuRIL/1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://commoncrawl.org/the-data/ 8 http://lotus.kuee.kyoto-u.ac.jp/WAT/ indic-multilingual/index.html 9 https://github.com/ google-research-datasets/dakshina", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://indicnlp.ai4bharat.org/indic-bert/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Cyberbullying on social media platforms among university students in the united arab emirates", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Ghada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Abaido", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "International Journal of Adolescence and Youth", |
|
"volume": "25", |
|
"issue": "1", |
|
"pages": "407--420", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1080/02673843.2019.1669059" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ghada M. Abaido. 2020. Cyberbullying on social media platforms among university students in the united arab emirates. International Journal of Ado- lescence and Youth, 25(1):407-420.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Deep learning using rectified linear units (relu)", |
|
"authors": [ |
|
{ |
|
"first": "Agarap", |
|
"middle": [], |
|
"last": "Abien Fred", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abien Fred Agarap. 2019. Deep learning using recti- fied linear units (relu).", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Deep learning models for multilingual hate speech detection", |
|
"authors": [ |
|
{ |
|
"first": "Binny", |
|
"middle": [], |
|
"last": "Sai Saketh Aluru", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Punyajoy", |
|
"middle": [], |
|
"last": "Mathew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Animesh", |
|
"middle": [], |
|
"last": "Saha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mukherjee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sai Saketh Aluru, Binny Mathew, Punyajoy Saha, and Animesh Mukherjee. 2020. Deep learning models for multilingual hate speech detection.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Deep learning for hate speech detection in tweets", |
|
"authors": [ |
|
{ |
|
"first": "Pinkesh", |
|
"middle": [], |
|
"last": "Badjatiya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shashank", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manish", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vasudeva", |
|
"middle": [], |
|
"last": "Varma", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 26th International Conference on World Wide Web Companion -WWW '17 Companion", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3041021.3054223" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pinkesh Badjatiya, Shashank Gupta, Manish Gupta, and Vasudeva Varma. 2017. Deep learning for hate speech detection in tweets. Proceedings of the 26th International Conference on World Wide Web Com- panion -WWW '17 Companion.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Amitava Das, and Tanmoy Chakraborty. 2020. Hostility detection dataset in hindi", |
|
"authors": [ |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Bhardwaj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shad", |
|
"middle": [], |
|
"last": "Md", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asif", |
|
"middle": [], |
|
"last": "Akhtar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ekbal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohit Bhardwaj, Md Shad Akhtar, Asif Ekbal, Ami- tava Das, and Tanmoy Chakraborty. 2020. Hostility detection dataset in hindi.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "HopeEDI: A multilingual hope speech detection dataset for equality, diversity, and inclusion", |
|
"authors": [ |
|
{ |
|
"first": "Chakravarthi", |
|
"middle": [], |
|
"last": "Bharathi Raja", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Third Workshop on Computational Modeling of People's", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi. 2020a. HopeEDI: A mul- tilingual hope speech detection dataset for equality, diversity, and inclusion. In Proceedings of the Third Workshop on Computational Modeling of People's", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Emotion's in Social Media", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--53", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Opinions, Personality, and Emotion's in Social Me- dia, pages 41-53, Barcelona, Spain (Online). Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Leveraging orthographic information to improve machine translation of under-resourced languages", |
|
"authors": [ |
|
{ |
|
"first": "Chakravarthi", |
|
"middle": [], |
|
"last": "Bharathi Raja", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi. 2020b. Leveraging ortho- graphic information to improve machine translation of under-resourced languages. Ph.D. thesis, NUI Galway.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Improving wordnets for underresourced languages using machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Mihael", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Arcan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mccrae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 9th Global Wordnet Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "77--86", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, Mihael Arcan, and John P. McCrae. 2018. Improving wordnets for under- resourced languages using machine translation. In Proceedings of the 9th Global Wordnet Confer- ence, pages 77-86, Nanyang Technological Univer- sity (NTU), Singapore. Global Wordnet Association.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "WordNet gloss translation for underresourced languages using multilingual neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Mihael", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Arcan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mccrae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Second Workshop on Multilingualism at the Intersection of Knowledge Bases and Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--7", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, Mihael Arcan, and John P. McCrae. 2019. WordNet gloss translation for under- resourced languages using multilingual neural ma- chine translation. In Proceedings of the Second Workshop on Multilingualism at the Intersection of Knowledge Bases and Machine Translation, pages 1-7, Dublin, Ireland. European Association for Ma- chine Translation.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "A sentiment analysis dataset for codemixed Malayalam-English", |
|
"authors": [ |
|
{ |
|
"first": "Navya", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shardul", |
|
"middle": [], |
|
"last": "Jose", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Suryawanshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"Philip" |
|
], |
|
"last": "Sherly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mc-Crae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--184", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, Navya Jose, Shardul Suryawanshi, Elizabeth Sherly, and John Philip Mc- Crae. 2020a. A sentiment analysis dataset for code- mixed Malayalam-English. In Proceedings of the 1st Joint Workshop on Spoken Language Technolo- gies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL), pages 177-184, Marseille, France. European Language Resources association.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Findings of the shared task on Hope Speech Detection for Equality, Diversity, and Inclusion", |
|
"authors": [ |
|
{ |
|
"first": "Vigneshwaran", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Muralidaran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi and Vigneshwaran Mural- idaran. 2021. Findings of the shared task on Hope Speech Detection for Equality, Diversity, and Inclu- sion. In Proceedings of the First Workshop on Lan- guage Technology for Equality, Diversity and Inclu- sion. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Corpus creation for sentiment analysis in code-mixed Tamil-English text", |
|
"authors": [ |
|
{ |
|
"first": "Vigneshwaran", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Muralidaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"Philip" |
|
], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mc-Crae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "202--210", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, Vigneshwaran Murali- daran, Ruba Priyadharshini, and John Philip Mc- Crae. 2020b. Corpus creation for sentiment anal- ysis in code-mixed Tamil-English text. In Pro- ceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced lan- guages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL), pages 202-210, Marseille, France. European Language Resources association.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Overview of the Track on Sentiment Analysis for Dravidian Languages in Code-Mixed Text", |
|
"authors": [ |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vigneshwaran", |
|
"middle": [], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shardul", |
|
"middle": [], |
|
"last": "Muralidaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Navya", |
|
"middle": [], |
|
"last": "Suryawanshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Jose", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Sherly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mccrae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "In Forum for Information Retrieval Evaluation", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "21--24", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3441501.3441515" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, Ruba Priyadharshini, Vigneshwaran Muralidaran, Shardul Suryawanshi, Navya Jose, Elizabeth Sherly, and John P. McCrae. 2020c. Overview of the Track on Sentiment Analy- sis for Dravidian Languages in Code-Mixed Text. In Forum for Information Retrieval Evaluation, FIRE 2020, page 21-24, New York, NY, USA. Associa- tion for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Pretrained language model embryology: The birth of ALBERT", |
|
"authors": [ |
|
{ |
|
"first": "Cheng-Han", |
|
"middle": [], |
|
"last": "Chiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sung-Feng", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hung-Yi", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.553" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cheng-Han Chiang, Sung-Feng Huang, and Hung-yi Lee. 2020. Pretrained language model embryology: The birth of ALBERT. In Proceedings of the 2020", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6813--6828", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Conference on Empirical Methods in Natural Lan- guage Processing (EMNLP), pages 6813-6828, On- line. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "CharacterBERT: Reconciling ELMo and BERT for word-level open-vocabulary representations from characters", |
|
"authors": [ |
|
{ |
|
"first": "Hicham", |
|
"middle": [ |
|
"El" |
|
], |
|
"last": "Boukkouri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olivier", |
|
"middle": [], |
|
"last": "Ferret", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Lavergne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroshi", |
|
"middle": [], |
|
"last": "Noji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierre", |
|
"middle": [], |
|
"last": "Zweigenbaum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun'ichi", |
|
"middle": [], |
|
"last": "Tsujii", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6903--6915", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.coling-main.609" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hicham El Boukkouri, Olivier Ferret, Thomas Lavergne, Hiroshi Noji, Pierre Zweigenbaum, and Jun'ichi Tsujii. 2020. CharacterBERT: Reconciling ELMo and BERT for word-level open-vocabulary representations from characters. In Proceedings of the 28th International Conference on Compu- tational Linguistics, pages 6903-6915, Barcelona, Spain (Online). International Committee on Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A unified deep learning architecture for abuse detection", |
|
"authors": [ |
|
{ |
|
"first": "Antigoni-Maria", |
|
"middle": [], |
|
"last": "Founta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Despoina", |
|
"middle": [], |
|
"last": "Chatzakou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "Kourtellis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Blackburn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antigoni-Maria Founta, Despoina Chatzakou, Nicolas Kourtellis, Jeremy Blackburn, Athena Vakali, and Il- ias Leontiadis. 2018. A unified deep learning archi- tecture for abuse detection.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Ruba Priyadharshini, and Bharathi Raja Chakravarthi. 2021a. IIITK@DravidianLangTech-EACL2021: Offensive Language Identification and Meme Classification in Tamil, Malayalam and Kannada", |
|
"authors": [ |
|
{ |
|
"first": "Parameswari", |
|
"middle": [], |
|
"last": "Nikhil Kumar Ghanghor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sajeetha", |
|
"middle": [], |
|
"last": "Krishnamurthy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Thavareesan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikhil Kumar Ghanghor, Parameswari Krishna- murthy, Sajeetha Thavareesan, Ruba Priyad- harshini, and Bharathi Raja Chakravarthi. 2021a. IIITK@DravidianLangTech-EACL2021: Offensive Language Identification and Meme Classification in Tamil, Malayalam and Kannada. In Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "IIITK@LT-EDI-EACL2021: Hope Speech Detection for Equality, Diversity, and Inclusion in Tamil, Malayalam and English", |
|
"authors": [ |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Nikhil Kumar Ghanghor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prasanna", |
|
"middle": [], |
|
"last": "Ponnusamy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Kumar Kumaresan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sajeetha", |
|
"middle": [], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bharathi Raja", |
|
"middle": [], |
|
"last": "Thavareesan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Chakravarthi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Language Technology for Equality", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikhil Kumar Ghanghor, Rahul Ponnusamy, Prasanna Kumar Kumaresan, Ruba Priyad- harshini, Sajeetha Thavareesan, and Bharathi Raja Chakravarthi. 2021b. IIITK@LT-EDI-EACL2021: Hope Speech Detection for Equality, Diversity, and Inclusion in Tamil, Malayalam and English. In Proceedings of the First Workshop on Language Technology for Equality, Diversity and Inclusion, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "KanCMD: Kannada CodeMixed dataset for sentiment analysis and offensive language detection", |
|
"authors": [ |
|
{ |
|
"first": "Adeep", |
|
"middle": [], |
|
"last": "Hande", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bharathi Raja", |
|
"middle": [], |
|
"last": "Chakravarthi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "54--63", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adeep Hande, Ruba Priyadharshini, and Bharathi Raja Chakravarthi. 2020. KanCMD: Kannada CodeMixed dataset for sentiment analysis and offensive language detection. In Proceedings of the Third Workshop on Computational Modeling of Peo- ple's Opinions, Personality, and Emotion's in Social Media, pages 54-63, Barcelona, Spain (Online). Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "UVCE-IIITT@DravidianLangTech-EACL2021: Tamil Troll Meme Classification: You need to Pay more Attention", |
|
"authors": [ |
|
{ |
|
"first": "Adeep", |
|
"middle": [], |
|
"last": "Siddhanth U Hegde", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Hande", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sajeetha", |
|
"middle": [], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bharathi Raja", |
|
"middle": [], |
|
"last": "Thavareesan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Chakravarthi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siddhanth U Hegde, Adeep Hande, Ruba Priyadharshini, Sajeetha Thavareesan, and Bharathi Raja Chakravarthi. 2021. UVCE- IIITT@DravidianLangTech-EACL2021: Tamil Troll Meme Classification: You need to Pay more Attention. In Proceedings of the First Workshop on Speech and Language Technologies for Dra- vidian Languages. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Universal language model fine-tuning for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Howard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "328--339", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1031" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeremy Howard and Sebastian Ruder. 2018. Universal language model fine-tuning for text classification. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 328-339, Melbourne, Aus- tralia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Bidirectional lstm-crf models for sequence tagging", |
|
"authors": [ |
|
{ |
|
"first": "Zhiheng", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhiheng Huang, Wei Xu, and Kai Yu. 2015. Bidirec- tional lstm-crf models for sequence tagging.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "A Survey of Current Datasets for Code-Switching Research", |
|
"authors": [ |
|
{ |
|
"first": "Navya", |
|
"middle": [], |
|
"last": "Jose", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shardul", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Suryawanshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Sherly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mc-Crae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "2020 6th International Conference on Advanced Computing and Communication Systems (ICACCS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "136--141", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICACCS48705.2020.9074205" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Navya Jose, Bharathi Raja Chakravarthi, Shardul Suryawanshi, Elizabeth Sherly, and John P. Mc- Crae. 2020. A Survey of Current Datasets for Code- Switching Research. In 2020 6th International Con- ference on Advanced Computing and Communica- tion Systems (ICACCS), pages 136-141.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages", |
|
"authors": [ |
|
{ |
|
"first": "Divyanshu", |
|
"middle": [], |
|
"last": "Kakwani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anoop", |
|
"middle": [], |
|
"last": "Kunchukuttan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Satish", |
|
"middle": [], |
|
"last": "Golla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Gokul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Avik", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Mitesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pratyush", |
|
"middle": [], |
|
"last": "Khapra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Findings of EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Divyanshu Kakwani, Anoop Kunchukuttan, Satish Golla, Gokul N.C., Avik Bhattacharyya, Mitesh M. Khapra, and Pratyush Kumar. 2020. IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for In- dian Languages. In Findings of EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Advances in social media research: Past, present and future. Information Systems Frontiers", |
|
"authors": [ |
|
{ |
|
"first": "Kawaljeet", |
|
"middle": [], |
|
"last": "Kapoor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kuttimani", |
|
"middle": [], |
|
"last": "Tamilmani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nripendra", |
|
"middle": [], |
|
"last": "Rana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushp", |
|
"middle": [], |
|
"last": "Patil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yogesh", |
|
"middle": [], |
|
"last": "Dwivedi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sridhar", |
|
"middle": [], |
|
"last": "Nerur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1920, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s10796-017-9810-y" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kawaljeet Kapoor, Kuttimani Tamilmani, Nripendra Rana, Pushp Patil, Yogesh Dwivedi, and Sridhar Nerur. 2018. Advances in social media research: Past, present and future. Information Systems Fron- tiers, 20.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Computing krippendorff's alpha-reliability", |
|
"authors": [], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "klaus krippendorff. 2011. Computing krippendorff's alpha-reliability.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Crosslingual language model pretraining", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample and Alexis Conneau. 2019. Cross- lingual language model pretraining.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Ro{bert}a: A robustly optimized {bert} pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2020. Ro{bert}a: A robustly optimized {bert} pretraining approach.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Overview of the HASOC Track at FIRE 2020: Hate Speech and Offensive Language Identification in Tamil", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Mandl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandip", |
|
"middle": [], |
|
"last": "Modha", |
|
"suffix": "" |
|
}, |
|
{

"first": "Anand",

"middle": [],

"last": "Kumar M",

"suffix": ""

},

{

"first": "Bharathi Raja",

"middle": [],

"last": "Chakravarthi",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "Forum for Information Retrieval Evaluation", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "29--32", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3441501.3441517" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Mandl, Sandip Modha, Anand Kumar M, and Bharathi Raja Chakravarthi. 2020. Overview of the HASOC Track at FIRE 2020: Hate Speech and Of- fensive Language Identification in Tamil, Malay- alam, Hindi, English and German. In Forum for Information Retrieval Evaluation, FIRE 2020, page 29-32, New York, NY, USA. Association for Com- puting Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Regularizing and optimizing lstm language models", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Merity", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Shirish Keskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Merity, Nitish Shirish Keskar, and Richard Socher. 2017. Regularizing and optimizing lstm lan- guage models.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Hope speech detection: A computational analysis of the voice of peace", |
|
"authors": [ |
|
{ |
|
"first": "Shriphani", |
|
"middle": [], |
|
"last": "Palakodety", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ashiqur", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Khudabukhsh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shriphani Palakodety, Ashiqur R. KhudaBukhsh, and Jaime G. Carbonell. 2020. Hope speech detection: A computational analysis of the voice of peace.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Pytorch: An imperative style, high-performance deep learning library", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Paszke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Massa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lerer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Bradbury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gregory", |
|
"middle": [], |
|
"last": "Chanan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Killeen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeming", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Natalia", |
|
"middle": [], |
|
"last": "Gimelshein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luca", |
|
"middle": [], |
|
"last": "Antiga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alban", |
|
"middle": [], |
|
"last": "Desmaison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Kopf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [], |
|
"last": "Devito", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Raison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alykhan", |
|
"middle": [], |
|
"last": "Tejani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sasank", |
|
"middle": [], |
|
"last": "Chilamkurthy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benoit", |
|
"middle": [], |
|
"last": "Steiner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junjie", |
|
"middle": [], |
|
"last": "Bai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soumith", |
|
"middle": [], |
|
"last": "Chintala", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "8026--8037", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Te- jani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 2019. Pytorch: An imperative style, high-performance deep learn- ing library. In Advances in Neural Information Pro- cessing Systems, volume 32, pages 8026-8037. Cur- ran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Deep contextualized word representations", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Neumann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2227--2237", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1202" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. In Proceedings of the 2018 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long Papers), pages 2227-2237, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "How multilingual is multilingual BERT?", |
|
"authors": [ |
|
{ |
|
"first": "Telmo", |
|
"middle": [], |
|
"last": "Pires", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eva", |
|
"middle": [], |
|
"last": "Schlinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Garrette", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4996--5001", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1493" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Telmo Pires, Eva Schlinger, and Dan Garrette. 2019. How multilingual is multilingual BERT? In Pro- ceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 4996- 5001, Florence, Italy. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Named Entity Recognition for Code-Mixed Indian Corpus using Meta Embedding", |
|
"authors": [ |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
}, |
|
{

"first": "Bharathi Raja",

"middle": [],

"last": "Chakravarthi",

"suffix": ""

},

{

"first": "Mani",

"middle": [],

"last": "Vegupatti",

"suffix": ""

},

{

"first": "John",

"middle": [

"P"

],

"last": "McCrae",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "2020 6th International Conference on Advanced Computing and Communication Systems (ICACCS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "68--72", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICACCS48705.2020.9074379" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruba Priyadharshini, Bharathi Raja Chakravarthi, Mani Vegupatti, and John P. McCrae. 2020. Named Entity Recognition for Code-Mixed Indian Corpus using Meta Embedding. In 2020 6th International Conference on Advanced Computing and Communi- cation Systems (ICACCS), pages 68-72.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Hate-speech and offensive language detection in Roman Urdu", |
|
"authors": [ |
|
{ |
|
"first": "Hammad", |
|
"middle": [], |
|
"last": "Rizwan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Muhammad", |
|
"middle": [ |
|
"Haroon" |
|
], |
|
"last": "Shakeel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asim", |
|
"middle": [], |
|
"last": "Karim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2512--2522", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.197" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hammad Rizwan, Muhammad Haroon Shakeel, and Asim Karim. 2020. Hate-speech and offensive lan- guage detection in Roman Urdu. In Proceedings of the 2020 Conference on Empirical Methods in Nat- ural Language Processing (EMNLP), pages 2512- 2522, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Unsupervised cross-lingual representation learning", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vuli\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics: Tutorial Abstracts", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "31--38", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-4007" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Ruder, Anders S\u00f8gaard, and Ivan Vuli\u0107. 2019. Unsupervised cross-lingual representation learning. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics: Tutorial Abstracts, pages 31-38, Florence, Italy. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter", |
|
"authors": [ |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Bidirectional recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Paliwal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "IEEE Transactions on Signal Processing", |
|
"volume": "45", |
|
"issue": "11", |
|
"pages": "2673--2681", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/78.650093" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Schuster and K. K. Paliwal. 1997. Bidirectional re- current neural networks. IEEE Transactions on Sig- nal Processing, 45(11):2673-2681.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Bidirectional recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Paliwal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "IEEE Trans. Signal Process", |
|
"volume": "45", |
|
"issue": "", |
|
"pages": "2673--2681", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Schuster and K. Paliwal. 1997. Bidirectional re- current neural networks. IEEE Trans. Signal Pro- cess., 45:2673-2681.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Thirty years of research into hate speech: topics of interest and their evolution", |
|
"authors": [ |
|
{ |
|
"first": "Alice", |
|
"middle": [], |
|
"last": "Tontodimamma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eugenia", |
|
"middle": [], |
|
"last": "Nissi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Annalina", |
|
"middle": [], |
|
"last": "Sarra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lara", |
|
"middle": [], |
|
"last": "Fontanella", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Scientometrics", |
|
"volume": "126", |
|
"issue": "1", |
|
"pages": "157--179", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alice Tontodimamma, Eugenia Nissi, Annalina Sarra, and Lara Fontanella. 2021. Thirty years of research into hate speech: topics of interest and their evolu- tion. Scientometrics, 126(1):157-179.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Google's neural machine translation system: Bridging the gap between human and machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Norouzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wolfgang", |
|
"middle": [], |
|
"last": "Macherey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Krikun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Klaus", |
|
"middle": [], |
|
"last": "Macherey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Klingner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Apurva", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Gouws", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Kato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Taku", |
|
"middle": [], |
|
"last": "Kudo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Kazawa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Stevens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Kurian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nishant", |
|
"middle": [], |
|
"last": "Patil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
}, |
|
{

"first": "J",

"middle": [],

"last": "Smith",

"suffix": ""

},

{

"first": "Jason",

"middle": [],

"last": "Riesa",

"suffix": ""

},

{

"first": "Alex",

"middle": [],

"last": "Rudnick",

"suffix": ""

},

{

"first": "Oriol",

"middle": [],

"last": "Vinyals",

"suffix": ""

},

{

"first": "G",

"middle": [

"S"

],

"last": "Corrado",

"suffix": ""

},

{

"first": "Macduff",

"middle": [],

"last": "Hughes",

"suffix": ""

},

{

"first": "J",

"middle": [],

"last": "Dean",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "Oriol Vinyals", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y. Wu, Mike Schuster, Z. Chen, Quoc V. Le, Mo- hammad Norouzi, Wolfgang Macherey, M. Krikun, Yuan Cao, Q. Gao, Klaus Macherey, Jeff Klingner, Apurva Shah, M. Johnson, X. Liu, L. Kaiser, S. Gouws, Y. Kato, Taku Kudo, H. Kazawa, K. Stevens, G. Kurian, Nishant Patil, W. Wang, C. Young, J. Smith, Jason Riesa, Alex Rudnick, Oriol Vinyals, G. S. Corrado, Macduff Hughes, and J. Dean. 2016. Google's neural machine translation system: Bridging the gap between human and ma- chine translation. ArXiv, abs/1609.08144.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Sajeetha Thavareesan, and Bharathi Raja Chakravarthi. 2021. IIITT@DravidianLangTech-EACL2021: Transfer Learning for Offensive Language Detection in Dravidian Languages", |
|
"authors": [ |
|
{ |
|
"first": "Konthala", |
|
"middle": [], |
|
"last": "Yasaswini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Puranik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adeep", |
|
"middle": [], |
|
"last": "Hande", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
},

{

"first": "Sajeetha",

"middle": [],

"last": "Thavareesan",

"suffix": ""

},

{

"first": "Bharathi Raja",

"middle": [],

"last": "Chakravarthi",

"suffix": ""

}
|
], |
|
"year": null, |
|
"venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Konthala Yasaswini, Karthik Puranik, Adeep Hande, Ruba Priyadharshini, Sajeetha Thava- reesan, and Bharathi Raja Chakravarthi. 2021. IIITT@DravidianLangTech-EACL2021: Transfer Learning for Offensive Language Detection in Dravidian Languages. In Proceedings of the First Workshop on Speech and Language Technolo- gies for Dravidian Languages. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Character-level convolutional networks for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junbo", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "649--657", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiang Zhang, Junbo Zhao, and Yann LeCun. 2015. Character-level convolutional networks for text clas- sification. In Advances in Neural Information Pro- cessing Systems, volume 28, pages 649-657. Curran Associates, Inc.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Context-independent representations in BERT and CharacterBERT (Source: ElBoukkouri et al. (2020))" |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"content": "<table><tr><td/><td>Tamil</td><td>Not hope</td></tr><tr><td>I can't uninstall mY Pubg</td><td>Tamil</td><td>Not Tamil</td></tr><tr><td>ooororutharum avarude ishtam pole jeevikatte . k.</td><td>Malayalam</td><td>Hope</td></tr><tr><td>Etraem aduthu nilkallae Arunae</td><td>Malayalam</td><td>Not hope</td></tr><tr><td>Phoenix contact me give you're mail I'd I hope I can support you sure!</td><td colspan=\"2\">Malayalam Not Malayalam</td></tr><tr><td colspan=\"2\">Table 1: Examples of hope speech or not hope speech</td><td/></tr><tr><td>2 Related Works</td><td/><td/></tr><tr><td>The need for the segregation of toxic comments</td><td/><td/></tr><tr><td>from social media platforms has been identified</td><td/><td/></tr><tr><td>back in the day. Founta et al. (2018) has tried</td><td/><td/></tr><tr><td>to study the textual properties and behaviour of</td><td/><td/></tr><tr><td>abusive postings on Twitter using a Unified Deep</td><td/><td/></tr><tr><td>Learning Architecture. Hate speech can be classi-</td><td/><td/></tr><tr><td>fied into various categories like hatred against an</td><td/><td/></tr><tr><td>individual or group belonging to a race, religion,</td><td/><td/></tr><tr><td>skin colour, ethnicity, gender, disability, or nation 3</td><td/><td/></tr><tr><td>and there have been studies to observe it's evolu-</td><td/><td/></tr><tr><td>tion in social media over the past thirty years (Ton-</td><td/><td/></tr><tr><td>todimamma et al., 2021). Deep Learning methods</td><td/><td/></tr><tr><td>were used to classify hate speech into racist, sexist</td><td/><td/></tr><tr><td>or neither in</td><td/><td/></tr></table>", |
|
"html": null, |
|
"text": "TextLanguage Label God gave us a choice my choice is to love, I would die for that kid English Hope The Democrats are.Backed powerful rich people like Soros English Not hope ESTE PSIC\u00c3\"PATA MAS\u00c3\"N LUCIFERIANO ES HOMBRE TRANS English Not English Neega podara vedio nalla iruku ana subtitle vainthuchu ahh yella language papaga Tamil Hope Avan matum enkita maatunan... Avana kolla paniduven", |
|
"type_str": "table" |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"content": "<table><tr><td>Class</td><td>English</td><td>Tamil</td><td>Malayalam</td></tr><tr><td>Hope</td><td>2,484</td><td>7,899</td><td>2,052</td></tr><tr><td>Not Hope</td><td>25,940</td><td>9,816</td><td>7,765</td></tr><tr><td>Other lang</td><td>27</td><td>2,483</td><td>888</td></tr><tr><td>Total</td><td>28,451</td><td>20,198</td><td>10,705</td></tr></table>", |
|
"html": null, |
|
"text": "). Refer table 1 for examples of hope speech, not hope speech and other languages for English, Tamil and Malayalam datasets respectively.", |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"content": "<table><tr><td>Split</td><td>English</td><td>Tamil</td><td>Malayalam</td></tr><tr><td>Training</td><td>22,762</td><td>16,160</td><td>8564</td></tr><tr><td>Development</td><td>2,843</td><td>2,018</td><td>1070</td></tr><tr><td>Test</td><td>2,846</td><td>2,020</td><td>1071</td></tr><tr><td>Total</td><td>28,451</td><td>20,198</td><td>10,705</td></tr></table>", |
|
"html": null, |
|
"text": "Classwise Data Distribution", |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "", |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"content": "<table><tr><td>4.2 Embeddings</td></tr><tr><td>4.2.1 BERT</td></tr><tr><td>Bidirectional Encoder Representations from</td></tr><tr><td>Transformers (BERT) (Devlin et al., 2019). The</td></tr><tr><td>multilingual base model is pretrained on the</td></tr><tr><td>top 104 languages of the world on Wikipedia</td></tr><tr><td>(2.5B words) with 110 thousand shared wordpiece</td></tr><tr><td>vocabulary. The input is encoded into vectors with</td></tr><tr><td>BERT's innovation of bidirectionally training the</td></tr><tr><td>language model which catches a deeper context</td></tr><tr><td>and flow of the language. Furthermore, novel</td></tr><tr><td>tasks like Next Sentence Prediction (NSP) and</td></tr><tr><td>Masked Language Modelling (MLM) are used to</td></tr><tr><td>train the model.</td></tr></table>", |
|
"html": null, |
|
"text": "Parameters for the BiLSTM model", |
|
"type_str": "table" |
|
}, |
|
"TABREF7": { |
|
"num": null, |
|
"content": "<table><tr><td>Architecture</td><td colspan=\"3\">Embeddings F1-Score validation F1-Score test</td></tr><tr><td/><td>mbert-uncased</td><td>0.8436</td><td>0.8545</td></tr><tr><td>BiLSTM</td><td>mbert-cased xlm-roberta-base</td><td>0.8280 0.8271</td><td>0.8482 0.8233</td></tr><tr><td/><td>MuRIL</td><td>0.8089</td><td>0.8212</td></tr><tr><td/><td>mbert-uncased</td><td>0.8373</td><td>0.8433</td></tr><tr><td/><td>indic-bert</td><td>0.7719</td><td>0.8264</td></tr><tr><td>Dense</td><td>xlm-roberta-base</td><td>0.7757</td><td>0.7001</td></tr><tr><td/><td>distilmbert-cased</td><td>0.8312</td><td>0.8395</td></tr><tr><td/><td>MuRIL</td><td>0.8023</td><td>0.8187</td></tr></table>", |
|
"html": null, |
|
"text": "Weighted F1-scores of hope speech detection classifier models on English dataset", |
|
"type_str": "table" |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"content": "<table><tr><td>Architecture</td><td colspan=\"3\">Embeddings F1-Score Validation F1-Score test</td></tr><tr><td/><td>mbert-uncased</td><td>0.6124</td><td>0.5601</td></tr><tr><td>BiLSTM</td><td>mbert-cased xlm-roberta-base</td><td>0.6183 0.5472</td><td>0.5297 0.5738</td></tr><tr><td/><td>MuRIL</td><td>0.5802</td><td>0.5463</td></tr><tr><td/><td>mbert-uncased</td><td>0.5916</td><td>0.4473</td></tr><tr><td/><td>mbert-cased</td><td>0.5946</td><td>0.4527</td></tr><tr><td>Dense</td><td>indic-bert xlm-roberta-base</td><td>0.5609 0.5481</td><td>0.5785 0.3936</td></tr><tr><td/><td>distilmbert-cased</td><td>0.6034</td><td>0.5926</td></tr><tr><td/><td>MuRIL</td><td>0.5504</td><td>0.5291</td></tr></table>", |
|
"html": null, |
|
"text": "Weighted F1-scores of hope speech detection classifier model on Malayalam dataset", |
|
"type_str": "table" |
|
}, |
|
"TABREF9": { |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Weighted F1-scores of hope speech detection classifier models on Tamil dataset", |
|
"type_str": "table" |
|
}, |
|
"TABREF11": { |
|
"num": null, |
|
"content": "<table><tr><td>Wikipedia, Common Crawl 7 , PMINDIA 8 and</td></tr><tr><td>Dakshina 9 datasets. MuRIL is trained on trans-</td></tr><tr><td>lation and transliteration segment pairs which give</td></tr><tr><td>an advantage as the transliterated text is very com-</td></tr><tr><td>mon in social media. It is used for the Malayalam</td></tr><tr><td>and Tamil datasets.</td></tr></table>", |
|
"html": null, |
|
"text": "Classification report for our system models based on the results of test set", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |