|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:20:57.812284Z" |
|
}, |
|
"title": "Findings of the Shared Task on Troll Meme Classification in Tamil", |
|
"authors": [ |
|
{ |
|
"first": "Shardul", |
|
"middle": [], |
|
"last": "Suryawanshi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National University of Ireland Galway Galway", |
|
"location": { |
|
"country": "Ireland" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Bharathi", |
|
"middle": [ |
|
"Raja" |
|
], |
|
"last": "Chakravarthi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National University of Ireland Galway Galway", |
|
"location": { |
|
"country": "Ireland" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "The internet has facilitated its user-base with a platform to communicate and express their views without any censorship. On the other hand, this freedom of expression or free speech can be abused by its user or a troll to demean an individual or a group. Demeaning people based on their gender, sexual orientation, religious believes or any other characteristics-trolling-could cause significant distress in the online community. Hence, the content posted by a troll needs to be identified and dealt with before causing any more damage. Amongst all the forms of troll content, memes are most prevalent due to their popularity and ability to propagate across cultures. A troll uses a meme to demean, attack or offend its targetted audience. In this shared task, we provide a resource (TamilMemes) that could be used to train a system capable of identifying a troll meme in the Tamil language. In our TamilMemes dataset, each meme has been categorized into either a \"troll\" or a \"not troll\" class. Along with the meme images, we also provided the Latin transcripted text from memes. We received ten system submissions from the participants, which were evaluated using the weighted average F1-score. The system with the weighted average F1-score of 0.55 secured the first rank.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "The internet has facilitated its user-base with a platform to communicate and express their views without any censorship. On the other hand, this freedom of expression or free speech can be abused by its user or a troll to demean an individual or a group. Demeaning people based on their gender, sexual orientation, religious believes or any other characteristics-trolling-could cause significant distress in the online community. Hence, the content posted by a troll needs to be identified and dealt with before causing any more damage. Amongst all the forms of troll content, memes are most prevalent due to their popularity and ability to propagate across cultures. A troll uses a meme to demean, attack or offend its targetted audience. In this shared task, we provide a resource (TamilMemes) that could be used to train a system capable of identifying a troll meme in the Tamil language. In our TamilMemes dataset, each meme has been categorized into either a \"troll\" or a \"not troll\" class. Along with the meme images, we also provided the Latin transcripted text from memes. We received ten system submissions from the participants, which were evaluated using the weighted average F1-score. The system with the weighted average F1-score of 0.55 secured the first rank.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "We have seen a rise in the usage of memes on the internet. Memes could come in many forms and languages, but we emphasize on the image with text (IWT) memes (Du et al., 2020) in the Tamil language. IWT memes are inherently multimodal as their meaning is not understood when just the image or the text considered in isolation; hence, both image and text should be considered. These memes can propagate and mutate through cultures like the selfish gene (Dawkins, 2016) . Propagation and mutation of memes ensure widespread on the internet; hence they could be weaponized by trolls (Mandl et al., 2020) . Trolls are individuals or users on the internet who tend to attack or offend other individuals or groups in a demeaning manner (Tomaiuolo et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 174, |
|
"text": "(Du et al., 2020)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 451, |
|
"end": 466, |
|
"text": "(Dawkins, 2016)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 579, |
|
"end": 599, |
|
"text": "(Mandl et al., 2020)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 729, |
|
"end": 753, |
|
"text": "(Tomaiuolo et al., 2020)", |
|
"ref_id": "BIBREF45" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A meme used by troll -troll meme-could come in any languages, and since languages are representative of cultures, a meme in one language could represent a culture. In this shared task, we provide the TamilMemes dataset that consists of 2,969 memes to the participants. Additionally, we also provided the text (caption) associated with each meme. The text has been transcripted in Latin by a native Tamil speaker, which was later evaluated and corrected by the expert.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Tamil (ISO 639-3: tam) is an official language in Tamil Nadu, Puducherry, Sri Lanka, Singapore and a recognised minority language in Malaysia and South Africa with a 75 million speaker base (Chakravarthi et al., 2018; Mahesan, 2019, 2020a,b) . Tamil inscription in pottery from Kodumanal and Porunthal 1 2 was the oldest inscription in India dating to 580 BCE then Asoka inscription in Prakrit, Greek and Aramaic dating to 260 BCE (Mahadevan, 2002; Rajakumar and Bharathi, 2012) . In Sanskrit, the oldest known inscriptions are from the 1st century CE, such as Dhana's Ayodhya Inscription and Ghosundi-Hathibada. The first book printed in India in an Indian language was Tampiran Vanakkam in Tamil on 1578 CE, a 16-page translation of the Portuguese \"Doctrina Christam\" (Balachandran, 2005) . In the modern Tamil script, there are 12 vowels, 18 consonants and one special character, the\u0101ytam (Chakravarthi, 2020b). The vowels and consonants combine in order to form 216 compound characters, giving a total of 247 characters. However, Tamil is often transcribed in Latin Script by a multilingual speaker on social media platforms (Chakravarthi, 2020a) . This behaviour could be attributed to the ease or comfort in scripting Tamil in Latin script (Chakravarthi et al., 2019; Hande et al., 2020) . Also, these multilingual speakers tend to switch to another language such as English. This phenomenon, also known as code-mixing or codeswitching is commonly observed amongst multilingual speakers (Jose et al., 2020; . A great deal of work and resources (Chakravarthi et al., 2020a,c) has been created for Dravidian languages but the multimodal aspect of code-mixed content in the form of meme remains unexplored.", |
|
"cite_spans": [ |
|
{ |
|
"start": 190, |
|
"end": 217, |
|
"text": "(Chakravarthi et al., 2018;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 218, |
|
"end": 241, |
|
"text": "Mahesan, 2019, 2020a,b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 431, |
|
"end": 448, |
|
"text": "(Mahadevan, 2002;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 449, |
|
"end": 478, |
|
"text": "Rajakumar and Bharathi, 2012)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 770, |
|
"end": 790, |
|
"text": "(Balachandran, 2005)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1129, |
|
"end": 1150, |
|
"text": "(Chakravarthi, 2020a)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1246, |
|
"end": 1273, |
|
"text": "(Chakravarthi et al., 2019;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1274, |
|
"end": 1293, |
|
"text": "Hande et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1493, |
|
"end": 1512, |
|
"text": "(Jose et al., 2020;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1550, |
|
"end": 1580, |
|
"text": "(Chakravarthi et al., 2020a,c)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In recent years, we have seen a rise in awareness of tackling multimodal offensive (Sharma et al., 2020) or hate (Kiela et al., 2020) content in memes in the English language, but other under-resourced languages remain unexplored. Our \"Troll Meme Classification in Tamil\" task aims to promote research in multimodal troll meme classification in under-resourced Tamil language. Moreover, this task provides a unique opportunity to study the effect of code-switching or code-mixing in the English transcript of Tamil (Chakravarthi et al., 2020b) in association with the image from the meme.", |
|
"cite_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 104, |
|
"text": "(Sharma et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 113, |
|
"end": 133, |
|
"text": "(Kiela et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The goal of the \"Troll Meme Classification in Tamil\" shared task was to classify if a given meme is a \"troll\" or \"not-troll\" based on the image and text associated with the meme in the Tamil language. The text from the meme is written in either the Tamil grammar and English lexicon or English grammar and Tamil lexicon. However, for consistency, we transcripted the text in Latin.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Troll meme is a meme, which consists of offensive text and non-offensive images, offensive images with non-offensive text, sarcastically offensive text with non-offensive images, or sarcastic images with offensive text to provoke, distract and has digressive or off-topic content with intend to demean or offend particular people, group or race, otherwise, a not-troll meme . Figure 1 shows examples of a troll and nottroll meme from the TamilMeme dataset. Example 1 is a troll meme targeted towards the potato chip brand called \"Lays\". In this example, an image is harmless with just a picture of the potato chips", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 376, |
|
"end": 384, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Task Description", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Troll Not-troll Total Train 1,282 1,018 2,300 Test 395 272 667 Total 1,677 1,290 2,967 packet, but the translation of the text is \"If you buy one packet air, then 5 chips free\" which is offensive for the brand. The translation of Example 2 would be \"Sorry my friend (girl)\". This example does not contain any provoking or offensive image or text and hence, it is a not-troll meme. Previously, we developed this TamilMemes dataset and treated the task of identifying a troll meme as an image classification problem. Since the text associated with the meme acts as a context of the image, we enhanced our TamilMemes dataset by providing the text as a separate modality for the shared task. We expected our participant to approach the task in a multimodal way. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 6, |
|
"end": 81, |
|
"text": "Not-troll Total Train 1,282 1,018 2,300 Test 395 272 667 Total 1,677", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Split", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Precision Recall F1-score 1 Codewithzichao (Li, 2021) 0.57 0.60 0.55 2 IIITK (Ghanghor et al., 2021) 0.56 0.59 0.54 3 NLP@CUET (Hossain et al., 2021) 0.55 0.58 0.52 4 SSNCSE NLP (Silvia A and B, 2021) 0.58 0.60 0.50 5 Simon work (Que et al., 2021) 0.53 0.58 0.49 6 TrollMeta (J and HS, 2021) 0.45 0.41 0.48 7 UVCE-IIITT (Hegde et al., 2021) 0.60 0.60 0.46 8 cean 0.53 0.57 0.43 9 HUB (Huang and Bai, 2021) 0.50 0.54 0.40 10 iiit dwd (Mishra and Saumya, 2021) 0.52 0.59 0.30 Table 2 : The rank list with detailed report on Precision (P), Recall (R), F1-score (F1) of submissions for the \"Troll Meme Classification in Tamil\" task 3 Evaluation Table 1 shows the class distribution in the training and test set for the TamilMemes dataset. We provided a training set of 2,500 memes (with the Latin scripted text) to the participants. Later, we evaluated all the systems on the held-out or test set of 667 memes. We considered a weighted average version of the F1-score as a primary evaluation metric by taking class imbalance into the account. The weighted average F1-score 3 is calculated by averaging the support-weighted mean F1 score perclass.", |
|
"cite_spans": [ |
|
{ |
|
"start": 43, |
|
"end": 53, |
|
"text": "(Li, 2021)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 77, |
|
"end": 100, |
|
"text": "(Ghanghor et al., 2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 127, |
|
"end": 149, |
|
"text": "(Hossain et al., 2021)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 178, |
|
"end": 200, |
|
"text": "(Silvia A and B, 2021)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 229, |
|
"end": 247, |
|
"text": "(Que et al., 2021)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 286, |
|
"end": 291, |
|
"text": "2021)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 335, |
|
"end": 340, |
|
"text": "2021)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 400, |
|
"end": 405, |
|
"text": "2021)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 453, |
|
"end": 458, |
|
"text": "2021)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 474, |
|
"end": 481, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 641, |
|
"end": 648, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Rank Team", |
|
"sec_num": null |
|
}, |
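
{

"text": "The following is a minimal sketch (not the official evaluation script) of how the weighted average F1-score can be computed with scikit-learn; the label lists are hypothetical placeholders rather than actual shared task data.\n\nfrom sklearn.metrics import classification_report, f1_score\n\n# Hypothetical gold labels and system predictions for a handful of memes.\ny_true = ['troll', 'not-troll', 'troll', 'troll', 'not-troll']\ny_pred = ['troll', 'troll', 'troll', 'not-troll', 'not-troll']\n\n# Weighted average F1: per-class F1 scores averaged, weighted by each class's support.\nprint(f1_score(y_true, y_pred, average='weighted'))\n\n# Per-class precision, recall and F1, in the style of Table 2.\nprint(classification_report(y_true, y_pred))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Rank Team",

"sec_num": null

},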
|
{ |
|
"text": "In system submissions, we saw a variety of methodologies used by the participants which were as simple as Logistic Regression (Wright, 1995) and as complex as BERT (Devlin et al., 2018) . By keeping multimodality in mind, most of the participants implemented a method that could leverage both text and image. Mostly, the image part has been processed using Convolutional Neural Network (CNN) such as ResNet152 (He et al., 2016) or the custom residual networks while the text has been processed RNN (LSTM (Hochreiter and Schmidhuber, 1997) ), transformer (BERT, ROBERTA (Liu et al., 2019) ). However, in one exceptional case, one participant has used a transformer for processing both modalities.", |
|
"cite_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 140, |
|
"text": "(Wright, 1995)", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 164, |
|
"end": 185, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 410, |
|
"end": 427, |
|
"text": "(He et al., 2016)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 504, |
|
"end": 538, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 569, |
|
"end": 587, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
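
{

"text": "As a concrete illustration of this common recipe, the following is a minimal late-fusion sketch in PyTorch, assuming an ImageNet-pretrained ResNet152 image encoder and a multilingual BERT text encoder whose features are concatenated before a binary troll / not-troll classifier; it sketches the general approach under these assumptions and is not any particular team's submission.\n\nimport torch\nimport torch.nn as nn\nfrom torchvision.models import resnet152\nfrom transformers import AutoModel\n\nclass MemeFusionClassifier(nn.Module):\n    def __init__(self):\n        super().__init__()\n        # Image branch: ResNet152 pre-trained on ImageNet, with the final fc layer removed.\n        backbone = resnet152(pretrained=True)\n        self.image_encoder = nn.Sequential(*list(backbone.children())[:-1])\n        # Text branch: multilingual BERT over the Latin-transcribed caption.\n        self.text_encoder = AutoModel.from_pretrained('bert-base-multilingual-cased')\n        # Late fusion: concatenate the 2048-d image and 768-d text features, then classify.\n        self.classifier = nn.Linear(2048 + 768, 2)\n\n    def forward(self, pixel_values, input_ids, attention_mask):\n        img_feat = self.image_encoder(pixel_values).flatten(1)\n        txt_feat = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask).pooler_output\n        return self.classifier(torch.cat([img_feat, txt_feat], dim=1))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Methodology",

"sec_num": "4"

},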
|
{ |
|
"text": "\u2022 Li (2021) utilized a transformer-based approach that leverages the pre-trained BERT and ResNet152 model to derive the text and image features. The novelty of the work 3 Weighted average F-1 score is calculated with the help of the sklearn classification report utility comes from the implementation of multimodal attention which considers the whole text caption in the context of the image by mapping both image and text features in the same semantic space. Their model achieved a 0.55 weighted average F1 score and ranked first in the shared task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 169, |
|
"end": 170, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Ghanghor et al. (2021) utilized a transformerbased approach to identify the troll and nottroll Tamil meme. The state of the art text classifier (BERT) and image (CNN) classifiers were used to extract useful attention features.", |
|
"cite_spans": [ |
|
{ |
|
"start": 2, |
|
"end": 24, |
|
"text": "Ghanghor et al. (2021)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Their system submission for Tamil troll meme classification achieved a 0.54 weighted average F1 score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Hossain et al. (2021) utilized a systematic text-based, image-based and multimodal approach to identify a troll meme. The paper laid out a detailed investigation into the problem presented in the shared task by capturing visual and textual features using CNN, VGG16 (Simonyan and Zisserman, 2014) , Inception (Szegedy et al., 2017) , m-BERT, XLM-RoBERTa, XLNet (Yang et al., 2019) . Multimodal features were extracted by combining image (CNN, ResNet50, Inception) and text (BiLSTM) (Zhou et al., 2016) features using an early fusion technique. But the results showed that the text-based approach with XLNet achieved the highest weighted F1-score of 0.58, and enabled their system implementation to secure 3rd rank.", |
|
"cite_spans": [ |
|
{ |
|
"start": 2, |
|
"end": 23, |
|
"text": "Hossain et al. (2021)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 268, |
|
"end": 298, |
|
"text": "(Simonyan and Zisserman, 2014)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 311, |
|
"end": 333, |
|
"text": "(Szegedy et al., 2017)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 363, |
|
"end": 382, |
|
"text": "(Yang et al., 2019)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 439, |
|
"end": 444, |
|
"text": "(CNN,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 445, |
|
"end": 454, |
|
"text": "ResNet50,", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 455, |
|
"end": 465, |
|
"text": "Inception)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 484, |
|
"end": 503, |
|
"text": "(Zhou et al., 2016)", |
|
"ref_id": "BIBREF48" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Silvia A and B (2021) used traditional NLP approaches such as Multilayer perceptron (MLP (Haykin and Network, 2004) ), Random forest (RF) (Breiman, 2001 ) and K-nearest neighbour (KNN ) (Cunningham and Delany, 2020) on the text features derived from tf-idf, count vector and mBERT embeddings. The paper opts for a text-based approach over the multimodal approach and achieved a weighted average F1-score of 0.50.", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 117, |
|
"text": "(Haykin and Network, 2004)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 140, |
|
"end": 154, |
|
"text": "(Breiman, 2001", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 188, |
|
"end": 217, |
|
"text": "(Cunningham and Delany, 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Que et al. 2021used a text-based approach to deal with the multimodal issue of identifying troll memes. Here the author trains XLM-Roberta in combination with the custom CNN on the text modality and achieves a weighted average F1-score of 0.49.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 J and HS 2021utilized ResNet-50 to identify if a given meme is a troll or not based on the visual features. However, no text modality has been used as a feature while training the system. Their system achieved a weighted average F1-score of 0.48 and ranked sixth.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Hegde et al. 2021utilized a transformerbased approach to identify the troll and nottroll Tamil meme. The state of the art text (mBERT) and image (Vision transformer) (Dosovitskiy et al., 2020) classifiers were used to extract useful attention features. They achieve a weighted average F1-score of 0.46 and ranked seventh.", |
|
"cite_spans": [ |
|
{ |
|
"start": 168, |
|
"end": 194, |
|
"text": "(Dosovitskiy et al., 2020)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Huang and Bai (2021) used a BiGRU (Dey and Salem, 2017) and custom CNN to capture text and image features, which later are concatenated to form a multimodal representation of a meme. They ranked ninth with a weighted average F1-score of 0.40.", |
|
"cite_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 57, |
|
"text": "(Dey and Salem, 2017)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Mishra and Saumya (2021) used a hybrid approach that combines text and image features using CNN and BiLSTM. Their proposed model obtained 10th rank in the shared task and reported a weighted F1-score of 0.30.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We have received 10 submissions for the shared task. Table 2 shows that the maximum weighted average F1-score could only reach 0.55, which is less. While the submissions claimed that the evaluation metric exceeded the validation set, the poor performance on the test set point towards overfitting.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 60, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Overfitting is commonly seen when complex models with large numbers of parameters are trained on small datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In this shared task, we provided 2,967 samples which might be less for the data-hungry models such as BERT and it's predecessor such as ROBERTa, XLM-RoBERTa. Moreover, due to the high dimensionality presented by the images, the traditional machine learning models such as logistic regression, Naive Bayes are not able to generalize. The poor performance of the traditional machine learning model shows that they are rather too simple to learn useful features from the high dimensional data presented in the form of image and text. Furthermore, the Tamil text (could be with or without code-mixed) from the meme is transcripted in the Latin script leaves little room for using pre-trained models (e.g. multilingual BERT) which are primarily trained in English or zero-shot transferred to other languages written in their native script. Hence, such multilingual models need to be finetuned on the more code-mixed, transliterated data. In the case of images, the visual features have been captured using a CNN pre-trained on the imagenet weights.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
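
{

"text": "As an illustration of that fine-tuning step, the following is a minimal sketch, assuming the Hugging Face transformers library and hypothetical Latin-transcribed captions; it is not the pipeline of any submission.\n\nimport torch\nfrom transformers import AutoTokenizer, AutoModelForSequenceClassification\n\n# Hypothetical Latin-transcribed, code-mixed captions with troll (1) / not-troll (0) labels.\ncaptions = ['placeholder transliterated caption one', 'placeholder transliterated caption two']\nlabels = torch.tensor([1, 0])\n\ntokenizer = AutoTokenizer.from_pretrained('bert-base-multilingual-cased')\nmodel = AutoModelForSequenceClassification.from_pretrained('bert-base-multilingual-cased', num_labels=2)\n\n# mBERT's WordPiece vocabulary covers the Latin script, so romanised Tamil captions can still\n# be tokenised and the classification head fine-tuned on the transliterated data.\nbatch = tokenizer(captions, padding=True, truncation=True, return_tensors='pt')\nloss = model(**batch, labels=labels).loss\nloss.backward()  # one fine-tuning step; an optimizer update would follow",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Results and Discussion",

"sec_num": "5"

},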
|
{ |
|
"text": "In the previous study, we observed that the visual features captured by such CNNs tend to be less useful for identifying a troll Tamil meme. The same effect has been observed in the system submitted to the shared task. Overall, we need a robust approach that takes into account all the aspects of this multimodal classification problem -lack of data, high dimensionality, low resource language processing and code-mixing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In the \"Troll Meme Classification in Tamil\" shared task, we presented a unique multimodal classification problem by providing a newly improved TamilMeme dataset which now has Tamil text (in the form of transcripted Latin text) from memes. This task was not only a multimodal classification problem but also posed challenges such as natural language processing of low-resourced language, transcripted code-mixed text. The submissions received from the participants showed multiple ways to approach the problem and we hope that their contribution along with our enriched dataset will kindle the research in this less explored area.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Grant Number SFI/12/RC/2289 P2 (Insight 2) and 13/RC/2106 P2 (ADAPT), co-funded by the European Regional Development Fund and Irish Research Council grant IRCLA/2017/129 (CARDAMOM-Comparative Deep Models of Language for Minority and Historical Languages).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Tamil inscriptions wiki 2 Iron Age -Early Historic Transition in South Indian Appraisal", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This publication is the outcome of the research supported in part by a research grant from Science Foundation Ireland (SFI) under", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Pioneers of Tamil Literature: Transition to Modernity. Indian Literature", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Balachandran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "49", |
|
"issue": "", |
|
"pages": "179--184", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R Balachandran. 2005. Pioneers of Tamil Literature: Transition to Modernity. Indian Literature, 49(2 (226):179-184.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Random forests. Machine learning", |
|
"authors": [ |
|
{ |
|
"first": "Leo", |
|
"middle": [], |
|
"last": "Breiman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "", |
|
"volume": "45", |
|
"issue": "", |
|
"pages": "5--32", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leo Breiman. 2001. Random forests. Machine learn- ing, 45(1):5-32.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "HopeEDI: A multilingual hope speech detection dataset for equality, diversity, and inclusion", |
|
"authors": [ |
|
{ |
|
"first": "Chakravarthi", |
|
"middle": [], |
|
"last": "Bharathi Raja", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--53", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi. 2020a. HopeEDI: A mul- tilingual hope speech detection dataset for equality, diversity, and inclusion. In Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Me- dia, pages 41-53, Barcelona, Spain (Online). Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Leveraging orthographic information to improve machine translation of under-resourced languages", |
|
"authors": [ |
|
{ |
|
"first": "Chakravarthi", |
|
"middle": [], |
|
"last": "Bharathi Raja", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi. 2020b. Leveraging ortho- graphic information to improve machine translation of under-resourced languages. Ph.D. thesis, NUI Galway.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Improving wordnets for underresourced languages using machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Mihael", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Arcan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mccrae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 9th Global Wordnet Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "77--86", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, Mihael Arcan, and John P. McCrae. 2018. Improving wordnets for under- resourced languages using machine translation. In Proceedings of the 9th Global Wordnet Conference, pages 77-86, Nanyang Technological University (NTU), Singapore. Global Wordnet Association.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "WordNet gloss translation for underresourced languages using multilingual neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Mihael", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Arcan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mccrae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Second Workshop on Multilingualism at the Intersection of Knowledge Bases and Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--7", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, Mihael Arcan, and John P. McCrae. 2019. WordNet gloss translation for under- resourced languages using multilingual neural ma- chine translation. In Proceedings of the Second Workshop on Multilingualism at the Intersection of Knowledge Bases and Machine Translation, pages 1-7, Dublin, Ireland. European Association for Ma- chine Translation.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A sentiment analysis dataset for codemixed malayalam-english", |
|
"authors": [ |
|
{ |
|
"first": "Navya", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shardul", |
|
"middle": [], |
|
"last": "Jose", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Suryawanshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"Philip" |
|
], |
|
"last": "Sherly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mc-Crae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "177--184", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, Navya Jose, Shardul Suryawanshi, Elizabeth Sherly, and John Philip Mc- Crae. 2020a. A sentiment analysis dataset for code- mixed malayalam-english. In Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collab- oration and Computing for Under-Resourced Lan- guages (CCURL), pages 177-184.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Overview of the track on HASOC-Offensive Language Identification-DravidianCodeMix", |
|
"authors": [ |
|
{ |
|
"first": "Anand", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Philip Mccrae", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Premjith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Soman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Mandl", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Working Notes of the Forum for Information Retrieval Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, M Anand Kumar, John Philip McCrae, Premjith B, Soman KP, and Thomas Mandl. 2020b. Overview of the track on HASOC-Offensive Language Identification- DravidianCodeMix. In Working Notes of the Forum for Information Retrieval Evaluation (FIRE 2020).", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "CEUR Workshop Proceedings", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "CEUR Workshop Proceedings. In: CEUR-WS. org, Hyderabad, India.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Corpus creation for sentiment analysis in code-mixed Tamil-English text", |
|
"authors": [ |
|
{ |
|
"first": "Vigneshwaran", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Muralidaran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"Philip" |
|
], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mc-Crae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "202--210", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bharathi Raja Chakravarthi, Vigneshwaran Murali- daran, Ruba Priyadharshini, and John Philip Mc- Crae. 2020c. Corpus creation for sentiment anal- ysis in code-mixed Tamil-English text. In Pro- ceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced lan- guages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL), pages 202-210, Marseille, France. European Language Re- sources association.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "The selfish gene", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Dawkins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Dawkins. 2016. The selfish gene. Oxford uni- versity press.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Gate-variants of gated recurrent unit (gru) neural networks", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Dey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Salem", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2017 IEEE 60th International Midwest Symposium on Circuits and Systems (MWSCAS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1597--1600", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/MWSCAS.2017.8053243" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R. Dey and F. M. Salem. 2017. Gate-variants of gated recurrent unit (gru) neural networks. In 2017 IEEE 60th International Midwest Symposium on Circuits and Systems (MWSCAS), pages 1597-1600.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "An image is worth 16x16 words: Transformers for image recognition at scale", |
|
"authors": [ |
|
{ |
|
"first": "Alexey", |
|
"middle": [], |
|
"last": "Dosovitskiy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucas", |
|
"middle": [], |
|
"last": "Beyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Kolesnikov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Weissenborn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaohua", |
|
"middle": [], |
|
"last": "Zhai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Unterthiner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mostafa", |
|
"middle": [], |
|
"last": "Dehghani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Minderer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georg", |
|
"middle": [], |
|
"last": "Heigold", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sylvain", |
|
"middle": [], |
|
"last": "Gelly", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2010.11929" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. 2020. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Understanding visual memes: An empirical analysis of text superimposed on memes shared on twitter", |
|
"authors": [ |
|
{ |
|
"first": "Yuhao", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Muhammad", |
|
"middle": [ |
|
"Aamir" |
|
], |
|
"last": "Masood", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Joseph", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the International AAAI Conference on Web and Social Media", |
|
"volume": "14", |
|
"issue": "", |
|
"pages": "153--164", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuhao Du, Muhammad Aamir Masood, and Kenneth Joseph. 2020. Understanding visual memes: An empirical analysis of text superimposed on memes shared on twitter. In Proceedings of the Interna- tional AAAI Conference on Web and Social Media, volume 14, pages 153-164.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Offensive Language Identification and Meme Classification in Tamil, Malayalam and Kannada", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Iiitk@dravidianlangtech-Eacl2021", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "IIITK@DravidianLangtech-EACL2021: Offensive Language Identification and Meme Classification in Tamil, Malayalam and Kannada. In Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "KanCMD: Kannada CodeMixed dataset for sentiment analysis and offensive language detection", |
|
"authors": [ |
|
{ |
|
"first": "Adeep", |
|
"middle": [], |
|
"last": "Hande", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bharathi Raja", |
|
"middle": [], |
|
"last": "Chakravarthi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Third Workshop on Computational Modeling of People's Opinions, Personality, and Emotion's in Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "54--63", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adeep Hande, Ruba Priyadharshini, and Bharathi Raja Chakravarthi. 2020. KanCMD: Kannada CodeMixed dataset for sentiment analysis and offensive language detection. In Proceedings of the Third Workshop on Computational Modeling of Peo- ple's Opinions, Personality, and Emotion's in Social Media, pages 54-63, Barcelona, Spain (Online). Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "A comprehensive foundation", |
|
"authors": [ |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Haykin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Network", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Neural networks", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simon Haykin and N Network. 2004. A comprehen- sive foundation. Neural networks, 2(2004):41.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Deep residual learning for image recognition", |
|
"authors": [ |
|
{ |
|
"first": "Kaiming", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiangyu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaoqing", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "770--778", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep residual learning for image recog- nition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770- 778.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Uvceiiitt@DravidianLangTech-EACL2021: Tamil Troll Meme Classification: You need to Pay more Attention", |
|
"authors": [ |
|
{ |
|
"first": "Adeep", |
|
"middle": [], |
|
"last": "Siddhanth U Hegde", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Hande", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sajeetha", |
|
"middle": [], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bharathi Raja", |
|
"middle": [], |
|
"last": "Thavareesan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Chakravarthi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siddhanth U Hegde, Adeep Hande, Ruba Priyadharshini, Sajeetha Thavareesan, and Bharathi Raja Chakravarthi. 2021. Uvce- iiitt@DravidianLangTech-EACL2021: Tamil Troll Meme Classification: You need to Pay more Attention. In Proceedings of the First Workshop on Speech and Language Technologies for Dra- vidian Languages. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "NLP-CUET@DravidianLangtech-EACL2021: Investigating Visual and Textual Features to Identify Trolls from Multimodal Social Media Memes", |
|
"authors": [ |
|
{ |
|
"first": "Eftekhar", |
|
"middle": [], |
|
"last": "Hossain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omar", |
|
"middle": [], |
|
"last": "Sharif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammed Moshiul", |
|
"middle": [], |
|
"last": "Hoque", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eftekhar Hossain, Omar Sharif, and Mo- hammed Moshiul Hoque. 2021. NLP- CUET@DravidianLangtech-EACL2021: Inves- tigating Visual and Textual Features to Identify Trolls from Multimodal Social Media Memes. In Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages. Association for Computational Linguistics. Bo Huang and Yang Bai. 2021.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Meme Classification for Tamil Text-Image Fusion", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hub@dravidianlangtech-Eacl2021", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "HUB@DravidianLangTech-EACL2021: Meme Classification for Tamil Text-Image Fusion. In Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Trollmeta@DravidianLangtech-EACL2021: Meme classification using deep learning", |
|
"authors": [ |
|
{ |
|
"first": "Manoj", |
|
"middle": [], |
|
"last": "Balaji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Chinmaya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manoj Balaji J and Chinmaya HS. 2021. Troll- meta@DravidianLangtech-EACL2021: Meme clas- sification using deep learning. In Proceedings of the First Workshop on Speech and Language Technolo- gies for Dravidian Languages. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "A Survey of Current Datasets for Code-Switching Research", |
|
"authors": [ |
|
{ |
|
"first": "Navya", |
|
"middle": [], |
|
"last": "Jose", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shardul", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elizabeth", |
|
"middle": [], |
|
"last": "Suryawanshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Sherly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mccrae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "2020 6th International Conference on Advanced Computing and Communication Systems (ICACCS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "136--141", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICACCS48705.2020.9074205" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Navya Jose, Bharathi Raja Chakravarthi, Shardul Suryawanshi, Elizabeth Sherly, and John P. McCrae. 2020. A Survey of Current Datasets for Code- Switching Research. In 2020 6th International Con- ference on Advanced Computing and Communica- tion Systems (ICACCS), pages 136-141.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Amanpreet Singh, Pratik Ringshia, and Davide Testuggine. 2020. The hateful memes challenge: Detecting hate speech in multimodal memes", |
|
"authors": [ |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hamed", |
|
"middle": [], |
|
"last": "Firooz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aravind", |
|
"middle": [], |
|
"last": "Mohan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vedanuj", |
|
"middle": [], |
|
"last": "Goswami", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2005.04790" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Douwe Kiela, Hamed Firooz, Aravind Mohan, Vedanuj Goswami, Amanpreet Singh, Pratik Ringshia, and Davide Testuggine. 2020. The hateful memes chal- lenge: Detecting hate speech in multimodal memes. arXiv preprint arXiv:2005.04790.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Codewithzichao@DravidianLangtech-EACL2021: Exploring Multimodal Transformers for Meme Classification in Tamil Language", |
|
"authors": [ |
|
{ |
|
"first": "Zichao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zichao Li. 2021. Codewithzichao@DravidianLangtech- EACL2021: Exploring Multimodal Transformers for Meme Classification in Tamil Language. In Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Roberta: A robustly optimized bert pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Aryan or dravidian or neither? a study of recent attempts to decipher the indus script", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Iravatham Mahadevan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Electronic Journal of Vedic Studies", |
|
"volume": "8", |
|
"issue": "1", |
|
"pages": "1--19", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iravatham Mahadevan. 2002. Aryan or dravidian or neither? a study of recent attempts to decipher the in- dus script (1995-2000). Electronic Journal of Vedic Studies, 8(1):1-19.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Overview of the HASOC Track at FIRE 2020: Hate Speech and Offensive Language Identification in Tamil", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Mandl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandip", |
|
"middle": [], |
|
"last": "Modha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anand", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bharathi Raja Chakravarthi ;", |
|
"middle": [], |
|
"last": "Malayalam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hindi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "German", |
|
"middle": [], |
|
"last": "English", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Forum for Information Retrieval Evaluation", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "29--32", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3441501.3441517" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Mandl, Sandip Modha, Anand Kumar M, and Bharathi Raja Chakravarthi. 2020. Overview of the HASOC Track at FIRE 2020: Hate Speech and Offensive Language Identification in Tamil, Malay- alam, Hindi, English and German. In Forum for Information Retrieval Evaluation, FIRE 2020, page 29-32, New York, NY, USA. Association for Com- puting Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "IIIT DWD@DravidianLangtech-EACL2021: Identifying Tamil troll meme using a hybrid deep learning approach", |
|
"authors": [ |
|
{ |
|
"first": "Ankit", |
|
"middle": [], |
|
"last": "Kumar Mishra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunil", |
|
"middle": [], |
|
"last": "Saumya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ankit Kumar Mishra and Sunil Saumya. 2021. IIIT DWD@DravidianLangtech-EACL2021: Iden- tifying Tamil troll meme using a hybrid deep learn- ing approach. In Proceedings of the First Workshop on Speech and Language Technologies for Dravid- ian Languages. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Named Entity Recognition for Code-Mixed Indian Corpus using Meta Embedding", |
|
"authors": [ |
|
{ |
|
"first": "Ruba", |
|
"middle": [], |
|
"last": "Priyadharshini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bharathi", |
|
"middle": [ |
|
"Raja" |
|
], |
|
"last": "Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mani", |
|
"middle": [], |
|
"last": "Vegupatti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "McCrae", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "2020 6th International Conference on Advanced Computing and Communication Systems (ICACCS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "68--72", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICACCS48705.2020.9074379" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruba Priyadharshini, Bharathi Raja Chakravarthi, Mani Vegupatti, and John P. McCrae. 2020. Named Entity Recognition for Code-Mixed Indian Corpus using Meta Embedding. In 2020 6th International Conference on Advanced Computing and Communi- cation Systems (ICACCS), pages 68-72.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Simon@DravidianLangtech-EACL2021: Meme Classification for Tamil with BERT", |
|
"authors": [ |
|
{ |
|
"first": "Qinyu", |
|
"middle": [], |
|
"last": "Que", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuanchi", |
|
"middle": [], |
|
"last": "Qu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suidong", |
|
"middle": [], |
|
"last": "Qu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qinyu Que, Yuanchi Qu, and Suidong Qu. 2021. Simon@DravidianLangtech-EACL2021: Meme Classification for Tamil with BERT. In Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Eighth century tamil consonants recognition from stone inscriptions", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Rajakumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bharathi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "2012 International Conference on Recent Trends in Information Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "40--43", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S Rajakumar and V Subbiah Bharathi. 2012. Eighth century tamil consonants recognition from stone in- scriptions. In 2012 International Conference on Re- cent Trends in Information Technology, pages 40-43. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Soujanya Poria, Tanmoy Chakraborty, and Bj\u00f6rn Gamb\u00e4ck. 2020. Task Report: Memotion Analysis 1.0 @SemEval 2020: The Visuo-Lingual Metaphor! In Proceedings of the 14th International Workshop on Semantic Evaluation (SemEval-2020)", |
|
"authors": [ |
|
{ |
|
"first": "Chhavi", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Paka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Scott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deepesh", |
|
"middle": [], |
|
"last": "Bhageria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amitava", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soujanya", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tanmoy", |
|
"middle": [], |
|
"last": "Chakraborty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bj\u00f6rn", |
|
"middle": [], |
|
"last": "Gamb\u00e4ck", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chhavi Sharma, William Paka, Scott, Deepesh Bhageria, Amitava Das, Soujanya Poria, Tanmoy Chakraborty, and Bj\u00f6rn Gamb\u00e4ck. 2020. Task Re- port: Memotion Analysis 1.0 @SemEval 2020: The Visuo-Lingual Metaphor! In Proceedings of the 14th International Workshop on Semantic Evalua- tion (SemEval-2020), Barcelona, Spain. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "SSNCSE NLP@DravidianLangtech-EACL2021", |
|
"authors": [ |
|
{ |
|
"first": "Agnusimmaculate", |
|
"middle": [], |
|
"last": "Silvia A", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bharathi", |
|
"middle": [], |
|
"last": "B", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Agnusimmaculate Silvia A and Bharathi B. 2021. SSNCSE NLP@DravidianLangtech-EACL2021:", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Meme classification for Tamil using machine learning approach", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Meme classification for Tamil using machine learn- ing approach. In Proceedings of the First Workshop on Speech and Language Technologies for Dra- vidian Languages. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Very deep convolutional networks for large-scale image recognition", |
|
"authors": [ |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Simonyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Zisserman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1409.1556" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karen Simonyan and Andrew Zisserman. 2014. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "A dataset for troll classification of TamilMemes", |
|
"authors": [ |
|
{ |
|
"first": "Shardul", |
|
"middle": [], |
|
"last": "Suryawanshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bharathi", |
|
"middle": [ |
|
"Raja" |
|
], |
|
"last": "Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Verma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihael", |
|
"middle": [], |
|
"last": "Arcan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"Philip" |
|
], |
|
"last": "McCrae", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Buitelaar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the WILDRE5-5th Workshop on Indian Language Data: Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7--13", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shardul Suryawanshi, Bharathi Raja Chakravarthi, Pranav Verma, Mihael Arcan, John Philip McCrae, and Paul Buitelaar. 2020. A dataset for troll clas- sification of TamilMemes. In Proceedings of the WILDRE5-5th Workshop on Indian Language Data: Resources and Evaluation, pages 7-13, Marseille, France. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Inception-v4, inception-resnet and the impact of residual connections on learning", |
|
"authors": [ |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Szegedy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Ioffe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Vanhoucke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Alemi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "31", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, and Alexander Alemi. 2017. Inception-v4, inception-resnet and the impact of residual connec- tions on learning. In Proceedings of the AAAI Con- ference on Artificial Intelligence, volume 31.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Sentiment Analysis in Tamil Texts: A Study on Machine Learning Techniques and Feature Representation", |
|
"authors": [ |
|
{ |
|
"first": "Sajeetha", |
|
"middle": [], |
|
"last": "Thavareesan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sinnathamby", |
|
"middle": [], |
|
"last": "Mahesan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "2019 14th Conference on Industrial and Information Systems (ICIIS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "320--325", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICIIS47346.2019.9063341" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sajeetha Thavareesan and Sinnathamby Mahesan. 2019. Sentiment Analysis in Tamil Texts: A Study on Machine Learning Techniques and Feature Rep- resentation. In 2019 14th Conference on Industrial and Information Systems (ICIIS), pages 320-325.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Sentiment Lexicon Expansion using Word2vec and fastText for Sentiment Prediction in Tamil texts", |
|
"authors": [ |
|
{ |
|
"first": "Sajeetha", |
|
"middle": [], |
|
"last": "Thavareesan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sinnathamby", |
|
"middle": [], |
|
"last": "Mahesan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "2020 Moratuwa Engineering Research Conference (MERCon)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "272--276", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/MERCon50084.2020.9185369" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sajeetha Thavareesan and Sinnathamby Mahesan. 2020a. Sentiment Lexicon Expansion using Word2vec and fastText for Sentiment Prediction in Tamil texts. In 2020 Moratuwa Engineering Re- search Conference (MERCon), pages 272-276.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Word embedding-based Part of Speech tagging in Tamil texts", |
|
"authors": [ |
|
{ |
|
"first": "Sajeetha", |
|
"middle": [], |
|
"last": "Thavareesan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sinnathamby", |
|
"middle": [], |
|
"last": "Mahesan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "2020 IEEE 15th International Conference on Industrial and Information Systems (ICIIS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "478--482", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/ICIIS51140.2020.9342640" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sajeetha Thavareesan and Sinnathamby Mahesan. 2020b. Word embedding-based Part of Speech tag- ging in Tamil texts. In 2020 IEEE 15th International Conference on Industrial and Information Systems (ICIIS), pages 478-482.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "A survey on troll detection", |
|
"authors": [ |
|
{ |
|
"first": "Michele", |
|
"middle": [], |
|
"last": "Tomaiuolo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gianfranco", |
|
"middle": [], |
|
"last": "Lombardo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Monica", |
|
"middle": [], |
|
"last": "Mordonini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefano", |
|
"middle": [], |
|
"last": "Cagnoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Agostino", |
|
"middle": [], |
|
"last": "Poggi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Future Internet", |
|
"volume": "12", |
|
"issue": "2", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michele Tomaiuolo, Gianfranco Lombardo, Monica Mordonini, Stefano Cagnoni, and Agostino Poggi. 2020. A survey on troll detection. Future Internet, 12(2):31.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Logistic regression", |
|
"authors": [ |
|
{ |
|
"first": "Raymond", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Wright", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Raymond E Wright. 1995. Logistic regression.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.08237" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Ruslan Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretrain- ing for language understanding. arXiv preprint arXiv:1906.08237.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Attention-based bidirectional long short-term memory networks for relation classification", |
|
"authors": [ |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Tian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenyu", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bingchen", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongwei", |
|
"middle": [], |
|
"last": "Hao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th annual meeting of the association for computational linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "207--212", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peng Zhou, Wei Shi, Jun Tian, Zhenyu Qi, Bingchen Li, Hongwei Hao, and Bo Xu. 2016. Attention-based bidirectional long short-term memory networks for relation classification. In Proceedings of the 54th annual meeting of the association for computational linguistics (volume 2: Short papers), pages 207- 212.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"text": "(a) An example on troll meme from TamilMemes dataset (b) An example on not-troll meme from TamilMemes dataset Figure 1: Examples for a troll and not-troll meme from the TamilMemes dataset", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"num": null, |
|
"text": "A class distribution in the training and test set of the TamilMemes dataset", |
|
"content": "<table/>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |