|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:22:28.208524Z" |
|
}, |
|
"title": "Alternative non-BERT model choices for the textual classification in low-resource languages and environments", |
|
"authors": [ |
|
{ |
|
"first": "Syed", |
|
"middle": [], |
|
"last": "Mustavi Maheen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "North South University", |
|
"location": { |
|
"settlement": "Dhaka", |
|
"country": "Bangladesh" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Rahman", |
|
"middle": [], |
|
"last": "Faisal", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "North South University", |
|
"location": { |
|
"settlement": "Dhaka", |
|
"country": "Bangladesh" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Rafakat", |
|
"middle": [], |
|
"last": "Rahman", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "North South University", |
|
"location": { |
|
"settlement": "Dhaka", |
|
"country": "Bangladesh" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Karim", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "North South University", |
|
"location": { |
|
"settlement": "Dhaka", |
|
"country": "Bangladesh" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Natural Language Processing (NLP) tasks in non-dominant and low-resource languages have not experienced significant progress. Although pre-trained BERT models are available, GPU-dependency, large memory requirement, and data scarcity often limit their applicability. As a solution, this paper proposes a fusion chain architecture comprised of one or more layers of CNN, LSTM, and BiLSTM and identifies precise configuration and chain length. The study shows that a simpler, CPU-trainable non-BERT fusion CNN + BiLSTM + CNN is sufficient to surpass the textual classification performance of the BERT-related models in resource-limited languages and environments. The fusion architecture competitively approaches the state-of-the-art accuracy in several Bengali NLP tasks and a six-class emotion detection task for a newly developed Bengali dataset. Interestingly, the performance of the identified fusion model, for instance, CNN + BiLSTM + CNN, also holds for other lowresource languages and environments. Efficacy study shows that the CNN + BiLSTM + CNN model outperforms BERT implementation for Vietnamese languages and performs almost equally in English NLP tasks experiencing artificial data scarcity. For the GLUE benchmark and other datasets such as Emotion, IMDB, and Intent classification, the CNN + BiLSTM + CNN model often surpasses or competes with BERT-base, TinyBERT, DistilBERT, and mBERT. Besides, a position-sensitive selfattention layer role further improves the fusion models' performance in the Bengali emotion classification. The models are also compressible to as low as \u2248 5\u00d7 smaller through pruning and retraining, making them more viable for resource-constrained environments. Together, this study may help NLP practitioners and serve as a blueprint for NLP model choices in textual classification for low-resource languages and environments.", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Natural Language Processing (NLP) tasks in non-dominant and low-resource languages have not experienced significant progress. Although pre-trained BERT models are available, GPU-dependency, large memory requirement, and data scarcity often limit their applicability. As a solution, this paper proposes a fusion chain architecture comprised of one or more layers of CNN, LSTM, and BiLSTM and identifies precise configuration and chain length. The study shows that a simpler, CPU-trainable non-BERT fusion CNN + BiLSTM + CNN is sufficient to surpass the textual classification performance of the BERT-related models in resource-limited languages and environments. The fusion architecture competitively approaches the state-of-the-art accuracy in several Bengali NLP tasks and a six-class emotion detection task for a newly developed Bengali dataset. Interestingly, the performance of the identified fusion model, for instance, CNN + BiLSTM + CNN, also holds for other lowresource languages and environments. Efficacy study shows that the CNN + BiLSTM + CNN model outperforms BERT implementation for Vietnamese languages and performs almost equally in English NLP tasks experiencing artificial data scarcity. For the GLUE benchmark and other datasets such as Emotion, IMDB, and Intent classification, the CNN + BiLSTM + CNN model often surpasses or competes with BERT-base, TinyBERT, DistilBERT, and mBERT. Besides, a position-sensitive selfattention layer role further improves the fusion models' performance in the Bengali emotion classification. The models are also compressible to as low as \u2248 5\u00d7 smaller through pruning and retraining, making them more viable for resource-constrained environments. Together, this study may help NLP practitioners and serve as a blueprint for NLP model choices in textual classification for low-resource languages and environments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Many developed nations are now considering deep learning approaches for tackling textual toxicity in social media. But countries lacking substantial socio-economic capacity and technological infrastructures are lagging. The current trend of NLP research evolves mainly around a few dominant languages, leaving NLP research for many low-resource languages unattended or less explored (Joshi et al., 2020) . The NLP tasks in lowresource languages generally suffer from exceptionally scarce resources, ranging from lack of annotated data to insufficient computational facilities. In contrast, most NLP breakthroughs that achieve high accuracy are computationally intensive, making it more challenging for societies suffering from inadequate technological infrastructures. For instance, while the bidirectional transformer BERT has about 340 millions parameters (Devlin et al., 2018) , a more advanced model GPT-3 (Brown et al., 2020) , has about 170 billions parameters, requiring extensive GPU/TPU support and memory storage that may be unaffordable for low-resource societies. As a result, low-resource languages and environments are frequently left out with little attention from the NLP community (Joshi et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 383, |
|
"end": 403, |
|
"text": "(Joshi et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 858, |
|
"end": 879, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 904, |
|
"end": 930, |
|
"text": "GPT-3 (Brown et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1198, |
|
"end": 1218, |
|
"text": "(Joshi et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Further complicating matters, the serverless free deployment of deep learning models, as commonly done using Amazon Web Services (AWS) and Google Cloud Platform (GCP), is restrictive for larger model size (Han et al., 2015a,b) . Also, latency increases with increasing memory requirement and model size, suggesting memoryintensive device GPU/TPU for faster inference and response. These additional financial costs limit access to BERT models for NLP community works in resource-constrained environments (Strubell et al., 2019) . One intriguing question thus arises: could computationally less-expensive non-BERT models reduce GP/TPU dependency and associated fi-nancial cost without affecting the classification accuracy for textual classification in a low-resource context?", |
|
"cite_spans": [ |
|
{ |
|
"start": 205, |
|
"end": 226, |
|
"text": "(Han et al., 2015a,b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 503, |
|
"end": 526, |
|
"text": "(Strubell et al., 2019)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The multilingual-BERT (mBERT) (Devlin et al., 2018; Pires et al., 2019) and its reduced versions (Abdaoui et al., 2020) , other compressed BERT modifications, such as TinyBERT (Jiao et al., 2019) , MobileBERT (Sun et al., 2020) , are a few viable models proposed for many languages and contexts, including the low-resource ones. Nevertheless, these models require additional fine-tuning and training for target-specific NLP tasks, requiring GPU/TPU support even in a resource-constrained context. Also, size of these models may not be optimal for deployment in low-end devices. So, textual classification in many non-dominant languages remains rudimentary, leaving the communities unequipped against the increasing toxicity and abusive comments on social platforms. Besides, many textual classification tasks do not require a rigorous use of linguistic semantics. So, models that are structured well against the semantics, for instance, the BERT models, may not always be the most optimal choice in NLP tasks less dependent on language semantics. Thus, a viable trade-off between the deployability, scarce resources, and DNN models' accuracy in NLP tasks for low-resource languages and environments needs unraveling.", |
|
"cite_spans": [ |
|
{ |
|
"start": 30, |
|
"end": 51, |
|
"text": "(Devlin et al., 2018;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 52, |
|
"end": 71, |
|
"text": "Pires et al., 2019)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 97, |
|
"end": 119, |
|
"text": "(Abdaoui et al., 2020)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 176, |
|
"end": 195, |
|
"text": "(Jiao et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 209, |
|
"end": 227, |
|
"text": "(Sun et al., 2020)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "As a solution, this study integrates local and global dependencies in sentences by bringing alternative DNN models into a hybrid model structure, namely the fusion chain models. Subsequently, a rigorous architecture search identifies deployable DNN models for low-resource languages, with an improved understanding on a few intriguing questions such as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 How effective are the homogeneous (of similar layers) and heterogeneous (of different layers) form of fusion of one or more DNN layers in textual classification tasks? \u2022 What chain length is optimal to maintain accuracy and reduce the difference between training and validation loss? \u2022 How helpful the self-attention is for fusion models, and what is its optimal position?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We identify that classification accuracy is sensitive to fusion chain length, beyond which classification accuracy deteriorates considerably. Subsequent exploration of the identified fusion models reveals a position-sensitive performance of the self-attention layer for the newly annotated six-class Bengali emotion dataset. \u22ee \u22ef Figure 1 : The word embedding layer acts as the input for the fusion of DNN layers during the NAS process. In the NAS, CNN, LSTM or BiLSTM layer are all considered as the initial layer, however the subsequent layers depended on the type of initial layer chosen finally resulting the three alternative chain-structures. The output from the DNN fusion requires pruning and retraining to generate the deployable models.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 329, |
|
"end": 337, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Previous works attempted alternative deep learning models in NLP tasks for low-resource languages and environments. For instance, using a teacherstudent framework, the BERT distillation with simpler models such as CBoW + FFN and BiLSTM as the student models for the limited availability of labeled data (Wasserblat et al., 2020) . While such models are more deployable in low-end devices, the training still relies on a memory-hungry and costly setup requiring GPU/TPU as well as large unlabelled data for student model training. Alternative approaches consider freezing the BERT-layer outcomes by assessing their roles in the classification process (Grie\u00dfhaber et al., 2020) , requiring GPU/TPU support to train. Also, the sequence of frozen layers may vary across alternative datasets, and hence, the accuracy for a particular set of frozen layers becomes context-dependent. Instead, we investigate if a simple, CPU-trainable CNN and RNN fusion layer stack can achieve textual classification accuracy in NLP tasks where syntactical knowledge is less influential than the keywords or sentiment-based phrases. To find out such alternative non-BERT models, we propose fusionchain architecture comprising one or more CNN and RNN layers and perform a rigorous network architecture search (NAS). Interestingly, the NAS process identifies a few optimal candidate mod-els capable of achieving accuracy comparable to the baseline models, as elaborated further in the subsequent sections.", |
|
"cite_spans": [ |
|
{ |
|
"start": 303, |
|
"end": 328, |
|
"text": "(Wasserblat et al., 2020)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 650, |
|
"end": 675, |
|
"text": "(Grie\u00dfhaber et al., 2020)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The emergence of more advanced deep neural networks capable of learning the word orders and information dependency in sentences replaces the classical machine learning models (Mikolov et al., 2013) in many NLP tasks. Precisely, the neural network models of the form of RNN (LSTM, BiLSTM) or CNN independently, or in combination with a pre-trained word embedding facility such as word2vec (Mikolov et al., 2013) , fasttext (Joulin et al., 2016) , have become the standard alternatives. For instance, Dynamic CNN architecture (DCNN) performs semantic modeling to identify words' short and long-range relations in sentences (Kalchbrenner et al., 2014) . Whereas the CNN-based models are good at local and positioninvariant feature extraction, the LSTM/BiLSTM models explicitly treat sentences as a sequence of words and capture sentence-level (for instance, syntactical (Zhu et al., 2015) ) dependencies. Also, a few alternative attempts integrate local and global textual dependencies using CNN and RNN architectures (known as hybrid models) to improve accuracy of textual classification reviewed thoroughly in (Minaee et al., 2021) . Intriguingly, the hybrid models also appear promising for target-specific sequential analysis, as evident from quantifying the function of specific DNA sequences (Quang and Xie, 2016) . Named Entity Recognition (NER) tasks also employ a hybrid approach by merging BiLSTM and CNN models (Chiu and Nichols, 2016). One of the initial works leveraging the advantages of both CNN and RNN architectures for textual classification is the Convolutional-LSTM (C-LSTM). Precisely, in C-LSTM, n-gram features extracted by a CNN layer are fed to the LSTM layer for learning the intrasentence sequential dependence of words (Zhou et al., 2015) . Authors in also tried a hybrid model with LSTM outputs fed to a CNN layer in document modeling. Alternative models include an attention mechanism with either CNN or RNN architecture to optimize textual classification performance further. For instance, Attention-Based Bidirectional Long Short-Term Memory Networks (Att-BLSTM) capture the position variant semantic information from the sentences (Basiri et al., 2021) . Another study implements an attention-based Convolutional Neural Net-work (ABCNN) to model a pair of sentences (Yin et al., 2016) . However, most of the studied hybrid models are single and two-layer models and did not explore the relevance of a larger stacking depth in textual classification tasks. The optimal fusion length and the order of the layers are still debatable and context-dependent. Besides, these CPUimplementable models facilitate the exploration and deployment of DNN models in low-resourced environments devoid of adequate advanced computing devices and facilities.", |
|
"cite_spans": [ |
|
{ |
|
"start": 175, |
|
"end": 197, |
|
"text": "(Mikolov et al., 2013)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 388, |
|
"end": 410, |
|
"text": "(Mikolov et al., 2013)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 422, |
|
"end": 443, |
|
"text": "(Joulin et al., 2016)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 621, |
|
"end": 648, |
|
"text": "(Kalchbrenner et al., 2014)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 867, |
|
"end": 885, |
|
"text": "(Zhu et al., 2015)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 1109, |
|
"end": 1130, |
|
"text": "(Minaee et al., 2021)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1295, |
|
"end": 1316, |
|
"text": "(Quang and Xie, 2016)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 1744, |
|
"end": 1763, |
|
"text": "(Zhou et al., 2015)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 2161, |
|
"end": 2182, |
|
"text": "(Basiri et al., 2021)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 2296, |
|
"end": 2314, |
|
"text": "(Yin et al., 2016)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "2" |
|
}, |
|
{

"text": "Require: Input and Embedding Layer; Require: N = maximum fusion chain length; Require: RNN = LSTM | BiLSTM; Require: Initial Fusion Layer = CNN | RNN; Ensure: i = RandomNumber(1 to N \u2212 1); Fusion Model = Initial Fusion Layer; for x \u2190 0 to i do: if x is even then Layer \u2190 RNN, else if x is odd then Layer \u2190 CNN; append Layer to the Fusion Model; end for",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Algorithm 1 Fusion chain generation in NAS",

"sec_num": null

},
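
{

"text": "A minimal Python sketch of the chain-generation step in Algorithm 1 is shown below; the helper and constant names (build_fusion_chain, MAX_CHAIN_LENGTH) are illustrative assumptions rather than the authors' released code, and the alternation rule follows the later description that identical layers are never stacked adjacently.\n\nimport random\n\nMAX_CHAIN_LENGTH = 8  # maximum fusion chain length N used in the NAS\n\ndef build_fusion_chain(initial_layer='CNN', rnn_variant='BiLSTM'):\n    # i = RandomNumber(1 to N - 1), as in Algorithm 1\n    i = random.randint(1, MAX_CHAIN_LENGTH - 1)\n    chain = [initial_layer]\n    for _ in range(i):\n        # alternate layer types so identical layers are never stacked adjacently\n        chain.append('CNN' if chain[-1] != 'CNN' else rnn_variant)\n    return chain\n\nprint(build_fusion_chain('CNN'))  # e.g. ['CNN', 'BiLSTM', 'CNN']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Algorithm 1 Fusion chain generation in NAS",

"sec_num": null

},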
|
{ |
|
"text": "Alternative DNN versions possess different strengths in NLP tasks. For instance, CNN (LeCun et al., 1998) models are good at position invariant text classification tasks, whereas the RNN (Elman, 1990) models are more pertinent for sequential processing of the input texts. However, the basic RNN structure frequently suffers from vanishing gradient problems, and the improved RNN variants are-Long Short Term Memory (LSTM) (Hochreiter and Schmidhuber, 1997) and Gated Recurrent Unit (GRU) (Cho et al., 2014) LSTM and GRU, and their ability to remember previous text sequences, they perform well where context-dependencies are crucial (Yin et al., 2017) . Another variant of LSTM, the Bidirectional LSTM (BiLSTM), comprises two LSTMs taking input sequence in forward and reverse directions, exhibits improved performance over single-LSTM in many applications (Huang et al., 2015) . While each deep learning variant has its strength, a legitimate question thus arises-if a fusion model, formed with the DNN variants in a fusion chain, enhance performance of textual classification. An immediate next interesting question thus becomes the optimal chain length of the proposed fusion model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 105, |
|
"text": "CNN (LeCun et al., 1998)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 457, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 489, |
|
"end": 507, |
|
"text": "(Cho et al., 2014)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 634, |
|
"end": 652, |
|
"text": "(Yin et al., 2017)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 858, |
|
"end": 878, |
|
"text": "(Huang et al., 2015)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed fusion chain models", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Textual classification accuracy depends on the context length of a word in a sentence. Fusing multiple DNN layers can increase the context length, but the optimal stacking depth for the DNN layers remains elusive and requires unravelling. The proposed fusion architecture follows a generic structure-it starts with an input layer, followed by an embedding layer that generates an embedding matrix for the given input sentence. A DNN layer is introduced immediately next to the embedding layer. Subsequently, additional DNN layers are added to form a fusion chain model of DNN layers, as schematically shown in Fig. 1 . We performed random search for an the optimal fusion chain length, using several performance objectives, including the higher classification accuracy. The network architecture search (NAS) for an optimal chain length randomly generates even and odd numbers to decide if the next stacking to be done by an LSTM/BiLSTM (for even) or CNN (for odd) layer. The current fusion process does not consider similar DNN layers to be stacked together. The maximum length of fusion chain considered in the NAS is eight, beyond which the classification accu-racy becomes considerably low (data not shown).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 610, |
|
"end": 616, |
|
"text": "Fig. 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Optimal length of the fusion chain", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The NAS process for optimal fusion chain length is summarized in algorithm 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Optimal length of the fusion chain", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We implemented a generalized random search for a set of hyper-parameters in Keras (Chollet et al., 2018) and used it in all the experiments conducted for the analysis of fusion chain models. Interestingly, the random search process needs manual tuning of only one parameter, namely the maximum word length of a sentence that affects the shape of attention and LSTM layers. With this little tuning, the search process as developed in this study remains applicable for other similar textual classification tasks. Each layer in the random search is accompanied by an activation layer, a batch normalization layer, and a dropout layer to minimize the overfitting error. The CNN and RNN layers here also include kernel, bias, and activity regularizers (see the supplemental data for details).", |
|
"cite_spans": [ |
|
{ |
|
"start": 82, |
|
"end": 104, |
|
"text": "(Chollet et al., 2018)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Generalized random search", |
|
"sec_num": "3.3" |
|
}, |
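
{

"text": "As a concrete illustration of the searched architectures, the following Keras sketch assembles one CNN + BiLSTM + CNN candidate in which every DNN layer is followed by activation, batch normalization, and dropout and carries kernel, bias, and activity regularizers; the hyper-parameter values (filters, units, dropout rates, regularizer strengths, vocabulary size, sequence length) are illustrative placeholders, not the configuration found by the random search.\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers, regularizers\n\nVOCAB_SIZE, MAX_LEN, EMB_DIM, NUM_CLASSES = 30000, 60, 300, 6\nreg = dict(kernel_regularizer=regularizers.l2(1e-4),\n           bias_regularizer=regularizers.l2(1e-4),\n           activity_regularizer=regularizers.l2(1e-5))\n\ninputs = layers.Input(shape=(MAX_LEN,))\nx = layers.Embedding(VOCAB_SIZE, EMB_DIM)(inputs)  # fasttext vectors could initialize this layer\nx = layers.Conv1D(128, 3, **reg)(x)\nx = layers.Activation('relu')(x)\nx = layers.BatchNormalization()(x)\nx = layers.Dropout(0.3)(x)\nx = layers.Bidirectional(layers.LSTM(64, return_sequences=True, **reg))(x)\nx = layers.BatchNormalization()(x)\nx = layers.Dropout(0.3)(x)\nx = layers.Conv1D(64, 3, **reg)(x)\nx = layers.Activation('relu')(x)\nx = layers.BatchNormalization()(x)\nx = layers.Dropout(0.3)(x)\nx = layers.GlobalMaxPooling1D()(x)\noutputs = layers.Dense(NUM_CLASSES, activation='softmax')(x)\nmodel = tf.keras.Model(inputs, outputs)\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Generalized random search",

"sec_num": "3.3"

},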
|
{ |
|
"text": "The initial architecture search uses classification accuracy on the test dataset and the loss difference (LD = validation loss -training loss) as the performance metrics. The classification accuracy is defined as (TP + TN)/(TP + TN + FP + FN) with TP, TN, FP, FN are true positive, true negative, false positive, and false negative, respectively. The random search also considers early stopping to control the overfitting error 1 . For a comparison between the baseline models and the CNN + BiLSTM + CNN fusion model, we also considered other metrics, such as the number of parameters (# params), number of floating point operations (# FLOPs). Generally, experiments conducted in this study consider a 80% (training) and 20% (testing) split, and use fasttext (Joulin et al., 2016) as word embedding method.", |
|
"cite_spans": [ |
|
{ |
|
"start": 759, |
|
"end": 780, |
|
"text": "(Joulin et al., 2016)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Metrics used for comparison", |
|
"sec_num": "3.4" |
|
}, |
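
{

"text": "The two search metrics can be computed directly; the sketch below is a minimal illustration with made-up counts and a Keras-style history object, not code from the paper.\n\ndef accuracy(tp, tn, fp, fn):\n    # accuracy = (TP + TN) / (TP + TN + FP + FN)\n    return (tp + tn) / (tp + tn + fp + fn)\n\ndef loss_difference(history):\n    # LD = validation loss - training loss, taken from the final epoch\n    return history.history['val_loss'][-1] - history.history['loss'][-1]\n\nprint(accuracy(tp=40, tn=45, fp=8, fn=7))  # -> 0.85",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Metrics used for comparison",

"sec_num": "3.4"

},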
|
{ |
|
"text": "The study considers datasets across different languages and contexts for the efficacy demonstration of CNN + BiLSTM + CNN fusion. We developed a new Bengali corpus for 6-class emotion classification, as well as used other previously developed Bengali datasets for different NLP tasks-i) Sixclass emotion Bengali dataset (Das et al., 2021) , ii) Hate Speech Bengali dataset (Romim et al., 2021) , and iii) DeepHateExplainer Bengali dataset (Karim et al., 2020) . As examples of non-Bengali languages that relate the low-resource contexts, we consider the Vietnamese (Ho et al., 2019) and Indonesian (Saputri et al., 2018) datasets. The lowresource contexts in English considers an artificial data scarcity for the Stanford Sentiment Treebank 2 (SST-2), (Socher et al., 2013) , emotion classification dataset (Emotion) (Saravia et al., 2018) , and the Internet Movie Database (IMDB) review dataset (Maas et al., 2011) . Finally, the efficacy study of the CNN + BiLSTM + CNN fusion model also considers evaluating the model on the on the General Language Understanding Evaluation the GLUE benchmark (Wang et al., 2018) ; however, we used randomly chosen 250 samples only from each classes to mimic artificial data scarcity.", |
|
"cite_spans": [ |
|
{ |
|
"start": 320, |
|
"end": 338, |
|
"text": "(Das et al., 2021)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 373, |
|
"end": 393, |
|
"text": "(Romim et al., 2021)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 439, |
|
"end": 459, |
|
"text": "(Karim et al., 2020)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 752, |
|
"end": 773, |
|
"text": "(Socher et al., 2013)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 817, |
|
"end": 839, |
|
"text": "(Saravia et al., 2018)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 896, |
|
"end": 915, |
|
"text": "(Maas et al., 2011)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 1096, |
|
"end": 1115, |
|
"text": "(Wang et al., 2018)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "We compare CNN + BiLSTM + CNN and other fusion models as identified against the models pre-viously introduced for resource-constrained environments. A few such models are BERT-base (uncased) (Devlin et al., 2018) , mBERT (Abdaoui et al., 2020) , DistilBERT (Sanh et al., 2019) , and TinyBERT (Jiao et al., 2019) . The chosen models are all BERT related, and a few of which, for instance, DistilBERT, and TinyBERT, come with reduced size and additional fine-tuning for the resource-constrained environments and low-end devices. Besides the GLU benchmark, the mBERT is also used for the textual classification in Bengali.", |
|
"cite_spans": [ |
|
{ |
|
"start": 191, |
|
"end": 212, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 221, |
|
"end": 243, |
|
"text": "(Abdaoui et al., 2020)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 257, |
|
"end": 276, |
|
"text": "(Sanh et al., 2019)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 292, |
|
"end": 311, |
|
"text": "(Jiao et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline models", |
|
"sec_num": "3.6" |
|
}, |
|
{ |
|
"text": "Optimal fusion chain length of fusion models:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The NAS process identifies (see Fig. 2a , b, c) that stacking unlimited DNN layers do not improve performance of the fusion models. Instead, the accuracy and LD of the textual classification deteriorate after the chain length attains an optimal value. Interestingly, chain-structure of length three or fewer layers yield the optimal performance (shown in Fig. 2a, b, c A comparison between the competing models for our newly developed corpus of emotion classification reveals that accuracy deteriorates as the chain length goes beyond three. As it appeared, the accuracy gradually reduces to lower values as the length increases beyond three (shown in Fig. 2a, b, c) . Among the models with a chain length of three or less, a model with a chain length of three is the smallest in LD values among the three allowed chains. A fusion chain that starts with a CNN layer attains the lowest validation loss and is explored further in subsequent analysis by replacing the LSTM layer with a BiLSTM layer.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 32, |
|
"end": 39, |
|
"text": "Fig. 2a", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 355, |
|
"end": 368, |
|
"text": "Fig. 2a, b, c", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 652, |
|
"end": 666, |
|
"text": "Fig. 2a, b, c)", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The GLUE benchmark datasets have different sentence classification tasks. The performance evaluation of CNN + BiLSTM + CNN for all the categories has been done by assuming an artificial data scarcity. Precisely, the artificial scarcity considers only 250 samples from each class. As reported, the proposed CNN + BiLSTM + CNN model frequently outperforms baseline (Grie\u00dfhaber et al., 2020) for 1000 randomly selected samples from SST-2 dataset (Socher et al., 2013) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 363, |
|
"end": 388, |
|
"text": "(Grie\u00dfhaber et al., 2020)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 443, |
|
"end": 464, |
|
"text": "(Socher et al., 2013)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "GLUE benchmark with artificial data scarcity:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Methods Model structure SST-2 BERT no frozen layer 0.78 \u00b1 0.059 layer 1,2,3 frozen 0.80 \u00b1 0.045 layer 9,10,11 frozen 0.84 \u00b1 0.013 Fusion CNN + BiLSTM + CNN 0.80 models and approximates the rest for all different classification tasks available in GLUE benchmark (shown in Table 2 ). For instance, the comparison considers both the SST-2 (Socher et al., 2013) and CoLA (Warstadt et al., 2019) datasets for the single sentence classification task, and the CNN + BiLSTM + CNN model achieves the second-highest accuracy (64% for CoLA) marked as bold black with Distilled BERT accuracy at the top with 65% accuracy. Interestingly, in 4 sentence inference task (dataset RTE (Bentivogli et al., 2009) ), the CNN + BiLSTM + CNN model achieves 81% accuracy exceeding all the other baseline models in the presence of artificial scarcity. In another inference task dataset, QNLI (Rajpurkar et al., 2016) , the fusion model CNN + LSTM + CNN attains the maximum accuracy (74%) with CNN + BiLSTM + CNN and mBERT following it with an accuracy of 73%. The GLUE benchmark also includes three-sentence similarity tasks, and the CNN + BiLSTM+ CNN performed equally well for datasets such as QQP with the highest and immediate next best performances with 71% and 70%, respectively. These experiments on different NLP tasks of the GLUE benchmark demonstrate the ability of CNN + BiLSTM + CNN models to perform better in data scarcity and low-end computational facilities. (Table 1, row: 8, 9, 10, 12, 13) , and in fact, obtains the lowest LD = 0.057 among alternative fusion models. Interestingly, the fusion models performed very closely with the mBERT model, and in fact, outperformed mBERT in lowering the generalization error. For instance, reported mBERT LD = 0.457 (Table 1 , row 16), whereas the CNN + LSTM + CNN model has a low LD = 0.28. The fusion models also perform well across other Bengali text classification datasets. For instance, CNN + BiLSTM + CNN model outperforms mBERT and BanglaBERT implementation for the reported dataset in (Das et al., 2021) . In another dataset of Bengali hate speech detection (Romim et al., 2021 ), the fusion model with self-attention CNN + attn. + BiLSTM + CNN outperforms all the previous DNN and ML implementations, as evident from Table 4 . However, for the dataset in (Karim et al., 2020) , the fusion models fail to match the BERT-variants' performance (see Table 4 ) and surpass only the other DNN models. However, these datasets generally contain few thousands of samples for each classes, and do not necessarily represent data scarcity. Fur- ther exploration of the fusion models for other low-resources languages and contexts reveal the resilience of the identified models. For instance, the IMDB dataset (Maas et al., 2011) and the Emotion dataset (Saravia et al., 2018) were randomly reduced to mimick low-resource contexts. Subsequently, mBERT performance for the reduced datasets (5%, 10% for IMDB and 0.01%, 0.02% for Emotion) was compared against the fusion models' performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 336, |
|
"end": 357, |
|
"text": "(Socher et al., 2013)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 367, |
|
"end": 390, |
|
"text": "(Warstadt et al., 2019)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 667, |
|
"end": 692, |
|
"text": "(Bentivogli et al., 2009)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 867, |
|
"end": 891, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 1460, |
|
"end": 1482, |
|
"text": "row: 8, 9, 10, 12, 13)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2027, |
|
"end": 2045, |
|
"text": "(Das et al., 2021)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 2100, |
|
"end": 2119, |
|
"text": "(Romim et al., 2021", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 2298, |
|
"end": 2318, |
|
"text": "(Karim et al., 2020)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 2740, |
|
"end": 2759, |
|
"text": "(Maas et al., 2011)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 2784, |
|
"end": 2806, |
|
"text": "(Saravia et al., 2018)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 271, |
|
"end": 278, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 1450, |
|
"end": 1459, |
|
"text": "(Table 1,", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 1749, |
|
"end": 1757, |
|
"text": "(Table 1", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 2260, |
|
"end": 2267, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 2389, |
|
"end": 2396, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "GLUE benchmark with artificial data scarcity:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As appeared in Table 4 , fusion models outperformed in all instances; in fact, it performed significantly better for the smaller dataset size considered. Ability of fusion models also remain equally competitive in other English NLP tasks, as demonstrated from classification accuracy comparison (see Table 6 ) between the fusion models and other BERT, DNN based implementation as in (Larson et al., 2019) . Specifically, the fusion models attain a comparable accuracy of 93.62%, 93.28% as opposed to BERT-base's 94.4% reported in (Larson et al., 2019) . Interestingly, the proposed method also perform competitively with the other low-resource fine-tuning, for instance, the freezing of BERT-layer approach as in (Grie\u00dfhaber et al., 2020) . Precisely, the CNN + BiLSTM + CNN model achieves higher accuracy than the BERT-base model reported, and almost equally perform to other tuned BERT-models of frozen layers, for a randomly selected 1000 samples from the SST-2 dataset (see Table 3 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 383, |
|
"end": 404, |
|
"text": "(Larson et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 530, |
|
"end": 551, |
|
"text": "(Larson et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 713, |
|
"end": 738, |
|
"text": "(Grie\u00dfhaber et al., 2020)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 22, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 300, |
|
"end": 307, |
|
"text": "Table 6", |
|
"ref_id": "TABREF8" |
|
}, |
|
{ |
|
"start": 978, |
|
"end": 985, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "GLUE benchmark with artificial data scarcity:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Position-sensitive self-attention role of fusion models in new Bengali corpus: An attention layer may aid in capturing the necessary information for a sequence to sequence model. We also investigated how adding a self-attention layer to the fusion model affects the accuracy of the the newly developed 6-class Bengali emotion corpus. However, an immediate question arises-what the optimal position of the attention layer be within a fusion chain. To answer this, we execute four different experiments, utilizing a self-attention layer in four alternative places: between the embedding and the first CNN layer, between the first CNN layer and the first LSTM layer, between the first LSTM layer and the second CNN layer, and between the second CNN layer and the final output layer. As observed, the model provides an accuracy of 85.79% and a loss difference of 0.205 if the attention layer is placed between the embedding and the first DNN layer. Interestingly, the accuracy increased to 86.68%, and the loss difference reduced to 0.164 if the attention layer posits between the first CNN and first LSTM layer. It was the highest accuracy produced and the lowest loss difference of 0.164 among the alternative self-attention position tried. An attention layer between the LSTM and the second CNN layers generates shape mismatch and stops the model from training. Final experiment that places attention between the second CNN and output layer produces an accuracy of 85.79% with a 0.285 loss difference. These experiments show that for the 6-class Bengali emotion classification, a position-sensitive attention layer makes a difference in classification accuracy and reduces overfitting error. The accuracy improvement because of the self-attention layer still holds if an artificial scarcity for the new corpus is produced by considering 25%, 50%, 75% of the complete dataset, as shown in Fig. 3c . However, further analysis with other datasets and languages would clarify whether self-attention layer roles, as observed here in Fig. 3 , are context-dependent or generic, and are beyond the scope of this study.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1887, |
|
"end": 1894, |
|
"text": "Fig. 3c", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 2027, |
|
"end": 2033, |
|
"text": "Fig. 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "GLUE benchmark with artificial data scarcity:", |
|
"sec_num": null |
|
}, |
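
{

"text": "For reference, the sketch below places a self-attention layer at the best-performing position reported above, between the first CNN layer and the BiLSTM layer; Keras MultiHeadAttention is used as a stand-in self-attention implementation because the paper does not name the exact attention layer, and all layer sizes are illustrative.\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers\n\nVOCAB_SIZE, MAX_LEN, NUM_CLASSES = 30000, 60, 6\n\ninputs = layers.Input(shape=(MAX_LEN,))\nx = layers.Embedding(VOCAB_SIZE, 300)(inputs)\nx = layers.Conv1D(128, 3, activation='relu')(x)\nx = layers.MultiHeadAttention(num_heads=2, key_dim=32)(x, x)  # self-attention: query = value = x\nx = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(x)\nx = layers.Conv1D(64, 3, activation='relu')(x)\nx = layers.GlobalMaxPooling1D()(x)\noutputs = layers.Dense(NUM_CLASSES, activation='softmax')(x)\nmodel = tf.keras.Model(inputs, outputs)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "GLUE benchmark with artificial data scarcity:",

"sec_num": null

},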
|
{ |
|
"text": "Fusion models robustly perform in data scarcity: One intriguing query on the fusion model would be to assess its ability to perform in data scarcity. An experiment designed to compare how the proposed fusion models and mBERT perform in data scarcity randomly segregates the Bengali 6-class emotion dataset into 25%, 50%, 75%, and 100% categories. The artificial data scarcity is analogous to the lowresource contexts, mimicking the lack of sufficient annotated data common for many low-resource languages. The comparison considers CNN + attn. + LSTM + CNN and CNN + attn. + BiLSTM + CNN and compare with mBERT. The fusion models perform better for the 25% case and match or surpass the mBERT performance in other scarce data cases (shown in Fig. 3a) . Besides, the fusion models decrease LD in all the artificially produced scarce cases studied. A close comparison (Fig. 3b) shows that the LD of mBERT (blue line) remains way above the LDs reported by the fusion models. For the 25% case, the LD value is doubled for mBERT, indicating an advantage of fusion models in low-resource contexts.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 741, |
|
"end": 749, |
|
"text": "Fig. 3a)", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 865, |
|
"end": 874, |
|
"text": "(Fig. 3b)", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "GLUE benchmark with artificial data scarcity:", |
|
"sec_num": null |
|
}, |
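
{

"text": "The artificial data-scarcity setting used above can be reproduced with a simple stratified subsample; the function below is an illustrative sketch (names and the fixed seed are assumptions, not the authors' script).\n\nfrom sklearn.model_selection import train_test_split\n\ndef subsample(texts, labels, fraction, seed=42):\n    # keep a stratified fraction (e.g. 0.25, 0.5, 0.75) of the training data\n    if fraction >= 1.0:\n        return texts, labels\n    kept_texts, _, kept_labels, _ = train_test_split(\n        texts, labels, train_size=fraction, stratify=labels, random_state=seed)\n    return kept_texts, kept_labels",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "GLUE benchmark with artificial data scarcity:",

"sec_num": null

},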
|
{ |
|
"text": "Fusion models are computationally less expensive: Along with other factors, the computational cost of an NLP model also depends on its size and the FLOPs count. A comparison of these metrics between the baseline models and the fusion models exhibits that fusion models are more advantageous for a small number of annotated samples (shown in Table 2 ). For instance, the fusion model CNN + BiLSTM/LSTM + CNN roughly does 100 times fewer FLOPs. Also, for most GLUE datasets, the fusion model outperforms the TinyBERT in the presence of data scarcity. Some of the BERT models demonstrate equal accuracy for some GLUE benchmark datasets. However, these models are computationally extensive because of their high #Params and #FLOPs. Although costs related to FLOPs are decreasing, it requires hardware upgradation from GPU to TPU. Whereas the GPU itself is a computationally extensive device in lowresource environments, let alone the use of TPU. So, the low #FLOPs requirement in CNN + BiL-STM + CNN provides an edge over the memoryhungry BERT models in low-resource contexts. Besides, the possibility of a low computational cost of the CNN + BiLSTM + CNN model can also be predicted by comparing the average time per epoch calculation, an ensemble representation of all the individual times per epoch for alternative GLUE benchmark data considered. The average time per epoch over GLUE benchmark data is about 3 seconds for the CNN + BiLSTM + CNN model. In contrast, the same becomes as high as 1000 seconds or more for the different baseline models implemented in the experiment. Besides, pruning and retraining reduce the fusion models further and increase their deployability in low-end devices and web platforms. Precisely, the CNN + LSTM + CNN model achieves almost a 5\u00d7 reduction in size from 34.81MB to a model size of 6.60MB, as in Table 7 . The TinyBERT model may be as small as about 16MB, but it is pre-trained in the English language requiring further tuning in other languages for better accuracy. For instance, in experiments on a Bengali 6-class emotion dataset, the TinyBERT, pre-trained in English, achieves an accuracy of 33.42%. This accuracy drops to 24% if annotated data is reduced to 25%. So, Tiny-BERT requires training of the pre-trained model and suffers because of data scarcity. Whereas, for the proposed fusion model CNN + BiLST/LSTM + CNN, the initial accuracy (86.61) is almost retrievable (86.36) upon pruning and retraining (data shown in Table 7 ). Also, the model size reduces to around 5MB after pruning compared to the 16MB of the pre-trained TinyBERT.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 341, |
|
"end": 348, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 1837, |
|
"end": 1844, |
|
"text": "Table 7", |
|
"ref_id": "TABREF9" |
|
}, |
|
{ |
|
"start": 2469, |
|
"end": 2476, |
|
"text": "Table 7", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "GLUE benchmark with artificial data scarcity:", |
|
"sec_num": null |
|
}, |
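
{

"text": "The pruning-and-retraining step can be sketched as follows; the paper does not name a specific pruning tool, so the TensorFlow Model Optimization toolkit is assumed here purely for illustration, and the sparsity schedule values are placeholders.\n\nimport tensorflow_model_optimization as tfmot\n\ndef prune_and_retrain(model, x_train, y_train, final_sparsity=0.8, epochs=5):\n    # wrap the trained fusion model with magnitude pruning, retrain, then strip the pruning wrappers\n    schedule = tfmot.sparsity.keras.PolynomialDecay(\n        initial_sparsity=0.0, final_sparsity=final_sparsity, begin_step=0, end_step=2000)\n    pruned = tfmot.sparsity.keras.prune_low_magnitude(model, pruning_schedule=schedule)\n    pruned.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n    pruned.fit(x_train, y_train, epochs=epochs,\n               callbacks=[tfmot.sparsity.keras.UpdatePruningStep()])\n    return tfmot.sparsity.keras.strip_pruning(pruned)  # compact model for deployment",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "GLUE benchmark with artificial data scarcity:",

"sec_num": null

},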
|
{ |
|
"text": "Generally, the RNN and CNN models are computationally less intensive but compromise accuracy in textual classification. In contrast, BERT-variants and other advanced transformer-based implementations demonstrate improved performance but are computationally intensive. This study analyzed a few low-resource textual classification contexts to identify CPU-trainable and comparatively smaller deployable DNN models sufficiently accurate in textual classification tasks. These identified lessintensive DNN fusion models attained accuracy that frequently surpasses BERT performance in low-resource contexts. Interestingly, the efficacy of CNN + BiLSTM + CNN remains equally applicable in other alternative languages, tasks. This study also demonstrates that the fusion models are all CPU-trainable, making them easily accessible for communities suffering from an infrastructural deficiency. Moreover, low-resource languages always suffer from smaller corpus, infrequent research initiatives, and a lack of intensive computational facilities. These hinder the potential deployment of DNN models to monitor toxic and abusive elements in the ever-increasing social media platforms. Because of its relatively small size and acceptable classification accuracy, the fusion models are a suitable alternative to computationally intensive BERT variants for deployment in low-end devices.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Further improvement of the fusion models may consider a multichannel word-embedding technique, equipping the models better for out of vocabulary words now common in the era of social media platforms, POS-tagging to exploit the key phrases of the sentiment better. Such extensions, alone or in a cohort, can improve the fusion models to tackle the long-term dependencies analysis by forming phrases from the dependent and related words in longer sentences. Overall, this work provides sufficiently accurate, computationally less intensive CPU-trainable DNN models for NLP tasks for low-resource languages and may serve as the blueprint to identify the deployable NLP models for low-resource languages and environments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Data and codes are available here in this link", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We express our gratitude to Dr. Shafin Rahman, Department of Electrical and Computer Engineering at the North South University, Bangladesh and all the anonymous reviewers for their sincere comments, suggestions, and criticisms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Load what you need: Smaller versions of multilingual bert", |
|
"authors": [ |
|
{ |
|
"first": "Amine", |
|
"middle": [], |
|
"last": "Abdaoui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Camille", |
|
"middle": [], |
|
"last": "Pradel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gr\u00e9goire", |
|
"middle": [], |
|
"last": "Sigel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2010.05609" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amine Abdaoui, Camille Pradel, and Gr\u00e9goire Sigel. 2020. Load what you need: Smaller versions of multilingual bert. arXiv preprint arXiv:2010.05609.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Abcdm: An attention-based bidirectional cnn-rnn deep model for sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Shahla", |
|
"middle": [], |
|
"last": "Mohammad Ehsan Basiri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nemati", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Future Generation Computer Systems", |
|
"volume": "115", |
|
"issue": "", |
|
"pages": "279--294", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad Ehsan Basiri, Shahla Nemati, Moloud Ab- dar, Erik Cambria, and U Rajendra Acharya. 2021. Abcdm: An attention-based bidirectional cnn-rnn deep model for sentiment analysis. Future Genera- tion Computer Systems, 115:279-294.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "The fifth pascal recognizing textual entailment challenge", |
|
"authors": [ |
|
{ |
|
"first": "Luisa", |
|
"middle": [], |
|
"last": "Bentivogli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danilo", |
|
"middle": [], |
|
"last": "Giampiccolo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luisa Bentivogli, Peter Clark, Ido Dagan, and Danilo Giampiccolo. 2009. The fifth pascal recognizing textual entailment challenge. In TAC.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Language models are few-shot learners", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Tom B Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Mann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melanie", |
|
"middle": [], |
|
"last": "Ryder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jared", |
|
"middle": [], |
|
"last": "Subbiah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prafulla", |
|
"middle": [], |
|
"last": "Kaplan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arvind", |
|
"middle": [], |
|
"last": "Dhariwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Neelakantan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Girish", |
|
"middle": [], |
|
"last": "Shyam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanda", |
|
"middle": [], |
|
"last": "Sastry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Askell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2005.14165" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. arXiv preprint arXiv:2005.14165.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Quora question pairs", |
|
"authors": [ |
|
{ |
|
"first": "Zihan", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongbo", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoji", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leqi", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--7", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zihan Chen, Hongbo Zhang, Xiaoji Zhang, and Leqi Zhao. 2018. Quora question pairs. University of Waterloo, pages 1-7.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Named entity recognition with bidirectional lstm-cnns", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Jason", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Chiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nichols", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "357--370", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason PC Chiu and Eric Nichols. 2016. Named entity recognition with bidirectional lstm-cnns. Transac- tions of the Association for Computational Linguis- tics, 4:357-370.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "On the properties of neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bart", |
|
"middle": [], |
|
"last": "Van Merri\u00ebnboer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1409.1259" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kyunghyun Cho, Bart Van Merri\u00ebnboer, Dzmitry Bah- danau, and Yoshua Bengio. 2014. On the properties of neural machine translation: Encoder-decoder ap- proaches. arXiv preprint arXiv:1409.1259.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Keras: The python deep learning library", |
|
"authors": [ |
|
{ |
|
"first": "Fran\u00e7ois", |
|
"middle": [], |
|
"last": "Chollet", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Astrophysics source code library", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fran\u00e7ois Chollet et al. 2018. Keras: The python deep learning library. Astrophysics source code library, pages ascl-1806.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Emotion classification in a resource constrained language using transformerbased approach", |
|
"authors": [ |
|
{ |
|
"first": "Avishek", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omar", |
|
"middle": [], |
|
"last": "Sharif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammed", |
|
"middle": [ |
|
"Moshiul" |
|
], |
|
"last": "Hoque", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sarker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2104.08613" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Avishek Das, Omar Sharif, Mohammed Moshiul Hoque, and Iqbal H Sarker. 2021. Emotion classification in a resource constrained language using transformer- based approach. arXiv preprint arXiv:2104.08613.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.04805" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Finding structure in time", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Jeffrey L Elman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "Cognitive science", |
|
"volume": "14", |
|
"issue": "", |
|
"pages": "179--211", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey L Elman. 1990. Finding structure in time. Cog- nitive science, 14(2):179-211.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Fine-tuning bert for low-resource natural language understanding via active learning", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Grie\u00dfhaber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Maucher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ngoc", |
|
"middle": [ |
|
"Thang" |
|
], |
|
"last": "Vu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2012.02462" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Grie\u00dfhaber, Johannes Maucher, and Ngoc Thang Vu. 2020. Fine-tuning bert for low-resource natural language understanding via active learning. arXiv preprint arXiv:2012.02462.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding", |
|
"authors": [ |
|
{ |
|
"first": "Song", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huizi", |
|
"middle": [], |
|
"last": "Mao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Dally", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1510.00149" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Song Han, Huizi Mao, and William J Dally. 2015a. Deep compression: Compressing deep neural net- works with pruning, trained quantization and huff- man coding. arXiv preprint arXiv:1510.00149.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Learning both weights and connections for efficient neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Song", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Pool", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Tran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Dally", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1506.02626" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Song Han, Jeff Pool, John Tran, and William J Dally. 2015b. Learning both weights and connec- tions for efficient neural networks. arXiv preprint arXiv:1506.02626.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Emotion recognition for vietnamese social media text", |
|
"authors": [ |
|
{ |
|
"first": "Anh", |
|
"middle": [], |
|
"last": "Vong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Duong", |
|
"middle": [], |
|
"last": "Ho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danh", |
|
"middle": [], |
|
"last": "Huynh-Cong Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linh", |
|
"middle": [], |
|
"last": "Hoang Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Duc-Vu", |
|
"middle": [], |
|
"last": "Thi-Van Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kiet", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ngan", |
|
"middle": [], |
|
"last": "Van Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "-Thuy", |
|
"middle": [], |
|
"last": "Luu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference of the Pacific Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "319--333", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vong Anh Ho, Duong Huynh-Cong Nguyen, Danh Hoang Nguyen, Linh Thi-Van Pham, Duc-Vu Nguyen, Kiet Van Nguyen, and Ngan Luu-Thuy Nguyen. 2019. Emotion recognition for vietnamese social media text. In International Con- ference of the Pacific Association for Computational Linguistics, pages 319-333. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735- 1780.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Bidirectional lstm-crf models for sequence tagging", |
|
"authors": [ |
|
{ |
|
"first": "Zhiheng", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1508.01991" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhiheng Huang, Wei Xu, and Kai Yu. 2015. Bidirec- tional lstm-crf models for sequence tagging. arXiv preprint arXiv:1508.01991.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Tinybert: Distilling bert for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Xiaoqi", |
|
"middle": [], |
|
"last": "Jiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yichun", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lifeng", |
|
"middle": [], |
|
"last": "Shang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linlin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1909.10351" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaoqi Jiao, Yichun Yin, Lifeng Shang, Xin Jiang, Xiao Chen, Linlin Li, Fang Wang, and Qun Liu. 2019. Tinybert: Distilling bert for natural language under- standing. arXiv preprint arXiv:1909.10351.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "The state and fate of linguistic diversity and inclusion in the nlp world", |
|
"authors": [ |
|
{ |
|
"first": "Pratik", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastin", |
|
"middle": [], |
|
"last": "Santy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amar", |
|
"middle": [], |
|
"last": "Budhiraja", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kalika", |
|
"middle": [], |
|
"last": "Bali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Monojit", |
|
"middle": [], |
|
"last": "Choudhury", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.09095" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pratik Joshi, Sebastin Santy, Amar Budhiraja, Kalika Bali, and Monojit Choudhury. 2020. The state and fate of linguistic diversity and inclusion in the nlp world. arXiv preprint arXiv:2004.09095.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Bag of tricks for efficient text classification", |
|
"authors": [ |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1607.01759" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Armand Joulin, Edouard Grave, Piotr Bojanowski, and Tomas Mikolov. 2016. Bag of tricks for efficient text classification. arXiv preprint arXiv:1607.01759.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A convolutional neural network for modelling sentences", |
|
"authors": [ |
|
{ |
|
"first": "Nal", |
|
"middle": [], |
|
"last": "Kalchbrenner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Grefenstette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1404.2188" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nal Kalchbrenner, Edward Grefenstette, and Phil Blun- som. 2014. A convolutional neural network for mod- elling sentences. arXiv preprint arXiv:1404.2188.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Deephateexplainer: Explainable hate speech detection in under-resourced bengali language", |
|
"authors": [ |
|
{ |
|
"first": "Md", |
|
"middle": [], |
|
"last": "Karim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bharathi", |
|
"middle": [], |
|
"last": "Sumon Kanti Dey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Raja Chakravarthi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2012.14353" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Md Karim, Sumon Kanti Dey, Bharathi Raja Chakravarthi, et al. 2020. Deephateexplainer: Ex- plainable hate speech detection in under-resourced bengali language. arXiv preprint arXiv:2012.14353.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "An evaluation dataset for intent classification and out-of-scope prediction", |
|
"authors": [ |
|
{ |
|
"first": "Stefan", |
|
"middle": [], |
|
"last": "Larson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anish", |
|
"middle": [], |
|
"last": "Mahendran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Joseph", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Peper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Clarke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Parker", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Kummerfeld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Leach", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lingjia", |
|
"middle": [], |
|
"last": "Laurenzano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1909.02027" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stefan Larson, Anish Mahendran, Joseph J Peper, Christopher Clarke, Andrew Lee, Parker Hill, Jonathan K Kummerfeld, Kevin Leach, Michael A Laurenzano, Lingjia Tang, et al. 2019. An evalua- tion dataset for intent classification and out-of-scope prediction. arXiv preprint arXiv:1909.02027.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Gradient-based learning applied to document recognition", |
|
"authors": [ |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L\u00e9on", |
|
"middle": [], |
|
"last": "Bottou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Haffner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of the IEEE", |
|
"volume": "86", |
|
"issue": "11", |
|
"pages": "2278--2324", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yann LeCun, L\u00e9on Bottou, Yoshua Bengio, and Patrick Haffner. 1998. Gradient-based learning applied to document recognition. Proceedings of the IEEE, 86(11):2278-2324.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Learning word vectors for sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Maas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Daly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Andrew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th annual meeting of the association for computational linguistics: Human language technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "142--150", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew Maas, Raymond E Daly, Peter T Pham, Dan Huang, Andrew Y Ng, and Christopher Potts. 2011. Learning word vectors for sentiment analysis. In Proceedings of the 49th annual meeting of the associ- ation for computational linguistics: Human language technologies, pages 142-150.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Distributed representations of words and phrases and their compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3111--3119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013. Distributed representa- tions of words and phrases and their compositionality. In Advances in neural information processing sys- tems, pages 3111-3119.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Deep learning-based text classification: A comprehensive review", |
|
"authors": [ |
|
{ |
|
"first": "Shervin", |
|
"middle": [], |
|
"last": "Minaee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nal", |
|
"middle": [], |
|
"last": "Kalchbrenner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Narjes", |
|
"middle": [], |
|
"last": "Nikzad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meysam", |
|
"middle": [], |
|
"last": "Chenaghlu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "ACM Computing Surveys (CSUR)", |
|
"volume": "54", |
|
"issue": "3", |
|
"pages": "1--40", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shervin Minaee, Nal Kalchbrenner, Erik Cambria, Nar- jes Nikzad, Meysam Chenaghlu, and Jianfeng Gao. 2021. Deep learning-based text classification: A comprehensive review. ACM Computing Surveys (CSUR), 54(3):1-40.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "How multilingual is multilingual bert? arXiv preprint", |
|
"authors": [ |
|
{ |
|
"first": "Telmo", |
|
"middle": [], |
|
"last": "Pires", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eva", |
|
"middle": [], |
|
"last": "Schlinger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Garrette", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.01502" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Telmo Pires, Eva Schlinger, and Dan Garrette. 2019. How multilingual is multilingual bert? arXiv preprint arXiv:1906.01502.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Danq: a hybrid convolutional and recurrent deep neural network for quantifying the function of dna sequences", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Quang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaohui", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Nucleic acids research", |
|
"volume": "44", |
|
"issue": "11", |
|
"pages": "107--107", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Quang and Xiaohui Xie. 2016. Danq: a hybrid convolutional and recurrent deep neural network for quantifying the function of dna sequences. Nucleic acids research, 44(11):e107-e107.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Squad: 100,000+ questions for machine comprehension of text", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Lopyrev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1606.05250" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. Squad: 100,000+ questions for machine comprehension of text. arXiv preprint arXiv:1606.05250.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Hate speech detection in the bengali language: A dataset and its baseline evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Nauros", |
|
"middle": [], |
|
"last": "Romim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mosahed", |
|
"middle": [], |
|
"last": "Ahmed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hriteshwar", |
|
"middle": [], |
|
"last": "Talukder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Md Saiful", |
|
"middle": [], |
|
"last": "Islam", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of International Joint Conference on Advances in Computational Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "457--468", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nauros Romim, Mosahed Ahmed, Hriteshwar Talukder, and Md Saiful Islam. 2021. Hate speech detection in the bengali language: A dataset and its baseline evaluation. In Proceedings of International Joint Conference on Advances in Computational Intelli- gence, pages 457-468. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter", |
|
"authors": [ |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.01108" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Emotion classification on indonesian twitter dataset", |
|
"authors": [ |
|
{ |
|
"first": "Rahmad", |
|
"middle": [], |
|
"last": "Mei Silviana Saputri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirna", |
|
"middle": [], |
|
"last": "Mahendra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Adriani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "2018 International Conference on Asian Language Processing (IALP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "90--95", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mei Silviana Saputri, Rahmad Mahendra, and Mirna Adriani. 2018. Emotion classification on indonesian twitter dataset. In 2018 International Conference on Asian Language Processing (IALP), pages 90-95. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Carer: Contextualized affect representations for emotion recognition", |
|
"authors": [ |
|
{ |
|
"first": "Elvis", |
|
"middle": [], |
|
"last": "Saravia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hsien-Chi Toby", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yen-Hao", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junlin", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi-Shin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3687--3697", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elvis Saravia, Hsien-Chi Toby Liu, Yen-Hao Huang, Junlin Wu, and Yi-Shin Chen. 2018. Carer: Con- textualized affect representations for emotion recog- nition. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3687-3697.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Recursive deep models for semantic compositionality over a sentiment treebank", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Perelygin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Chuang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Christopher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Andrew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 conference on empirical methods in natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1631--1642", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D Manning, Andrew Y Ng, and Christopher Potts. 2013. Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 conference on empiri- cal methods in natural language processing, pages 1631-1642.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Energy and policy considerations for deep learning in nlp", |
|
"authors": [ |
|
{ |
|
"first": "Emma", |
|
"middle": [], |
|
"last": "Strubell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ananya", |
|
"middle": [], |
|
"last": "Ganesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1906.02243" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emma Strubell, Ananya Ganesh, and Andrew McCal- lum. 2019. Energy and policy considerations for deep learning in nlp. arXiv preprint arXiv:1906.02243.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Mobilebert: a compact task-agnostic bert for resource-limited devices", |
|
"authors": [ |
|
{ |
|
"first": "Zhiqing", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongkun", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodan", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Renjie", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Denny", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.02984" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou. 2020. Mobilebert: a compact task-agnostic bert for resource-limited de- vices. arXiv preprint arXiv:2004.02984.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Glue: A multi-task benchmark and analysis platform for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel R", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1804.07461" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. 2018. Glue: A multi-task benchmark and analysis platform for natural language understanding. arXiv preprint arXiv:1804.07461.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Neural network acceptability judgments", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Warstadt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "625--641", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Warstadt, Amanpreet Singh, and Samuel R Bow- man. 2019. Neural network acceptability judgments. Transactions of the Association for Computational Linguistics, 7:625-641.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Exploring the boundaries of low-resource bert distillation", |
|
"authors": [ |
|
{ |
|
"first": "Moshe", |
|
"middle": [], |
|
"last": "Wasserblat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Pereg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Izsak", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of SustaiNLP: Workshop on Simple and Efficient Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "35--40", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Moshe Wasserblat, Oren Pereg, and Peter Izsak. 2020. Exploring the boundaries of low-resource bert distil- lation. In Proceedings of SustaiNLP: Workshop on Simple and Efficient Natural Language Processing, pages 35-40.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Comparative study of cnn and rnn for natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Wenpeng", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katharina", |
|
"middle": [], |
|
"last": "Kann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mo", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1702.01923" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenpeng Yin, Katharina Kann, Mo Yu, and Hinrich Sch\u00fctze. 2017. Comparative study of cnn and rnn for natural language processing. arXiv preprint arXiv:1702.01923.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Abcnn: Attention-based convolutional neural network for modeling sentence pairs", |
|
"authors": [ |
|
{ |
|
"first": "Wenpeng", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Xiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "259--272", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenpeng Yin, Hinrich Sch\u00fctze, Bing Xiang, and Bowen Zhou. 2016. Abcnn: Attention-based convolutional neural network for modeling sentence pairs. Transac- tions of the Association for Computational Linguis- tics, 4:259-272.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Dependency sensitive convolutional neural networks for modeling sentences and documents", |
|
"authors": [ |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Honglak", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dragomir", |
|
"middle": [], |
|
"last": "Radev", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1611.02361" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rui Zhang, Honglak Lee, and Dragomir Radev. 2016. Dependency sensitive convolutional neural networks for modeling sentences and documents. arXiv preprint arXiv:1611.02361.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "A c-lstm neural network for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Chunting", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chonglin", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francis", |
|
"middle": [], |
|
"last": "Lau", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1511.08630" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chunting Zhou, Chonglin Sun, Zhiyuan Liu, and Fran- cis Lau. 2015. A c-lstm neural network for text clas- sification. arXiv preprint arXiv:1511.08630.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Long short-term memory over recursive structures", |
|
"authors": [ |
|
{ |
|
"first": "Xiaodan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Parinaz", |
|
"middle": [], |
|
"last": "Sobihani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongyu", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1604--1612", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaodan Zhu, Parinaz Sobihani, and Hongyu Guo. 2015. Long short-term memory over recursive structures. In International Conference on Machine Learning, pages 1604-1612. PMLR.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": ". Many NLP tasks such as sentiment analysis, emotion detection, have striking similarity, as the attributes are largely keywords dependent. Because of the sequential structures of : a, b, c) Optimal chain length for the three alternative fusion chain models studied extensively as part of the NAS.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"text": ") irrespective of the fact whether fusion models start with any of the CNN, LSTM, BiLSTM layers. The NAS considers three fusion chains: \u2022 CNN + LSTM + CNN + LSTM + . . . + CNN \u2022 LSTM + CNN + LSTM + CNN + . . . + LSTM \u2022 BiLSTM + CNN + BiLSTM + . . . + BiLSTM", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"text": "Performance comparison between the fusion (CNN + attn. + LSTM/BiLSTM + CNN) and mBERT model on 25%, 50%, 75% and 100% of a new 6-class Bengali emotion dataset. The dataset was split randomly to produce an artificial scarcity. InFig. 3a-b, the green (square), red (circle), blue (asterisks), and yellow (diamond) lines represent CNN + attn. + LSTM + CNN (Fusion: LSTM), CNN + attn. + BiLSTM + CNN (Fusion: BiLSTM), mBERT and BanglaBERT models' performance, respectively. a) Accuracy comparison of all the four models for varying data size. b) The loss difference (LD) progression for different data sizes-the smaller the loss, the better the performance is. c) An inclusion of a self-attention layer improves fusion models' performance (blue lines).", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"content": "<table><tr><td colspan=\"3\">Model structure Classical Machine Learning Models Accuracy (T) 1. SVM 41.93</td><td>LD NA</td></tr><tr><td>2. KNN</td><td/><td>72.79</td><td>NA</td></tr><tr><td>3. Random Forest</td><td/><td>81.43</td><td>NA</td></tr><tr><td colspan=\"2\">Fusion models 4. CNN + CNN + CNN</td><td>85.62</td><td>0.491</td></tr><tr><td>5. LSTM + LSTM</td><td/><td>85.43</td><td>0.541</td></tr><tr><td colspan=\"2\">6. CNN + LSTM + CNN</td><td>86.61</td><td>0.283</td></tr><tr><td colspan=\"2\">7. LSTM + CNN + LSTM</td><td>85.74</td><td>0.483</td></tr><tr><td>8. BiLSTM + BiLSTM</td><td/><td>86.54</td><td>0.126</td></tr><tr><td colspan=\"2\">9. BiLSTM + CNN + CNN</td><td>85.25</td><td>0.143</td></tr><tr><td colspan=\"2\">10. CNN + BiLSTM + CNN</td><td>84.54</td><td>-0.058</td></tr><tr><td>11. BiLSTM + LSTM</td><td/><td>85.14</td><td>0.206</td></tr><tr><td colspan=\"2\">12. BiLSTM + LSTM + BiLSTM</td><td>85.49</td><td>0.057</td></tr><tr><td colspan=\"2\">13. BiLSTM + CNN + BiLSTM</td><td>85.86</td><td>-0.005</td></tr><tr><td colspan=\"3\">Fusion models + attention 14. CNN + attn. + BiLSTM + CNN 86.83</td><td/></tr><tr><td colspan=\"2\">15. CNN + attn. + LSTM + CNN</td><td>86.91</td><td/></tr><tr><td>16. mBERT</td><td>BERT models</td><td>86.62</td><td>0.457</td></tr><tr><td>17. Bangla BERT</td><td/><td>86.17</td><td>0.177</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Performance of alternative fusion models for the new 6-class emotion Bengali dataset.", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"content": "<table><tr><td>Model</td><td colspan=\"7\"># Params # FLOPs CoLA WNLI QQP QNLI RTE</td></tr><tr><td>BERT-base</td><td>109M</td><td>22.04B</td><td>63</td><td>46</td><td>61</td><td>70</td><td>75</td></tr><tr><td>mBERT DistilBERT TinyBERT</td><td>110M 52.2M 14.5M</td><td>22.04B 22.04B 0.119B</td><td>64 65 48</td><td>49 47 39</td><td>66 65 49</td><td>73 74 53</td><td>71 76 57</td></tr><tr><td colspan=\"2\">CNN + BiLSTM + CNN 0.4M CNN + LSTM + CNN 0.37M CNN + BiLSTM 0.38M</td><td>1.50M 1.43M 1.47M</td><td>64 60 62</td><td>65 64 62</td><td>71 69 70</td><td>73 74 71</td><td>81 81 79</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Efficacy study of CNN + BiLSTM + CNN fusion model considers GLUE benchmark datasets. Here, M and B stand for Millions and Billions, respectively. Only 250 samples were collected randomly to mimic a low-resource setup artificially for each class, among which 80% and 20% were for training and testing purposes. Here, accuracy colored in red is the highest, whereas the bold black is the next highest accuracy attained. The baseline models are all pre-trained versions available in https://huggingface.co/models", |
|
"num": null |
|
}, |
|
"TABREF4": { |
|
"html": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"text": "Comparison between CNN + BiLSTM + CNN model and BERT with frozen layers as in", |
|
"num": null |
|
}, |
|
"TABREF6": { |
|
"html": null, |
|
"content": "<table><tr><td colspan=\"3\">Group Model structure</td><td>Accuracy (%)</td><td>Ref.</td></tr><tr><td>DNN</td><td colspan=\"4\">Six-class emotion Bengali dataset (Das et al., 2021) CNN + A + LSTM + CNN 64.26 Ours CNN + A + BiLSTM + CNN 65.24 CNN + A + GRU + CNN 64.73 CNN + BiLSTM 55.68 (2021)</td></tr><tr><td/><td/><td>BiLSTM</td><td>58.08</td><td>(2021)</td></tr><tr><td/><td/><td>mBERT</td><td>64.63</td></tr><tr><td colspan=\"2\">BERT</td><td>Bangla-BERT</td><td>62.24</td><td>(2021)</td></tr><tr><td/><td/><td>XLM-R</td><td>69.61</td></tr><tr><td>ML</td><td/><td colspan=\"3\">Hate Speech Bengali dataset (Romim et al., 2021) SVM 87.80 (2021)</td></tr><tr><td/><td/><td>fasttext + LSTM</td><td>84.30</td></tr><tr><td>DNN</td><td/><td>fasttext + BiLSTM word2vec + LSTM</td><td>86.55 83.85</td><td>(2021)</td></tr><tr><td>DNN</td><td/><td colspan=\"2\">CNN + A + BiLSTM + CNN DeepHateExplainer (Karim et al., 2020) 88.65 LSTM 75 BiLSTM 78</td><td>Ours (2020)</td></tr><tr><td/><td/><td>CNN + A + BiLSTM + CNN</td><td>83.56</td><td>Ours</td></tr><tr><td/><td/><td>Bangla-BERT</td><td>86</td></tr><tr><td colspan=\"2\">BERT</td><td>mBERT-cased</td><td>85</td><td>(2020)</td></tr><tr><td/><td/><td>XML-Roberta</td><td>87</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Performance comparison between fusion models and alternative DNN and BERT models for various NLP-tasks in Bengali language. Here, A \u2261 selfattention layer.", |
|
"num": null |
|
}, |
|
"TABREF7": { |
|
"html": null, |
|
"content": "<table><tr><td>Model</td><td colspan=\"5\">Average Time per Epoch (second) CoLA WNLI QQP QNLI RTE</td></tr><tr><td>BERT-base</td><td>1286</td><td>1321</td><td>895</td><td colspan=\"2\">1421 783</td></tr><tr><td>mBERT</td><td>2540</td><td>1721</td><td colspan=\"3\">1296 2671 1026</td></tr><tr><td>DistilBERT</td><td>783</td><td>982</td><td>662</td><td>941</td><td>386</td></tr><tr><td>TinyBERT</td><td>19.6</td><td>24.4</td><td colspan=\"2\">19.8 24.4</td><td>18.8</td></tr><tr><td>CNN + BiLSTM + CNN</td><td>1.92</td><td>3.36</td><td colspan=\"2\">3.33 3.36</td><td>2.21</td></tr><tr><td>CNN + LSTM + CNN</td><td>1.25</td><td>3.26</td><td colspan=\"2\">2.25 3.18</td><td>1.11</td></tr><tr><td colspan=\"2\">CNN + BiLSTM 1.23</td><td>4.21</td><td>3</td><td>4.16</td><td>2.58</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Training cost comparison between the baseline and fusion models using the average time per epoch for all the GLUE benchmark datasets studied.", |
|
"num": null |
|
}, |
|
"TABREF8": { |
|
"html": null, |
|
"content": "<table><tr><td colspan=\"2\">Method Model structure</td><td>Accuracy (%)</td><td>Ref.</td></tr><tr><td colspan=\"4\">Artificial scarcity: (5%, 10%) of IMDB dataset (Maas et al., 2011) Fusion CNN + A + BiLSTM + CNN (84.79, 85.10) Ours</td></tr><tr><td>BERT</td><td>mBERT</td><td>(81.40, 84.79)</td><td>-</td></tr><tr><td colspan=\"4\">Scarcity: (0.01%, 0.02%) Emotion dataset (Saravia et al., 2018) Fusion CNN + A + LSTM + CNN (84.65, 89.87) Ours</td></tr><tr><td>BERT</td><td>mBERT</td><td>(79.5, 89.57)</td><td>-</td></tr><tr><td colspan=\"4\">100% of Intent Classification dataset (Larson et al., 2019) BERT BERT-base 94.3</td></tr><tr><td>Others</td><td>CNN MLP</td><td>89.8 90.1</td><td>(2019)</td></tr><tr><td>Fusion</td><td>CNN + BiLSTM + CNN CNN + LSTM + CNN</td><td>93.62 93.28</td><td>Ours</td></tr><tr><td>Fusion</td><td colspan=\"2\">100% of the Vietnamese dataset (Ho et al., 2019) CNN + LSTM + CNN 54.76 CNN + BiLSTM + CNN 54.54</td><td>Ours</td></tr><tr><td>BERT</td><td>BERT-base</td><td>53.18</td><td/></tr><tr><td>Fusion</td><td colspan=\"2\">100% of the Indonesian dataset (Saputri et al., 2018) CNN + LSTM + CNN 54.76 CNN + BiLSTM + CNN 54.54</td><td>Ours</td></tr><tr><td>BERT</td><td>BERT-base</td><td>53.18</td><td/></tr></table>", |
|
"type_str": "table", |
|
"text": "Performance comparison between fusion models and alternative DNN and transformers models across different languages and datasets. Here, A \u2261 attn.", |
|
"num": null |
|
}, |
|
"TABREF9": { |
|
"html": null, |
|
"content": "<table><tr><td colspan=\"2\">Serial Fusion architecture</td><td>Retrained Accuracy</td><td colspan=\"4\">Accuracy Before pruning After Pruning Before Pruning After Pruning Size (zip, MB)</td></tr><tr><td>1</td><td>LSTM + LSTM</td><td>86.19</td><td>85.43</td><td>85.18</td><td>33.45</td><td>6.32</td></tr><tr><td>2 3</td><td>CNN + LSTM + CNN LSTM + CNN + LSTM</td><td>86.36 85.28</td><td>86.61 85.74</td><td>85.54 84.24</td><td>34.81 34.27</td><td>6.60 6.45</td></tr></table>", |
|
"type_str": "table", |
|
"text": "Deployable form for a few DNN-based fusion models before and after the pruning and retraining for the six-class Bengali emotion dataset developed in this study.", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |