|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T05:58:17.984581Z" |
|
}, |
|
"title": "ARAELECTRA: Pre-Training Text Discriminators for Arabic Language Understanding", |
|
"authors": [ |
|
{ |
|
"first": "Wissam", |
|
"middle": [], |
|
"last": "Antoun", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "American University of Beirut", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Fady", |
|
"middle": [], |
|
"last": "Baly", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "American University of Beirut", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hazem", |
|
"middle": [], |
|
"last": "Hajj", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "American University of Beirut", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Advances in English language representation enabled a more sample-efficient pre-training task by Efficiently Learning an Encoder that Classifies Token Replacements Accurately (ELECTRA). Which, instead of training a model to recover masked tokens, it trains a discriminator model to distinguish true input tokens from corrupted tokens that were replaced by a generator network. On the other hand, current Arabic language representation approaches rely only on pretraining via masked language modeling. In this paper, we develop an Arabic language representation model, which we name ARAELECTRA. Our model is pretrained using the replaced token detection objective on large Arabic text corpora. We evaluate our model on multiple Arabic NLP tasks, including reading comprehension, sentiment analysis, and named-entity recognition and we show that ARAELEC-TRA outperforms current state-of-the-art Arabic language representation models, given the same pretraining data and with even a smaller model size.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Advances in English language representation enabled a more sample-efficient pre-training task by Efficiently Learning an Encoder that Classifies Token Replacements Accurately (ELECTRA). Which, instead of training a model to recover masked tokens, it trains a discriminator model to distinguish true input tokens from corrupted tokens that were replaced by a generator network. On the other hand, current Arabic language representation approaches rely only on pretraining via masked language modeling. In this paper, we develop an Arabic language representation model, which we name ARAELECTRA. Our model is pretrained using the replaced token detection objective on large Arabic text corpora. We evaluate our model on multiple Arabic NLP tasks, including reading comprehension, sentiment analysis, and named-entity recognition and we show that ARAELEC-TRA outperforms current state-of-the-art Arabic language representation models, given the same pretraining data and with even a smaller model size.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Recently, pre-trained language representation models have demonstrated state-of-the-art performance on multiple NLP tasks and in different languages. Pre-training is commonly done via Masked Language Modeling (MLM) (Devlin et al., 2019; Liu et al., 2019; Conneau et al., 2019) , where an input sequence has some of its tokens randomly hidden and the model is tasked to recover the original masked tokens. While this approach has proven successful, recent works have shown that MLM is not sample-efficient (Clark et al., 2020b) , since the network only learns from the small subset of masked tokens per sequence (15% of the tokens in BERT). Clark et al. (2020b) proposed an approach called Efficiently Learning an Encoder that Classi-fies Token Replacements Accurately (ELECTRA). The method uses a pre-training technique based on replaced token detection (RTD) task is more efficient than MLM, and thus achieved state-of-theart results on English benchmarks. RTD is a pretraining task where a model is tasked to distinguish true input tokens from synthetically generated ones. RTD solves the issue of the mismatch created in MLM, where the model only sees the [MASK] token during pre-training but not during fine-tuning. In ELECTRA, a small masked language generator network G is used to generate used to generate the corrupted tokens, and BERT-based discriminator model D predicts for whether a token is an original or a replacement.", |
|
"cite_spans": [ |
|
{ |
|
"start": 215, |
|
"end": 236, |
|
"text": "(Devlin et al., 2019;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 237, |
|
"end": 254, |
|
"text": "Liu et al., 2019;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 255, |
|
"end": 276, |
|
"text": "Conneau et al., 2019)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 505, |
|
"end": 526, |
|
"text": "(Clark et al., 2020b)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 640, |
|
"end": 660, |
|
"text": "Clark et al. (2020b)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Current state-of-the-art language representation models for Arabic employ MLM as a pre-training objective (Antoun et al., 2020; Safaya et al., 2020; Lan et al., 2020; Abdul-Mageed et al., 2020b; Chowdhury et al., 2020; Abdul-Mageed et al., 2020a) . In this paper, we describe the process of pre-training a transformer encoder model for Arabic language understanding using the RTD objective, which we call ARAELECTRA. We also evaluate ARAELECTRA on multiple Arabic NLP tasks and show empirically that ARAELECTRA outperforms current state-of-the-art Arabic pre-trained models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 127, |
|
"text": "(Antoun et al., 2020;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 128, |
|
"end": 148, |
|
"text": "Safaya et al., 2020;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 166, |
|
"text": "Lan et al., 2020;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 167, |
|
"end": 194, |
|
"text": "Abdul-Mageed et al., 2020b;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 195, |
|
"end": 218, |
|
"text": "Chowdhury et al., 2020;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 219, |
|
"end": 246, |
|
"text": "Abdul-Mageed et al., 2020a)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our contributions can be summarized as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Pre-training the ELECTRA model on a largescale Arabic corpus. \u2022 Reaching a new state-of-the-art on multiple Arabic NLP tasks. \u2022 Publicly releasing ARAELECTRA on popular NLP libraries.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of the paper is organized as follows. Section 2 provides a review of previous Arabic language representation literature. Section 3 details the methodology used in developing ARAELEC-TRA. Section 4 describes the experimental setup, evaluation procedures, and experiment results. Finally, we conclude in Section 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Recently, work on Arabic language representation have been on the rise due to the performance benefits that transfer learning approaches have brought. Early transfer learning approaches in Arabic relied on using pre-trained word embeddings i.e. Ar-aVec (Soliman et al., 2017) . Model-level transfer learning was shown to work on Arabic with hULMonA (ElJundi et al., 2019), a recurrent neural network-based language modeling approach. Antoun et al. (2020) and Safaya et al. (2020) improved on hULMonA, and pre-trained transformerbased models with MLM with large scale Arabic corpora. Other approaches addressed issues with the early BERT-based models such as training on code-switched English-Arabic corpora to improve performance on information retrieval tasks (Lan et al., 2020) , and training on dialectal Arabic (DA) corpora to address the domain miss-match between MSA and DA during pre-training and finetuning (Abdul-Mageed et al., 2020b; Chowdhury et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 245, |
|
"end": 275, |
|
"text": "Ar-aVec (Soliman et al., 2017)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 434, |
|
"end": 454, |
|
"text": "Antoun et al. (2020)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 459, |
|
"end": 479, |
|
"text": "Safaya et al. (2020)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 761, |
|
"end": 779, |
|
"text": "(Lan et al., 2020)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 915, |
|
"end": 943, |
|
"text": "(Abdul-Mageed et al., 2020b;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 944, |
|
"end": 967, |
|
"text": "Chowdhury et al., 2020)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We hence propose an Arabic ELECTRA-based language representation model pre-trained using the RTD objective on large MSA corpora.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this paper, we develop an ELECTRA-based Arabic language representation model to improve the state-of-the-art in Arabic reading comprehension. We create ARAELECTRA a bidirectional transformer encoder model with 12 encoder layers, 12 attention heads, 768 hidden size, and 512 maximum input sequence length for a total of 136M parameters. The pre-training setup and dataset of ARAELECTRA are described in the following sections.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ARAELECTRA: Methodology", |
|
"sec_num": "3" |
|
}, |
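{

"text": "As a concrete illustration of the architecture above, the following is a minimal sketch, assuming the Hugging Face transformers library, of a discriminator configuration matching the stated dimensions. The vocabulary size (inherited from ARABERTv0.2) and the intermediate size are assumptions not stated in this section, so the actual pre-training configuration may differ.\n\n# Minimal configuration sketch (not the authors' exact setup): an ELECTRA-base\n# style discriminator with the dimensions reported in Section 3.\nfrom transformers import ElectraConfig, ElectraForPreTraining\n\nconfig = ElectraConfig(\n    vocab_size=64000,             # assumed: ARABERTv0.2 wordpiece vocabulary size\n    embedding_size=768,           # input embedding size (see footnote 1)\n    hidden_size=768,              # hidden size\n    num_hidden_layers=12,         # encoder layers\n    num_attention_heads=12,       # attention heads\n    intermediate_size=3072,       # assumed: standard 4x hidden size\n    max_position_embeddings=512,  # maximum input sequence length\n)\nmodel = ElectraForPreTraining(config)\nprint(sum(p.numel() for p in model.parameters()))  # roughly 136M with these assumptions",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "ARAELECTRA: Methodology",

"sec_num": "3"

},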
|
{ |
|
"text": "While ARABERT was trained using the MLM objective, ARAELECTRA is pre-trained using the RTD objective. The RTD approach trains two neural network models, a generator G and a discriminator D or ARAELECTRA, as shown in Figure 1 . G takes a corrupted input sequence, where random tokens are replaced with the [MASK] token, and learns to predict the original tokens that have been masked. The generator network G is in our case a small BERT model with 12 encoder layers, 4 attention heads, and 256 hidden size 1 . The discriminator network D then takes as input the recovered sequence from the output of G and tries to predict which tokens were replaced and which tokens are from the original text.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 216, |
|
"end": 224, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Pre-training Setup", |
|
"sec_num": "3.1" |
|
}, |
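{

"text": "As a rough illustration of this objective, the following PyTorch-style sketch combines the generator's MLM loss with the discriminator's per-token replaced/original classification loss. It is written for this description only: G and D are stand-ins for the generator and discriminator networks, and the discriminator loss weight of 50 follows the original ELECTRA paper rather than anything reported here.\n\nimport torch\nimport torch.nn.functional as F\n\ndef rtd_step(G, D, input_ids, masked_ids, mask_positions, d_weight=50.0):\n    # 1) Generator: standard MLM loss on the masked positions.\n    gen_logits = G(masked_ids)                              # [batch, seq, vocab]\n    mlm_loss = F.cross_entropy(gen_logits[mask_positions],\n                               input_ids[mask_positions])\n\n    # 2) Sample replacement tokens from the generator (no gradient through sampling).\n    with torch.no_grad():\n        sampled = torch.distributions.Categorical(\n            logits=gen_logits[mask_positions]).sample()\n    corrupted = input_ids.clone()\n    corrupted[mask_positions] = sampled\n\n    # 3) Discriminator: per-token binary label, replaced (1) vs. original (0).\n    rtd_labels = (corrupted != input_ids).float()\n    disc_logits = D(corrupted)                              # [batch, seq]\n    disc_loss = F.binary_cross_entropy_with_logits(disc_logits, rtd_labels)\n\n    # Both networks are trained jointly, with the discriminator loss up-weighted.\n    return mlm_loss + d_weight * disc_loss",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Pre-training Setup",

"sec_num": "3.1"

},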
|
{ |
|
"text": "While this approach may look similar to a generative adversarial network (GAN) (Goodfellow et al., 2014) , the generator network in ELECTRA is trained with maximum-likelihood instead of adversarial training to fool the discriminator and the input to the generator is not a random noise vector, but a corrupted sequence of tokens.", |
|
"cite_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 104, |
|
"text": "(Goodfellow et al., 2014)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pre-training Setup", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We chose to pre-train on the same dataset as ARABERTV0.2 (Antoun et al., 2020) , to make the comparison between models fair. The dataset is a collection of the Arabic corpora list below:", |
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 78, |
|
"text": "(Antoun et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "pre-training Dataset", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 The OSCAR corpus (Ortiz Su\u00e1rez et al., 2020 (Zeroual et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 45, |
|
"text": "(Ortiz Su\u00e1rez et al., 2020", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 46, |
|
"end": 68, |
|
"text": "(Zeroual et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "pre-training Dataset", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 News articles provided by As-Safir newspaper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "pre-training Dataset", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The total size of the training dataset is 77GB or 8.8 billion words, and comprises mostly news articles. For validation, we use new Wikipedia articles that were published after the September 2020 dump. The same wordpiece vocabulary from ARABERTv0.2 was used for tokenization.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "pre-training Dataset", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Since the discriminator network has the same architecture and layers as a BERT model, we add a linear classification layer on top of ELECTRA's output, and fine-tune the whole model with the added layer on new tasks. ARAELECTRA's performance is validated on three Arabic NLP tasks i.e. question answering (QA), sentiment analysis (SA) and named-entity recognition (NER). pre-training For pre-training, 15% of the 512 input tokens were masked. The model was pretrained for 2 million steps with a batch size of 256. Pre-training took 24 days to finish on a TPUv3-8 slice. The learning rate was set to 2e-4, with 10000 warm-up steps.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fine-tuning", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Fine-tuning All the models were fine-tuned with batch size set to 32, maximum sequence length of 384, and a stride of 128 for QA, and a maximum sequence length of 256 for SA and NER. Experiments were only performed with the following learning rates [2e-5, 3e-5, 5e-5], since model specific hyper-parameter optimization is computationally expensive.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fine-tuning", |
|
"sec_num": "3.3" |
|
}, |
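{

"text": "As an illustration of this fine-tuning setup, the following is a minimal sketch using the Hugging Face transformers library for the sentiment-analysis case. The checkpoint name is an assumption about the released model, the input string is a placeholder, and the training loop, data loading, and evaluation are omitted.\n\nimport torch\nfrom transformers import AutoTokenizer, ElectraForSequenceClassification\n\nname = \"aubmindlab/araelectra-base-discriminator\"  # assumed model id\ntokenizer = AutoTokenizer.from_pretrained(name)\n# Adds a linear classification head on top of the encoder output;\n# 5 labels for the ArSenTD-Lev sentiment classes.\nmodel = ElectraForSequenceClassification.from_pretrained(name, num_labels=5)\n\nbatch = tokenizer([\"...\"], truncation=True, max_length=256,\n                  padding=True, return_tensors=\"pt\")\noutputs = model(**batch, labels=torch.tensor([0]))\noutputs.loss.backward()  # optimize with a learning rate in [2e-5, 3e-5, 5e-5]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Fine-tuning",

"sec_num": "3.3"

},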
|
{ |
|
"text": "The question answering task examines the model's reading comprehension and language understanding capabilities. The datasets of choice are the Arabic Reading Comprehension Dataset (ARCD) (Mozannar et al., 2019) and the Typologically Diverse Question Answering dataset (Ty-DiQA) (Clark et al., 2020a) . Both datasets follow the SQuAD (Rajpurkar et al., 2016) format where the model is required to extract the span of the answer, given a question and a context. The ARCD (Mozannar et al., 2019) training set consists of 48344 machine-translated questions and answers from English, with 693 questions and answers from the ARCD set. The test was performed on the remaining 702 questions from the ARCD set. From the TyDiQA (Clark et al., 2020a) , we chose the Arabic examples from the training and development sets of subtask 2, for a total of 14508 pairs for training and 921 pairs for testing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 187, |
|
"end": 210, |
|
"text": "(Mozannar et al., 2019)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 278, |
|
"end": 299, |
|
"text": "(Clark et al., 2020a)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 333, |
|
"end": 357, |
|
"text": "(Rajpurkar et al., 2016)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 469, |
|
"end": 492, |
|
"text": "(Mozannar et al., 2019)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 718, |
|
"end": 739, |
|
"text": "(Clark et al., 2020a)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Question Answering", |
|
"sec_num": "4.2.1" |
|
}, |
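{

"text": "To make the span-extraction format concrete, the following is a minimal inference sketch using the Hugging Face transformers question-answering head. The checkpoint name is an assumption about the released model, and the question and context strings are placeholders.\n\nimport torch\nfrom transformers import AutoTokenizer, ElectraForQuestionAnswering\n\nname = \"aubmindlab/araelectra-base-discriminator\"  # assumed model id\ntokenizer = AutoTokenizer.from_pretrained(name)\nmodel = ElectraForQuestionAnswering.from_pretrained(name)  # adds start/end span heads\n\nquestion, context = \"...\", \"...\"  # placeholder Arabic question and passage\ninputs = tokenizer(question, context, truncation=\"only_second\",\n                   max_length=384, return_tensors=\"pt\")\nwith torch.no_grad():\n    out = model(**inputs)\n# The answer span is given by the highest-scoring start and end positions.\nstart = int(out.start_logits.argmax())\nend = int(out.end_logits.argmax())\nanswer = tokenizer.decode(inputs[\"input_ids\"][0, start:end + 1])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Question Answering",

"sec_num": "4.2.1"

},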
|
{ |
|
"text": "Arabic sentiment Analysis evaluation is done on the Arabic Sentiment Twitter Dataset for LEVantine (ArSenTD-Lev) (Baly et al., 2018) . The dataset contains 4000 tweets written in the Levantine Arabic dialect and annotated for the sentiment (5 classes), topic, and sentiment target. The data was split 80-20 for training and testing.", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 132, |
|
"text": "(Baly et al., 2018)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Analysis", |
|
"sec_num": "4.2.2" |
|
}, |
|
{ |
|
"text": "For Arabic NER recognition, the model is evaluated on the ANERcorp dataset (Benajiba et al., 2007) , with the data split from CAMeL Lab (Obeid et al., 2020) . The train split has 125,102 words and the test split has 25,008 words, labeled for organization (ORG), person (PER), location (LOC), and miscellaneous (MISC).", |
|
"cite_spans": [ |
|
{ |
|
"start": 75, |
|
"end": 98, |
|
"text": "(Benajiba et al., 2007)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 136, |
|
"end": 156, |
|
"text": "(Obeid et al., 2020)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Named-Entity Recognition", |
|
"sec_num": "4.2.3" |
|
}, |
|
{ |
|
"text": "We evaluate our model against a collection of Arabic transformer models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference Models", |
|
"sec_num": "4.2.4" |
|
}, |
|
{ |
|
"text": "\u2022 ARABERTV0.1 (Antoun et al., 2020 ). \u2022 ARABERTv0.2 base, large (Antoun et al., 2020 ). \u2022 ARABIC-BERT base, medium, large (Safaya et al., 2020) . \u2022 ARABIC ALBERT base, large, xlarge 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 14, |
|
"end": 34, |
|
"text": "(Antoun et al., 2020", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 64, |
|
"end": 84, |
|
"text": "(Antoun et al., 2020", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 122, |
|
"end": 143, |
|
"text": "(Safaya et al., 2020)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference Models", |
|
"sec_num": "4.2.4" |
|
}, |
|
{ |
|
"text": "\u2022 ARBERT (Abdul-Mageed et al., 2020a) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 37, |
|
"text": "(Abdul-Mageed et al., 2020a)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference Models", |
|
"sec_num": "4.2.4" |
|
}, |
|
{ |
|
"text": "Experimental results for the different datasets and models are shown in Table 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 72, |
|
"end": 79, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "TyDiQA The results show that ARAELECTRA achieved the highest performance on all tested datasets when compared to the other base models, and only fell short on ARCD to Arabic-ALBERT-xlarge, a model 4 times its size, in exact match scores, and to ARABERTv0.2-large in F1-score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
|
{ |
|
"text": "The performance difference between both QA datasets is due to the poor quality of the ARCD training examples, which are translated from English SQuAD. ARCD training examples also contained text in languages other than Arabic and English, which further reduced performance due to the occurrence of unknowns subwords and characters. It is also to be noted, that some training examples in Arabic TyDiQA contained HTML artifacts which appeared in the training context and answer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "As for the ArSenTD-LEV scores, all test Arabic models still struggle with fine-grained labelling of ArSenTD-Lev. Mainly because the dataset only contains 4K examples distributed between 5 sentiment classes and on 6 diverse topics, with high class-imbalance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "These results clearly demonstrate that ELEC-TRA's RTD objective achieves higher performance especially on QA tasks and improved semantic representation compared to MLM on Arabic text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this paper, we showed that pre-training using the RTD objective on Arabic text is more efficient and produces pre-trained language representation models better than the MLM objective. Our ARAELECTRA model improves the stateof-the-art for Arabic Question Answering, senti-ment analysis and named-entity recognition, and achieves higher performance compared to other models pre-trained with the same dataset and with larger model sizes. Our model will be publicly available, along with our pre-training and finetuning code, in our repository github.com/aubmind/arabert/tree/master/araelectra", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In the generator, the input embeddings of size 768 are first projected into the generator hidden size with the addition of a linear layer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/KUIS-AI-Lab/Arabic-ALBERT/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The author would like to thank Tarek Naous for the constructive criticism of the manuscript. This research was supported by the University Research Board (URB) at the American University of Beirut (AUB), and by the TFRC program, which we thank for the free access to cloud TPUs. We also thank As-Safir newspaper for the data access.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "2020a. Arbert & marbert: Deep bidirectional transformers for arabic", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2101.01785" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Muhammad Abdul-Mageed, AbdelRahim Elmadany, and El Moatez Billah Nagoudi. 2020a. Arbert & marbert: Deep bidirectional transformers for arabic. arXiv preprint arXiv:2101.01785.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Abdel-Rahim Elmadany, and Lyle Ungar. 2020b. Toward micro-dialect identification in diaglossic and code-switched environments", |
|
"authors": [ |
|
{ |
|
"first": "Muhammad", |
|
"middle": [], |
|
"last": "Abdul-Mageed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chiyu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2010.04900" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Muhammad Abdul-Mageed, Chiyu Zhang, Abdel- Rahim Elmadany, and Lyle Ungar. 2020b. To- ward micro-dialect identification in diaglossic and code-switched environments. arXiv preprint arXiv:2010.04900.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Arabert: Transformer-based model for arabic language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Wissam", |
|
"middle": [], |
|
"last": "Antoun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fady", |
|
"middle": [], |
|
"last": "Baly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hazem", |
|
"middle": [], |
|
"last": "Hajj", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "LREC 2020 Workshop Language Resources and Evaluation Conference 11-16", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wissam Antoun, Fady Baly, and Hazem Hajj. 2020. Arabert: Transformer-based model for arabic lan- guage understanding. In LREC 2020 Workshop Lan- guage Resources and Evaluation Conference 11-16 May 2020, page 9.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Arsentd-lev: A multi-topic corpus for target-based sentiment analysis in arabic levantine tweets", |
|
"authors": [ |
|
{ |
|
"first": "Ramy", |
|
"middle": [], |
|
"last": "Baly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alaa", |
|
"middle": [], |
|
"last": "Khaddaj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hazem", |
|
"middle": [], |
|
"last": "Hajj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wassim", |
|
"middle": [], |
|
"last": "El-Hajj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Khaled", |
|
"middle": [], |
|
"last": "Bashir Shaban", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramy Baly, Alaa Khaddaj, Hazem Hajj, Wassim El- Hajj, and Khaled Bashir Shaban. 2018. Arsentd-lev: A multi-topic corpus for target-based sentiment anal- ysis in arabic levantine tweets. In OSACT 3: The 3rd", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Workshop on Open-Source Arabic Corpora and Processing Tools", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Workshop on Open-Source Arabic Corpora and Pro- cessing Tools, page 37.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Anersys: An arabic named entity recognition system based on maximum entropy", |
|
"authors": [ |
|
{ |
|
"first": "Yassine", |
|
"middle": [], |
|
"last": "Benajiba", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Rosso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jos\u00e9 Miguel", |
|
"middle": [], |
|
"last": "Bened\u00edruiz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Computational Linguistics and Intelligent Text Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "143--153", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yassine Benajiba, Paolo Rosso, and Jos\u00e9 Miguel Bened\u00edRuiz. 2007. Anersys: An arabic named entity recognition system based on maximum en- tropy. In Computational Linguistics and Intelligent Text Processing, pages 143-153, Berlin, Heidelberg. Springer Berlin Heidelberg.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Improving Arabic text categorization using transformer training diversification", |
|
"authors": [ |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Shammur Absar Chowdhury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kareem", |
|
"middle": [], |
|
"last": "Abdelali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jung", |
|
"middle": [], |
|
"last": "Darwish", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joni", |
|
"middle": [], |
|
"last": "Soon-Gyo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernard", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Salminen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Jansen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Fifth Arabic Natural Language Processing Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "226--236", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shammur Absar Chowdhury, Ahmed Abdelali, Ka- reem Darwish, Jung Soon-Gyo, Joni Salminen, and Bernard J. Jansen. 2020. Improving Arabic text cate- gorization using transformer training diversification. In Proceedings of the Fifth Arabic Natural Language Processing Workshop, pages 226-236, Barcelona, Spain (Online). Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Tydi qa: A benchmark for information-seeking question answering in typologically diverse languages", |
|
"authors": [ |
|
{ |
|
"first": "Jonathan", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eunsol", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Garrette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Kwiatkowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vitaly", |
|
"middle": [], |
|
"last": "Nikolaev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennimaria", |
|
"middle": [], |
|
"last": "Palomaki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonathan H. Clark, Eunsol Choi, Michael Collins, Dan Garrette, Tom Kwiatkowski, Vitaly Nikolaev, and Jennimaria Palomaki. 2020a. Tydi qa: A benchmark for information-seeking question answering in typo- logically diverse languages. Transactions of the As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Electra: Pretraining text encoders as discriminators rather than generators", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minh-Thang", |
|
"middle": [], |
|
"last": "Luong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Quoc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2003.10555" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Clark, Minh-Thang Luong, Quoc V Le, and Christopher D Manning. 2020b. Electra: Pre- training text encoders as discriminators rather than generators. arXiv preprint arXiv:2003.10555.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Unsupervised cross-lingual representation learning at scale", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kartikay", |
|
"middle": [], |
|
"last": "Khandelwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishrav", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Wenzek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Guzm\u00e1n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1911.02116" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, Edouard Grave, Myle Ott, Luke Zettle- moyer, and Veselin Stoyanov. 2019. Unsupervised cross-lingual representation learning at scale. arXiv preprint arXiv:1911.02116.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "1.5 billion words arabic corpus", |
|
"authors": [ |
|
{ |
|
"first": "Ibrahim Abu", |
|
"middle": [], |
|
"last": "El-Khair", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1611.04033" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ibrahim Abu El-Khair. 2016. 1.5 billion words arabic corpus. arXiv preprint arXiv:1611.04033.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Wassim El-Hajj, and Khaled Shaban. 2019. hulmona: The universal language model in arabic", |
|
"authors": [ |
|
{ |
|
"first": "Obeida", |
|
"middle": [], |
|
"last": "Eljundi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wissam", |
|
"middle": [], |
|
"last": "Antoun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nour", |
|
"middle": [ |
|
"El" |
|
], |
|
"last": "Droubi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hazem", |
|
"middle": [], |
|
"last": "Hajj", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the Fourth Arabic Natural Language Processing Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "68--77", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Obeida ElJundi, Wissam Antoun, Nour El Droubi, Hazem Hajj, Wassim El-Hajj, and Khaled Shaban. 2019. hulmona: The universal language model in arabic. In Proceedings of the Fourth Arabic Natural Language Processing Workshop, pages 68-77.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Generative adversarial nets", |
|
"authors": [ |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Goodfellow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Pouget-Abadie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mehdi", |
|
"middle": [], |
|
"last": "Mirza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Warde-Farley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sherjil", |
|
"middle": [], |
|
"last": "Ozair", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aaron", |
|
"middle": [], |
|
"last": "Courville", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "27", |
|
"issue": "", |
|
"pages": "2672--2680", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. 2014. Generative ad- versarial nets. Advances in neural information pro- cessing systems, 27:2672-2680.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Gigabert: Zero-shot transfer learning from english to arabic", |
|
"authors": [ |
|
{ |
|
"first": "Wuwei", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alan", |
|
"middle": [], |
|
"last": "Ritter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of The 2020 Conference on Empirical Methods on Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wuwei Lan, Yang Chen, Wei Xu, and Alan Ritter. 2020. Gigabert: Zero-shot transfer learning from english to arabic. In Proceedings of The 2020 Conference on Empirical Methods on Natural Language Process- ing (EMNLP).", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Roberta: A robustly optimized bert pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Neural Arabic question answering", |
|
"authors": [ |
|
{ |
|
"first": "Hussein", |
|
"middle": [], |
|
"last": "Mozannar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elie", |
|
"middle": [], |
|
"last": "Maamary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karl", |
|
"middle": [ |
|
"El" |
|
], |
|
"last": "Hajal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hazem", |
|
"middle": [], |
|
"last": "Hajj", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Arabic Natural Language Processing Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "108--118", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-4612" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hussein Mozannar, Elie Maamary, Karl El Hajal, and Hazem Hajj. 2019. Neural Arabic question answer- ing. In Proceedings of the Fourth Arabic Natu- ral Language Processing Workshop, pages 108-118, Florence, Italy. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "CAMeL tools: An open source python toolkit for Arabic natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Ossama", |
|
"middle": [], |
|
"last": "Obeid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nasser", |
|
"middle": [], |
|
"last": "Zalmout", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salam", |
|
"middle": [], |
|
"last": "Khalifa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dima", |
|
"middle": [], |
|
"last": "Taji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mai", |
|
"middle": [], |
|
"last": "Oudah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bashar", |
|
"middle": [], |
|
"last": "Alhafni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Go", |
|
"middle": [], |
|
"last": "Inoue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fadhl", |
|
"middle": [], |
|
"last": "Eryani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Erdmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nizar", |
|
"middle": [], |
|
"last": "Habash", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7022--7032", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ossama Obeid, Nasser Zalmout, Salam Khalifa, Dima Taji, Mai Oudah, Bashar Alhafni, Go Inoue, Fadhl Eryani, Alexander Erdmann, and Nizar Habash. 2020. CAMeL tools: An open source python toolkit for Arabic natural language processing. In Proceed- ings of the 12th Language Resources and Evaluation Conference, pages 7022-7032, Marseille, France. European Language Resources Association.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A monolingual approach to contextualized word embeddings for mid-resource languages", |
|
"authors": [ |
|
{ |
|
"first": "Pedro Javier Ortiz", |
|
"middle": [], |
|
"last": "Su\u00e1rez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Romary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beno\u00eet", |
|
"middle": [], |
|
"last": "Sagot", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1703--1714", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pedro Javier Ortiz Su\u00e1rez, Laurent Romary, and Beno\u00eet Sagot. 2020. A monolingual approach to contextual- ized word embeddings for mid-resource languages. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1703-1714, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Squad: 100,000+ questions for machine comprehension of text", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Rajpurkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Konstantin", |
|
"middle": [], |
|
"last": "Lopyrev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1606.05250" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. Squad: 100,000+ questions for machine comprehension of text. arXiv preprint arXiv:1606.05250.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "KUISAIL at SemEval-2020 task 12: BERT-CNN for offensive speech identification in social media", |
|
"authors": [ |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Safaya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Moutasem", |
|
"middle": [], |
|
"last": "Abdullatif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deniz", |
|
"middle": [], |
|
"last": "Yuret", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Fourteenth Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2054--2059", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ali Safaya, Moutasem Abdullatif, and Deniz Yuret. 2020. KUISAIL at SemEval-2020 task 12: BERT- CNN for offensive speech identification in social me- dia. In Proceedings of the Fourteenth Workshop on Semantic Evaluation, pages 2054-2059, Barcelona (online). International Committee for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Aravec: A set of arabic word embedding models for use in arabic nlp", |
|
"authors": [ |
|
{ |
|
"first": "Kareem", |
|
"middle": [], |
|
"last": "Abu Bakr Soliman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Eissa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Samhaa R El-Beltagy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Procedia Computer Science", |
|
"volume": "117", |
|
"issue": "", |
|
"pages": "256--265", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abu Bakr Soliman, Kareem Eissa, and Samhaa R El- Beltagy. 2017. Aravec: A set of arabic word embed- ding models for use in arabic nlp. Procedia Com- puter Science, 117:256-265.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Osian: Open source international arabic news corpus-preparation and integration into the clarin-infrastructure", |
|
"authors": [ |
|
{ |
|
"first": "Imad", |
|
"middle": [], |
|
"last": "Zeroual", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Goldhahn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Eckart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdelhak", |
|
"middle": [], |
|
"last": "Lakhouaja", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Fourth Arabic Natural Language Processing Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "175--182", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Imad Zeroual, Dirk Goldhahn, Thomas Eckart, and Ab- delhak Lakhouaja. 2019. Osian: Open source inter- national arabic news corpus-preparation and integra- tion into the clarin-infrastructure. In Proceedings of the Fourth Arabic Natural Language Processing Workshop, pages 175-182.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "Figure 1: Replaced Token Detection pre-training task", |
|
"type_str": "figure", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"num": null, |
|
"text": "Performance of all tested model on the various Arabic downstream tasks. Overall best scores are highlighted in bold, while the best score within base-sized models is underlined.", |
|
"content": "<table/>", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |