|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:52:25.103229Z" |
|
}, |
|
"title": "Interactive Learning Approach for Arabic Target-Based Sentiment Analysis", |
|
"authors": [ |
|
{ |
|
"first": "Husamelddin", |
|
"middle": [ |
|
"A M N" |
|
], |
|
"last": "Balla", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Technological University Dublin Dublin", |
|
"location": { |
|
"country": "Ireland" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Marisa", |
|
"middle": [], |
|
"last": "Llorens", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Technological University Dublin Dublin", |
|
"location": { |
|
"country": "Ireland" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Sarah", |
|
"middle": [ |
|
"Jane" |
|
], |
|
"last": "Delany", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Technological University Dublin Dublin", |
|
"location": { |
|
"country": "Ireland" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Recently, the majority of sentiment analysis researchers focus on target-based sentiment analysis because it delivers in-depth analysis with more accurate results as compared to traditional sentiment analysis. In this paper, we propose an interactive learning approach to tackle a target-based sentiment analysis task for the Arabic language. The proposed IA-LSTM model uses an interactive attentionbased mechanism to force the model to focus on different parts (targets) of a sentence. We investigate the ability to use targets, right and left contexts, and model them separately to learn their own representations via interactive modeling. We evaluated our model on two different datasets: Arabic hotel review and Arabic book review datasets. The results demonstrate the effectiveness of using this interactive modeling technique for the Arabic targetbased sentiment analysis task. The model obtained accuracy values of 83.10 compared to SOTA models such as AB-LSTM-PC which obtained 82.60 for the same dataset.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Recently, the majority of sentiment analysis researchers focus on target-based sentiment analysis because it delivers in-depth analysis with more accurate results as compared to traditional sentiment analysis. In this paper, we propose an interactive learning approach to tackle a target-based sentiment analysis task for the Arabic language. The proposed IA-LSTM model uses an interactive attentionbased mechanism to force the model to focus on different parts (targets) of a sentence. We investigate the ability to use targets, right and left contexts, and model them separately to learn their own representations via interactive modeling. We evaluated our model on two different datasets: Arabic hotel review and Arabic book review datasets. The results demonstrate the effectiveness of using this interactive modeling technique for the Arabic targetbased sentiment analysis task. The model obtained accuracy values of 83.10 compared to SOTA models such as AB-LSTM-PC which obtained 82.60 for the same dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Sentiment analysis (SA) is one of the most prolific research areas in computer sciences, which aims to identify and extract user opinions from reviews. This technique has become an essential part of a wide range of applications in the areas of politics, business, advertising and marketing as it can help in identifying people's opinions towards related targets (Tang et al., 2015) . Arabic is considered among the top 4 languages in terms of internet usage (Boudad et al., 2018) . With the rapid growth of Arabic web content and low resources for analyzing Arabic opinion mining, the need for accurate Arabic sentiment analysis tools is very necessary.", |
|
"cite_spans": [ |
|
{ |
|
"start": 362, |
|
"end": 381, |
|
"text": "(Tang et al., 2015)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 458, |
|
"end": 479, |
|
"text": "(Boudad et al., 2018)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There are three main classification levels in sentiment analysis: document-level, sentence-level, and target-level. Document-level SA aims to classify the sentiment expressed in the whole document. It considers the whole document as a basic information unit (talking about one topic). Sentence-level SA aims to classify the sentiment expressed in each sentence. In traditional sentiment analysis, the detailed opinions of all targets of the entity (which are required in many applications) are not provided. To acquire these details, we need to use the target level. Target-based sentiment analysis (TBSA) aims to classify the sentiment with respect to the specific targets of entities. The opinion holders can give different opinions for different targets of the same entity, like in this sentence: \"The hotel is clean with good services, but the room was too small\". Target-based sentiment analysis is a finegrained task in sentiment analysis. This kind of fine-grained target-based analysis generally relies on machine learning techniques that require large domain-specific datasets with manual training data (Hu and Liu, 2004) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 1112, |
|
"end": 1130, |
|
"text": "(Hu and Liu, 2004)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Target-based sentiment analysis has become more popular in recent research as it delivers more accurate results compared to traditional sentiment analysis. Given a plain text, the trained model is able to detect the targets that were seen in the training set; the context is simply the sequence of words or tokens around the targets. Referring to the previous example, Target1: hotel, Context: clean, good service, Polarity: positive; Target2: room, Context: too small, Polarity: negative. In traditional sentiment analysis (non-target-based SA), this detailed level of analysis is not possible as the analysis is performed at sentence level and hence the entire sentence is classified as either positive or negative.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
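
{

"text": "To make the target/context decomposition concrete, the following is a minimal illustrative sketch in Python (the field names are ours for illustration, not from any specific library) that represents the example sentence as target-based tuples:\n\n# Target-based decomposition of: 'The hotel is clean with good services, but the room was too small'\nreview = [\n    {'target': 'hotel', 'context': ['clean', 'good services'], 'polarity': 'positive'},\n    {'target': 'room', 'context': ['too small'], 'polarity': 'negative'},\n]\n# Sentence-level SA would instead assign a single label to the whole sentence.\n\nEach tuple pairs a target with the context words that carry its sentiment, which is exactly the granularity that TBSA models must recover.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},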
|
{ |
|
"text": "The research area of Arabic SA is relatively new. Recently, the work on Arabic SA has received a lot of attention and a number of papers on traditional SA have been published in the last couple of years (Shoukry and Rafea, 2012; Duwairi and El-Orfali, 2014; Nabil et al., 2015; Al-Rubaiee et al., 2016) . However, research work on Arabic TBSA has not been addressed in depth yet. Most of the existing TBSA research focuses on English (Xue and Li, 2018; Mowlaei et al., 2020; Ma et al., 2018) with very little work on other languages such as Arabic.", |
|
"cite_spans": [ |
|
{ |
|
"start": 203, |
|
"end": 228, |
|
"text": "(Shoukry and Rafea, 2012;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 229, |
|
"end": 257, |
|
"text": "Duwairi and El-Orfali, 2014;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 258, |
|
"end": 277, |
|
"text": "Nabil et al., 2015;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 278, |
|
"end": 302, |
|
"text": "Al-Rubaiee et al., 2016)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 434, |
|
"end": 452, |
|
"text": "(Xue and Li, 2018;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 453, |
|
"end": 474, |
|
"text": "Mowlaei et al., 2020;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 475, |
|
"end": 491, |
|
"text": "Ma et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we address the task of TBSA in Arabic. The proposed model uses a neural network with an attention mechanism to force the model to attend to the important parts of a sentence. To achieve that, an interactive attention-based long short-term memory network (IA-LSTM) with an interaction technique is used to capture important information related to a given target.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Previous work in Arabic target-based SA used deep learning represented in Recurrent Neural Network (RNN) and LSTM models and developed several methods aiming to model contexts through the generation of target-based representations (Ruder et al., 2016; Tamchyna and Veselovsk\u00e1, 2016; Al-Smadi et al., 2019) . However, the modeling of interactive targets with contexts using attention mechanisms was not addressed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 231, |
|
"end": 251, |
|
"text": "(Ruder et al., 2016;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 252, |
|
"end": 282, |
|
"text": "Tamchyna and Veselovsk\u00e1, 2016;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 283, |
|
"end": 305, |
|
"text": "Al-Smadi et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The model proposed in this paper (IA-LSTM) is based on a model (IAN) proposed for English language TBSA in a previously published paper (Ma et al., 2017) that addresses the separately modeled targets and contexts that jointly interact with each other with an attention mechanism. However, the left context was ignored in the previously proposed approach in (Ma et al., 2017) . Given the fact that Arabic language is written from right to left, the addition of the left context may improve the performance as the most of the opinionated words come after the target (left context). The proposed model (IA-LSTM) is obtained by adding the left context to the IAN model from (Ma et al., 2017) , to end up with a model that considers the three main elements (left context, target, and right context). Code for the proposed model is publicly available on Github 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 153, |
|
"text": "(Ma et al., 2017)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 357, |
|
"end": 374, |
|
"text": "(Ma et al., 2017)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 670, |
|
"end": 687, |
|
"text": "(Ma et al., 2017)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of this paper is organized as follows: section 2 presents the related work of SA and TBSA in English and Arabic. Section 3 discusses the methodology used for the proposed model in this paper. Section 4 describes the datasets used for evaluations. Section 5 explains the baselines, evaluations, the experimental results and analysis. Section 6 concludes this paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The research work on Arabic TBSA has few examples as the published papers are limited, so, the 1 https://github.com/HUSTUD/IA-LSTM first subsection of related work mainly focus on traditional Arabic SA while the second subsection covers the published work on TBSA on both Arabic and other languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The early attempts on Arabic SA relied on the methods applied for English SA as it is more mature with rich resources in terms of SA. An example of this, Ahmad (2006) employed a rule-based approach that was originally designed for English. Almas and Ahmad (2007) modified the approach to accept other languages such as English and Urdu. Those two attempts used financial news datasets. The results from these experiments were similar across all of languages tested.", |
|
"cite_spans": [ |
|
{ |
|
"start": 154, |
|
"end": 166, |
|
"text": "Ahmad (2006)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 240, |
|
"end": 262, |
|
"text": "Almas and Ahmad (2007)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Arabic SA", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Continuing in a business-oriented and rule-based approach, Elhawary and Elfeky (2010) performed experiments on Arabic SA. The authors addressed the problem of SA using large data and MapReduce in an attempt to enhance the performance. Another work following a similar approach was proposed by Farra et al. (2010) , which used a precompiled lexicon to improve the performance on both sentence-level and document-level SA. However, there were no significant improvements in general performance, except at sentence level SA with slight improvement.", |
|
"cite_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 85, |
|
"text": "Elhawary and Elfeky (2010)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 293, |
|
"end": 312, |
|
"text": "Farra et al. (2010)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Arabic SA", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Work on Arabic SA or any other language always requires datasets to test the used approach. As Arabic is a language with low resources, very few datasets have been created for Arabic SA over the past years. AWATIF Abdul-Mageed and Diab (2012) is one example of the existing datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Arabic SA", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "One of the earliest papers in TBSA was for English, and used a frequency-based approach proposed by Hu and Liu (2004) . The basic idea behind this approach is counting the nouns in the text and considering the most frequently mentioned ones as targets. The authors also tried to avoid the error of incorrectly identifying infrequent nouns as targets by using the nearest opinion words. This approach was also used in several other papers (Qiu et al., 2009; Zhuang et al., 2006) . To enhance this approach, Popescu and Etzioni (2007) proposed using a technique named \"part of relationship\" to eliminate the frequent nouns that are incorrectly identified as targets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 100, |
|
"end": 117, |
|
"text": "Hu and Liu (2004)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 438, |
|
"end": 456, |
|
"text": "(Qiu et al., 2009;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 457, |
|
"end": 477, |
|
"text": "Zhuang et al., 2006)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 506, |
|
"end": 532, |
|
"text": "Popescu and Etzioni (2007)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Based Sentiment Analysis (TBSA)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "TBSA is more challenging than the traditional task of sentiment analysis (non-target-based) be-cause the model needs to include the impact of the context words on the target. A deep learning approach in this task can be performed by representing context, generating a target representation, and then detecting the important parts of the sentence (i.e. the targets). Again, RNNs have proven their competitive performance in this task in terms of capturing long-term dependency in sentences and general semantic classification. Moreover, the best RNN performers are the ones that include attention or memory networks. This shows that the models can learn how to concentrate on different parts of the sentence with an attention weight aggregated from a lower level to classify targets and opinion words and the link between them. Several researchers adopted an attention mechanism for this task in English (Gers et al., 2000; Song et al., 2019; . In general, there are not many techniques using deep learning for Arabic sentiment analysis and, in particular, TBSA (Dahou et al., 2016; Ruder et al., 2016; Tamchyna and Veselovsk\u00e1, 2016; Al-Smadi et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 895, |
|
"end": 922, |
|
"text": "English (Gers et al., 2000;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 923, |
|
"end": 941, |
|
"text": "Song et al., 2019;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 1061, |
|
"end": 1081, |
|
"text": "(Dahou et al., 2016;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1082, |
|
"end": 1101, |
|
"text": "Ruder et al., 2016;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1102, |
|
"end": 1132, |
|
"text": "Tamchyna and Veselovsk\u00e1, 2016;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 1133, |
|
"end": 1155, |
|
"text": "Al-Smadi et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Based Sentiment Analysis (TBSA)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The International Workshop on Semantic Evaluation (SemEval), one of the most significant events in natural language processing (NLP) research, is concerned with the evaluation of computational semantic analysis systems. A special event focused on TBSA was organized in 2014, 2015 and 2016. In this workshop, Al-Smadi et al. 2018proposed Arabic TBSA paper which performs a comparison between a deep neural network and SVM models. The authors used an RNN framework named Deeplearning4j that provides a set of implementations for different deep neural network algorithms. They evaluated their models on the Arabic hotel review dataset . Their deep neural network model outperformed the SVM model in accuracy of sentiment polarity while SVM outperformed the deep learning model in target extraction. They also proposed another study , which was part of the SemEval-TBSA 2016 competition. In this study they created a hotel review dataset with baselines obtained by using the SVM model with only unigram feature.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Based Sentiment Analysis (TBSA)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Focusing on Arabic TBSA and using the same dataset that this research is using (Arabic hotel reviews), three papers were recently published (Ruder et al., 2016; Tamchyna and Veselovsk\u00e1, 2016; Al-Smadi et al., 2019) . Ruder et al. (2016) proposed a deep learning-based approach (INSIGHT-1) for multi-lingual TBSA as one of SemEval-2016 participants, which used a convolutional neural network for target extraction and sentiment analysis. Using the Arabic hotel review dataset their model outperformed the other participants in the workshop. Another approach proposed by Tamchyna and Veselovsk\u00e1 (2016) for the same task used an RNN-based binary classifier for the task of target-category identification. Their model was trained using word embedding features and achieved good performance as the second rank after (Ruder et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 160, |
|
"text": "(Ruder et al., 2016;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 161, |
|
"end": 191, |
|
"text": "Tamchyna and Veselovsk\u00e1, 2016;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 192, |
|
"end": 214, |
|
"text": "Al-Smadi et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 217, |
|
"end": 236, |
|
"text": "Ruder et al. (2016)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 569, |
|
"end": 599, |
|
"text": "Tamchyna and Veselovsk\u00e1 (2016)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 811, |
|
"end": 831, |
|
"text": "(Ruder et al., 2016)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Based Sentiment Analysis (TBSA)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "A recent attempt by Al-Smadi et al. (2019) used targets and context embeddings in their proposed model (AB-LSTM-PC). The approach models the context words via LSTM networks and then combines the word's hidden states with target embeddings to generate the attention vectors. In addition, to further strengthen the effect of target embeddings, the model appends target embeddings, with each word embedding vector forming the context. This is used to produce the final representation for TBSA. Their model performance was the highest among all previously published papers evaluated on the same dataset we are using in this research.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Based Sentiment Analysis (TBSA)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "An approach (IAN) for English TBSA using interactive targets and context representations was proposed in Ma et al. (2017) . The model uses attention mechanisms to concatenate the separately modeled targets and context as final representation before it is fed to the softmax layer. This model only considers the right context of targets, ignoring the left context.", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 121, |
|
"text": "Ma et al. (2017)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Based Sentiment Analysis (TBSA)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "All the research mentioned in this section ignores the the left and right context of targets and the interactions between them. Using these extra features can increase the amount of information about the context by providing a more comprehensive approach to context. In this paper, we propose an interactive learning approach based on (Ma et al., 2017) to tackle sentiment polarity identification for the Arabic language which includes the use of left and right context.", |
|
"cite_spans": [ |
|
{ |
|
"start": 335, |
|
"end": 352, |
|
"text": "(Ma et al., 2017)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Target Based Sentiment Analysis (TBSA)", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Recurrent neural networks (RNNs) are deep learning neural networks designed specifically to learn sequences of data and are mainly used for textual data classification. The learning process is done at hidden recurrent nodes depending on their previous layers of nodes. However, RNNs suffer from the vanishing gradient problem when handling long sequences of data. Bi-directional long short-term memory (Bi-LSTM) (Schuster and Paliwal, 1997) was proposed as a solution for this problem and have proven to be efficient in many NLP-related problems. In contrast to the standard RNNs, Bi-LSTM units have a major role in extracting and learning important features out of the input or computed data and keeping the computed values as long as they are needed in the memory vector.", |
|
"cite_spans": [ |
|
{ |
|
"start": 412, |
|
"end": 440, |
|
"text": "(Schuster and Paliwal, 1997)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
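
{

"text": "In PyTorch terms, a Bi-LSTM encoder of the kind described here is obtained by setting the bidirectional flag; the sizes below are an illustrative assumption, not the paper's configuration:\n\nimport torch.nn as nn\n\n# 300-dimensional inputs, 128 hidden units per direction; the output at each\n# time step concatenates the forward and backward states (256 values).\nencoder = nn.LSTM(input_size=300, hidden_size=128,\n                  batch_first=True, bidirectional=True)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Proposed Approach",

"sec_num": "3"

},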
|
{ |
|
"text": "In this research, we focus on target-based sentiment polarity classification considering the right and left contexts. The enhancement of sentiment classification performance can be achieved by considering the targets and their contexts. The good performance relies on simultaneously modeling targets and contexts precisely. Targets and contexts can influence the representation of each other. For example, the target word \"hotel\" can be naturally associated with the context word \"clean\" and vice versa. Therefore, targets and contexts can be modeled individually but learn from their interactions. In the input text, each word usually has its own contribution or importance, which is different from other words in the final representation for sentiment analysis. For instance, the importance of the word \"room\" is higher in the representation of the target \"room price\", which is described by \"expensive\". Therefore, in the proposed model, the attention weights for both targets and contexts are computed to respectively capture their important information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The proposed model is an interactive attention LSTM-based (IA-LSTM) model which employs long short-term memory networks (LSTM) and attention mechanisms. To get important information from the left and right context, the model uses an attention mechanism associated with a target then computes context representation for sentiment polarity identification. In addition, the proposed model makes use of the interactive information from the word's context to supervise the target modeling. Finally, the model concatenates the attended left context representations, target representations, and right context representations, then uses them to predict the sentiment polarity (see Fig. 1 ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 673, |
|
"end": 679, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
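
{

"text": "As a concrete illustration of this design, the following is a minimal PyTorch-style sketch of the forward pass as described here (layer names, sizes, and the simplified dot-product attention score are our illustrative assumptions, not the released code; the public repository contains the actual implementation):\n\nimport torch\nimport torch.nn as nn\n\nclass IALSTMSketch(nn.Module):\n    # Three LSTM encoders (left context, target, right context), interactive\n    # attention, concatenation, and a softmax classifier over three polarities.\n    def __init__(self, embed_dim=300, hidden_dim=128, num_classes=3):\n        super().__init__()\n        self.lstm_left = nn.LSTM(embed_dim, hidden_dim, batch_first=True)\n        self.lstm_target = nn.LSTM(embed_dim, hidden_dim, batch_first=True)\n        self.lstm_right = nn.LSTM(embed_dim, hidden_dim, batch_first=True)\n        self.classifier = nn.Linear(3 * hidden_dim, num_classes)\n\n    def attend(self, states, query):\n        # states: (batch, seq, hidden); query: (batch, hidden).\n        # The bilinear weight w of Equation 5 is folded into a plain dot\n        # product here for brevity.\n        scores = torch.tanh(torch.bmm(states, query.unsqueeze(2))).squeeze(2)\n        weights = torch.softmax(scores, dim=1)\n        return torch.bmm(weights.unsqueeze(1), states).squeeze(1)\n\n    def forward(self, left_emb, target_emb, right_emb):\n        h_l, _ = self.lstm_left(left_emb)\n        h_t, _ = self.lstm_target(target_emb)\n        h_r, _ = self.lstm_right(right_emb)\n        t_avg = h_t.mean(dim=1)              # initial target representation\n        c_l = self.attend(h_l, t_avg)        # left context attends to target\n        c_r = self.attend(h_r, t_avg)        # right context attends to target\n        t = self.attend(h_t, h_l.mean(dim=1)) + self.attend(h_t, h_r.mean(dim=1))\n        f = torch.cat([c_l, t, c_r], dim=1)  # final representation F\n        return torch.softmax(self.classifier(f), dim=1)\n\nA batch of padded embedding tensors of shape (batch, seq, 300) for each of the three segments yields a distribution over the three polarity labels.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Proposed Approach",

"sec_num": "3"

},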
|
{ |
|
"text": "Following the model notation in Fig. 1 , assume that a left context consists of k words [w LC1 , w LC2 , ..., w LCk ], a target consists of m words [w t1 , w t2 , ..., w tm ], and a right context consists of n words [w RC1 , w RC2 , ..., w RCn ]. We use pre-trained word embeddings for word representation of contexts and targets. Then, since there is a strong dependence between words in a sentence, the LSTM network is used to learn the hidden word semantics as LSTM is good at learning long-term dependencies. Next, LSTM produces the hidden states [h LC1 , h LC2 , ..., h LCk ] for the left context words, [h t1 , h t2 , ..., h tm ] for the target words, and [h RC1 , h RC2 , ..., h RCn ] for the right context as word representations. Then, the model calculates the average of the hidden states to get the initial representations of the contexts and target.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 32, |
|
"end": 38, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The attention mechanism in the proposed model is adopted by using the initial representations of the contexts and target as input to help in selecting important information for classifying the sentiment polarity. To form the attention mechanism, we use the average as well as the last hidden state output of target and contexts to capture an abstract representations of the input sequence. The use of the average value is to form the initial representation of the other component (target or context) and combine it with the last hidden state of the current component. We found this is the best way to form the attention mechanism and it help to reduce the noise and the sparse information associated with the Arabic language (Elnagar et al., 2020) . The attention process is described by considering the left context, target, and the right context, as shown in Fig. 1 . Using the left context words representation [h LC1 , h LC2 , ..., h LCk ] and the average of hidden states of target representation T avg , the model computes the left attention vector L\u03b1 i . Similarly, the model computes the target attention vector \u03b2 i by using the average of left context words LC avg , the target words representation [h t1 , h t2 , ..., h tm ], and the average of the right context vector RC avg . The same technique is followed for the right attention vector R\u03b1 i obtained using the right context words representation [h RC1 , h RC2 , ..., h RCn ] and the average of hidden states of target representation T avg . Then, the attention vectors L\u03b1 i ,\u03b2 i , and R\u03b1 i are used to obtain the word attention weights and concatenated as one vector before being fed to the softmax classifier.", |
|
"cite_spans": [ |
|
{ |
|
"start": 725, |
|
"end": 747, |
|
"text": "(Elnagar et al., 2020)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 861, |
|
"end": 867, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Formally, given the word embeddings of the contexts and target, the hidden state representations of the contexts ( 1and right -equation 2) and targets equation 3is obtained by calculating the average of each as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "LC avg = k i=1 h LC i k", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "(1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "RC avg = n i=1 h RC i n (2) T avg = m i=1 h T i m", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "To make the model focus on the important parts of the representations, an attention mechanism that employs the initial representations of the contexts and target is used. The target influence on the left context and the influence of the left context on the target is considered, as well as the influence of the target on the right context and the influence of the right context on the target. The left attention vector L\u03b1 i is generated using both the left context and target representations as in equation 4:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "L\u03b1 i = e \u03b3 (h LC i , T avg ) k j=1 e \u03b3 (h LC j , T avg )", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Where \u03b3 is a score function that computes the importance of h LC i and h RC i in the left and right context respectively. This function is defined as equation 5:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u03b3(h LC j , T avg ) = tanh(h LC j .w.T avg t + b) (5)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Where w is a weight matrix, b is a bias, and T avg t is a transpose of T avg . The right attention vector R\u03b1 i is generated using both the right context and target representations as equation 6:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "R\u03b1 i = e \u03b3 (h RC i , T avg ) n j=1 e \u03b3 (h RC j , T avg )", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
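
{

"text": "A direct sketch of Equations 4-6, assuming single-example tensors (a bilinear tanh score for every context position, followed by a softmax over the positions):\n\nimport torch\n\ndef gamma(h, t_avg, w, b):\n    # Equation 5: tanh(h_i . w . t_avg^t + b), evaluated for all positions i.\n    # h: (seq, hidden), w: (hidden, hidden), t_avg: (hidden,), b: scalar.\n    return torch.tanh(h @ w @ t_avg + b)          # -> (seq,)\n\ndef attention_weights(h, t_avg, w, b=0.0):\n    # Equations 4 and 6: softmax of the scores over the context words.\n    return torch.softmax(gamma(h, t_avg, w, b), dim=0)\n\nk, d = 5, 8                                       # toy sizes\nh_lc = torch.randn(k, d)                          # left-context hidden states\nt_avg = torch.randn(d)                            # average target representation\nw = torch.randn(d, d)\nalpha_left = attention_weights(h_lc, t_avg, w)    # L-alpha, sums to 1\n\nThe right attention vector of Equation 6 is obtained by passing the right-context hidden states through the same two functions.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Proposed Approach",

"sec_num": "3"

},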
|
{ |
|
"text": "Similarly, the target attention vector \u03b2 i is calculated using all of the right context, left context, and the target as 7:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b2 i = e \u03b3 (h t i , LC avg ) m j=1 e \u03b3 (h t j , LC avg ) + e \u03b3 (h t i , RC avg ) m j=1 e \u03b3 (h t j , RC avg )", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The representations of the left context, target, and right context are computed as in equations (8), (9), (10) respectively:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "C l = k i=1 L\u03b1 i h LC i (8) T = m i=1 \u03b2 i h RC i h LC i (9) C r = n i=1 R\u03b1 i h RC i (10)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The final representation is done by concatenating the representations of the three components of the left context, target, and the right context (C l , T , and C r respectively) as one vector (F). Finally, the prediction of sentiment polarity (positive, neutral, negative) is done by using the softmax layer as in equation 11:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "x = tanh(w l .F + b l )", |
|
"eq_num": "(11)" |
|
} |
|
], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Where w l is the weight matrix and b l is the bias.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Proposed Approach", |
|
"sec_num": "3" |
|
}, |
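
{

"text": "The weighted sums of Equations 8-10 and the final prediction of Equation 11 can be sketched as follows (a self-contained toy example with random stand-ins for the attention weights and hidden states):\n\nimport torch\n\ndef weighted_sum(weights, states):\n    # Equations 8-10: representation = sum_i weight_i * h_i.\n    # weights: (seq,), states: (seq, hidden) -> (hidden,)\n    return (weights.unsqueeze(1) * states).sum(dim=0)\n\nk, m, n, d = 5, 2, 6, 8                         # segment lengths, hidden size\nalpha_l = torch.softmax(torch.randn(k), dim=0)  # stand-in for Equation 4\nbeta = torch.softmax(torch.randn(m), dim=0)     # stand-in for Equation 7\nalpha_r = torch.softmax(torch.randn(n), dim=0)  # stand-in for Equation 6\nh_lc, h_t, h_rc = torch.randn(k, d), torch.randn(m, d), torch.randn(n, d)\n\nc_l = weighted_sum(alpha_l, h_lc)               # Equation 8\nt = weighted_sum(beta, h_t)                     # Equation 9\nc_r = weighted_sum(alpha_r, h_rc)               # Equation 10\nf = torch.cat([c_l, t, c_r])                    # final representation F\nw_l, b_l = torch.randn(3, 3 * d), torch.randn(3)\nx = torch.tanh(f @ w_l.T + b_l)                 # Equation 11\nprobs = torch.softmax(x, dim=0)                 # polarity distribution",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Proposed Approach",

"sec_num": "3"

},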
|
{ |
|
"text": "The pre-trained Arabic word embeddings \"AraVec\" (Soliman et al., 2017) was used for the target and the context word embeddings with a dimension of 300 nodes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hyperparameters and Model Training", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "AraVec is an open-source project which provides free-to-use Arabic word embeddings trained on more than 3 billion words from web pages and Wikipedia. A uniform \u00df distribution U(-0:1; 0:1) was used to initialize all outof-vocabulary words and weights. The model employs the momentum optimization algorithm (Qian, 1999) to train the parameters, which adds a fraction of the update vector in the preceding step to the current update vector. The dropout rate is set to 0.5, and the normalization coefficient L2 in the objective function is set to 10 \u2212 5.", |
|
"cite_spans": [ |
|
{ |
|
"start": 305, |
|
"end": 317, |
|
"text": "(Qian, 1999)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hyperparameters and Model Training", |
|
"sec_num": "3.1" |
|
}, |
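
{

"text": "The training setup above can be collected into a configuration sketch (the values are those stated in this section; the learning rate and momentum value are not reported, so the numbers marked as placeholders below are ours):\n\nimport torch\n\nconfig = {\n    'embeddings': 'AraVec, pre-trained, 300-dimensional',\n    'oov_init': ('uniform', -0.1, 0.1),  # U(-0.1, 0.1) for OOV words and weights\n    'dropout': 0.5,\n    'l2': 1e-5,                          # L2 coefficient in the objective\n}\n\nmodel = torch.nn.Linear(300, 3)          # stand-in for the full IA-LSTM model\noptimizer = torch.optim.SGD(model.parameters(),\n                            lr=0.01,               # placeholder\n                            momentum=0.9,          # placeholder (Qian, 1999)\n                            weight_decay=config['l2'])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Hyperparameters and Model Training",

"sec_num": "3.1"

},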
|
{ |
|
"text": "There are two benchmark datasets were used to evaluate the proposed approach, Arabic hotel reviews and Arabic book reviews datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "This dataset was part of the work proposed in task 5 of the Sem-Eval 2016 workshop on target-based sentiment analysis (Pontiki et al., 2016; . The dataset contains the 24,028 TBSA annotated tuples provided (19,226 tuples for training and 4,802 tuples for testing). For the sake of generalization and to avoid the single dialect problem, the original dataset was collected from well-known different Hotels' booking websites such as Booking.com, TripAdvisor.com. The selected reviews in the datasets belongs to Hotels from different Arabian cities in different countries such as Dubai, Beirut, Amman, Mecca, etc. In addition, the dataset was annotated on both text-level (2,291 reviews' texts) and sentence-level (6,029 annotated sentences). In this research, we consider only the sentence-level tasks. This is a manually annotated dataset, whereas, for each sentence, a tuple of target category, opinion target expression, and target polarity were annotated. The sentiment polarity labels (positive, negative, neutral) were used to annotate the polarity of each target or category.", |
|
"cite_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 140, |
|
"text": "(Pontiki et al., 2016;", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Arabic Hotel Reviews", |
|
"sec_num": "4.1" |
|
}, |
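
{

"text": "For readers unfamiliar with the distribution format, the annotated tuples can be read from the XML files with a short sketch; the tag and attribute names below follow the SemEval-2016 Task 5 schema as we understand it and should be checked against the actual files:\n\nimport xml.etree.ElementTree as ET\n\ndef load_tuples(path):\n    # Yields (sentence text, opinion target expression, target category,\n    # polarity) for every annotated opinion in a review file.\n    root = ET.parse(path).getroot()\n    for sentence in root.iter('sentence'):\n        text = sentence.findtext('text')\n        for opinion in sentence.iter('Opinion'):\n            yield (text, opinion.get('target'),\n                   opinion.get('category'), opinion.get('polarity'))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Arabic Hotel Reviews",

"sec_num": "4.1"

},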
|
{ |
|
"text": "This dataset was provided by (Al-Smadi et al., 2015) as a benchmark Human Annotated Arabic Dataset (HAAD). HAAD is a book review dataset in Arabic, which has been constructed and annotated by humans, taking into account the target terms and their polarities. For each review sentence, a tuple consisting of an target-category, targetcategory polarity, target-term, and target-term polarity was extracted and annotated. A sentiment polarity (positive, negative, conflict, neutral) was used to represent both the target-category and targetterm sentiment polarity based on the annotated sentences. This dataset consists of 1513 Arabic book reviews annotated with aspect terms, aspect term polarity, aspect category, and aspect category polarity. In our experiments, we considered the labels positive, negative, and neutral excluding conflict label. Lastly, both datasets were proposed as three separated machine-readable XML format files annotated training, test, and gold test with polarity distribution in Table 1 . In addition, one sentence can contains more than one target which can be assigned with different polarity label. SemEval-TBSA designed a specific way of evaluations for all of the models evaluated on the associated datasets. F1 score is usually used as a metric for category and target execration. However, the metric used for sentiment polarity in the literature review is the accuracy of each model. A sample of Arabic hotel reviews dataset is shown in Fig. 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 52, |
|
"text": "(Al-Smadi et al., 2015)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1005, |
|
"end": 1012, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 1470, |
|
"end": 1476, |
|
"text": "Fig. 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Arabic Book Reviews", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We evaluated the proposed model on the Arabic hotel review and the Arabic book review. The evaluation is done by computing the accuracy of the polarity. The used accuracy metric defined as the number of correctly predicted polarity labels of the (gold) targets, divided by the total number of the gold targets. The test set contains sentences without polarity labels which is expected to be predicted by the model and compared with the labels in the test gold set containing the same sentences. We compared the proposed model performance with the related work and SOTA approaches stated as LSTM (Gers et al., 2000) , IAN (Ma et al., 2017) , INSIGHT-1 (Ruder et al., 2016) , and AB-LSTM-PC (Al-Smadi et al., 2019) evaluated on the same Arabic hotel review dataset. For the Arabic book review dataset, the proposed model is compared with a subset of these approaches: LSTM (Gers et al., 2000) and IAN (Ma et al., 2017) . In addition, the baselines labelled as \"Baseline\" in Table 2 are the baselines provided with the published work of dataset creation. The baseline for the Arabic hotel review dataset was obtained by using SVM with N-grams as features. The baseline for the Arabic book reviews was obtained by using a frequency approach.", |
|
"cite_spans": [ |
|
{ |
|
"start": 595, |
|
"end": 614, |
|
"text": "(Gers et al., 2000)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 621, |
|
"end": 638, |
|
"text": "(Ma et al., 2017)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 651, |
|
"end": 671, |
|
"text": "(Ruder et al., 2016)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 871, |
|
"end": 890, |
|
"text": "(Gers et al., 2000)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 899, |
|
"end": 916, |
|
"text": "(Ma et al., 2017)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 972, |
|
"end": 979, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5" |
|
}, |
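
{

"text": "The accuracy metric defined above is straightforward to state in code (a sketch, where gold and predicted are parallel lists of polarity labels for the gold targets):\n\ndef polarity_accuracy(gold, predicted):\n    # Correctly predicted polarity labels of the gold targets,\n    # divided by the total number of gold targets.\n    assert len(gold) == len(predicted)\n    correct = sum(g == p for g, p in zip(gold, predicted))\n    return correct / len(gold)\n\ngold = ['positive', 'negative', 'neutral', 'positive']\npred = ['positive', 'negative', 'positive', 'positive']\nprint(polarity_accuracy(gold, pred))   # 0.75",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Evaluation",

"sec_num": "5"

},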
|
{ |
|
"text": "The models used in the comparison in this paper are as follows: (i) LSTM uses one LSTM network to model the context and get the hidden state of each word. The average value of all the hidden states is obtained as the final representation and fed to a softmax function to estimate the probability of each sentiment label (Gers et al., 2000) . (ii) IAN uses an interactive attention-based LSTM considering the target and right context only (ignoring the left context) (Ma et al., 2017) . (iii) INSIGHT-1 uses a convolutional neural network (CNN). The model concatenates the target vector with each word embedding and then applies a convolution over it to identify the sentiment polarity (Ruder et al., 2016) . (iv) AB-LSTM-PC uses an attention-based LSTM by adding the target embedding in the input. To generate the attention vectors, the approach models the context using LSTM networks and combines the hidden states with the target embeddings (Al-Smadi et al., 2019) . For INSIGHT-1 and AB-LSTM-PC, we report the previously published results without implementing the models as they were evaluated on the same dataset. The rest of the compared models (LSTM and IAN) were implemented and evaluated on the datasets. Table 2 shows the proposed model's performance compared with other models. The worst performance in this table was for the LSTM model. This is most likely due to the fact that it does not make use of the attention mechanism, confirming findings in previous research (Jiang et al., 2011) . Both the AB-LSTM-PC model (SOTA) and INSIGHT-1 have similar performance and outperform the LSTM model, confirming that the attention mechanism enhances the ability to identify sentiment polarity. Unlike IAN in (Ma et al., 2017) , our proposed IA-LSTM model (represented by \"IA-LSTM\" in the table) takes a further step towards confirming the importance of considering the targets and contexts in the learning process interactively. As shown in Table 2 , the IA-LSTM model achieved the highest performance, outperforming all baselines and the other approaches. This enhancement can be explained by the fact that our model uses three connected attention networks to model the target and contexts. Using such a design, the model can effectively learn the representations of targets and contexts, which can jointly enhance the overall performance of target-based sentiment classification. As we can notice from Table 2 , different approaches show that the more attention that is paid to targets, the higher the accuracy. As we can notice for Table 1 , the class distribution is imbalanced in both Hotel review and Book review datasets. Therefore, we calculate the accuracy for each polarity class in Table 3 . A lower accuracy obtained by class \"Neutral\" as it has the lowest amount of data and a higher accuracy obtained by class \"Positive\".", |
|
"cite_spans": [ |
|
{ |
|
"start": 320, |
|
"end": 339, |
|
"text": "(Gers et al., 2000)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 466, |
|
"end": 483, |
|
"text": "(Ma et al., 2017)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 685, |
|
"end": 705, |
|
"text": "(Ruder et al., 2016)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 943, |
|
"end": 966, |
|
"text": "(Al-Smadi et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 1479, |
|
"end": 1499, |
|
"text": "(Jiang et al., 2011)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1712, |
|
"end": 1729, |
|
"text": "(Ma et al., 2017)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1213, |
|
"end": 1220, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 1945, |
|
"end": 1952, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 2408, |
|
"end": 2415, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 2539, |
|
"end": 2546, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 2697, |
|
"end": 2704, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Compared Models", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Lastly, we verified the effectiveness of using targets and interaction with the left and right contexts in modeling the attention mechanism and the results are displayed in Table 4 . The first model (labelled No-interaction) completely ignored the interaction between the targets and contexts. This model uses three LSTM networks to learn the representations of the left context, target, and right context in their own local attentions without any interaction. In the second model (labelled Rightside interaction), we showed the impact of using the right-side of the interaction mechanism that enables the target to interact with the right contexts. This model uses two LSTM networks to learn the representations of the target and right context and interact with the right context only. Similarly, in the third model (labelled Left-side interaction) the target interacts with the left context only. The fourth model (labelled Full-model) uses the target interaction with both left and right contexts. As shown in Table 4 : Accuracy Analysis of the proposed IA-LSTM model in terms of using the target and interaction techniques.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 173, |
|
"end": 180, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
}, |
|
{ |
|
"start": 1013, |
|
"end": 1020, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results and Analysis", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "two-side interaction showed some improvement in the performance specially the left-side as Arabic is written from right to left which means most of the opinion words comes on the left-side after the target. The best performance was achieved by the Full-model, as we expected, where using the target interactions with the left and right contexts is fully considered, which enhances the overall model performance. Therefore, from this table, we can observe that the interaction of the target between the left and right contexts can contribute greatly in enhancing TBSA. The performance investigation of using target-attention only was well represented by the AB-LSTM-PC model in Table 2 which we avoid to repeat it in our comparison in Table 4 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 677, |
|
"end": 684, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 734, |
|
"end": 741, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results and Analysis", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "In this paper, we proposed a deep learning-based approach to tackle Arabic TBSA. The proposed model \"IA-LSTM\" uses an interactive attentionbased technique for the task. The main idea of the proposed IA-LSTM model is to use three attention networks to interactively model the target and contexts (left and right). It is based on previous work in English that uses only the right context but given the way Arabic is written from right to left, the addition of the left context provides important context information. The model can focus on the important parts in the sentence and identify the sentiment polarity. The proposed approach was evaluated on two different datasets: Arabic hotel reviews and book reviews. Experiments verify that the proposed approach outperforms the baselines and other approaches evaluated on the same datasets. Implementing the interactive attention-based model demonstrated that the model can learn effective features for targets and contexts (left and right) and enhance the performance of Arabic TBSA.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Awatif: A multi-genre corpus for modern standard arabic subjectivity and sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Muhammad", |
|
"middle": [], |
|
"last": "Abdul-Mageed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Diab", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "LREC", |
|
"volume": "515", |
|
"issue": "", |
|
"pages": "3907--3914", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Muhammad Abdul-Mageed and Mona T Diab. 2012. Awatif: A multi-genre corpus for modern standard arabic subjectivity and sentiment analysis. In LREC, volume 515, pages 3907-3914.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Multi-lingual sentiment analysis of financial news streams", |
|
"authors": [ |
|
{ |
|
"first": "Khurshid", |
|
"middle": [], |
|
"last": "Ahmad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "PoS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Khurshid Ahmad. 2006. Multi-lingual sentiment anal- ysis of financial news streams. PoS, page 001.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Identifying mubasher software products through sentiment analysis of arabic tweets", |
|
"authors": [ |
|
{ |
|
"first": "Hamed", |
|
"middle": [], |
|
"last": "Al-Rubaiee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Renxi", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dayou", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "2016 International Conference on Industrial Informatics and Computer Systems (CIICS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hamed Al-Rubaiee, Renxi Qiu, and Dayou Li. 2016. Identifying mubasher software products through sen- timent analysis of arabic tweets. In 2016 Inter- national Conference on Industrial Informatics and Computer Systems (CIICS), pages 1-6. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Deep recurrent neural network vs. support vector machine for aspect-based sentiment analysis of arabic hotels' reviews", |
|
"authors": [ |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Al-Smadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omar", |
|
"middle": [], |
|
"last": "Qawasmeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mahmoud", |
|
"middle": [], |
|
"last": "Al-Ayyoub", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaser", |
|
"middle": [], |
|
"last": "Jararweh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brij", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Journal of computational science", |
|
"volume": "27", |
|
"issue": "", |
|
"pages": "386--393", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad Al-Smadi, Omar Qawasmeh, Mahmoud Al-Ayyoub, Yaser Jararweh, and Brij Gupta. 2018. Deep recurrent neural network vs. support vector machine for aspect-based sentiment analysis of ara- bic hotels' reviews. Journal of computational sci- ence, 27:386-393.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Human annotated arabic dataset of book reviews for aspect based sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Al-Smadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omar", |
|
"middle": [], |
|
"last": "Qawasmeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bashar", |
|
"middle": [], |
|
"last": "Talafha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Muhannad", |
|
"middle": [], |
|
"last": "Quwaider", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "2015 3rd International Conference on Future Internet of Things and Cloud", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "726--730", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad Al-Smadi, Omar Qawasmeh, Bashar Ta- lafha, and Muhannad Quwaider. 2015. Human an- notated arabic dataset of book reviews for aspect based sentiment analysis. In 2015 3rd International Conference on Future Internet of Things and Cloud, pages 726-730. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Using long short-term memory deep neural networks for aspectbased sentiment analysis of arabic reviews", |
|
"authors": [ |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Al-Smadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bashar", |
|
"middle": [], |
|
"last": "Talafha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mahmoud", |
|
"middle": [], |
|
"last": "Al-Ayyoub", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaser", |
|
"middle": [], |
|
"last": "Jararweh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Journal of Machine Learning and Cybernetics", |
|
"volume": "10", |
|
"issue": "8", |
|
"pages": "2163--2175", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad Al-Smadi, Bashar Talafha, Mahmoud Al- Ayyoub, and Yaser Jararweh. 2019. Using long short-term memory deep neural networks for aspect- based sentiment analysis of arabic reviews. Interna- tional Journal of Machine Learning and Cybernet- ics, 10(8):2163-2175.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A note on extracting 'sentiments' in financial news in english, arabic & urdu", |
|
"authors": [ |
|
{ |
|
"first": "Yousif", |
|
"middle": [], |
|
"last": "Almas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Khurshid", |
|
"middle": [], |
|
"last": "Ahmad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "The Second Workshop on Computational Approaches to Arabic Script-based Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--12", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yousif Almas and Khurshid Ahmad. 2007. A note on extracting 'sentiments' in financial news in english, arabic & urdu. In The Second Workshop on Com- putational Approaches to Arabic Script-based Lan- guages, pages 1-12.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Sentiment analysis in arabic: A review of the literature", |
|
"authors": [ |
|
{ |
|
"first": "Naaima", |
|
"middle": [], |
|
"last": "Boudad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rdouan", |
|
"middle": [], |
|
"last": "Faizi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Ain Shams Engineering Journal", |
|
"volume": "9", |
|
"issue": "4", |
|
"pages": "2479--2490", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Naaima Boudad, Rdouan Faizi, Rachid Oulad Haj Thami, and Raddouane Chiheb. 2018. Sentiment analysis in arabic: A review of the literature. Ain Shams Engineering Journal, 9(4):2479-2490.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Word embeddings and convolutional neural network for arabic sentiment classification", |
|
"authors": [ |
|
{ |
|
"first": "Abdelghani", |
|
"middle": [], |
|
"last": "Dahou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shengwu", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junwei", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of coling 2016, the 26th international conference on computational linguistics: Technical papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2418--2427", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abdelghani Dahou, Shengwu Xiong, Junwei Zhou, Mohamed Houcine Haddoud, and Pengfei Duan. 2016. Word embeddings and convolutional neural network for arabic sentiment classification. In Pro- ceedings of coling 2016, the 26th international con- ference on computational linguistics: Technical pa- pers, pages 2418-2427.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "A study of the effects of preprocessing strategies on sentiment analysis for arabic text", |
|
"authors": [ |
|
{ |
|
"first": "Rehab", |
|
"middle": [], |
|
"last": "Duwairi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mahmoud", |
|
"middle": [], |
|
"last": "El-Orfali", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Journal of Information Science", |
|
"volume": "40", |
|
"issue": "4", |
|
"pages": "501--513", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rehab Duwairi and Mahmoud El-Orfali. 2014. A study of the effects of preprocessing strategies on senti- ment analysis for arabic text. Journal of Information Science, 40(4):501-513.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Mining arabic business reviews", |
|
"authors": [ |
|
{ |
|
"first": "Mohamed", |
|
"middle": [], |
|
"last": "Elhawary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohamed", |
|
"middle": [], |
|
"last": "Elfeky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "2010 ieee international conference on data mining workshops", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1108--1113", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohamed Elhawary and Mohamed Elfeky. 2010. Min- ing arabic business reviews. In 2010 ieee interna- tional conference on data mining workshops, pages 1108-1113. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Arabic text classification using deep learning models", |
|
"authors": [ |
|
{ |
|
"first": "Ashraf", |
|
"middle": [], |
|
"last": "Elnagar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ridhwan", |
|
"middle": [], |
|
"last": "Al-Debsi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omar", |
|
"middle": [], |
|
"last": "Einea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Information Processing & Management", |
|
"volume": "57", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashraf Elnagar, Ridhwan Al-Debsi, and Omar Einea. 2020. Arabic text classification using deep learning models. Information Processing & Management, 57(1):102121.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Sentence-level and documentlevel sentiment mining for arabic texts", |
|
"authors": [ |
|
{ |
|
"first": "Noura", |
|
"middle": [], |
|
"last": "Farra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elie", |
|
"middle": [], |
|
"last": "Challita", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rawad", |
|
"middle": [], |
|
"last": "Abou Assi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hazem", |
|
"middle": [], |
|
"last": "Hajj", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "2010 IEEE international conference on data mining workshops", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1114--1119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Noura Farra, Elie Challita, Rawad Abou Assi, and Hazem Hajj. 2010. Sentence-level and document- level sentiment mining for arabic texts. In 2010 IEEE international conference on data mining work- shops, pages 1114-1119. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Learning to forget: Continual prediction with lstm", |
|
"authors": [ |
|
{

"first": "Felix",

"middle": [

"A"

],

"last": "Gers",

"suffix": ""

},

{

"first": "J\u00fcrgen",

"middle": [

"A"

],

"last": "Schmidhuber",

"suffix": ""

},

{

"first": "Fred",

"middle": [

"A"

],

"last": "Cummins",

"suffix": ""

}
|
], |
|
"year": 2000, |
|
"venue": "Neural Computation", |
|
"volume": "12", |
|
"issue": "10", |
|
"pages": "2451--2471", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Felix A Gers, J\u00fcrgen A Schmidhuber, and Fred A Cum- mins. 2000. Learning to forget: Continual predic- tion with lstm. Neural Computation, 12(10):2451- 2471.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Mining and summarizing customer reviews", |
|
"authors": [ |
|
{ |
|
"first": "Minqing", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the tenth ACM SIGKDD international conference on Knowledge discovery and data mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "168--177", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minqing Hu and Bing Liu. 2004. Mining and summa- rizing customer reviews. In Proceedings of the tenth ACM SIGKDD international conference on Knowl- edge discovery and data mining, pages 168-177.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Target-dependent twitter sentiment classification", |
|
"authors": [ |
|
{ |
|
"first": "Long", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mo", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaohua", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tiejun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 49th annual meeting of the association for computational linguistics: human language technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "151--160", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Long Jiang, Mo Yu, Ming Zhou, Xiaohua Liu, and Tiejun Zhao. 2011. Target-dependent twitter senti- ment classification. In Proceedings of the 49th an- nual meeting of the association for computational linguistics: human language technologies, pages 151-160.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Aspect term extraction with history attention and selective transformation", |
|
"authors": [ |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lidong", |
|
"middle": [], |
|
"last": "Bing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piji", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wai", |
|
"middle": [], |
|
"last": "Lam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhimou", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1805.00760" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xin Li, Lidong Bing, Piji Li, Wai Lam, and Zhimou Yang. 2018. Aspect term extraction with history at- tention and selective transformation. arXiv preprint arXiv:1805.00760.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Interactive attention networks for aspect-level sentiment classification", |
|
"authors": [ |
|
{ |
|
"first": "Dehong", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sujian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houfeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1709.00893" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dehong Ma, Sujian Li, Xiaodong Zhang, and Houfeng Wang. 2017. Interactive attention networks for aspect-level sentiment classification. arXiv preprint arXiv:1709.00893.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Targeted aspect-based sentiment analysis via embedding commonsense knowledge into an attentive lstm", |
|
"authors": [ |
|
{ |
|
"first": "Yukun", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haiyun", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yukun Ma, Haiyun Peng, and Erik Cambria. 2018. Tar- geted aspect-based sentiment analysis via embed- ding commonsense knowledge into an attentive lstm. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "An enhanced framework for aspect-based sentiment analysis of hotels' reviews: Arabic reviews case study", |
|
"authors": [ |
|
{ |
|
"first": "Al-Smadi", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omar", |
|
"middle": [], |
|
"last": "Qwasmeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bashar", |
|
"middle": [], |
|
"last": "Talafha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mahmoud", |
|
"middle": [], |
|
"last": "Al-Ayyoub", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaser", |
|
"middle": [], |
|
"last": "Jararweh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elhadj", |
|
"middle": [], |
|
"last": "Benkhelifa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "11th International Conference for Internet Technology and Secured Transactions (ICITST)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "98--103", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "AL-Smadi Mohammad, Omar Qwasmeh, Bashar Ta- lafha, Mahmoud Al-Ayyoub, Yaser Jararweh, and Elhadj Benkhelifa. 2016. An enhanced framework for aspect-based sentiment analysis of hotels' re- views: Arabic reviews case study. In 2016 11th International Conference for Internet Technology and Secured Transactions (ICITST), pages 98-103. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Aspectbased sentiment analysis using adaptive aspectbased lexicons", |
|
"authors": [ |
|
{

"first": "Mohammad",

"middle": [

"Erfan"

],

"last": "Mowlaei",

"suffix": ""

},

{

"first": "Mohammad",

"middle": [],

"last": "Saniee Abadeh",

"suffix": ""

},

{

"first": "Hamidreza",

"middle": [],

"last": "Keshavarz",

"suffix": ""

}
|
], |
|
"year": 2020, |
|
"venue": "Expert Systems with Applications", |
|
"volume": "148", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohammad Erfan Mowlaei, Mohammad Saniee Abadeh, and Hamidreza Keshavarz. 2020. Aspect- based sentiment analysis using adaptive aspect- based lexicons. Expert Systems with Applications, 148:113234.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Astd: Arabic sentiment tweets dataset", |
|
"authors": [ |
|
{ |
|
"first": "Mahmoud", |
|
"middle": [], |
|
"last": "Nabil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohamed", |
|
"middle": [], |
|
"last": "Aly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Atiya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 conference on empirical methods in natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2515--2519", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mahmoud Nabil, Mohamed Aly, and Amir Atiya. 2015. Astd: Arabic sentiment tweets dataset. In Proceed- ings of the 2015 conference on empirical methods in natural language processing, pages 2515-2519.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Semeval-2016 task 5: Aspect based sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Pontiki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitrios", |
|
"middle": [], |
|
"last": "Galanis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haris", |
|
"middle": [], |
|
"last": "Papageorgiou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ion", |
|
"middle": [], |
|
"last": "Androutsopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suresh", |
|
"middle": [], |
|
"last": "Manandhar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Al-Smadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mahmoud", |
|
"middle": [], |
|
"last": "Al-Ayyoub", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanyan", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Orph\u00e9e", |
|
"middle": [], |
|
"last": "De Clercq", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "International workshop on semantic evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "19--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maria Pontiki, Dimitrios Galanis, Haris Papageor- giou, Ion Androutsopoulos, Suresh Manandhar, Mo- hammad Al-Smadi, Mahmoud Al-Ayyoub, Yanyan Zhao, Bing Qin, Orph\u00e9e De Clercq, et al. 2016. Semeval-2016 task 5: Aspect based sentiment anal- ysis. In International workshop on semantic evalua- tion, pages 19-30.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Extracting product features and opinions from reviews", |
|
"authors": [ |
|
{ |
|
"first": "Ana-Maria", |
|
"middle": [], |
|
"last": "Popescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Orena", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Natural language processing and text mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9--28", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ana-Maria Popescu and Orena Etzioni. 2007. Extract- ing product features and opinions from reviews. In Natural language processing and text mining, pages 9-28. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "On the momentum term in gradient descent learning algorithms", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ning Qian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Neural networks", |
|
"volume": "12", |
|
"issue": "1", |
|
"pages": "145--151", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ning Qian. 1999. On the momentum term in gradi- ent descent learning algorithms. Neural networks, 12(1):145-151.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Expanding domain sentiment lexicon through double propagation", |
|
"authors": [ |
|
{ |
|
"first": "Guang", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiajun", |
|
"middle": [], |
|
"last": "Bu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chun", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "IJCAI", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "1199--1204", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guang Qiu, Bing Liu, Jiajun Bu, and Chun Chen. 2009. Expanding domain sentiment lexicon through dou- ble propagation. In IJCAI, volume 9, pages 1199- 1204. Citeseer.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Insight-1 at semeval-2016 task 5: Deep learning for multilingual aspect-based sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Parsa", |
|
"middle": [], |
|
"last": "Ghaffari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John G", |
|
"middle": [], |
|
"last": "Breslin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1609.02748" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Ruder, Parsa Ghaffari, and John G Breslin. 2016. Insight-1 at semeval-2016 task 5: Deep learn- ing for multilingual aspect-based sentiment analysis. arXiv preprint arXiv:1609.02748.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Bidirectional recurrent neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{

"first": "Kuldip",

"middle": [

"K"

],

"last": "Paliwal",

"suffix": ""

}
|
], |
|
"year": 1997, |
|
"venue": "IEEE transactions on Signal Processing", |
|
"volume": "45", |
|
"issue": "11", |
|
"pages": "2673--2681", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Schuster and Kuldip K Paliwal. 1997. Bidirec- tional recurrent neural networks. IEEE transactions on Signal Processing, 45(11):2673-2681.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Sentencelevel arabic sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Amira", |
|
"middle": [], |
|
"last": "Shoukry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Rafea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "2012 International Conference on Collaboration Technologies and Systems (CTS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "546--550", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amira Shoukry and Ahmed Rafea. 2012. Sentence- level arabic sentiment analysis. In 2012 Interna- tional Conference on Collaboration Technologies and Systems (CTS), pages 546-550. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Aravec: A set of arabic word embedding models for use in arabic nlp", |
|
"authors": [ |
|
{

"first": "Abu Bakr",

"middle": [],

"last": "Soliman",

"suffix": ""

},

{

"first": "Kareem",

"middle": [],

"last": "Eissa",

"suffix": ""

},

{

"first": "Samhaa",

"middle": [

"R"

],

"last": "El-Beltagy",

"suffix": ""

}
|
], |
|
"year": 2017, |
|
"venue": "Procedia Computer Science", |
|
"volume": "117", |
|
"issue": "", |
|
"pages": "256--265", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abu Bakr Soliman, Kareem Eissa, and Samhaa R El- Beltagy. 2017. Aravec: A set of arabic word embed- ding models for use in arabic nlp. Procedia Com- puter Science, 117:256-265.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Attentional encoder network for targeted sentiment classification", |
|
"authors": [ |
|
{ |
|
"first": "Youwei", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiahai", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyue", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanghui", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1902.09314" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Youwei Song, Jiahai Wang, Tao Jiang, Zhiyue Liu, and Yanghui Rao. 2019. Attentional encoder network for targeted sentiment classification. arXiv preprint arXiv:1902.09314.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Ufal at semeval-2016 task 5: recurrent neural networks for sentence classification", |
|
"authors": [ |
|
{ |
|
"first": "Ale\u0161", |
|
"middle": [], |
|
"last": "Tamchyna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kate\u0159ina", |
|
"middle": [], |
|
"last": "Veselovsk\u00e1", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 10th international workshop on semantic evaluation (SEMEVAL-2016)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "367--371", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ale\u0161 Tamchyna and Kate\u0159ina Veselovsk\u00e1. 2016. Ufal at semeval-2016 task 5: recurrent neural networks for sentence classification. In Proceedings of the 10th international workshop on semantic evaluation (SEMEVAL-2016), pages 367-371.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Effective lstms for targetdependent sentiment classification", |
|
"authors": [ |
|
{ |
|
"first": "Duyu", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaocheng", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1512.01100" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Duyu Tang, Bing Qin, Xiaocheng Feng, and Ting Liu. 2015. Effective lstms for target- dependent sentiment classification. arXiv preprint arXiv:1512.01100.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Aspect based sentiment analysis with gated convolutional networks", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1805.07043" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Xue and Tao Li. 2018. Aspect based sentiment analysis with gated convolutional networks. arXiv preprint arXiv:1805.07043.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Movie review mining and summarization", |
|
"authors": [ |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Zhuang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Feng", |
|
"middle": [], |
|
"last": "Jing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiao-Yan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 15th ACM international conference on Information and knowledge management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "43--50", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Li Zhuang, Feng Jing, and Xiao-Yan Zhu. 2006. Movie review mining and summarization. In Proceedings of the 15th ACM international conference on Infor- mation and knowledge management, pages 43-50.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "The general architecture of the proposed model for Arabic target-based sentiment analysis.", |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Sample of Arabic hotel reviews dataset.", |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"content": "<table/>", |
|
"text": "left and right) and target are [h LC1 , h LC2 , ..., h LCk ], [h RC1 , h RC2 , ..., h RCn ] and [h t1 , h t2 , ..., h tm ] respectively. The initial representation of contexts (left -equation", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"content": "<table/>", |
|
"text": "Polarity distribution in both Hotel and Book reviews datasets", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF4": { |
|
"content": "<table><tr><td colspan=\"4\">Dataset Positive Negative Neutral</td></tr><tr><td>Hotel R</td><td>89.86</td><td>83.29</td><td>76.14</td></tr><tr><td>Book R</td><td>86.42</td><td>87.93</td><td>65.82</td></tr></table>", |
|
"text": "Accuracy comparison between the proposed IA-LSTM model performance for the TBSA task and the baseline results based on SVM along with n-gram features and approaches from related work.", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"content": "<table/>", |
|
"text": "Accuracy for each class in both Hotel reviews and Book reviews datasets", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF6": { |
|
"content": "<table><tr><td>Model</td><td>Hotel R</td><td>Book R</td></tr><tr><td>No-interaction</td><td>76.75</td><td>74.64</td></tr><tr><td colspan=\"2\">Right-side interaction 78.21</td><td>75.32</td></tr><tr><td>Left-side interaction</td><td>81.90</td><td>78.96</td></tr><tr><td>Full-model</td><td>83.10</td><td>80.82</td></tr></table>", |
|
"text": ", a lower performance was achieved by the No-interaction model. Using", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |