|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T06:34:47.466254Z" |
|
}, |
|
"title": "Intelligent Analyses on Storytelling for Impact Measurement", |
|
"authors": [ |
|
{ |
|
"first": "Koen", |
|
"middle": [], |
|
"last": "Kicken", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"U" |
|
], |
|
"last": "Leuven", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Tessa", |
|
"middle": [], |
|
"last": "De Maesschalck", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Bart", |
|
"middle": [], |
|
"last": "Vanrumste", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "De Keyser", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Hee", |
|
"middle": [], |
|
"last": "Reen", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Shim", |
|
"middle": [], |
|
"last": "Ku Leuven", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper explores how Dutch diary fragments, written by family coaches in the social sector, can be analysed automatically using machine learning techniques to quantitatively measure the impact of social coaching. The focus lays on two tasks: determining which sentiment a fragment contains (sentiment analysis) and investigating which fundamental social rights (education, employment, legal aid, etc.) are addressed in the fragment. To train and test the new algorithms, a dataset consisting of 1715 Dutch diary fragments is used. These fragments are manually labelled on sentiment and on the applicable fundamental social rights. The sentiment analysis models were trained to classify the fragments into three classes: negative, neutral or positive. Fine-tuning the Dutch pre-trained Bidirectional Encoder Representations from Transformers (BERTje) (de Vries et al., 2019) language model surpassed the more classic algorithms by correctly classifying 79.6% of the fragments on the sentiment analysis, which is considered as a good result. This technique also achieved the best results in the identification of the fundamental rights, where for every fragment the three most likely fundamental rights were given as output. In this way, 93% of the present fundamental rights were correctly recognised. To our knowledge, we are the first to try to extract social rights from written text with the help of Natural Language Processing techniques.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper explores how Dutch diary fragments, written by family coaches in the social sector, can be analysed automatically using machine learning techniques to quantitatively measure the impact of social coaching. The focus lays on two tasks: determining which sentiment a fragment contains (sentiment analysis) and investigating which fundamental social rights (education, employment, legal aid, etc.) are addressed in the fragment. To train and test the new algorithms, a dataset consisting of 1715 Dutch diary fragments is used. These fragments are manually labelled on sentiment and on the applicable fundamental social rights. The sentiment analysis models were trained to classify the fragments into three classes: negative, neutral or positive. Fine-tuning the Dutch pre-trained Bidirectional Encoder Representations from Transformers (BERTje) (de Vries et al., 2019) language model surpassed the more classic algorithms by correctly classifying 79.6% of the fragments on the sentiment analysis, which is considered as a good result. This technique also achieved the best results in the identification of the fundamental rights, where for every fragment the three most likely fundamental rights were given as output. In this way, 93% of the present fundamental rights were correctly recognised. To our knowledge, we are the first to try to extract social rights from written text with the help of Natural Language Processing techniques.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In Leuven, Belgium, there are many charitable organisations that support socially vulnerable people. For evaluating the progress of their work, each of them has their own system, mostly handwritten on paper. In 2018, the local community organisation vzw Buurtwerk 't Lampeke started a cooperation with software company Kunlabora. They wanted to obtain qualitative insights in their coaching, since they were lumbered with a lot of administration. The result was a tailor-made software tool named Mezuri 1 , a Java application for organisations supporting socially vulnerable people to understand and measure the impact of their coaching tracks. In this tool, collaborators called bridging coaches can keep diary fragments (written in Dutch) for different families. In this way, the bridging coaches can, with the help of intelligent analyses, keep track of how the family is doing and which fundamental social rights (regarding education, work, etc.) are acquired. The most important aspect of Mezuri is that these diaries have open-ended instead of closed-ended inputs. This allows the coaches of the organisations to write free text and focus on the family instead of having to tick boxes or fill in scales. It is then the task of the Mezuri programme itself to get more objective, scale-like information out of these text fragments with the help of intelligent analyses.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, there is a focus on improving the following two algorithms important for the bridging coaches:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "1. Sentiment analysis: To find out how a family is doing, Mezuri determines how positive or how negative a diary fragment is.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "2. Extracting the social rights: Social rights are basic rights every human should have, for example legal assistance, healthcare and education. There are eight of them (see Table 2 ) and the bridging coaches strive to accomplish them for every family. To this end, it is important to know on which rights they have already focused.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 174, |
|
"end": 182, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This paper investigates which algorithms obtain the most accurate analysis on these Dutch text fragments. This involves several challenges. Firstly, little data exists and the available data are private. Secondly, the diary fragments consist of subjective information which is also not always 100% correct: sometimes, a family does not immediately tell the truth or perhaps glosses over reality. This make it even more difficult to objectify and quantify the information written in the diary fragments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Contributions 1) We show that by fine-tuning the existing BERTje language model on classifying a Dutch diary fragment into three classes (negative, neutral, positive) an accuracy of 80% can be reached. 2) We show that this technique can also be used to fine-tune the model to recognise fundamental rights: when for every fragment the three most likely rights are given as output, 93% of the present rights are correctly recognised.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Sentiment Analysis (SA) is a hot topic in Natural Language Processing (NLP). It is often used on reviews or social media posts to monitor the reputation of a service, person or product. A text fragment is then classified as positive or negative (i.e. binary classification) or, in case of a ternary classification, as neutral.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In the past, often lexicon-based (Aaldering and Vliegenthart, 2016) or machine learning with bagof-words (Pang et al., 2002) approaches were used for sentiment analysis. More recently, the use of embeddings (Rudkowsky et al., 2018) and neural networks became more popular (Prabha and Umarani Srikanth, 2019) in NLP. However, a disadvantage of using neural networks is the large amount of training data they require, which can be limited by using transfer learning. In NLP, this is often done using pre-trained language models. BERT (Bidirectional Encoder Representations from Transformers) is such a language model made available by Google (Devlin et al., 2018) . BERT has proven to achieve state-of-the-art results on various tasks including sentiment analysis, as in the study of Munikar et al. (2019) where English 1-sentence movie reviews were classified into 5 classes, reaching accuracies of up to 84%. Therefore, it is investigated whether this approach can also achieve high performance on sentiment analysis with the Mezuri dataset. However, the dataset of this paper contains Dutch text fragments and mostly longer than one sentence, making the task more complex.", |
|
"cite_spans": [ |
|
{ |
|
"start": 33, |
|
"end": 67, |
|
"text": "(Aaldering and Vliegenthart, 2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 105, |
|
"end": 124, |
|
"text": "(Pang et al., 2002)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 207, |
|
"end": 231, |
|
"text": "(Rudkowsky et al., 2018)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 640, |
|
"end": 661, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 782, |
|
"end": 803, |
|
"text": "Munikar et al. (2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Since December 2019, there also exists a Dutch BERT model BERTje (de Vries et al., 2019). Trained on 2.4 billion Dutch tokens, this monolingual model outperforms BERT's equally-sized multilingual model in various tasks, including sentiment analysis. To this end, BERTje will be used instead of the multi-language version of BERT.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "There is a lot of research done on SA in other use-cases. For example, Gr\u00e4bner et al. (2012) classified customer reviews of hotels as good or bad (i.e. binary classification) with a Lexicon-based approach yielding an accuracy of 90%. Bouazizi and Ohtsuki (2016) uses machine learning algorithms to classify tweets into 3 different classes achieving an accuracy equal to 70%. However, the task in Mezuri is more difficult than the task in Gr\u00e4bner et al. (2012) in several ways. Firstly, in Mezuri, a fragment is classified into three classes instead of two. Secondly, the fragments in Mezuri are written in Dutch, a language on which less research has been done than on English. Lastly, assigning labels to the fragments of Mezuri is a subjective task, while when two people label reviews or social media posts, they will probably reach a higher agreement score.", |
|
"cite_spans": [ |
|
{ |
|
"start": 71, |
|
"end": 92, |
|
"text": "Gr\u00e4bner et al. (2012)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 234, |
|
"end": 261, |
|
"text": "Bouazizi and Ohtsuki (2016)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 438, |
|
"end": 459, |
|
"text": "Gr\u00e4bner et al. (2012)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To our knowledge, we are the first to try to extract social rights from written text. This is considered a multi-label problem, as a single fragment can contain multiple social rights. The task is then to predict the set of correct labels. This is different from the sentiment analysis task, where a fragment belongs to a single class. According to Madjarov et al. (2012) , there are three ways to tackle the multi-label classification problem: adapt the method, transform the problem and ensembles.", |
|
"cite_spans": [ |
|
{ |
|
"start": 349, |
|
"end": 371, |
|
"text": "Madjarov et al. (2012)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "As described by Szyma\u0144ski and Kajdanowicz (2017) , the first method is based on the idea to adapt the single-label methods in a way they can cope with multi-labelled data. A method that uses this principal is Multi-label k Nearest Neighbours (MLkNN) (Szyma\u0144ski and Kajdanowicz, 2017) . An advantage of this method is that the correlations between the labels are taken into account.", |
|
"cite_spans": [ |
|
{ |
|
"start": 16, |
|
"end": 48, |
|
"text": "Szyma\u0144ski and Kajdanowicz (2017)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 250, |
|
"end": 283, |
|
"text": "(Szyma\u0144ski and Kajdanowicz, 2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The second idea is to transform the multi-label problem into multiple single-label problems. Binary Relevance, Classifier Chains and Label Powerset (Szyma\u0144ski and Kajdanowicz, 2017) use this approach.", |
|
"cite_spans": [ |
|
{ |
|
"start": 148, |
|
"end": 181, |
|
"text": "(Szyma\u0144ski and Kajdanowicz, 2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "A third manner of extracting social rights in a supervised way is with ensemble methods. An example is RAkEL (Szyma\u0144ski and Kajdanowicz, 2017) , where random k-labelsets are given to the Label Powerset method.", |
|
"cite_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 142, |
|
"text": "(Szyma\u0144ski and Kajdanowicz, 2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "About ten bridging coaches of CAW Oost-Brabant and Werfgezinscoach wrote the diary fragments in which they reflect on a meeting with a family. Together they coached nineteen families, from which they made 460 high-quality, Dutch text fragments available. For this project, they anonimysed these text fragments by replacing the names with initials. The original diary fragments have an average of about 188 words per diary fragment with a standard deviation of 198.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "First, the diary fragments are split into smaller fragments since it enlarges the number of fragments, as more training examples generally means better performance of machine learning models. Moreover, it makes it easier to label a fragment since a longer fragment often consists of multiple parts talking about different subjects, making it more complex. This splitting is done automatically on every new line character (\\n). This splitting enlarged the dataset from 460 to 1715 fragments. Figure 1 shows the variation in length of this new dataset, with a new average length of 50 words and standard deviation of 47.5. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 491, |
|
"end": 499, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Splitting the dataset", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Sentiment labels Every fragment is manually labelled (-1, 0 or 1) describing the sentiment going from negative (-1) to positive (1), with 0 representing a neutral fragment. However, assigning a label to each fragment is a subjective task: different people label some fragments differently than others. To quantify and inspect these differences, two people labelled the dataset. When analysing the labels, 16.4% (i.e. 271 fragments) were found to be labelled differently. 1.6% was even labelled inversely: a fragment once labelled as being positive, once as being negative. For determining the final label, these differences were discussed to come to a consensus on the best suitable label. Table 1 shows some examples of fragments that were labelled differently. This shows that capturing the overall sentiment of a diary fragment is a rather complex and subjective task. When splitting in a training and a test set, all fragments are shuffled and divided randomly.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 690, |
|
"end": 697, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Labelling the dataset", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The result is a dataset with 1715 fragments labelled on their sentiment. Figure 2 shows the distribution among the different labels. This makes clear that this dataset has more negative than positive fragments.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 73, |
|
"end": 81, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Labelling the dataset", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Pauze. Iets gezellig gaan drinken in caf\u00e9 X. Het is\u00e9\u00e9n van de weinige caf\u00e9s waar X nog binnen mag. Hoe lang zou dat nog duren? (Break. Going for a cozy drink in bar x. It is one of the few places where x is still welcome. But for how long?) Schriftelijk is inderdaad moeilijker. Hij schrijft zeer onbeholpen, een beetje op het niveau van de lagere school. Spelling is ook erg moeilijk voor hem. Hij maakt wel vooruitgang, mede omdat hij zo gemotiveerd is. Zijn handschrift wordt met de week leesbaarder en hij begint de juiste strategie\u00ebn toe te passen voor spelling (Writing is indeed more difficult. Spelling is also very hard for him. He does make progress, partly because he is so motivated. His handwriting is becoming more readable and he starts to apply the right strategies.) De ouders hebben al veel samen gepraat en gehuild. Ze hebben veel verdriet. Ik benoem deze sterkte want emotie tonen is geen evidentie voor papa (The parents have already talked and cried a lot together. They are very sad. I mention this strength because showing emotion is not obvious for dad.) Het valt me op hoe vaak er iemand ziek is van het gezin. Gelukkig kan er steeds beroep gedaan worden op de huisarts. (I notice how often someone is sick in the family. Fortunately, the doctor can always be called upon.) In de auto vraag hij nog even het Frans te oefenen met hem. Het gaat echter nog altijd heel moeizaam. (In the car, he asks to practice French with him. However, it is still very difficult for him.) De ouders hadden veel problemen veroorzaakt op de school. Maar de school heeft deze ondertussen kunnen oplossen. (The parents had caused many problems at the school. But the school has now been able to solve these.) Table 2 ). This task was considered as less subjective than labelling the sentiment of a fragment, and hence it was not investigated what the agreement would be between different people labelling the same fragments. 
Figure 3 shows how many fragments there are for each social right. In this figure, one can see that the distribution among these social rights is not balanced. Every fragment may contain none, one or several social rights. A fragment may, for example, contain some sentences about school and some sentences about health, which are 2 different social rights. Figure 4 shows that in most fragments there is only one right present and that there are 215 fragments in which there are 2 social rights mentioned. On the other hand, it is also possible that a fragment does not contain any social right. The fragment is then labelled as category 8: not applicable or miscellaneous. This is the case for 556 fragments.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1714, |
|
"end": 1721, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 1930, |
|
"end": 1938, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 2288, |
|
"end": 2296, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Labelling the dataset", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Before feeding the data to the Machine Learning model, the data needs to be standardised. This can be done by removing the special characters and the unnecessary blank spaces. Furthermore, the replacement of capital letters by lowercase letters, the removal of stop words and stemming or lemmatisation (Jurafsky and Martin, 2014) is investigated. To avoid bias (i.e. assigning a sentiment to gender specific pronouns), personal pronouns and names Next, the text fragments need to be vectorised (i.e. transformed into a numerical representation). In this paper, this is performed in two ways. Firstly, with a bag-of-words (BOW) approach (Jurafsky and Martin, 2014) and secondly, with word embeddings (Levy and Goldberg, 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 302, |
|
"end": 329, |
|
"text": "(Jurafsky and Martin, 2014)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 636, |
|
"end": 663, |
|
"text": "(Jurafsky and Martin, 2014)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 699, |
|
"end": 724, |
|
"text": "(Levy and Goldberg, 2014)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Previously used algorithm -Pattern In the past, Pattern (De Smedt and Daelemans, 2012) was used to perform the sentiment analysis in Mezuri. It returns a continuous score between -1 (very negative) and +1 (very positive). The algorithm is based on a lexicon of adjectives and then calculates a score based on the presence of certain adjectives, as mentioned by De Smedt and Daelemans (2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 361, |
|
"end": 390, |
|
"text": "De Smedt and Daelemans (2012)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Algorithms for sentiment analysis", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Fine-tuning BERTje In this paper, the Dutch pre-trained language model BERTje (de Vries et al., 2019), which has the same architecture as BERT (Devlin et al., 2018) , is used. To fine-tune BERTje on this specific task (classifying a diary fragment as being negative, neutral or positive), first the data is standardised as mentioned above. Then, every fragment is split into tokens and to the start of every fragment and to the end of each sentence, the tokens [CLS] and [SEP] are added respectively. Finally, the tokens are mapped to their vector representation. For more information on how this is performed, consult the paper of Devlin et al. (2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 164, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 632, |
|
"end": 652, |
|
"text": "Devlin et al. (2018)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Algorithms for sentiment analysis", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Next, all fragments are truncated so that they have the same length. In case the fragment consists of too many tokens, the last ones are ignored, in case the fragment is too short, it is padded with zeros. The ideal length for this is examined by varying it, see section 6.1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Algorithms for sentiment analysis", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For fine-tuning, an additional pooler layer (with a linear layer and a tanh as activation function) and an extra single linear layer are added on top of BERTje for classification, as Figure 5 shows. These linear layers apply a linear transformation on the data (y = xA T + b, with x the input vector of dimension 768, and y the output vector of dimension 768 for the pooler layer, and dimension 3 for the classification layer). For these layers, only the vector corresponding to the [CLS] token is used, since BERT is trained to use this vector for classification tasks (Devlin et al., 2018) . This is possible thanks to the transformer encoder layers where the whole fragment gets encoded in this single 768-wide vector. The activation and classification layers are added using a model named BertForSequenceClassification from Transformers (a package from Hugging Face which provides an interface to efficiently work with pre-trained language models, provided by Wolf et al. (2019) ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 570, |
|
"end": 591, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 964, |
|
"end": 982, |
|
"text": "Wolf et al. (2019)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 183, |
|
"end": 191, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Algorithms for sentiment analysis", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Then, the network is trained using the AdamW optimisation algorithm (Kingma and Ba, 2017) . For training, the data is divided in batches of size 16. To find the optimal number of epochs, this number is varied. The learning rate is set to 2e-5, which was found by Sun et al. (2019) to be a good number to avoid catastrophic forgetting. Another method to avoid this is to freeze certain layers of the model. The parameters of a frozen layer then no longer change when fine-tuning on a specific task. Often, the lower layers are frozen, as also performed by Lee et al. (2019) . Therefore, in this paper it is investigated what the influence is of freezing the first N layers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 89, |
|
"text": "(Kingma and Ba, 2017)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 263, |
|
"end": 280, |
|
"text": "Sun et al. (2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 555, |
|
"end": 572, |
|
"text": "Lee et al. (2019)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Algorithms for sentiment analysis", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Extracting socials rights is considered as a multilabel problem as a single fragment can contain multiple social rights (see Figure 4 ). To solve this prob-lem, MLkNN, Binary Relevance, Classifier Chains, Label Powerset and RAkEL are used (Szyma\u0144ski and Kajdanowicz, 2017) , as explained in section 2.", |
|
"cite_spans": [ |
|
{ |
|
"start": 239, |
|
"end": 272, |
|
"text": "(Szyma\u0144ski and Kajdanowicz, 2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 125, |
|
"end": 133, |
|
"text": "Figure 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Algorithms for extracting social rights", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "However, apart from all these methods, BERTje is also fine-tuned on the task of extracting the social rights. This is done using Simple Transformers 2 , a library built on top of the Huggingface Transformers library. This library is used since it offers a framework that directly accepts multi-labelled data. The used BERT model (BERTje) is the same as used for the sentiment analysis. However, now the linear fully connected classifier layer added to the network has eight outputs (one for every social right) instead of the three used for sentiment analysis. In addition, instead of applying a softmax function to the outputs of the classifier layer, a sigmoid function is used because the probabilities do not have to sum to one as it is a multi-label problem.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Algorithms for extracting social rights", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Bridging coaches have indicated that it is interesting to output the n most probable social rights. In this way, the coach can manually select the correct social rights out of the n most probable given by the model. To accomplish this, an array containing a probability for every right indicating how likely it is to be present in the fragment is used.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Algorithms for extracting social rights", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The models for sentiment analysis and extracting the social rights are evaluated using the accuracy score. This approach is valid since the data is not very skewed. The accuracies are calculated using 5-fold cross validation (cv) by splitting the fragments into a training set (80%) and a test set (20%) for every fold, which results in a test set of 343 fragments in every fold. A 1% increase in accuracy corresponds to 17 extra fragments classified correctly.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation metric", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "BOW-based The results of the BOW approach strongly depend on which classifier is used. Several machine learning classifiers from scikit-learn (Pedregosa et al., 2011) are tested out. To see which one works best, all classifiers are tested in the same conditions: all on the same (shuffled) lemmatised dataset with the same pre-processing steps and using 5-fold cv. Embeddings-based When using embeddings, the accuracy heavily depends on which tool is used to generate these embeddings. To identify the best tool, different vectorising tools such as fastText (Bojanowski et al., 2017) , spaCy 3 , Dutch embeddings from CLiPS (Tulkens et al., 2016) , Wikipedia2Vec (Yamada et al., 2020) , NLPL (Fares et al., 2017) , Dutch Word2Vec 4 are tested with the same classifier. In addition, the data is preprocessed in the same way for every tool: removing special characters, spaces and upper cases and tokenising the sentences. The Doc2Vec tools (fast-Text and spaCy) generate a single embedding vector for the whole fragment, while the other tools generate an embedding for every word, which are averaged element-wise afterwards.", |
|
"cite_spans": [ |
|
{ |
|
"start": 142, |
|
"end": 166, |
|
"text": "(Pedregosa et al., 2011)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 558, |
|
"end": 583, |
|
"text": "(Bojanowski et al., 2017)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 624, |
|
"end": 646, |
|
"text": "(Tulkens et al., 2016)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 663, |
|
"end": 684, |
|
"text": "(Yamada et al., 2020)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 692, |
|
"end": 712, |
|
"text": "(Fares et al., 2017)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments for sentiment analysis", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Fine-tuning BERTje The results of the model created by fine-tuning BERTje depend on which setup is used. To identify the best setup, several parameters are varied, such as the number of epochs, the maximum length to which a fragment is truncated and the number of frozen layers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments for sentiment analysis", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "To examine whether a BOW or an embedding approach works best when doing multi-label classification, both methods are compared. The accuracies (obtained using 5-fold cv) are defined as the number of correct predictions divided by the total number of predictions, where one prediction is considered as correct if the set of predicted social rights exactly matches the corresponding set of social rights as manually labelled.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments for extracting social rights", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Next, when giving the n most probable social rights as output, the accuracy is calculated by dividing the number of correctly predicted social rights by the number of rights manually labelled, using 5-fold cv. The discussed methods LabelPowerset, BinaryRelevance, ClassifierChain, RAkEL (all using logistic regression) and BERTje are compared when using varying number of outputs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments for extracting social rights", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "The previously used algorithm Pattern (De Smedt and Daelemans, 2012) reaches an accuracy of 48% on the dataset of this paper. Table 3 shows the results of the BOW approach obtained with different classifiers, with Logistic Regression as best result reaching an accuracy of 68%.", |
|
"cite_spans": [

{

"start": 38,

"end": 68,

"text": "(De Smedt and Daelemans, 2012)",

"ref_id": "BIBREF3"

}

],
|
"ref_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 133, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sentiment Analysis", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Embeddings-based To investigate which tool suits best, different embeddings generators are tested with the same classifier, being logistic regression since this one gave best results when using Table 3 : The results of the different classifiers on the SA task. The accuracy is obtained by using 5-fold cv. All classifiers are obtained from the scikit-learn library (Pedregosa et al., 2011) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 365, |
|
"end": 389, |
|
"text": "(Pedregosa et al., 2011)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 194, |
|
"end": 201, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "BOW-based", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "BOW. Table 4 shows that only when using embeddings generated with the Dutch Word2Vec tool, a higher accuracy of 71% is reached than with BOW. BERTje-based Table 5 shows the influence of the maximum length when using 3 epochs and without freezing. Table 5 : The influence of the maximum length of a fragment (in tokens) on the SA accuracy with BERTje. These results are obtained by fine-tuning for 3 epochs without freezing, using 5-fold cross validation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 12, |
|
"text": "Table 4", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 155, |
|
"end": 162, |
|
"text": "Table 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 254, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "BOW-based", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "When using BERTje with the best settings (i.e. maximum length of 350, freezing layers 0-6 and training for 6 epochs), the sentiment analysis reaches an accuracy of 79.6%. Table 6 : The influence of freezing layers of the pretrained BERTje on the SA accuracy. The column frozen layers indicates which layers are frozen (i.e. not finetuned), then for every case the accuracy (obtained using 5-fold cross validation) is determined after training for 3, 6 or 10 epochs. Table 7 shows the results when comparing whether an embedding (generated with the Dutch Word2Vec since this gave the best result for the sentiment analysis) or a BOW approach works best when doing multi-label classification. For the techniques requiring a BaseEstimator, logistic regression is used. When fine-tuning BERTje, it was found that training for 5 epochs instead of 3 and restricting the input to 350 tokens was slightly beneficial for the results. Table 8 shows the results when giving the n most probable social rights as output of LabelPowerset, BinaryRelevance, ClassifierChain, RAkEL (all using logistic regression) and BERTje when using varying number of outputs. Table 8 : The results when a certain number of social rights are given as output based on the highest probabilities of the social rights, using the different multi-label classification approaches with embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 178, |
|
"text": "Table 6", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 466, |
|
"end": 473, |
|
"text": "Table 7", |
|
"ref_id": "TABREF8" |
|
}, |
|
{ |
|
"start": 925, |
|
"end": 932, |
|
"text": "Table 8", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1146, |
|
"end": 1153, |
|
"text": "Table 8", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Category", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "When compared to the manually given labels, the previously used algorithm for sentiment analysis reaches an accuracy of about 48%, serving as a baseline. This low accuracy can be explained by the fact that the sentiment analysis tool from CLiPS is not developed specifically for data from the social context, which is typically more complex.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Analysis", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "With an accuracy of 79.6%, fine-tuning BERTje outperforms the BOW and embeddings-based approaches.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Analysis", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "This accuracy (79.6%) is considered as a good result if compared to other use-cases which also perform ternary classification. As mentioned in section 2, Bouazizi and Ohtsuki (2016) , for example, achieves an accuracy equal to 70.1% when classifying tweets into 3 different classes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 154, |
|
"end": 181, |
|
"text": "Bouazizi and Ohtsuki (2016)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentiment Analysis", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "Moreover, the influence of a few hyperparameters on the performance of this model is investigated. Table 5 does not show a trend in the length of a fragment (i.e. increasing the length does not increase the accuracy or vice versa). As the influence on the performance is not clear, the maximum length was set to 350, as most fragments (99.5%) are shorter than this number and will thus be taken completely as input.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 106, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sentiment Analysis", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "Next, Table 6 shows that the accuracy depends on whether layers are frozen or not. The last row of this table shows that when freezing all encoder layers, the accuracy drops significantly since the more epochs and the less frozen layers, the higher the risk to overfit. When freezing fewer layers, the accuracy rises, reaching a maximum when freezing about half of the model. Besides this, the table shows that when more layers are frozen, the differences between the accuracy when training for three, six or ten epochs is much larger than when fewer layers are frozen. This could be explained by the fact that when freezing more layers, overfitting occurs only after extensive training with more epochs, and it is then beneficial to train longer. Therefore, it may also be possible that a high accuracy can also be achieved when freezing many layers, but that in that case more than ten epochs would be required. However, table 6 shows freezing layers 0-6 and training for six epochs yields the best result. Table 7 also shows that for recognising social rights, embeddings are more suitable than BOW. This can be explained by the fact that for extracting the social rights, the model has to understand the topics of the fragments and embeddings are made to capture this meaning in a vector. This table also shows that fine-tuning BERTje yields the best results with an accuracy of 66%.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 6, |
|
"end": 13, |
|
"text": "Table 6", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 1009, |
|
"end": 1016, |
|
"text": "Table 7", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sentiment Analysis", |
|
"sec_num": "7.1" |
|
}, |
|
{ |
|
"text": "Since the user is interested in seeing the most probable social rights instead of the exact prediction of the model, the probability-based results (i.e. selecting top-k outputs based on their probability value) are considered as the most important measures. When giving the three most probable rights as output, the BERTje-based model detects 93% of all social rights. It is remarkable that BERTje with n social rights as output reaches a higher accuracy than all the other methods with n + 1 social rights as output. From this it can be concluded that BERTje is superior to LabelPowerset, BinaryRelevance, ClassifierChain and RAkEL and thus should be used to predict the social rights.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extracting Social Rights", |
|
"sec_num": "7.2" |
|
}, |
|
{ |
|
"text": "In this paper, we investigate the best way to perform sentiment analysis and extract social rights from subjective Dutch text fragments with the help of manually given labels. The results demonstrate that fine-tuning BERTje outperforms other techniques with an accuracy of 80% on sentiment analysis and 93% on extracting social rights when using the 3 most probable rights as output.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Further research directions could explore other pre-trained language models or exploit automatic data augmentation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "https://www.kunlabora.be/blog/2018/11/15/mezuri-1.0-is-live/",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/ThilinaRajapakse/ simpletransformers", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://spacy.io/models/nl 4 https://github.com/coosto/dutch-word-embeddings",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Political leaders and the media: can we measure political leadership images in newspapers using computer-assisted content analysis?", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Aaldering", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R",
|
"middle": [], |
|
"last": "Vliegenthart", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Quality & quantity", |
|
"volume": "50", |
|
"issue": "5", |
|
"pages": "1871--1905", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "L Aaldering and R Vliegenthart. 2016. Political lead- ers and the media: can we measure political leader- ship images in newspapers using computer-assisted content analysis? Quality & quantity, 50(5):1871- 1905.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Enriching word vectors with subword information", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "135--146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Associa- tion for Computational Linguistics, 5:135-146.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Sentiment analysis: From binary to multi-class classification: A pattern-based approach for multi-class sentiment analysis in twitter", |
|
"authors": [ |
|
{ |
|
"first": "Mondher", |
|
"middle": [], |
|
"last": "Bouazizi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomoaki", |
|
"middle": [], |
|
"last": "Ohtsuki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "2016 IEEE International Conference on Communications (ICC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mondher Bouazizi and Tomoaki Ohtsuki. 2016. Sen- timent analysis: From binary to multi-class classi- fication: A pattern-based approach for multi-class sentiment analysis in twitter. In 2016 IEEE Interna- tional Conference on Communications (ICC), pages 1-6. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Pattern for python", |
|
"authors": [ |
|
{ |
|
"first": "T",

"middle": [],

"last": "De Smedt",
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Daelemans", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Journal Of Machine Learning Research", |
|
"volume": "13", |
|
"issue": "", |
|
"pages": "2063--2067", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T De Smedt and W Daelemans. 2012. Pattern for python. Journal Of Machine Learning Research, 13:2063-2067.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Word vectors, reuse, and replicability: Towards a community repository of largetext resources", |
|
"authors": [ |
|
{ |
|
"first": "Murhaf", |
|
"middle": [], |
|
"last": "Fares", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrey", |
|
"middle": [], |
|
"last": "Kutuzov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Oepen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Velldal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 21st Nordic Conference on Computational Linguistics", |
|
"volume": "131", |
|
"issue": "", |
|
"pages": "271--276", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Murhaf Fares, Andrey Kutuzov, Stephan Oepen, and Erik Velldal. 2017. Word vectors, reuse, and repli- cability: Towards a community repository of large- text resources. In Proceedings of the 21st Nordic Conference on Computational Linguistics, NoDaL- iDa, 22-24 May 2017, Gothenburg, Sweden, 131, pages 271-276. Link\u00f6ping University Electronic Press, Link\u00f6pings universitet.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Classification of customer reviews based on sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Dietmar", |
|
"middle": [], |
|
"last": "Gr\u00e4bner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Markus", |
|
"middle": [], |
|
"last": "Zanker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G\u00fcnther", |
|
"middle": [], |
|
"last": "Fliedl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Fuchs", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "EN-TER", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "460--470", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dietmar Gr\u00e4bner, Markus Zanker, G\u00fcnther Fliedl, Matthias Fuchs, et al. 2012. Classification of cus- tomer reviews based on sentiment analysis. In EN- TER, pages 460-470. Citeseer.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Speech and language processing: an introduction to natural language processing, computational linguistics, and speech recognition", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "James", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Martin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Jurafsky and James H Martin. 2014. Speech and language processing: an introduction to natu- ral language processing, computational linguistics, and speech recognition, new international ed., 2nd ed. edition. Pearson, Harlow.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Adam: A method for stochastic optimization. arXiv.org", |
|
"authors": [ |
|
{ |
|
"first": "Diederik", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik Kingma and Jimmy Ba. 2017. Adam: A method for stochastic optimization. arXiv.org.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "What would elsa do? freezing layers during transformer fine-tuning", |
|
"authors": [ |
|
{ |
|
"first": "Jaejun", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raphael", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jaejun Lee, Raphael Tang, and Jimmy Lin. 2019. What would elsa do? freezing layers during transformer fine-tuning. arXiv.org.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Dependencybased word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "302--308", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Omer Levy and Yoav Goldberg. 2014. Dependency- based word embeddings. In Proceedings of the 52nd Annual Meeting of the Association for Compu- tational Linguistics (Volume 2: Short Papers), pages 302-308.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "An extensive experimental comparison of methods for multi-label learning. Pattern recognition", |
|
"authors": [ |
|
{ |
|
"first": "Gjorgji", |
|
"middle": [], |
|
"last": "Madjarov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dragi", |
|
"middle": [], |
|
"last": "Kocev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dejan", |
|
"middle": [], |
|
"last": "Gjorgjevikj", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sa\u0161o", |
|
"middle": [], |
|
"last": "D\u017eeroski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "45", |
|
"issue": "", |
|
"pages": "3084--3104", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gjorgji Madjarov, Dragi Kocev, Dejan Gjorgjevikj, and Sa\u0161o D\u017eeroski. 2012. An extensive experimental comparison of methods for multi-label learning. Pat- tern recognition, 45(9):3084-3104.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Fine-grained sentiment classification using bert",

"authors": [

{

"first": "Manish",

"middle": [],

"last": "Munikar",

"suffix": ""

},

{

"first": "Sushil",

"middle": [],

"last": "Shakya",

"suffix": ""

},

{

"first": "Aakash",

"middle": [],

"last": "Shrestha",

"suffix": ""

}

],

"year": 2019,
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manish Munikar, Sushil Shakya, and Aakash Shrestha. 2019. Fine-grained sentiment classification using bert. arXiv.org.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Thumbs up? sentiment classification using machine learning techniques", |
|
"authors": [ |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Pang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lillian", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shivakumar", |
|
"middle": [], |
|
"last": "Vaithyanathan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bo Pang, Lillian Lee, and Shivakumar Vaithyanathan. 2002. Thumbs up? sentiment classification us- ing machine learning techniques. arXiv preprint cs/0205070.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Scikit-learn: Machine learning in Python", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Pedregosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Varoquaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Gramfort", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Michel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Thirion", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Grisel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Blondel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Prettenhofer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Dubourg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Vanderplas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Passos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Cournapeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Brucher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Perrot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Duchesnay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2825--2830", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duch- esnay. 2011. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Survey of sentiment analysis using deep learning techniques", |
|
"authors": [ |
|
{

"first": "M",

"middle": [

"Indhraom"

],

"last": "Prabha",

"suffix": ""

},

{

"first": "G",

"middle": [

"Umarani"

],

"last": "Srikanth",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Indhraom Prabha and G Umarani Srikanth. 2019. Survey of sentiment analysis using deep learning techniques. pages 1-9. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "More than bags of words: Sentiment analysis with word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Rudkowsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Haselmayer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Wastian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcelo", |
|
"middle": [], |
|
"last": "Jenny", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0160tefan", |
|
"middle": [], |
|
"last": "Emrich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Sedlmair", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Communication methods and measures", |
|
"volume": "12", |
|
"issue": "2-3", |
|
"pages": "140--157", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elena Rudkowsky, Martin Haselmayer, Matthias Was- tian, Marcelo Jenny,\u0160tefan Emrich, and Michael Sedlmair. 2018. More than bags of words: Senti- ment analysis with word embeddings. Communica- tion methods and measures, 12(2-3):140-157.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "How to fine-tune bert for text classification?", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Qiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "11856", |
|
"issue": "", |
|
"pages": "194--206", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Sun, X. Qiu, Y. Xu, and X. Huang. 2019. How to fine-tune bert for text classification? volume 11856, pages 194-206. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "A scikit-based python environment for performing multi-label classification", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Szyma\u0144ski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomasz", |
|
"middle": [], |
|
"last": "Kajdanowicz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1702.01460" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotr Szyma\u0144ski and Tomasz Kajdanowicz. 2017. A scikit-based python environment for perform- ing multi-label classification. arXiv preprint arXiv:1702.01460.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Evaluating unsupervised dutch word embeddings as a linguistic resource", |
|
"authors": [ |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Tulkens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Emmery", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Walter", |
|
"middle": [], |
|
"last": "Daelemans", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephan Tulkens, Chris Emmery, and Walter Daele- mans. 2016. Evaluating unsupervised dutch word embeddings as a linguistic resource. In Proceed- ings of the Tenth International Conference on Lan- guage Resources and Evaluation (LREC 2016), Paris, France. European Language Resources Asso- ciation (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Bertje: A dutch bert model", |
|
"authors": [ |
|
{

"first": "Wietse",

"middle": [],

"last": "de Vries",

"suffix": ""

},

{

"first": "Andreas",

"middle": [],

"last": "van Cranenburgh",

"suffix": ""

},

{

"first": "Arianna",

"middle": [],

"last": "Bisazza",

"suffix": ""

},

{

"first": "Tommaso",

"middle": [],

"last": "Caselli",

"suffix": ""

},

{

"first": "Gertjan",

"middle": [],

"last": "van Noord",

"suffix": ""

},

{

"first": "Malvina",

"middle": [],

"last": "Nissim",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wietse de Vries, Andreas van Cranenburgh, Arianna Bisazza, Tommaso Caselli, Gertjan van Noord, and Malvina Nissim. 2019. Bertje: A dutch bert model.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Huggingface's transformers: State-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R'emi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Brew", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R'emi Louf, Morgan Funtow- icz, and Jamie Brew. 2019. Huggingface's trans- formers: State-of-the-art natural language process- ing. ArXiv, abs/1910.03771.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Wikipedia2vec: An efficient toolkit for learning and visualizing the embeddings of words and entities from wikipedia", |
|
"authors": [ |
|
{ |
|
"first": "Ikuya", |
|
"middle": [], |
|
"last": "Yamada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Akari", |
|
"middle": [], |
|
"last": "Asai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jin", |
|
"middle": [], |
|
"last": "Sakuma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hiroyuki", |
|
"middle": [], |
|
"last": "Shindo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hideaki", |
|
"middle": [], |
|
"last": "Takeda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshiyasu", |
|
"middle": [], |
|
"last": "Takefuji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuji", |
|
"middle": [], |
|
"last": "Matsumoto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ikuya Yamada, Akari Asai, Jin Sakuma, Hiroyuki Shindo, Hideaki Takeda, Yoshiyasu Takefuji, and Yuji Matsumoto. 2020. Wikipedia2vec: An efficient toolkit for learning and visualizing the embeddings of words and entities from wikipedia. arXiv preprint 1812.06280v3.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "Distribution of the length of the fragments (in words) after splitting the original dataset", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"text": "Chart showing in how many fragments the different social rights occur. Figure 4: Distribution of the number of social rights in a fragment (one fragment can contain several social rights) (i.e. initials) of persons are replaced by the words persoon (person) and naam (name) respectively.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"text": "A simplified illustration of the complete architecture of the used model based on BERT, with a pooler layer and a classification layer on top", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"content": "<table><tr><td colspan=\"2\">Figure 2: Distribution among the sentiment labels</td></tr><tr><td>ID</td><td>Social right</td></tr><tr><td>0</td><td>legal assistance</td></tr><tr><td>1</td><td>sports, games, leisure, culture</td></tr><tr><td>2</td><td>belonging, network reinforcement</td></tr><tr><td>3</td><td>health</td></tr><tr><td>4</td><td>financial and material support</td></tr><tr><td>5</td><td>education and training</td></tr><tr><td>6</td><td>work, internship</td></tr><tr><td>7</td><td>healthy and affordable home</td></tr><tr><td>8</td><td>not applicable / miscellaneous</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Examples of fragments that are labelled differentlySocial right labels In addition to the sentiment label, the social rights are labelled in every fragment. There are eight social rights defined (see" |
|
}, |
|
"TABREF1": { |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "The possible social rights" |
|
}, |
|
"TABREF4": { |
|
"num": null, |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "Results of the different embedding generators on the SA task." |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"content": "<table><tr><td/><td>shows the influence of</td></tr><tr><td colspan=\"2\">freezing layer 0 until layer N , with varying number</td></tr><tr><td>of epochs.</td><td/></tr><tr><td colspan=\"2\">maximum length accuracy (%)</td></tr><tr><td>225</td><td>78.5</td></tr><tr><td>250</td><td>79.3</td></tr><tr><td>275</td><td>78.3</td></tr><tr><td>300</td><td>79.0</td></tr><tr><td>350</td><td>78.7</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "" |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"content": "<table><tr><td/><td/><td colspan=\"2\">accuracy (%)</td></tr><tr><td/><td colspan=\"4\">Number of social rights</td></tr><tr><td/><td/><td colspan=\"2\">in output</td></tr><tr><td/><td>3</td><td>4</td><td>5</td><td>6</td></tr><tr><td>LabelPowerset</td><td colspan=\"4\">84.6 89.3 93.3 97.1</td></tr><tr><td colspan=\"5\">BinaryRelevance 87.1 91.3 94.7 97.6</td></tr><tr><td>ClassifierChain</td><td colspan=\"4\">86.2 91.2 94.6 97.5</td></tr><tr><td>RAkEL</td><td colspan=\"4\">86.0 90.3 94.2 97.6</td></tr><tr><td>BERTje</td><td colspan=\"4\">93.0 96.0 97.7 98.9</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"text": "The results using different multi-label classification techniques to extract the social rights using a BOW and an embedding approach." |
|
} |
|
} |
|
} |
|
} |