|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:14:34.338472Z" |
|
}, |
|
"title": "Discriminating between standard Romanian and Moldavian tweets using filtered character ngrams", |
|
"authors": [ |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Ceolin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Pennsylvania", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Pennsylvania", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We applied word unigram models, character ngram models, and CNNs to the task of distinguishing tweets of two related dialects of Romanian (standard Romanian and Moldavian) for the VarDial 2020 RDI shared task (G\u0203man et al., 2020). The main challenge of the task was to perform cross-genre text classification: specifically, the models must be trained using text from news articles, and be used to predict tweets. Our best model was a Na\u00efve Bayes model trained on character ngrams, with the most common ngrams filtered out. We also applied SVMs and CNNs, but while they yielded the best performance on an evaluation dataset of news articles, their accuracy significantly dropped when they were used to predict tweets. Our best model reached an F1 score of 0.715 on the evaluation dataset of tweets, and 0.667 on the held-out test dataset. The model ended up in the third place in the shared task. 2 Methods Previous methods used for language identification typically involve bag-of-words models (Huang and Lee, 2008), Na\u00efve Bayes models applied to word and character ngrams (Jauhiainen et al., 2016) and Support Vector Machines (Zampieri et al., 2019). Deep learning methods based on CNNs and LSTMs This work is licensed under a Creative Commons Attribution 4.",
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We applied word unigram models, character ngram models, and CNNs to the task of distinguishing tweets of two related dialects of Romanian (standard Romanian and Moldavian) for the VarDial 2020 RDI shared task (G\u0203man et al., 2020). The main challenge of the task was to perform cross-genre text classification: specifically, the models must be trained using text from news articles, and be used to predict tweets. Our best model was a Na\u00efve Bayes model trained on character ngrams, with the most common ngrams filtered out. We also applied SVMs and CNNs, but while they yielded the best performance on an evaluation dataset of news articles, their accuracy significantly dropped when they were used to predict tweets. Our best model reached an F1 score of 0.715 on the evaluation dataset of tweets, and 0.667 on the held-out test dataset. The model ended up in the third place in the shared task. 2 Methods Previous methods used for language identification typically involve bag-of-words models (Huang and Lee, 2008), Na\u00efve Bayes models applied to word and character ngrams (Jauhiainen et al., 2016) and Support Vector Machines (Zampieri et al., 2019). Deep learning methods based on CNNs and LSTMs This work is licensed under a Creative Commons Attribution 4.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Language identification can be challenging for NLP techniques when languages are hardly distinguishable. One example of this challenge is the identification of Moldavian, a dialect of Romanian which exhibits almost no difference with standard Romanian. The distinction between Romanian and Moldavian is only motivated by the presence of a political boundary, which corresponds to no real isogloss. In spelling, the two languages are almost identical, with a minor exception involving the distribution of the letters '\u00e2' and '\u00ee', although other grammatical distinctions can be found in number, gender and case morphology, and in lexical choices. In particular, the lexical divergence was the result of Moldavian being under the influence of Russian, in the years in which it was part of the Soviet Union.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "An additional challenge for language identification is that existing resources might belong to domains that are different from the domain on which one needs to perform a classification task. For instance, in certain cases one can find data from textbooks, encyclopedias and newspaper articles, but not from social media, even though language identification is often used to classify online messages (Tromp and Pechenizkiy, 2011; Bergsma et al., 2012; Barman et al., 2014; Lui and Baldwin, 2014) . This raises the question of how to use out-of-domain data when performing language identification in a restricted domain.", |
|
"cite_spans": [ |
|
{ |
|
"start": 399, |
|
"end": 428, |
|
"text": "(Tromp and Pechenizkiy, 2011;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 429, |
|
"end": 450, |
|
"text": "Bergsma et al., 2012;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 451, |
|
"end": 471, |
|
"text": "Barman et al., 2014;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 472, |
|
"end": 494, |
|
"text": "Lui and Baldwin, 2014)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The VarDial 2020 RDI shared task (G\u0203man et al., 2020) invited participants to perform cross-genre language identification by training a classifier on newspaper articles, and using it to distinguish standard Romanian from Moldavian tweets. In this paper, we present the contribution of the team Phlyers to the task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 33, |
|
"end": 53, |
|
"text": "(G\u0203man et al., 2020)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "have also been successfully applied to similar tasks (Jaech et al., 2016; Butnaru and Ionescu, 2019; Hu et al., 2019; Tudoreanu, 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 73, |
|
"text": "(Jaech et al., 2016;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 74, |
|
"end": 100, |
|
"text": "Butnaru and Ionescu, 2019;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 101, |
|
"end": 117, |
|
"text": "Hu et al., 2019;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 118, |
|
"end": 134, |
|
"text": "Tudoreanu, 2019)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Last year's VarDial edition (Zampieri et al., 2019) proposed the first shared task based on distinguishing standard Romanian from Moldavian. The best model achieved an F1 score of 0.895 on the test set, using an ensemble method based on CNNs and Support Vector Machines (Tudoreanu, 2019) . The task consisted in training a classifier on news article in Romanian and Moldavian from the MOROCO corpus (Butnaru and Ionescu, 2019), and using it to classify other news articles yet to be added to the corpus. This year's task asked participants to train a classifier on the news articles of the MOROCO corpus in order to distinguish standard Romanian from Moldavian in a test dataset of tweets (G\u0203man and Ionescu, 2020) . The task was particularly challenging because the organizers provided a large evaluation dataset based on news articles (5923) and a small evaluation dataset based on tweets (215) (cf. Table 1 ). This made the evaluation stage particularly delicate, because on the one hand a good model tested on the news evaluation dataset could fail to generalize to a different domain, while on the other hand the size of the tweets evaluation dataset was so small that the risk of overfitting was considerable.", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 51, |
|
"text": "(Zampieri et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 270, |
|
"end": 287, |
|
"text": "(Tudoreanu, 2019)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 689, |
|
"end": 714, |
|
"text": "(G\u0203man and Ionescu, 2020)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 902, |
|
"end": 909, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "VarDial 2020 We decided to train a variety of models, and to study their generalizability to genres different from those in the training data. The models that we trained for the task are the following:",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Multinomial Na\u00efve Bayes -Words. This is a standard Na\u00efve Bayes model applied to word unigrams. The best performance on the news evaluation dataset was reached by using a TFIDF matrix instead of word counts. The optimal alpha was 0.0001 for both the unigram-and the TFIDF-based model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Multinomial Na\u00efve Bayes -Character Ngrams. This is a standard Na\u00efve Bayes model applied to character ngrams. The best performance on the news evaluation set was reached by a model which calculates ngrams in the window [5] [6] [7] [8] , with alpha=0.0001. Padding symbols (n-1) are added both before and after each word in order to retrieve ngrams for each value of n.", |
|
"cite_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 223, |
|
"text": "[5]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 224, |
|
"end": 227, |
|
"text": "[6]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 228, |
|
"end": 231, |
|
"text": "[7]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 232, |
|
"end": 235, |
|
"text": "[8]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Linear SVM -Words. This is a standard Support Vector Machine model with a linear kernel. The best performance on the news evaluation set was reached by using a TFIDF matrix instead of word counts. The optimal regularization parameter C was 2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Linear SVM -Character Ngrams. Our best Support Vector Machine model with a linear kernel uses character ngrams in the window [6] [7] [8] , with C=1. Padding symbols (n-1) are added both before and after each word in order to retrieve ngrams for each value of n.", |
|
"cite_spans": [ |
|
{ |
|
"start": 127, |
|
"end": 130, |
|
"text": "[6]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 131, |
|
"end": 134, |
|
"text": "[7]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 135, |
|
"end": 138, |
|
"text": "[8]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Character CNN. We used the character-based CNN proposed in Zhang et al. (2015) , and modified it according to the baseline model in Butnaru and Ionescu (2019). We created an alphabet of 76 symbols representing all the characters that appear at least 50 times in the training data, plus a 'NA' symbol, and then we used one-hot encoding vectors as input to the CNN. The three hyperparameters we fine-tuned were the batch size (10), the learning rate (0.0001), and the size of the fully-connected layers (1000), while the other parameters were taken from Butnaru and Ionescu (2019). Training was performed for 20 epochs. See Figure 1 for a summary of the model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 61, |
|
"end": 80, |
|
"text": "Zhang et al. (2015)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 624, |
|
"end": 632, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Character TDNN. Inspired by the research in speech recognition community, we implemented a Time Delay Neural Network (TDNN) (Peddinti et al., 2015) , in order to better capture the morphological features of the two languages. Our model contains 2 stacked convolution blocks. Each block learns 100 k-by-3 filter banks, where k is the dimension of the input vectors and 3 is the window length, that maps a trigram character window to a scalar. Input sequences were padded to equal length. The output 1-by-n vector, where n is the input sequence length, is then passed through a fully connected layer. Subsampling was not performed in training. Training was performed for 50 epochs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 149, |
|
"text": "(Peddinti et al., 2015)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The architectures of the CNN and the TDNN are shown in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 55, |
|
"end": 63, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "3 Results", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The results of the models are summarized in Table 2 . All the models exhibit a drop in performance when used to classify tweets instead of news articles (Figure 2 ). The best model on the news evaluation dataset is the Linear SVM model trained on TFIDF transformed word unigrams, with an F1 score of 0.942. By inspecting the weight matrix, we were able to identify the words which have the highest contribution in determining the class of the news articles. Among the best words that are used to identify the Moldavian class, 's\u00eent' ('are') and 'c\u00eend' ('when') have the largest weights. These are frequent words which are spelled differently in Romanian ('sunt' and 'c\u00e2nd'). As for Romanian, 'news' and 'foto', which are loanwords, as well as the frequent word 's\u00e2mb\u0203t\u0203' ('Saturday'), which has a different spelling in Moldavian, ('s\u00eemb\u0203t\u0203'), carry the largest weights. The CNN model reaches a similar accuracy (F1 score: 0.931), which is almost identical to the accuracy obtained by Butnaru and Ionescu (2019) on the same dataset. An interesting observation for the neural network-based models is that although the accuracy of the models on the news dataset increases by iterating through the training set, the improvement does not generalize to the testing domain (see Figure 3) . The lack of cross-domain generalizability might indicate that naive implementations of deep neural network architectures are not well-suited for cross-genre classifications.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 44, |
|
"end": 51, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 153, |
|
"end": 162, |
|
"text": "(Figure 2", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 1271, |
|
"end": 1281, |
|
"text": "Figure 3)", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The models that best generalize to Twitter data are the Multinomial Na\u00efve Bayes (MNB) models trained on TFIDF transformed word unigrams (F1 score=0.892) and character ngrams (F1 score=0.883). In particular, the highest accuracy is reached by the character ngram model, which yields an F1 score of Table 2 : Performance of the models tested (F1 scores).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 297, |
|
"end": 304, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "0.674 on the tweets dataset. For these reasons, we decided to use the Multinomial Na\u00efve Bayes model based on character ngrams for the task, and we proceeded to the fine-tuning stage.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We fine-tuned the MNB character ngram model on the tweets dataset (Table 3) . We noticed that by removing the most common ngrams, performance improved. In particular, removing all the ngrams which appeared more than 1000 times overall improved the performance up to an F1 score of 0.890 on the news dataset (see Figure 4) . On the contrary, removing less frequent ngrams did not improve performance. This result is interesting, because usually performance is increased by removing the tail of the frequency distribution, not the head: in this case, since the TFIDF transformation was not sufficient to normalize the behavior of high-frequency ngrams, removing them turned out to be a better strategy to increase the performance of the classifier.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 75, |
|
"text": "(Table 3)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 312, |
|
"end": 321, |
|
"text": "Figure 4)", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Fine-tuning on the News set", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Ch-ngram filtered News articles (19) Tweets (20) MNB -Char. ngrams [5] [6] [7] [8] Table 3 : Fine-tuning of the models tested. Total char. ngrams: 931786.", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 70, |
|
"text": "[5]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 71, |
|
"end": 74, |
|
"text": "[6]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 75, |
|
"end": 78, |
|
"text": "[7]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 79, |
|
"end": 82, |
|
"text": "[8]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 83, |
|
"end": 90, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Filter applied based on total occurrences",
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "After further fine-tuning of the ngram window and the threshold of ngrams to filter, we obtained two best models on the tweets dataset (Table 4) . Both models reached an F1 score of 0.715. The performance did not improve after including in the training set the data coming from the evaluation set containing news articles. The two best models had the following settings:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 135, |
|
"end": 144, |
|
"text": "(Table 4)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Fine-tuning on the Tweets set", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u2022 MNB -Char. ngrams, [6] [7] [8] , filter <250, alpha=0.001", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 24, |
|
"text": "[6]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 25, |
|
"end": 28, |
|
"text": "[7]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 29, |
|
"end": 32, |
|
"text": "[8]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fine-tuning on the Tweets set", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u2022 MNB -Char. ngrams, [5] [6] [7] , filter <200, alpha=0.001 Tweets 2020Train Train+Dev1 MNB -Char. ngrams [6] [7] [8] , filter <250 0.715 0.715 MNB -Char. ngrams [5] [6] [7] , filter <200 0.715 0.715 Table 4 : Final performance on the tweets evaluation dataset after fine-tuning the parameters of the MNB -ngrams model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 24, |
|
"text": "[5]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 25, |
|
"end": 28, |
|
"text": "[6]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 29, |
|
"end": 32, |
|
"text": "[7]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 106, |
|
"end": 109, |
|
"text": "[6]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 110, |
|
"end": 113, |
|
"text": "[7]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 114, |
|
"end": 117, |
|
"text": "[8]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 162, |
|
"end": 165, |
|
"text": "[5]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 166, |
|
"end": 169, |
|
"text": "[6]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 170, |
|
"end": 173, |
|
"text": "[7]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 200, |
|
"end": 207, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Fine-tuning on the Tweets set", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We submitted three runs to the VarDial 2020 RDI shared task:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "VarDial 2020 -RDI Shared Task", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "1. MNB -Char. ngrams, [5] [6] [7] [8] , filter <1000, alpha=0.0001. This was the best ngram model on the news articles.", |
|
"cite_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 25, |
|
"text": "[5]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 26, |
|
"end": 29, |
|
"text": "[6]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 30, |
|
"end": 33, |
|
"text": "[7]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 34, |
|
"end": 37, |
|
"text": "[8]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "VarDial 2020 -RDI Shared Task", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "2. MNB -Char. ngrams, [6] [7] [8] , filter <250, alpha=0.001. This was one of the two best models on the tweets evaluation data set.", |
|
"cite_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 25, |
|
"text": "[6]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 26, |
|
"end": 29, |
|
"text": "[7]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 30, |
|
"end": 33, |
|
"text": "[8]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "VarDial 2020 -RDI Shared Task", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "3. MNB -Char. ngrams, [5] [6] [7] , filter <200, alpha=0.001. This was one of the two best models on the tweets evaluation dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 25, |
|
"text": "[5]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 26, |
|
"end": 29, |
|
"text": "[6]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 30, |
|
"end": 33, |
|
"text": "[7]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "VarDial 2020 -RDI Shared Task", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "The results for our models on the test dataset of the task are summarized in Table 5 . The best model was the one which was fine-tuned on the news dataset. This result suggests that our fine-tuning strategy on the tweets dataset led to overfitting on the news articles, and thus poorer performance on tweets. Additionally, after our submissions, we realized that the tweets in the test data had not been preprocessed to remove punctuation, numbers and other symbols. Preprocessing increases the submission F1 score up to 0.692.",
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 84, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "VarDial 2020 -RDI Shared Task", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "Both the Multinomial Na\u00efve Bayes model based on TFIDF word unigrams, which was the second best model on the tweets dataset, and the one based on unfiltered character ngrams, performed worse than the best model submitted. The same was true for the Linear SVM model based on character ngrams.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "VarDial 2020 -RDI Shared Task", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "We applied word unigram models, character ngram models, and two neural network models to classify tweets of two related dialects (standard Romanian and Moldavian) for the VarDial 2020 RDI shared task (G\u0203man et al., 2020) , with training data from a different domain. Two of the models we proposed, a Linear SVM model based on TFIDF word unigrams and a CNN model, reached a high accuracy on the news evaluation dataset, but failed to generalize to the tweets evaluation dataset. On the contrary,", |
|
"cite_spans": [ |
|
{ |
|
"start": 200, |
|
"end": 220, |
|
"text": "(G\u0203man et al., 2020)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Tweets (2020) test data results no preprocessing, submitted preprocessing 1. MNB -Char. ngrams [5] [6] [7] [8] , filter <1000 0.666 0.692 2. MNB -Char. ngrams [6] [7] [8] , filter <250 0.651 0.678 3. MNB -Char. ngrams [5] [6] [7] , filter <200 0.645 0.675 MNB -Word unigrams -TFIDF 0.630 0.677 MNB -Char. ngrams [5] [6] [7] [8] 0.651 0.676 Linear SVM -ngrams [6] [7] [8] 0.593 0.590 Table 5 : Results on the test dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 98, |
|
"text": "[5]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 99, |
|
"end": 102, |
|
"text": "[6]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 103, |
|
"end": 106, |
|
"text": "[7]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 107, |
|
"end": 110, |
|
"text": "[8]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 159, |
|
"end": 162, |
|
"text": "[6]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 163, |
|
"end": 166, |
|
"text": "[7]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 167, |
|
"end": 170, |
|
"text": "[8]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 218, |
|
"end": 221, |
|
"text": "[5]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 222, |
|
"end": 225, |
|
"text": "[6]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 226, |
|
"end": 229, |
|
"text": "[7]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 312, |
|
"end": 315, |
|
"text": "[5]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 316, |
|
"end": 319, |
|
"text": "[6]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 320, |
|
"end": 323, |
|
"text": "[7]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 324, |
|
"end": 327, |
|
"text": "[8]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 359, |
|
"end": 362, |
|
"text": "[6]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 363, |
|
"end": 366, |
|
"text": "[7]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 367, |
|
"end": 370, |
|
"text": "[8]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 383, |
|
"end": 390, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Multinomial Na\u00efve Bayes models turned out to be the best performing models on the task. The more complex neural network models suffered from the problem of poor generalizability. In addition, we showed that removing high frequency ngrams can be a valid alternative when working on datasets for which a TFIDF transformation does not improve classification accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Code mixing: A challenge for language identification in the language of social media", |
|
"authors": [ |
|
{ |
|
"first": "Utsab", |
|
"middle": [], |
|
"last": "Barman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amitava", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joachim", |
|
"middle": [], |
|
"last": "Wagner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennifer", |
|
"middle": [], |
|
"last": "Foster", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the first workshop on computational approaches to code switching", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "13--23", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Utsab Barman, Amitava Das, Joachim Wagner, and Jennifer Foster. 2014. Code mixing: A challenge for language identification in the language of social media. In Proceedings of the first workshop on computational approaches to code switching, pages 13-23.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Language identification for creating language-specific twitter collections", |
|
"authors": [ |
|
{ |
|
"first": "Shane", |
|
"middle": [], |
|
"last": "Bergsma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Mcnamee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mossaab", |
|
"middle": [], |
|
"last": "Bagdouri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clayton", |
|
"middle": [], |
|
"last": "Fink", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Theresa", |
|
"middle": [], |
|
"last": "Wilson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the second workshop on language in social media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "65--74", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shane Bergsma, Paul McNamee, Mossaab Bagdouri, Clayton Fink, and Theresa Wilson. 2012. Language identi- fication for creating language-specific twitter collections. In Proceedings of the second workshop on language in social media, pages 65-74.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "The Moldavian and Romanian dialectal corpus", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Andrei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu Tudor", |
|
"middle": [], |
|
"last": "Butnaru", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ionescu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1901.06543" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrei M Butnaru and Radu Tudor Ionescu. 2019. Moroco: The Moldavian and Romanian dialectal corpus. arXiv preprint arXiv:1901.06543.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "The Unreasonable Effectiveness of Machine Learning in Moldavian versus Romanian Dialect Identification", |
|
"authors": [ |
|
{ |
|
"first": "Mihaela", |
|
"middle": [], |
|
"last": "G\u0203man", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu Tudor", |
|
"middle": [], |
|
"last": "Ionescu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2007.15700" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mihaela G\u0203man and Radu Tudor Ionescu. 2020. The Unreasonable Effectiveness of Machine Learning in Molda- vian versus Romanian Dialect Identification. arXiv preprint arXiv:2007.15700.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Yves Scherrer, and Marcos Zampieri. 2020. A Report on the VarDial Evaluation Campaign 2020", |
|
"authors": [ |
|
{ |
|
"first": "Mihaela", |
|
"middle": [], |
|
"last": "G\u0103man", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu Tudor", |
|
"middle": [], |
|
"last": "Ionescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heidi", |
|
"middle": [], |
|
"last": "Jauhiainen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tommi", |
|
"middle": [], |
|
"last": "Jauhiainen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Krister", |
|
"middle": [], |
|
"last": "Lind\u00e9n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikola", |
|
"middle": [], |
|
"last": "Ljube\u0161i\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niko", |
|
"middle": [], |
|
"last": "Partanen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christoph", |
|
"middle": [], |
|
"last": "Purschke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yves", |
|
"middle": [], |
|
"last": "Scherrer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the Seventh Workshop on NLP for Similar Languages, Varieties and Dialects", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mihaela G\u0203man, Dirk Hovy, Radu Tudor Ionescu, Heidi Jauhiainen, Tommi Jauhiainen, Krister Lind\u00e9n, Nikola Ljube\u0161i\u0107, Niko Partanen, Christoph Purschke, Yves Scherrer, and Marcos Zampieri. 2020. A Report on the VarDial Evaluation Campaign 2020. In Proceedings of the Seventh Workshop on NLP for Similar Languages, Varieties and Dialects (VarDial).", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Ensemble Methods to Distinguish Mainland and Taiwan Chinese", |
|
"authors": [ |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wen", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "He", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zuoyu", |
|
"middle": [], |
|
"last": "Tian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiwen", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Sixth Workshop on NLP for Similar Languages, Varieties and Dialects", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "165--171", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hai Hu, Wen Li, He Zhou, Zuoyu Tian, Yiwen Zhang, and Liang Zou. 2019. Ensemble Methods to Distinguish Mainland and Taiwan Chinese. In Proceedings of the Sixth Workshop on NLP for Similar Languages, Varieties and Dialects, pages 165-171.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Contrastive approach towards text source classification based on top-bag-of-word similarity", |
|
"authors": [ |
|
{ |
|
"first": "Chu-Ren", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lung-Hao", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 22nd Pacific Asia conference on language, information and computation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "404--410", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chu-Ren Huang and Lung-Hao Lee. 2008. Contrastive approach towards text source classification based on top-bag-of-word similarity. In Proceedings of the 22nd Pacific Asia conference on language, information and computation, pages 404-410.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "A neural model for language identification in code-switched tweets", |
|
"authors": [ |
|
{ |
|
"first": "Aaron", |
|
"middle": [], |
|
"last": "Jaech", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Mulcaire", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mari", |
|
"middle": [], |
|
"last": "Ostendorf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah A", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of The Second Workshop on Computational Approaches to Code Switching", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "60--64", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aaron Jaech, George Mulcaire, Mari Ostendorf, and Noah A Smith. 2016. A neural model for language iden- tification in code-switched tweets. In Proceedings of The Second Workshop on Computational Approaches to Code Switching, pages 60-64.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "HeLI, a word-based backoff method for language identification", |
|
"authors": [ |
|
{ |
|
"first": "Tommi", |
|
"middle": [ |
|
"Sakari" |
|
], |
|
"last": "Jauhiainen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [ |
|
"Krister", |
|
"Johan" |
|
], |
|
"last": "Linden", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heidi", |
|
"middle": [ |
|
"Annika" |
|
], |
|
"last": "Jauhiainen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Third Workshop on NLP for Similar Languages, Varieties and Dialects", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tommi Sakari Jauhiainen, Bo Krister Johan Linden, Heidi Annika Jauhiainen, et al. 2016. HeLI, a word-based backoff method for language identification. In Proceedings of the Third Workshop on NLP for Similar Lan- guages, Varieties and Dialects (VarDial 2016).", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Accurate language identification of twitter messages", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Lui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 5th workshop on language analysis for social media (LASM)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "17--25", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Lui and Timothy Baldwin. 2014. Accurate language identification of twitter messages. In Proceedings of the 5th workshop on language analysis for social media (LASM), pages 17-25.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "A time delay neural network architecture for efficient modeling of long temporal contexts", |
|
"authors": [ |
|
{ |
|
"first": "Vijayaditya", |
|
"middle": [], |
|
"last": "Peddinti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Povey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjeev", |
|
"middle": [], |
|
"last": "Khudanpur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Sixteenth Annual Conference of the International Speech Communication Association", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vijayaditya Peddinti, Daniel Povey, and Sanjeev Khudanpur. 2015. A time delay neural network architecture for efficient modeling of long temporal contexts. In Sixteenth Annual Conference of the International Speech Communication Association.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Graph-based n-gram language identification on short texts", |
|
"authors": [ |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Tromp", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mykola", |
|
"middle": [], |
|
"last": "Pechenizkiy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. 20th Machine Learning conference of Belgium and The Netherlands", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "27--34", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik Tromp and Mykola Pechenizkiy. 2011. Graph-based n-gram language identification on short texts. In Proc. 20th Machine Learning conference of Belgium and The Netherlands, pages 27-34.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "DTeam@ VarDial 2019: Ensemble based on skip-gram and triplet loss neural networks for Moldavian vs. Romanian cross-dialect topic identification", |
|
"authors": [ |
|
{ |
|
"first": "Diana", |
|
"middle": [], |
|
"last": "Tudoreanu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Sixth Workshop on NLP for Similar Languages, Varieties and Dialects", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "202--208", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diana Tudoreanu. 2019. DTeam@ VarDial 2019: Ensemble based on skip-gram and triplet loss neural networks for Moldavian vs. Romanian cross-dialect topic identification. In Proceedings of the Sixth Workshop on NLP for Similar Languages, Varieties and Dialects, pages 202-208.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A report on the third VarDial evaluation campaign", |
|
"authors": [ |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shervin", |
|
"middle": [], |
|
"last": "Malmasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yves", |
|
"middle": [], |
|
"last": "Scherrer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tanja", |
|
"middle": [], |
|
"last": "Samard\u017eic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francis", |
|
"middle": [], |
|
"last": "Tyers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miikka", |
|
"middle": [ |
|
"Pietari" |
|
], |
|
"last": "Silfverberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Natalia", |
|
"middle": [], |
|
"last": "Klyueva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tung-Le", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chu-Ren", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [ |
|
"Tudor" |
|
], |
|
"last": "Ionescu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Sixth Workshop on NLP for Similar Languages, Varieties and Dialects", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcos Zampieri, Shervin Malmasi, Yves Scherrer, Tanja Samard\u017eic, Francis Tyers, Miikka Pietari Silfverberg, Natalia Klyueva, Tung-Le Pan, Chu-Ren Huang, Radu Tudor Ionescu, et al. 2019. A report on the third VarDial evaluation campaign. In Proceedings of the Sixth Workshop on NLP for Similar Languages, Varieties and Dialects (VarDial 2019). The Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Character-level convolutional networks for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junbo", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "649--657", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiang Zhang, Junbo Zhao, and Yann LeCun. 2015. Character-level convolutional networks for text classification. In Advances in neural information processing systems, pages 649-657.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "The architectures of our neural network models (CNN on the left, TDNN on the right)." |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Drop in performance of the best performing models from the News development set to the Tweets development set." |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Comparing the training and validation performance measured by F1 score after 20 epochs of training. Solid line: validation score on news articles; dotted line: validation score on tweets." |
|
}, |
|
"FIGREF4": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Accuracy change on the news evaluation dataset after ngram filtering. Left: common ngrams removed; right: common ngrams preserved." |
|
}, |
|
"TABREF1": { |
|
"text": "Summary statistics of the VarDial 2020 shared task.", |
|
"type_str": "table", |
|
"num": null, |
|
"content": "<table/>", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |