|
{ |
|
"paper_id": "W19-0313", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T06:33:45.884953Z" |
|
}, |
|
"title": "Learning multilingual topics through aspect extraction from monolingual texts", |
|
"authors": [ |
|
{ |
|
"first": "Johannes", |
|
"middle": [], |
|
"last": "Huber", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Otto-von-Guericke-Universit\u00e4t Magdeburg & TrustYou GmbH", |
|
"location": { |
|
"settlement": "Munich" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Myra", |
|
"middle": [], |
|
"last": "Spiliopoulou", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Texts rating products and services of all kind are omnipresent on the internet. They come in various languages and often in such a large amount that it is very time-consuming to get an overview of all reviews. The goal of this work is to facilitate the summarization of opinions written in multiple languages, exemplified on a corpus of English and Finnish reviews. To this purpose, we propose a framework that extracts aspect terms from reviews and groups them to multilingual topic clusters. For aspect extraction we work on texts of each language separately. We evaluate three methods, all based on neural networks. One of them is supervised, one unsupervised, based on an attention mechanism and one a rule-based hybrid method. We then group the extracted aspect terms into multilingual clusters, whereby we evaluate three different clustering methods and juxtapose a method that creates clusters from multilingual word embeddings with a method that first creates monolingual clusters for each language separately and then merges them. We report on our results from a variety of experiments, observing the best results when clustering aspect terms extracted by the supervised method, using the k-means algorithm on multilingual embeddings. Tiivistelm\u00e4 Tekstej\u00e4, jotka arvostelevat erilaisia tuotteita ja palveluja l\u00f6ytyy kaikkialta netist\u00e4. Niit\u00e4 on usealla kielell\u00e4 ja niin monia, ett\u00e4 on hyvin aikaa viev\u00e4\u00e4 luoda yleiskuva kaikista arvosteluista. T\u00e4m\u00e4n ty\u00f6n p\u00e4\u00e4m\u00e4\u00e4r\u00e4 on helpottaa objektiivisen yhteenvedon luomista mielipiteist\u00e4, jotka ovat kirjoitettu useammalla kielell\u00e4, mik\u00e4 ty\u00f6ss\u00e4 on havainnollistettu niin englannin-kuin suomenkielisell\u00e4 aineistolla. T\u00e4h\u00e4n tarkoitukseen ty\u00f6 ehdottaa viitekehyst\u00e4 joka poimii aspektisanat arvosteluista ja ryhmitt\u00e4\u00e4 ne monikielisiin aiheklustereihin.", |
|
"pdf_parse": { |
|
"paper_id": "W19-0313", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Texts rating products and services of all kind are omnipresent on the internet. They come in various languages and often in such a large amount that it is very time-consuming to get an overview of all reviews. The goal of this work is to facilitate the summarization of opinions written in multiple languages, exemplified on a corpus of English and Finnish reviews. To this purpose, we propose a framework that extracts aspect terms from reviews and groups them to multilingual topic clusters. For aspect extraction we work on texts of each language separately. We evaluate three methods, all based on neural networks. One of them is supervised, one unsupervised, based on an attention mechanism and one a rule-based hybrid method. We then group the extracted aspect terms into multilingual clusters, whereby we evaluate three different clustering methods and juxtapose a method that creates clusters from multilingual word embeddings with a method that first creates monolingual clusters for each language separately and then merges them. We report on our results from a variety of experiments, observing the best results when clustering aspect terms extracted by the supervised method, using the k-means algorithm on multilingual embeddings. Tiivistelm\u00e4 Tekstej\u00e4, jotka arvostelevat erilaisia tuotteita ja palveluja l\u00f6ytyy kaikkialta netist\u00e4. Niit\u00e4 on usealla kielell\u00e4 ja niin monia, ett\u00e4 on hyvin aikaa viev\u00e4\u00e4 luoda yleiskuva kaikista arvosteluista. T\u00e4m\u00e4n ty\u00f6n p\u00e4\u00e4m\u00e4\u00e4r\u00e4 on helpottaa objektiivisen yhteenvedon luomista mielipiteist\u00e4, jotka ovat kirjoitettu useammalla kielell\u00e4, mik\u00e4 ty\u00f6ss\u00e4 on havainnollistettu niin englannin-kuin suomenkielisell\u00e4 aineistolla. T\u00e4h\u00e4n tarkoitukseen ty\u00f6 ehdottaa viitekehyst\u00e4 joka poimii aspektisanat arvosteluista ja ryhmitt\u00e4\u00e4 ne monikielisiin aiheklustereihin.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Texts expressing opinions about products are becoming important for a constantly increasing number of people. From 2011 to 2017, the percentage of customers in the United States that reads online reviews to determine if a business is good or bad at least occasionally has grown from 71% to 93% (Anderson, 2017) . Summarizing these reviews objectively can help customers in their choice of a product. As only about 40% of internet content is in English (Pimienta et al., 2009) , analyzing reviews also in other languages appears vital to give a full picture of opinions about an entity. In this work, we propose a framework that derives aspect terms from reviews written in different languages and then summarizes them into multilingual topics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 294, |
|
"end": 310, |
|
"text": "(Anderson, 2017)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 452, |
|
"end": 475, |
|
"text": "(Pimienta et al., 2009)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Aspect term extraction is a part of aspect-level sentiment analysis (ALSA). ALSA is able to provide a detailed analysis of opinions conveyed in a text by extracting the sentiment expressed towards each mentioned aspect. For example, given the sentence \"the waitress was friendly\", it should extract a positive sentiment towards the aspect \"waitress\". As creating summaries or statistics on these aspects alone would result in a lot of clutter, it is beneficial to group semantically similar words into \"topics\"; for example, aspect terms \"waitress\", \"waiter\" and \"bartender\" could form a topic \"staff\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "A survey by Schouten and Frasincar (2016) provides an overview about ALSA, but reports nearly exclusively on research on English corpora. Indeed, the vast majority of research on Sentiment Analysis and also natural language processing (NLP) in general has been done with English. Crosslingual NLP tries to utilize resources from a source language (generally English) for application on another target language. This is of advantage for languages where resources (in our work: opinions) are very rare. Multilingual NLP rather combines resources from different languages to analyze content written in them (Utt and Pad\u00f3, 2014) . In our work, we adhere to the second approach, in order to make full use of documents available in each of the languages under consideration.", |
|
"cite_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 41, |
|
"text": "Schouten and Frasincar (2016)", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 604, |
|
"end": 624, |
|
"text": "(Utt and Pad\u00f3, 2014)", |
|
"ref_id": "BIBREF52" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We address the question \"How can we extract monolingual aspect terms from reviews in different languages and then combine them into multilingual topics that describe the multilingual corpus?\". We evaluate different methods for both the aspect extraction and the clustering step, focusing on ways of reducing human involvement and automating the learning process with minimal human input.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "As proof of concept of our approach, we study a corpus containing English and Finnish reviews of restaurants. These two languages belong to unrelated families (Indoeuropean vs Uralic), differ in the amount of available resources (Finnish resources are sparse), and are linguistically very different: English is a language with comparatively little morphology, while Finnish is an agglutinative language with very rich morphology (Pirkola, 2001) . Our results show that multilingual topics can be extracted for even so different languages, making full use of the resources available in each language.", |
|
"cite_spans": [ |
|
{ |
|
"start": 429, |
|
"end": 444, |
|
"text": "(Pirkola, 2001)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This study is organized as follows. In the next section we discuss relevant research advances. In section 3 we describe our framework, its components and the mechanisms used to evaluate each component. Our experiments and results are presented in section 4. In section 5 we discuss our findings. The last section concludes the paper with an outlook on future work and extensions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Schouten and Frasincar in their 2016 survey classify the approaches to aspect detection into five different general methods: frequency-based, syntax-based, based on supervised learning, based on unsupervised learning and hybrids between the aforementioned.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Current Research State 2.1 Aspect extraction", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The unsupervised methods presented in the survey are mostly based on Latent Dirichlet Allocation (LDA), in variants to make it work on the aspect level, which is far finer grained than the document level LDA was designed for. For example, the relatively recent Amplayo and Song (2017) combine LDA with Biterm Topic Models. Asnani and Pawar (2017) use more or less default LDA but combines it with semantic information from a multilingual dictionary, which allows them to extract aspects from code-mixed text, in this case social media content written in a combination of Hindi and English.", |
|
"cite_spans": [ |
|
{ |
|
"start": 323, |
|
"end": 346, |
|
"text": "Asnani and Pawar (2017)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised approaches", |
|
"sec_num": "2.1.1" |
|
}, |
|
{ |
|
"text": "There are also some unsupervised approaches not based on LDA. Schouten et al. (2018) presents an unsupervised method based on association rule mining on cooccurrence frequency data. Also very recently, Dragoni et al. (2018) use NLP methods to get grammar dependencies and POS of a sentence and use rules based on that information to extract aspects from real-time stream data. A different approach is taken in the paper by He et al. (2017) , which is based on an attention model, and which is the unsupervised method we decided to evaluate in this work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 62, |
|
"end": 84, |
|
"text": "Schouten et al. (2018)", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 202, |
|
"end": 223, |
|
"text": "Dragoni et al. (2018)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 423, |
|
"end": 439, |
|
"text": "He et al. (2017)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised approaches", |
|
"sec_num": "2.1.1" |
|
}, |
|
{ |
|
"text": "For supervised approaches, we only examined methods that do not require the definition of a static set of aspects, but see the problem as a sequence labeling task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised approaches", |
|
"sec_num": "2.1.2" |
|
}, |
|
{ |
|
"text": "State-of-the-art approaches train Deep Neural Networks on word embeddings for aspect term extraction. Usually general purpose word embeddings are used, however Pham et al. (2017) focuses on training word embeddings specifically for aspect extraction. The first deep learning based method was Poria et al. (2016) , which uses word embeddings enriched by POS tags to surpass all previous approaches to aspect extraction significantly. The Xu et al. (2018) builds up on that, using double embeddings, which in this case means a combination of general-purpose embeddings with domain specific ones. Other recent supervised approaches using deep learning include Luo et al. (2018) , which uses embeddings acquired from a bidirectional dependency tree network to train a classifier, and , which uses an attention-based model in combination with selective transformation to not only extract aspect terms but also the corresponding opinion words. The last three papers mentioned report very similar performance values. We used (Xu et al., 2018) as the supervised method for our experiments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 178, |
|
"text": "Pham et al. (2017)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 292, |
|
"end": 311, |
|
"text": "Poria et al. (2016)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 437, |
|
"end": 453, |
|
"text": "Xu et al. (2018)", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 657, |
|
"end": 674, |
|
"text": "Luo et al. (2018)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 1018, |
|
"end": 1035, |
|
"text": "(Xu et al., 2018)", |
|
"ref_id": "BIBREF55" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised approaches", |
|
"sec_num": "2.1.2" |
|
}, |
|
{ |
|
"text": "A hybrid system, combining unsupervised extraction with training a Neural Network is described in Wu et al. (2018) . We also tested this approach in our experiments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 98, |
|
"end": 114, |
|
"text": "Wu et al. (2018)", |
|
"ref_id": "BIBREF54" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hybrid approach", |
|
"sec_num": "2.1.3" |
|
}, |
|
{ |
|
"text": "Multi-and crosslingual NLP has been dominated by methods utilizing word embeddings in the last years. A relatively recent not embedding-based approach is described by T\u00e4ckstr\u00f6m et al. (2012) , where crosslingual word clusters are used to transfer models to predict linguistic structure between languages. These semantic clusters are built first for one language in the way described in by Brown et al. (1992) and then combined by projection.", |
|
"cite_spans": [ |
|
{ |
|
"start": 167, |
|
"end": 190, |
|
"text": "T\u00e4ckstr\u00f6m et al. (2012)", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 389, |
|
"end": 408, |
|
"text": "Brown et al. (1992)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-and Crosslingual NLP", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "A survey on crosslingual word embeddings was compiled by Ruder et al. (2017) . It suggests a taxonomy of training crosslingual word-embedding models, which is classifying them based on the training data required: parallel or just comparable, aligned on word, sentence or document level. Dufter et al. (2018) claim the current state-of-the-art model for sentence-aligned methods, called \"concept induction\". A parallel corpus is taken as input and used to induce dictionary graphs. From the dictionary graphs, concepts and words-concept pairs are then induced from the dictionary graph. Finally, embeddings are learned from the word-concept pairs using the standard Word2Vec method (Mikolov et al., 2013) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 76, |
|
"text": "Ruder et al. (2017)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 287, |
|
"end": 307, |
|
"text": "Dufter et al. (2018)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 681, |
|
"end": 703, |
|
"text": "(Mikolov et al., 2013)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-and Crosslingual NLP", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Word-aligned models usually use both bi-or multilingual dictionaries and big monolingual corpora in the target languages. The method we used for our experiments was presented by Joulin et al. in 2018. It is based on creating a restrained mapping between the two target vector spaces using the entries from the bilingual dictionary as anchor points. Artetxe et al. (2017) use a similar approach, but focus on reducing the amount of training data required by a self-learning method.", |
|
"cite_spans": [ |
|
{ |
|
"start": 349, |
|
"end": 370, |
|
"text": "Artetxe et al. (2017)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multi-and Crosslingual NLP", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Some recent papers present ways to align word embeddings without any training data at all. Lample et al. (2018) and Zhang et al. (2017) use adversial training for this. In adversial training, two networks are used to provide training signals for each other. Lample et al. (2018) is also remarkable for presenting MUSE, an evaluation framework for multilingual embeddings that we also used as the basis for some of our experiments. Hoshen and Wolf (2018) instead of adversial training use iterative matching methods and Alvarez-Melis and Jaakkola (2018) see the task as an optimal transport problem and use the Gromov-Wasserstein distance to align embedding spaces. Figure 1 shows the components of the system and the data passed between them, together with task descriptions for the more complex components. The tasks and data printed in greyed out, italic letters are only relevant for some of the methods evaluated for the Aspect Term Extractor. Figure 2 shows the workflow of the system, with the focus put on exhibiting which parts are monolingual and which multilingual. First, reviews are crawled from internet sources, then preprocessed and vectorized as required by the method to be tested in the Aspect Term Extractor. Each method is optimized, the best performing one is used to extract the set of aspect terms required for the next steps. This happens independently for each language.", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 111, |
|
"text": "Lample et al. (2018)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 116, |
|
"end": 135, |
|
"text": "Zhang et al. (2017)", |
|
"ref_id": "BIBREF56" |
|
}, |
|
{ |
|
"start": 258, |
|
"end": 278, |
|
"text": "Lample et al. (2018)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 431, |
|
"end": 453, |
|
"text": "Hoshen and Wolf (2018)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 665, |
|
"end": 673, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 948, |
|
"end": 956, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Multi-and Crosslingual NLP", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In parallel, a set of multilingual word embeddings is created by aligning monolingual word embeddings of the target languages, using a dictionary that maps words between the languages to train the alignment.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Framework for extracting aspect terms and learning multilingual topics", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The Aspect Term Vectorizer uses the aspect terms as well as both the monolingual and the previously obtained multilingual embeddings to create aspect term vectors -for each aspect terms once with the monolingual and once with the multilingual embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Framework for extracting aspect terms and learning multilingual topics", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "These vectors are then clustered to topics. From the monolingual embeddings, monolingual clusters are formed and then combined; from the multilingual ones, the multilingual clusters are formed directly. The performance of both the two ways of getting multilingual clusters and of the three different clustering methods that are evaluated is compared. The best performing method is used to create the final set of multilingual topic clusters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Framework for extracting aspect terms and learning multilingual topics", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In the following subsections, the different components are outlined in detail.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Framework for extracting aspect terms and learning multilingual topics", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "This section describes the \"Preprocessor\" and \"Review Vectorizer\" components. The language of each review is identified using langid.py (Lui and Baldwin, 2012) , reviews not belonging to one of the target languages are filtered. Reviews are split into sentences with the PUNKT sentence segmenter (Kiss and Strunk, 2006) , using the default NLTK (Bird et al., 2009) model for the respective language. The Penn Treebank tokenizer (Marcus et al., 1993) was then used to split the sentence into tokens.", |
|
"cite_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 159, |
|
"text": "(Lui and Baldwin, 2012)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 296, |
|
"end": 319, |
|
"text": "(Kiss and Strunk, 2006)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 345, |
|
"end": 364, |
|
"text": "(Bird et al., 2009)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 428, |
|
"end": 449, |
|
"text": "(Marcus et al., 1993)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preparing review texts for classification", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "To improve the performance of both sentence segmentation and tokenization, if a fullstop, colon, exclamation mark or quotation mark was directly followed by an upper-case character, a space was inserted in between. Without this step, the sentence segmenter would usually not split the sentence in case of this relatively common error.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preparing review texts for classification", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The reason to choose these relatively simple methods over more sophisticated ones is their applicability to many languages: PUNKT was specifically designed as a multilingual method and the tokenizer is using relatively simple regular expressions that work for most languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preparing review texts for classification", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The tokens in each sentence are then vectorized by assigning them a word embedding from a general-purpose dataset of pretrained word embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preparing review texts for classification", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Some of the methods tested in the Aspect Term Extractor require additional preprocessing steps or use additional data in their vectors. These method-dependent preprocessing steps are outlined in the method descriptions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preparing review texts for classification", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "This section describes the \"Aspect Term Extractor\" component. The task of this component is to extract aspect terms from review sentences. We see aspects in the sense of the SemEval Task 2016/5 (Pontiki et al., 2016) , called there \"opinion target expression\":", |
|
"cite_spans": [ |
|
{ |
|
"start": 194, |
|
"end": 216, |
|
"text": "(Pontiki et al., 2016)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aspect term extraction", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "[\u2026] an explicit reference (mention) to the reviewed entity of the [entityaspect] pair. This reference can be a named entity, a common noun or a multi-word term [\u2026] In other words, in a phrase expressing an opinion towards an aspect of the reviewed entity, it is the term explicitly referring to the aspect. It can be \u2022 a named entity, like \"My Sprite was lukewarm when I got it. \"", |
|
"cite_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 80, |
|
"text": "[entityaspect]", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 160, |
|
"end": 163, |
|
"text": "[\u2026]", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aspect term extraction", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 a common noun, like \"The bartender excelled in his job. \"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aspect term extraction", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 a multi-word term, like \"I loved the meat balls with mashed potatoes!\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aspect term extraction", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To represent the positions of aspects in a sentence, sequence labelling with labelling space {B, I, O} is performed. That means that each word in a sentence gets assigned a tag: either O when it is not part of an aspect, B when it is a single-word term or the first word of a multi-word term or I when it is a later word of a multi-word aspect term. An example:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aspect term extraction", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The/O chicken/B wings/I were/O tasty/O and/O their/O price/B moderate./O", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aspect term extraction", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We evaluated the supervised method by Xu et al. (2018) , the hybrid method by Wu et al. (2018) and the unsupervised method by He et al. (2017) which we outlined in the previous chapter. All methods evaluated operate on a sentence level, so each sentence is seen as independent.", |
|
"cite_spans": [ |
|
{ |
|
"start": 38, |
|
"end": 54, |
|
"text": "Xu et al. (2018)", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 78, |
|
"end": 94, |
|
"text": "Wu et al. (2018)", |
|
"ref_id": "BIBREF54" |
|
}, |
|
{ |
|
"start": 126, |
|
"end": 142, |
|
"text": "He et al. (2017)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aspect term extraction", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The supervised method was presented by Xu et al. in the paper \"Double Embeddings and CNN-based Sequence Labeling for Aspect Extraction\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised method: Xu et al. (2018)", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "Their main contribution is to use what the authors call \"double embeddings\" as features. Double embeddings are concatenated general and domain-specific word embeddings. The general embeddings are trained on a huge, general dataset, the domainspecific embeddings on a dataset matching the target domain as exactly as possible. Labeled review sentences represented by these embeddings are used to train a relatively simple convolutional neural network.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised method: Xu et al. (2018)", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "This method requires the creation of domain-specific word vectors in the Review Vectorizer. We used this method without any changes. Besides in preprocessing, no changes were required to use this method for Finnish.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised method: Xu et al. (2018)", |
|
"sec_num": "3.2.1" |
|
}, |
|
{ |
|
"text": "Wu et al. presented the hybrid method we are evaluating in the paper \"A hybrid unsupervised method for aspect term and opinion target extraction\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hybrid method: Wu et al. (2018)", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "The basic idea is to create training data for a deep-learning classifier by using some linguistic rules to create possible candidates, which are then filtered according to their domain correlation. The trained neural network is used to improve the domain correlation filter, which results in better training data for the next iteration, and so on. We evaluate the system performance either using the filtered candidates as the prediction or using the neural network to predict the tags.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hybrid method: Wu et al. (2018)", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "While we tried to implement the model following the description in the paper as closely as possible, we had to make some adjustments in the selection of initial aspect candidates and the domain correlation filtering. The general architecture and the classifier remain unchanged. English Since the original paper doesn't describe the initial creation of candidates to the last detail and we slightly diverged from it, we are presenting the full process we implemented in the following paragraphs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hybrid method: Wu et al. (2018)", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "First, for each sentence a parse tree like the one displayed in figure 3 is created using the NTLK (Bird et al., 2009) interface to the Stanford CoreNLP phrase constituency parser . All subtrees with \"NP\" (noun phrase) as the root node that have a height of 3 or 4 are extracted from the tree. A height of 3 means the level directly over part-of-speech (POS) tags and manages to capture mainly simple phrases like those consisting of just a noun; a NP with a height of 4 could for example be two nouns connected by a conjunction.", |
|
"cite_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 118, |
|
"text": "(Bird et al., 2009)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hybrid method: Wu et al. (2018)", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "The noun phrases are filtered to only keep those which either include an adjective, adverb or a modal verb themselves or have a verbal phrase which includes one of these parts of speech as their right neighbor. Additionally, noun phrases that have a verb in base form as their left neighbour are kept, this is meant to capture phrases like \"try the sushi\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hybrid method: Wu et al. (2018)", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "Next, we remove overlapping phrases, which can exist because we initially picked phrases of both height 3 and 4. This is done as follows: The tree of height 4 is discarded if all of its NP-subtrees are also included in the set of trees eligible at this point. If that is not the case, the subtrees of height 3 are discarded.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hybrid method: Wu et al. (2018)", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "From the remaining phrases, we remove all words that are not nouns of some kind or connectors. If connectors are at the beginning or the end of a phrase, they are, as the next step, also removed. The remaining words are seen as the final aspect term candidates and passed to the domain correlation filter.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hybrid method: Wu et al. (2018)", |
|
"sec_num": "3.2.2" |
|
}, |
|
{ |
|
"text": "Finnish For Finnish, we had to take a different approach to extracting initial candidates, since no phrase constituency parser is available for that language. However, a good dependency parser, created by TurkuNLP group (Kanerva et al., 2018) exists, which yields POS-tags and depencies between words as shown in figure 4. That allowed us to extract candidates in the following way:", |
|
"cite_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 242, |
|
"text": "(Kanerva et al., 2018)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figure 4: Dependency parse with TurkuNLP", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We first extract all nouns that either are modified by an adjective directly or had a copula verb relation to any word. An example for a sentence with a copula verb relation is \"palvelu oli upea\" (\"the service was great\"), where \"oli\" (\"was\") is the copula verb. In a second step, if another noun is either part of a compound with a noun chosen in the first step or a nominal modifier of such, this other noun is added to the term. The resulting noun phrases are the final aspect candidates.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figure 4: Dependency parse with TurkuNLP", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Different than English, Finnish is a language with extremely many variations of each word. Because of that, we experimented with doing domain correlation filtering on lemmas instead of the word forms: all forms of a lemma should have the same domain correlation. The lemmas are created as part of the TurkuNLP parsing pipeline. As it is not guaranteed that a meaningful word embedding exists for a lemma, we represent a lemma by the embedding of its most frequent form. We always used lemmas to create the set of \"domain words\" against which every other word is compared in order to decide on its domain correlation. For the sentences used to train the classifier, we did experiments both with and without lemmatizing each word in them.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figure 4: Dependency parse with TurkuNLP", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Besides that, the architecture is the same as for English and as described in the paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figure 4: Dependency parse with TurkuNLP", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The unsupervised method was presented in the paper \"An Unsupervised Neural Attention Model for Aspect Extraction\" by He et al.. This method does not train any classifier, but instead tries to compute representations for a set of topics 1 , in the same vector space as the word embeddings. These topics are not predefined, but the number of topics is a fixed hyperparameter. The topic embeddings can be interpreted by looking at the closest words around them, which should be a set of semantically related words. They are learned by first determining a sentence representation using an attention model to determine the weight of each word in it and then reducing the error of recreating this sentence from the topic embeddings. As the model extracts the words that are most important both for a topic and in a sentence, this can be used to extract aspect words as well.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised method: He et al. (2017)", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "We used the method basically as suggested in the original paper. It requires lemmatization, stopword removal and part-of-speech tagging as additional preprocessing steps.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised method: He et al. (2017)", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "The focus of the paper is on forming coherent topic clusters from words and not on extracting aspect terms in our sense. The clusters presented in their paper therefore contain also many words that are not aspect terms in the sense desired for this work. However, since the goal of the attention model is to put focus on words that have a high importance both towards the sentence and towards the aspect, the vectors representing the weight of each word in a sentence create a good basis to extract aspect terms from them. We did so by simply using all nouns whose weight is over a specified threshold as aspect terms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised method: He et al. (2017)", |
|
"sec_num": "3.2.3" |
|
}, |
|
{ |
|
"text": "To put the performance of the three models in relation, the following simple baseline values are given:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline values", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "\u2022 Taking all nouns as aspect terms; if multiple nouns follow in a row, the later nouns get an I tag.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline values", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "\u2022 Using the aspect term candidates extracted for the hybrid method as described in section 3.2.2 directly.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline values", |
|
"sec_num": "3.2.4" |
|
}, |
|
{ |
|
"text": "As evaluation metrics for the aspect term extractor, precision, recall and F1-value are computed by comparing the output of a classifier with labeled data. A correctly identified aspect term is seen as a correct match, if it is not correctly identified it's a false one. This means that correctly set O tags do not increase the precision or recall. This method is described for example in Tjong Kim Sang and Buchholz (2000) . For example, if one of the methods would return the following tag sequence:", |
|
"cite_spans": [ |
|
{ |
|
"start": 389, |
|
"end": 423, |
|
"text": "Tjong Kim Sang and Buchholz (2000)", |
|
"ref_id": "BIBREF51" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3.2.5" |
|
}, |
|
{ |
|
"text": "B O B I O O O B", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3.2.5" |
|
}, |
|
{ |
|
"text": "and the ground truth is the following sequence:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3.2.5" |
|
}, |
|
{ |
|
"text": "B O B O O B B O", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3.2.5" |
|
}, |
|
{ |
|
"text": "the precision would be 0.333, since only one of three detected matches is correct; the recall would be 0.250, since only one of four actual matches is found. The F1 score would be 0.286, as it is the harmonic mean between precision and recall. As seen in the example, only full matches are seen as correct, partial matches are treated the same as wrong matches.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3.2.5" |
|
}, |
|
{ |
|
"text": "This section describes the \"Embedding Aligner\" component. The goal of training multilingual word embeddings is to create embeddings for words from multiple languages in the same vector space. These embeddings should have the same properties across languages as embeddings for one language, i.e. similar words should appear closely together in the vector space.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Multilingual Embeddings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "To obtain multilingual embeddings, we use pretrained monolingual embeddings and use the method described by . It works as follows: First, a linear mapping between the vectors of the words that are in the training dictionary is learned. The mapping is optimized by minimizing the average of the loss between the mapped vectors of the source language and the vectors of the target language. The used loss function is based on the cosine similarity between the two vectors and symmetrically the average cosine similarity between one vector and the k nearest neighbours of the other vector (with k being a hyperparameter). The mapping is restrained to be orthogonal, which leads to the distances between the vectors being preserved from the original monolingual embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Multilingual Embeddings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "We used the reference implementation provided by the authors completely unchanged, also using their recommended hyperparameters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Multilingual Embeddings", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In this section, we describe the components \"Aspect Term Vectorizer\" and \"Clusterer\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Clustering", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "The goal of the clustering task is to create groups of words that are semantically coherent, i.e. describe the same topic. We are evaluating three different clustering methods: k-means (in the k-means++ variant (Kanungo et al., 2004) ), Affinity Propagation (Frey and Dueck, 2007) and the attention-based method by He et al. described in section 3.2.3. K-means and Affinity Propagation are widely used general clustering algorithms that have been successfully used for the clustering of word embeddings (e.g. Kutuzov (2018) ; Cha et al. 2017; Su\u00e1rez-Paniagua et al. 2015), while the attention-based method was specifically developed for our target task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 211, |
|
"end": 233, |
|
"text": "(Kanungo et al., 2004)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 258, |
|
"end": 280, |
|
"text": "(Frey and Dueck, 2007)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 509, |
|
"end": 523, |
|
"text": "Kutuzov (2018)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Clustering", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "We first vectorize the aspect terms, using either the multilingual embeddings obtained from the aligner or the monolingual embeddings directly. Then we try the different clustering methods and ways of obtaining multilingual clusters and use the best performing one to create the desired multilingual clusters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Term Clustering", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "The k-means algorithm is based on determining k centroid points, each of which defines a cluster as the points that are closer to it than to any other centroid. The distance used is the euclidean distance. The centroids are initialized randomly and then in each iteration chosen as the mean of the points in the centroid's cluster. After updating the centroids, the assignments of points to clusters are recomputed. This procedure is repeated until updating the centroids no longer leads to changes in the clustering, i.e. until the algorithm converged. The k-means++ variant we used differs from original k-means in the initialization of centroids, which is optimized for faster convergence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "K-means", |
|
"sec_num": "3.4.1" |
|
}, |
|
{ |
|
"text": "Affinity propagation is an algorithm that does not require specifying the number of clusters. It uses the concept of passing messages between points in order to determine which points are chosen as exemplar points. Each non-exemplar point is assigned to exactly one exemplar point and all points that belong to the same exemplar form a cluster.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Affinity propagation", |
|
"sec_num": "3.4.2" |
|
}, |
|
{ |
|
"text": "The algorithm starts with considering all points as possible exemplars. In each iteration, messages are passed between points in two steps, responsibility and availability. Responsibility values are sent towards candidate exemplar points, indicating how likely a point considers the candidate to be its exemplar. Availability values are the reverse, being sent from the candidate exemplars towards other datapoints and reflecting how suitable the exemplar would be as an exemplar for a point. Responsibility is calculated using the distance between the two points and takes both availability of the previous iteration and distance towards other possible exemplars for the point into account. This results in the responsibility value being lowered if there are many other good exemplar candidates. Availability values are calculated from the responsibility of an exemplar candidate with itself and with other points. This results in a higher value if many other points see the candidate as suitable to be an exemplar.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Affinity propagation", |
|
"sec_num": "3.4.2" |
|
}, |
|
{ |
|
"text": "The algorithm terminates either after a set number of iterations or when the number of clusters hasn't changed for some iterations. After termination, for each point the exemplar candidate with the highest value for summed availability and responsibility is chosen as its exemplar. If this candidate is the point itself, that point is an exemplar.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Affinity propagation", |
|
"sec_num": "3.4.2" |
|
}, |
|
{ |
|
"text": "There are two main hyper-parameters for this algorithm to tune: The damping factor is used to determine the extent to which responsibility and availability values are updated over iterations, i.e. how big the impact of the value in the previous iteration is. A higher damping value indicates a higher weight to the previous value. The second hyper-parameter is the preference, which indicates how likely each point is chosen to be a exemplar. A higher preference value correlates with more clusters being created.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Affinity propagation", |
|
"sec_num": "3.4.2" |
|
}, |
|
{ |
|
"text": "The method for attention-based clustering has already been described in section 3.2.3. Each topic embedding defines a cluster, with each point being assigned to the topic embedding closest to it.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Attention based clustering", |
|
"sec_num": "3.4.3" |
|
}, |
|
{ |
|
"text": "All of the clustering methods are done on word embeddings, assigning each embedding of an aspect term to a cluster. This works well for single-word aspect terms, since their embeddings are either directly in the embedding set or can be inferred from subword information (Bojanowski et al., 2017) . For aspects terms consisting of more than one word, this is not possible. While in theory it is possible to train embeddings for n-grams (Zhou et al., 2017) , this would require training word embeddings specificially for our dataset and wouldn't allow us to use pretrained embeddings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 270, |
|
"end": 295, |
|
"text": "(Bojanowski et al., 2017)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 435, |
|
"end": 454, |
|
"text": "(Zhou et al., 2017)", |
|
"ref_id": "BIBREF57" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Assigning multi-word terms to a cluster", |
|
"sec_num": "3.4.4" |
|
}, |
|
{ |
|
"text": "Therefore, we use the following approach: We train the clusterings on only singleword terms. Then, we check for each word in the multi-word term which cluster it would be assigned to. The full multi-word term is assigned to the cluster most of the words in it are assigned to. In case there isn't one cluster assigned more often than all others, we assign one of the most frequent clusters randomly for affinity propagation. For k-means and the attention-based method, we use the distances between the words in the term and the centroid (resp. topic embedding) of the cluster as a tie-breaker; the cluster with the lowest distance gets assigned.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Assigning multi-word terms to a cluster", |
|
"sec_num": "3.4.4" |
|
}, |
|
{ |
|
"text": "To merge mono-lingual clusters of the different languages to multilingual ones, we used the bilingual dictionary also used for creating the multilingual clusters. For each cluster in the source language we checked in which clusters the translations of the words in it are in the clustering of the target language. The cluster gets merged with the cluster containing most translations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Merging monolingual to multilingual clusters", |
|
"sec_num": "3.4.5" |
|
}, |
|
{ |
|
"text": "Clusterings are evaluated against a pre-defined clustering. While this is in some ways slightly against the original purpose of dynamically creating topic clusters without pre-defining the set of topics, it appears to be the only way of providing an objective evaluation. In order to maintain the sense of dynamic clustering, we are mainly interested in seeing if clusters contain only terms belonging to one topic and not so much if there are clusters that could maybe be merged. To give an example, we would like to penalize if bartender and salmon steak are in the same cluster, since they very clearly do not belong to the same topic. We do not care much though if salmon steak and beef tenderloin are in the same cluster or not, since this is just a matter of how fine-grained the topic clustering is.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3.4.6" |
|
}, |
|
{ |
|
"text": "In order to meet this evaluation goal, we only define very few, broad clusters to evaluate against and see the homogeneity score (Rosenberg and Hirschberg, 2007) as our primary evaluation metric. Homogeneity is maximized when all clusters contain only elements from one ground-truth class, with 1 being the maximum and 0 the minimum value. Homogeneity strongly prefers fine-grained clustering over coarse grained ones; in the most extreme case, if a clustering would contain one cluster for each datapoint, homogeneity would be maximised. We therefore don't accept too finegrained clusterings and also report the complementing score, completeness, which is maximized when all ground-truth classes contain only elements of one cluster.", |
|
"cite_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 161, |
|
"text": "(Rosenberg and Hirschberg, 2007)", |
|
"ref_id": "BIBREF44" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3.4.6" |
|
}, |
|
{ |
|
"text": "This evaluation method is based on the way He et al. 2017are evaluating their results. The main difference is that they manually assign clusters to ground-truth classes, which we avoid.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3.4.6" |
|
}, |
|
{ |
|
"text": "All code is written in Python 3. We use PyTorch (Paszke et al., 2017) as the framework for all deep learning methods except for the unsupervised aspect extraction method, which uses TensorFlow (Abadi et al., 2016) . The clustering methods use the implementations from the Scikit-learn framework (Pedregosa et al., 2011) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 48, |
|
"end": 69, |
|
"text": "(Paszke et al., 2017)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 193, |
|
"end": 213, |
|
"text": "(Abadi et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 295, |
|
"end": 319, |
|
"text": "(Pedregosa et al., 2011)", |
|
"ref_id": "BIBREF36" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We implemented the crawling, preprocessing and vectorization components ourselves. For the other components, we used existing implementations of the tested methods as the base when they were available and extended and adjusted them to fit into our architecture. The only method completely implemented from scratch is the hybrid aspect extraction method, as no reference implementation has been published for it.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In this section we present the experiments done in the Aspect Term Extractor, which are aimed at finding the best method of extracting aspect terms from review sentences.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aspect Term Extraction", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "This subsection describes the datasets used for the experiments. Which data was used for which experiment is explained in detail in the subsections for each method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "English For English, we used SemEval 2016 Task 5 (Pontiki et al., 2016) as the annotated dataset. This dataset consists of 2674 sentences, of which 2000 are considered training and 674 testing data. Some of these sentences are marked as \"out of scope\" in the dataset and not annotated, so these were removed here. 2579 sentences remain. For the hybrid and unsupervised methods, an additional corpus of 75000 restaurant reviews, which consist of 368551 sentences, was used. These reviews are a random selection of reviews provided by TrustYou GmbH 2 , which is a company focusing on review management for hotels and restaurants . The reviews were collected from different public sources, including TripAdvisor, Google, OpenTable, Facebook and Zomato.", |
|
"cite_spans": [ |
|
{ |
|
"start": 49, |
|
"end": 71, |
|
"text": "(Pontiki et al., 2016)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "We use the pretrained word embeddings provided by the GloVe project (Pennington et al., 2014) , as this embedding set was used also in the original experiments for the supervised method (Xu et al., 2018) . It was trained on the CommonCrawl corpus, a general-purpose text corpus that includes text from several billion web pages; the GloVe embeddings were trained on 840 billion tokens. The GloVe set includes embeddings for 2.2 million words, the embeddings have 300 dimensions. As domainspecific embeddings for the supervised method, we use the embedding set provided by the authors, which is 100-dimensional and was trained with FastText (Bojanowski et al., 2017 ) on a dataset provided by Yelp.", |
|
"cite_spans": [ |
|
{ |
|
"start": 88, |
|
"end": 93, |
|
"text": "2014)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 186, |
|
"end": 203, |
|
"text": "(Xu et al., 2018)", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 640, |
|
"end": 664, |
|
"text": "(Bojanowski et al., 2017", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "Finnish For Finnish, it was more difficult to obtain a sizable corpus of restaurant reviews. We ended up crawling the page eat.fi, a website for reviews of restaurants in Finland. After filtering out all reviews written in a language different than Finnish with langid.py (Lui and Baldwin, 2012) , the obtained dataset consists of 71730 reviews, or 346144 sentences. 250 of these reviews, consisting of 1076 sentences, were labelled manually by the author. A subset of 70 reviews was additionally labelled by a native speaker; no major discrepancies in annotation were discovered. As general word embeddings, we use the Finnish word embeddings provided by FastText , which are also 300 dimensional and were trained on both CommonCrawl and Wikipedia data, together about 6 billion tokens. The provided dataset contains embeddings for exactly 2 million words, but also includes sub-word information that allows inferring embeddings for unknown words (Bojanowski et al., 2017) . We trained domain-specific embeddings ourselves with FastText on the full dataset of restaurant reviews. We used the default parameters of FastText to train 100-dimensional vectors. Table 1 shows the baseline values for the aspect extraction task. For both languages, these values were computed on the complete annotated datasets, consisting of 2579 sentences for English and 1076 sentences for Finnish. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 272, |
|
"end": 295, |
|
"text": "(Lui and Baldwin, 2012)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 948, |
|
"end": 973, |
|
"text": "(Bojanowski et al., 2017)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1158, |
|
"end": 1165, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "Datasets For the supervised method, the annotated data for Finnish was split to use 80% of the data for training and 20% for testing. This amounts to 216 testing and 860 training sentences. 128 of the training sentences were held out for choosing the best model and optimizing hyperparameters. For English, we used the SemEval 2016 Task 4 (Pontiki et al., 2016) dataset as suggested. After filtering \"out-of-scope\" sentences, that's 642 sentences for testing and 1937 for training. 150 training sentences were used for optimization.", |
|
"cite_spans": [ |
|
{ |
|
"start": 339, |
|
"end": 361, |
|
"text": "(Pontiki et al., 2016)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised", |
|
"sec_num": "4.2.3" |
|
}, |
|
{ |
|
"text": "English We attempted to recreate the English results from the paper (which uses the same dataset), but ended up with slightly worse values: With exactly the same hyperparameters, the model got an F1-Value of 0.724 (average over 5 runs), compared to the 0.747 reported in the paper. It is however to note that the performance deviation between runs is relatively high, with values ranging from 0.713 to 0.731 in the 5 runs. The best of the 5 runs had a precision of 0.674 and recall of 0.802. These values are all created with the evaluation tool provided by SemEval, which calculates slightly different values than our evaluation tool. With our evaluation script, the F1 value of the best run is 0.730, the averaged one 0.722. Since the difference between the values is very small and a detailed analysis of the differences is made difficult by the SemEval tool not being Open Source, we omit a further investigation. All other values reported in this paper are created with our evaluation script.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised", |
|
"sec_num": "4.2.3" |
|
}, |
|
{ |
|
"text": "Finnish For Finnish, we tested different learning rates and dropout values. The results are displayed in table 2. All values are the average of three independent runs. The other hyperparameters were kept the same as in the paper. The experiments show that the dropout rate has very little influence on the result. The learning rate however has a significant influence, with performance generally increasing with bigger learning rates, despite the high number of training iterations (200). In all experiments, recall was at least slightly higher than precision. The result for a dropout of 70 and a learning rate of 10 \u22125 sticks out as the system in this case learned to always predict the label O. An explanation for this result could be the choice of the loss function: The negative log-likelihood is calculated for every possible target label, including O. With O being, naturally for this task, the by far most frequent label, a slight bias towards choosing it can be expected. This is however contrary to our evaluation method, for which always predicting O is the worst possible result. It is unclear why this happens only for this specific combination of parameters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised", |
|
"sec_num": "4.2.3" |
|
}, |
|
{ |
|
"text": "For the hybrid method, we trained the model with the full dataset (annotated and unannotated) for both English and Finnish and evaluated it on the annotated dataset. For English, we additionally did experiments where we used only the annotated dataset for both training and evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hybrid", |
|
"sec_num": "4.2.4" |
|
}, |
|
{ |
|
"text": "We used a mini-batch size of 64 for all experiments. This value is not given in the original paper, as well as the learning rate. The latter we optimized as explained in the following subsection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hybrid", |
|
"sec_num": "4.2.4" |
|
}, |
|
{ |
|
"text": "English For English, we first did some experiments to determine good hyper-parameters using only the annotated data for training. Training for six iterations (updating the domain correlation filter and thereby the training data after each iteration) and ten epochs per iteration, we optimized separately the learning rate and the minimum correlation required to pass the correlation filter. All other hyper-parameters were kept as reported in the paper. Results for different learning rates can be found in table 3; we used 0.50 as the minimum correlation here. Table 4 shows results for different minimum correlation values, with the learning rate set to the best value found, 0.001. The columns in the section \"Classifier\" mean the performance of the trained classifier, the columns in the \"Filter\" section mean the performance when using the filtered aspect candidates as the prediction. The experiments show that the influence of the learning rate is again relatively big, similarly to the supervised method. On the other hand, changing the minimum correlation value to pass the filter has a quite low influence. Using a lower minimum correlation value slightly increases the recall and the F1 value, as the precision stays about constant.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 562, |
|
"end": 569, |
|
"text": "Table 4", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hybrid", |
|
"sec_num": "4.2.4" |
|
}, |
|
{ |
|
"text": "Using the best values of these two experiments, we also used the full dataset, including both annotated and unannotated data, for training. We set the minimum frequency to be included into the set of domain words to 75. The result barely changed compared to training on only the annotated data: The classifier's precision slightly increased to 0.321, however recall fell to 0.626, resulting in an unchanged F1 value of 0.424. For using the output after the filtering step, the precision rose to 0.580, but with a significantly lower recall of 0.183, the F1 value dropped to 0.278.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Classifier Filter", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Finnish Since the amount of annotated data available is significantly lower for Finnish than for English, we for Finnish only ran experiments using the full dataset, both annotated and unannotated, for training. Using the same hyperparameters as for training on the full English dataset, we got the results for Finnish shown in table 5. Table 5 : Results for Finnish with and without lemmatization", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 337, |
|
"end": 344, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Classifier Filter", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For the unsupervised, attention-based method we did most tests using the full dataset, containing both annotated and unannotated data, for training and evaluated the performance on the complete annotated dataset. We kept all hyper-parameters as in the paper. We tested the influence of the numbers of created clusters on the performance, which turned out to be negligible, so we kept it at 14 (which is the number used in the paper).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised", |
|
"sec_num": "4.2.5" |
|
}, |
|
{ |
|
"text": "For our modification of the method to obtain aspect terms, we had to introduce an additional hyper-parameter, which is the minimum weight of a word to be used as an aspect term. The performance for different values, both for English and Finnish, can be seen in table 6. This shows a precision/ recall trade-off: The lower the minimum weight, the higher the recall but the lower the precision. This result proves that the attention of a word generally is correlated to the likelihood of it being a aspect term. However, since recall decreases stronger than precision increases, a lower minimum weight leads generally to a higher F1 score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised", |
|
"sec_num": "4.2.5" |
|
}, |
|
{ |
|
"text": "For English, we additionally tested the performance when training and testing on only the annotated dataset. The F1 value was for all weight-cutoffs two to three percentage points lower than when using the full dataset. We see that the supervised method works best with a significant margin. The hybrid and unsupervised methods are at about the level of the rule-based baseline. Results for Finnish and English are comparable, with slightly better results for English with the supervised method and for Finnish with the other methods.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Finnish", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this section, we present the experiments evaluating the different ways of clustering and of creating multilingual clusters. This concerns primarily the \"Clusterer\" component, with additionally the \"Embedding Aligner\" playing a role in the experiments with multilingual embeddings.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multilingual embeddings and clusterings", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We use mainly the same datasets as for the aspect extraction task. The English labeled data from SemEval already contains category information, assigning each aspect term one of the classes ambiance, drinks, food, location, restaurant and service. The restaurant category is used for terms describing the restaurant in general and such that don't match one of the other categories. For the Finnish labeled data, we manually assigned each unique aspect term to one of these six classes. The English dataset contains 874 unique aspect terms, the Finnish dataset 623.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "Evaluation was done for all experiments with the full labeled datasets. We did experiments both with training the clusters on only the labeled datasets and with training them on the 5000 most frequent single-word aspect terms extracted by the best performing aspect extraction method from the full datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "For both English and Finnish, we use as word embeddings the pretrained FastText embeddings, which were trained on CommonCrawl and Wikipedia data and include subword information. For Finnish, this is the same embedding set used as for the aspect extraction task, for English, it is different. The reason for this is that we wanted to have embeddings trained in the same way for both languages, since we assumed that this would improve performance for the creation of multilingual embeddings from them.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "Both for creating multilingual word embeddings and for clustering we only worked with the embeddings of words actually required. This includes \u2022 all unique aspect terms,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "\u2022 the full vocabulary of our datasets, preprocessed as for the attention-based aspect extraction and clustering method (which is lemmatized and reduced to only include words that appear at least two times in the corpus),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "\u2022 words from the evaluation datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "In total, this results in 25808 words for English and 28327 words for Finnish. We did this mainly because the script to create multilingual embeddings is very memoryintensive and was not possible to run with the full embedding sets on our machines. Also, this procedure allowed us to utilize the sub-word information of the FastText embeddings and create embeddings for all words in our vocabulary, also such that are not part of the pretrained set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "We used the default parameters for training multilingual embeddings and ran the training for 25 iterations. We tested if there is a performance difference between aligning English embeddings to the Finnish embedding space or the other way round. The performance was slightly better when treating English as the target embedding space and aligning the Finnish embeddings into it, so we went with this direction. Figure 5 shows homogeneity and completeness scores for English and Finnish monolingual clusterings, as well as for multilingual clusterings, either based on multilingual embeddings or on merged clusters. As expected, for all of these measurements homogeneity values increased with an increasing number of clusters, up to around 0.60 for Finnish and 0.55 for English. Completeness values stayed more or less constant at a low value of around 0.2.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 411, |
|
"end": 419, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Multilingual embeddings", |
|
"sec_num": "4.3.2" |
|
}, |
|
{ |
|
"text": "Using multi-lingual embeddings results in homogeneity scores up to 0.53, the best score when merging monolingual clusters is about 0.46. Table 8 : Homogeneity values for clusters trained on either the full or only the the annotated dataset Table 8 shows homogeneity values for cluster size 29, which seemed like a good trade-off between a not too large number of clusters and a good homogeneity score. It shows in comparison the results for training clusters on only aspect terms from the annotated dataset and on also using the terms extracted by the supervised algorithm from the full dataset. The difference is between 2 and 13 percentage points, with the smallest difference for the monolingual English clusters and the biggest difference for monolingual Finnish clusters. This performance trend is also valid for other cluster sizes. For English, clusters trained on the full dataset work sometimes even slightly, up to 3 percentage points, better than those trained on only the annotated dataset. For Finnish, the performance is always at least 5 percentage points lower.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 137, |
|
"end": 144, |
|
"text": "Table 8", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 240, |
|
"end": 247, |
|
"text": "Table 8", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Clustering", |
|
"sec_num": "4.3.3" |
|
}, |
|
{ |
|
"text": "Affinity Propagation Initial experiments showed that the damping factor had nearly no influence on the resulting performance, so we set it to 0.9, the value suggested by the authors of the original paper (Rosenberg and Hirschberg, 2007) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 204, |
|
"end": 236, |
|
"text": "(Rosenberg and Hirschberg, 2007)", |
|
"ref_id": "BIBREF44" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Clustering", |
|
"sec_num": "4.3.3" |
|
}, |
|
{ |
|
"text": "The preference value however does have a very significant influence on the number of clusters created and therefore on our performance measurements.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Clustering", |
|
"sec_num": "4.3.3" |
|
}, |
|
{ |
|
"text": "We tested preference values from -42 to -6 in steps of 3, after that in steps of 1 until 0. We discarded any clustering with more than 30 clusters. Table 9 shows the best homogeneity performance for each experiment setup, together with the number of clusters created and the chosen preference value. The experiments show that the number of clusters created with the same preference value is strongly dependent on the amount of data. Comparing the optimal preference values for the experiments with multilingual embeddings, which contain about twice the amount of data, to the other experiments, we see that the required preference value to get a similar number of clusters is also about twice as small.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 148, |
|
"end": 155, |
|
"text": "Table 9", |
|
"ref_id": "TABREF12" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Clustering", |
|
"sec_num": "4.3.3" |
|
}, |
|
{ |
|
"text": "Another thing to notice is that using the full dataset for training increases the performance of the clustering for English and the merged clusters; it is to notice however that the number of clusters with the full data is slightly larger for these experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotated data only Full dataset", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Also it can be seen that merging monolingual clusters works worse than using multilingual embeddings when training on only the annotated dataset, but slightly better when using the full dataset. Completeness scores are about constant for all experiments at values around 0.15.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotated data only Full dataset", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Attention Table 10 shows results for clustering with the attention model from He et al. for different numbers of clusters. We ran tests for 14, 28 and 42 different clusters, 14 being the number chosen in the original paper. All other hyper-parameters we set to the best results from the aspect extraction experiments, see section 4.2.5 for details. We created the topic clusters from the complete review dataset. Table 10 : Homogeneity values for clustering with the attention-based model, using different numbers of clusters Different to the other methods, homogeneity scores don't generally increase with more clusters here; the results for 42 clusters are significantly worse than for 14 or 28 clusters across all setups. 28 clusters work best for English and merged clusters, for Finnish and when using multilingual embeddings 14 clusters work better. Using multilingual embeddings results in significantly worse values than merging clusters, English performs better than Finnish.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 10, |
|
"end": 18, |
|
"text": "Table 10", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 413, |
|
"end": 421, |
|
"text": "Table 10", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Annotated data only Full dataset", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Comparison For all experiments we see that clustering with k-means works, for a similar amount of clusters, better than the other methods. The difference to using affinity propagation is relatively small, the attention based method works a lot worse. This means that the simplest and fastest method works best in our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotated data only Full dataset", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For creating multilingual clusters, we see better results when using multilingual embeddings compared to creating monolingual clusters and merging them. Clustering with multilingual embeddings achieves nearly the same performance values as monolingual clusterings. We see that the two languages have generally similar performance in the monolingual experiments, with, depending on the setup, one or the other language performing slightly better. For English and when merging clusters, clustering on the full, automatically extracted set of aspect terms results in about the same performance as clustering on only the manually annotated terms. For Finnish and when using multilingual embeddings, using the full dataset yields worse results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Annotated data only Full dataset", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the section about aspect term extraction, we showed that the supervised deep learning method is beating the performance of methods that don't require annotated data by a relatively big margin. This can partly be explained by the nature of the other methods we used. The hybrid method is mainly based on filtering candidates from the rule-based system. However, as we achieve recall values of only about 0.55 with the rule-based extraction, it doesn't appear like focusing on removing candidates from this set is the right approach to increase results. On the other hand, for using the filtered rule-based output to train a classifier, the precision isn't good enough, as can be seen with the best performance value for the English hybrid model, which achieves relatively high recall of 0.73 but a precision of only 0.3. For the unsupervised model, reasons are similar: Since the model gives a weight to every word in the sentence, not just those that would possibly be aspect terms in our sense, we had to add some simple rules to get aspect terms comparable to the other methods. While these rules on their own achieve a recall of about 80% (and a very low precision), the method can under no circumstances find aspect terms that don't match these rules. While the attention model does appear to be meaningful in some way, the precision gains from filtering candidates is lower than the loss of recall, which results in a relatively poor overall performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aspect Term Extraction", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "The supervised model however achieves a good F1 score of about 0.7 both for English and Finnish. This is especially remarkable with the strict criterion of only treating exact matches as correct. There have been shared tasks (e.g. Wojatzki et al. (2017) ) that also counted aspect terms as correct when they were only overlapping with a ground truth term. A subjective look at the terms extracted by the model appears to confirm that the percentage of matches that a human would consider \"okay\" is significantly above 70%. Worth noting is also that the performance is about equal for English and Finnish, despite the very different language structure of English and Finnish and the significantly lower amount of training data for Finnish.", |
|
"cite_spans": [ |
|
{ |
|
"start": 231, |
|
"end": 253, |
|
"text": "Wojatzki et al. (2017)", |
|
"ref_id": "BIBREF53" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aspect Term Extraction", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We showed that clustering with k-means yields better results than the other methods. The reason for the attention based method to work badly is most likely that the topic centers it creates are not generally meaningful for the task we evaluate. This is due to this method creating clusters on the full reviews, not only on the extracted aspect terms. While in theory the attention mechanism should put weight only on the words representing aspects, this doesn't appear to always work well in practice. Looking at the topic clusters the method created on the full dataset in the best performing experiment (English, 28 clusters), we find for example one cluster containing mainly first names and one cluster containing predominantly positive adverbs. While there also are some clusters that look very good, like one containing mostly pasta dishes, these cannot save the overall bad performance of the method when clustering aspect terms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Clustering and multilingual embeddings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "It is not clear why affinity propagation gives worse results than k-means. Previous works generally report better results for the former, also when working with textual data, for example Guan et al. (2011) . However, Kutuzov (2018) , who also clustered word embeddings, reports that the performance for his experiments was sometimes better with k-means and sometimes with affinity propagation, so it seems to be highly dependent on the data used.", |
|
"cite_spans": [ |
|
{ |
|
"start": 187, |
|
"end": 205, |
|
"text": "Guan et al. (2011)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 217, |
|
"end": 231, |
|
"text": "Kutuzov (2018)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Clustering and multilingual embeddings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The good performance when creating clusters on the set of terms extracted with the supervised method from the first task further proves its quality.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Clustering and multilingual embeddings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We showed that by using multilingual embeddings, we can achieve results similar to monolingual clusterings. Investigating the created clusters in detail, we can find that while many clusters are nicely merged including related terms from both languages, there are also some clusters that only include words from one language. This shows that there is still further room for improvement in the task of creating multilingual embeddings, either by tuning hyper-parameters or by testing or developing new methods.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Clustering and multilingual embeddings", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "We showed that supervised aspect extraction methods work significantly better than unsupervised ones, which means training data has to be created for each new language to be added. However, we also show that about 1000 annotated sentences are enough to train a well-performing model. It may well be possible to further reduce this number by using active learning or transfer learning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extensibility to other languages", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "The method used for training multilingual embeddings was designed to work for more than two languages; its authors show that aligning 28 languages into the same vector space still works well on their evaluation tasks . However, since we noticed several clusters that only contained embeddings from one language, the performance on our clustering task is likely to reduce by some extent. Besides that, clustering would work the same as for two languages though. If clustering with k-means, the number of clusters could just be kept constant, for affinity propagation, the preference value would have to be adjusted for the additional data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extensibility to other languages", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "To summarize, our method is extensible, but some manual work to integrate another language would be required.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Extensibility to other languages", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "In our work, we presented a framework to extract aspect terms from monolingual reviews and cluster them to multilingual topics. We showed that for aspect term extraction, the supervised method we tested worked significantly better than the hybrid and unsupervised methods, which did not manage to exceed the performance of our baselines. We showed that for the supervised method, performance for English and Finnish is about equal, without any language-specific adjustments made.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion 6.1 Summary", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "For the clustering subtask, the best performing method was the simplest one we tested, k-means. We showed that when using multilingual embeddings, the performance of the clustering is just slightly worse compared to clustering only Finnish or only English terms monolingually. We also showed that the results when clustering on only annotated aspect terms are only slightly better than when clustering on the set of aspect terms obtained from the supervised aspect extraction method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion 6.1 Summary", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "There are some smaller, technical improvements that could be done to potentially improve the results in our experiments, mainly in the clustering part. While the homogeneity values for clusters created with multilingual embeddings already are close to the performance of monolingual clusters, we still got some clusters that only included words from one language, indicating further potential for improvements of the alignment process. Potential areas for improvements could be extended training dictionaries (for example by using PanLex (Kamholz et al., 2014) ) or a more extensive search for the best hyper-parameters.", |
|
"cite_spans": [ |
|
{ |
|
"start": 538, |
|
"end": 560, |
|
"text": "(Kamholz et al., 2014)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Technical extensions", |
|
"sec_num": "6.2.1" |
|
}, |
|
{ |
|
"text": "Another point for improvement of the clustering method is the evaluation method we chose. While using the classification categories from the SemEval data provided an objective truth to measure against and focusing on the homogeneity score allowed for finer-grained clusterings, we still can't really say which number of clusters yields the actually best clustering for the task. Determining which is the best clustering is a very subjective task, which points to a more detailed manual annotation of the resulting clusters being required.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Technical extensions", |
|
"sec_num": "6.2.1" |
|
}, |
|
{ |
|
"text": "Other directions of improvement are of larger scale. In the last months and years, progress in the field of deep learning has been very rapid. Many of the new developments could also be applied to the tasks of this paper, especially to the part about aspect term extraction. The supervised model which yielded the best performance is based on a relatively simple and straightforward neural network. It is likely that network architectures better suited for the task exist, but manually trying them is extremely time intensive. Methods like ENAS (Pham et al., 2018) optimize the architecture search, for example by sharing trained parameters between related architectures. The authors claim that using their method is 1000 times less computationally expensive than trying different architectures without optimization. Also other aspects in the suggested model could potentially be further improved, for example variational, learned dropout rates have shown better results than static ones (Molchanov et al., 2017) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 545, |
|
"end": 564, |
|
"text": "(Pham et al., 2018)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 988, |
|
"end": 1012, |
|
"text": "(Molchanov et al., 2017)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conceptual extensions", |
|
"sec_num": "6.2.2" |
|
}, |
|
{ |
|
"text": "For clustering, we found that one of the most simple clustering methods, k-means, performs better than the more sophisticated methods we tried. Clustering with kmeans has several weaknesses, like not being able to handle noise points and expecting non-overlapping clusters of similar sizes. However, especially when clustering the aspect terms extracted automatically, noise points have to be expected in our dataset; also the assumption of similar-sized clusters can not be made, since especially the \"food\" category is far bigger than the others. This indicates that there should be clustering methods that would be better suited for our problem than k-means. There are many additional methods to cluster high-dimensional data that could be tried. An overview is provided in Kriegel et al. (2009) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 777, |
|
"end": 798, |
|
"text": "Kriegel et al. (2009)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conceptual extensions", |
|
"sec_num": "6.2.2" |
|
}, |
|
{ |
|
"text": "For real-world applications to make use of our work, mainly two changes appear to be necessary: For one, the analysis and summarization of reviews is usually desired on the level of the entity they refer to, in our case the restaurant. This is necessary to provide a basis of comparison between the reviews for the different restaurants. Also, it would probably be required to choose an representative name for each topic cluster, so that for example a sentiment score could be displayed for each topic instead of having to list all the aspect terms in this topic. With these extensions, a system to dynamically detect and summarize the most relevant topics for a restaurant could be built. Our work has hopefully provided the basis for that.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conceptual extensions", |
|
"sec_num": "6.2.2" |
|
}, |
|
{ |
|
"text": "In the paper, the authors are using the term \"aspects\" for what we call \"topics\". We adopted this to our terminology for consistence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "www.trustyou.net", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work is inspired by the project OSCAR \"Opinion Stream Classification with Ensembles and Active Learners\" (funded by the German Research Foundation); Myra", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Tensorflow: A system for large-scale machine learning", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Abadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Barham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianmin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhifeng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Davis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthieu", |
|
"middle": [], |
|
"last": "Devin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjay", |
|
"middle": [], |
|
"last": "Ghemawat", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Irving", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Isard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manjunath", |
|
"middle": [], |
|
"last": "Kudlur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josh", |
|
"middle": [], |
|
"last": "Levenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajat", |
|
"middle": [], |
|
"last": "Monga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sherry", |
|
"middle": [], |
|
"last": "Moore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Derek", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Murray", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benoit", |
|
"middle": [], |
|
"last": "Steiner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Tucker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vijay", |
|
"middle": [], |
|
"last": "Vasudevan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pete", |
|
"middle": [], |
|
"last": "Warden", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Wicke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoqiang", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "12th USENIX Symposium on Operating Systems Design and Implementation (OSDI 16)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "265--283", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Abadi, Paul Barham, Jianmin Chen, Zhifeng Chen, Andy Davis, Jef- frey Dean, Matthieu Devin, Sanjay Ghemawat, Geoffrey Irving, Michael Isard, Manjunath Kudlur, Josh Levenberg, Rajat Monga, Sherry Moore, Derek G. Murray, Benoit Steiner, Paul Tucker, Vijay Vasudevan, Pete War- den, Martin Wicke, Yuan Yu, and Xiaoqiang Zheng. 2016. Tensorflow: A system for large-scale machine learning. In 12th USENIX Symposium on Operating Systems Design and Implementation (OSDI 16). pages 265-283. https://www.usenix.org/system/files/conference/osdi16/osdi16-abadi.pdf.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Gromov-wasserstein alignment of word embedding spaces", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Alvarez-Melis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tommi", |
|
"middle": [], |
|
"last": "Jaakkola", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Alvarez-Melis and Tommi Jaakkola. 2018. Gromov-wasserstein alignment of word embedding spaces. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics, Brus- sels, Belgium.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "An adaptable fine-grained sentiment analysis for summarization of multiple short online reviews", |
|
"authors": [ |
|
{ |
|
"first": "Reinald", |
|
"middle": [], |
|
"last": "Kim Amplayo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Data & Knowledge Engineering", |
|
"volume": "110", |
|
"issue": "", |
|
"pages": "54--67", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.datak.2017.03.009" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reinald Kim Amplayo and Min Song. 2017. An adaptable fine-grained sentiment anal- ysis for summarization of multiple short online reviews. Data & Knowledge Engi- neering 110:54-67. https://doi.org/10.1016/j.datak.2017.03.009.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Local consumer review survey", |
|
"authors": [ |
|
{ |
|
"first": "Myles", |
|
"middle": [], |
|
"last": "Anderson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Myles Anderson. 2017. Local consumer review survey 2017. BrightLocal https://www.brightlocal.com/learn/local-consumer-review-survey/.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Learning bilingual word embeddings with (almost) no bilingual data", |
|
"authors": [ |
|
{ |
|
"first": "Mikel", |
|
"middle": [], |
|
"last": "Artetxe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gorka", |
|
"middle": [], |
|
"last": "Labaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "451--462", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1042" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2017. Learning bilingual word embeddings with (almost) no bilingual data. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics, Vancouver, Canada, pages 451-462. https://doi.org/10.18653/v1/P17-1042.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Automatic aspect extraction using lexical semantic knowledge in code-mixed context", |
|
"authors": [ |
|
{ |
|
"first": "Kavita", |
|
"middle": [], |
|
"last": "Asnani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jyoti", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Pawar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Knowledge-Based and Intelligent Information & Engineering Systems: Proceedings of the 21st International Conference", |
|
"volume": "112", |
|
"issue": "", |
|
"pages": "693--702", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.procs.2017.08.146" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kavita Asnani and Jyoti D. Pawar. 2017. Automatic aspect extraction using lexical se- mantic knowledge in code-mixed context. Procedia Computer Science 112:693-702. Knowledge-Based and Intelligent Information & Engineering Systems: Proceed- ings of the 21st International Conference, KES-20176-8 September 2017, Marseille, France. https://doi.org/10.1016/j.procs.2017.08.146.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Natural Language Processing with Python", |
|
"authors": [ |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Bird", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ewan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Loper", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Steven Bird, Ewan Klein, and Edward Loper. 2009. Natural Language Processing with Python. O'Reilly Media, Inc., 1 edition.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Enriching word vectors with subword information", |
|
"authors": [ |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "5", |
|
"issue": "", |
|
"pages": "135--146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enrich- ing word vectors with subword information. Transactions of the Association for Computational Linguistics 5:135-146. http://aclweb.org/anthology/Q17-1010.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Class-based n-gram models of natural language", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"V" |
|
], |
|
"last": "Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Desouza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Mercer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jenifer", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Della Pietra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "Comput. Linguist", |
|
"volume": "18", |
|
"issue": "4", |
|
"pages": "467--479", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter F. Brown, Peter V. deSouza, Robert L. Mercer, Vincent J. Della Pietra, and Jenifer C. Lai. 1992. Class-based n-gram models of natural language. Comput. Linguist. 18(4):467-479. http://dl.acm.org/citation.cfm?id=176313.176316.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Language modeling by clustering with word embeddings for text readability assessment", |
|
"authors": [ |
|
{ |
|
"first": "Miriam", |
|
"middle": [], |
|
"last": "Cha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Youngjune", |
|
"middle": [], |
|
"last": "Gwon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Kung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 ACM on Conference on Information and Knowledge Management", |
|
"volume": "17", |
|
"issue": "", |
|
"pages": "2003--2006", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3132847.3133104" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Miriam Cha, Youngjune Gwon, and H. T. Kung. 2017. Language modeling by cluster- ing with word embeddings for text readability assessment. In Proceedings of the 2017 ACM on Conference on Information and Knowledge Management. ACM, New York, NY, USA, CIKM '17, pages 2003-2006. https://doi.org/10.1145/3132847.3133104.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "An unsupervised aspect extraction strategy for monitoring real-time reviews stream", |
|
"authors": [ |
|
{ |
|
"first": "Mauro", |
|
"middle": [], |
|
"last": "Dragoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Federici", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andi", |
|
"middle": [], |
|
"last": "Rexha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Information Processing & Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.ipm.2018.04.010" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mauro Dragoni, Marco Federici, and Andi Rexha. 2018. An unsupervised aspect ex- traction strategy for monitoring real-time reviews stream. Information Processing & Management https://doi.org/10.1016/j.ipm.2018.04.010.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Embedding learning through multilingual concept induction", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Dufter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mengjie", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Schmitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Fraser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1520--1530", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Dufter, Mengjie Zhao, Martin Schmitt, Alexander Fraser, and Hinrich Sch\u00fctze. 2018. Embedding learning through multilingual concept induction. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics, Melbourne, Australia, pages 1520-1530. http://aclweb.org/anthology/P18-1141.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Clustering by passing messages between data points", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Brendan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Delbert", |
|
"middle": [], |
|
"last": "Frey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dueck", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Science", |
|
"volume": "315", |
|
"issue": "5814", |
|
"pages": "972--976", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1126/science.1136800" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Brendan J. Frey and Delbert Dueck. 2007. Clustering by passing messages between data points. Science 315(5814):972-976. https://doi.org/10.1126/science.1136800.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Learning Word Vectors for 157 Languages", |
|
"authors": [ |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prakhar", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": ";", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Khalid", |
|
"middle": [], |
|
"last": "Choukri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Cieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thierry", |
|
"middle": [], |
|
"last": "Declerck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Goggi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Koiti", |
|
"middle": [], |
|
"last": "Hasida", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hitoshi", |
|
"middle": [], |
|
"last": "Isahara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bente", |
|
"middle": [], |
|
"last": "Maegaard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Mariani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Edouard Grave, Piotr Bojanowski, Prakhar Gupta, Armand Joulin, and Tomas Mikolov. 2018. Learning Word Vectors for 157 Languages. In Nicoletta Calzo- lari (Conference chair), Khalid Choukri, Christopher Cieri, Thierry Declerck, Sara Goggi, Koiti Hasida, Hitoshi Isahara, Bente Maegaard, Joseph Mariani, H\u00e9l\u00e8ne Mazo, Asuncion Moreno, Jan Odijk, Stelios Piperidis, and Takenobu Tokunaga, ed- itors, Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018). European Language Resources Association (ELRA), Miyazaki, Japan.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Text clustering with seeds affinity propagation", |
|
"authors": [ |
|
{ |
|
"first": "Renchu", |
|
"middle": [], |
|
"last": "Guan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaohu", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maurizio", |
|
"middle": [], |
|
"last": "Marchese", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanchun", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "IEEE Trans. on Knowl. and Data Eng", |
|
"volume": "23", |
|
"issue": "4", |
|
"pages": "627--637", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TKDE.2010.144" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Renchu Guan, Xiaohu Shi, Maurizio Marchese, Chen Yang, and Yanchun Liang. 2011. Text clustering with seeds affinity propagation. IEEE Trans. on Knowl. and Data Eng. 23(4):627-637. https://doi.org/10.1109/TKDE.2010.144.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "An unsupervised neural attention model for aspect extraction", |
|
"authors": [ |
|
{ |
|
"first": "Ruidan", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hwee Tou", |
|
"middle": [], |
|
"last": "Wee Sun Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dahlmeier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "388--397", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1036" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ruidan He, Wee Sun Lee, Hwee Tou Ng, and Daniel Dahlmeier. 2017. An unsuper- vised neural attention model for aspect extraction. In Proceedings of the 55th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers). Association for Computational Linguistics, Vancouver, Canada, pages 388- 397. https://doi.org/10.18653/v1/P17-1036.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Non-adversarial unsupervised word translation", |
|
"authors": [ |
|
{ |
|
"first": "Yedid", |
|
"middle": [], |
|
"last": "Hoshen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lior", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yedid Hoshen and Lior Wolf. 2018. Non-adversarial unsupervised word translation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics, Brussels, Belgium.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Loss in translation: Learning bilingual word mapping with a retrieval criterion", |
|
"authors": [ |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piotr", |
|
"middle": [], |
|
"last": "Bojanowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Herv\u00e9", |
|
"middle": [], |
|
"last": "J\u00e9gou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Armand Joulin, Piotr Bojanowski, Tomas Mikolov, Herv\u00e9 J\u00e9gou, and Edouard Grave. 2018. Loss in translation: Learning bilingual word mapping with a retrieval crite- rion. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics, Brussels, Belgium.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Panlex: Building a resource for panlingual lexical translation", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Kamholz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Pool", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Susan", |
|
"middle": [], |
|
"last": "Colowick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Kamholz, Jonathan Pool, and Susan Colowick. 2014. Panlex: Build- ing a resource for panlingual lexical translation. In Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC-", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Turku neural parser pipeline: An end-to-end system for the CoNLL 2018 shared task", |
|
"authors": [ |
|
{ |
|
"first": "Jenna", |
|
"middle": [], |
|
"last": "Kanerva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Filip", |
|
"middle": [], |
|
"last": "Ginter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niko", |
|
"middle": [], |
|
"last": "Miekka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Akseli", |
|
"middle": [], |
|
"last": "Leino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tapio", |
|
"middle": [], |
|
"last": "Salakoski", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the CoNLL 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "133--142", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jenna Kanerva, Filip Ginter, Niko Miekka, Akseli Leino, and Tapio Salakoski. 2018. Turku neural parser pipeline: An end-to-end system for the CoNLL 2018 shared task. In Proceedings of the CoNLL 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies. Association for Computational Linguistics, Brussels, Belgium, pages 133-142. http://www.aclweb.org/anthology/K18-2013.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "A local search approximation algorithm for k-means clustering", |
|
"authors": [ |
|
{ |
|
"first": "Tapas", |
|
"middle": [], |
|
"last": "Kanungo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Mount", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Netanyahu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Piatko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruth", |
|
"middle": [], |
|
"last": "Silverman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Special Issue on the 18th Annual Symposium on Computational Geometry -SoCG2002", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "89--112", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.comgeo.2004.03.003" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tapas Kanungo, David M. Mount, Nathan S. Netanyahu, Christine D. Piatko, Ruth Silverman, and Angela Y. Wu. 2004. A local search approximation algorithm for k-means clustering. Computational Geometry 28(2):89-112. Special Is- sue on the 18th Annual Symposium on Computational Geometry -SoCG2002. https://doi.org/10.1016/j.comgeo.2004.03.003.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Unsupervised multilingual sentence boundary detection", |
|
"authors": [ |
|
{ |
|
"first": "Tibor", |
|
"middle": [], |
|
"last": "Kiss", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Computational Linguistics", |
|
"volume": "32", |
|
"issue": "4", |
|
"pages": "485--525", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/coli.2006.32.4.485" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tibor Kiss and Jan Strunk. 2006. Unsupervised multilingual sen- tence boundary detection. Computational Linguistics 32(4):485-525.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Clustering highdimensional data: A survey on subspace clustering, pattern-based clustering, and correlation clustering", |
|
"authors": [ |
|
{ |
|
"first": "Hans-Peter", |
|
"middle": [], |
|
"last": "Kriegel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peer", |
|
"middle": [], |
|
"last": "Kr\u00f6ger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arthur", |
|
"middle": [], |
|
"last": "Zimek", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "ACM Trans. Knowl. Discov. Data", |
|
"volume": "3", |
|
"issue": "1", |
|
"pages": "1--58", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/1497577.1497578" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hans-Peter Kriegel, Peer Kr\u00f6ger, and Arthur Zimek. 2009. Clustering high- dimensional data: A survey on subspace clustering, pattern-based cluster- ing, and correlation clustering. ACM Trans. Knowl. Discov. Data 3(1):1-58. https://doi.org/10.1145/1497577.1497578.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Russian word sense induction by clustering averaged word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Andrey", |
|
"middle": [], |
|
"last": "Kutuzov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrey Kutuzov. 2018. Russian word sense induction by clustering averaged word embeddings. CoRR abs/1805.02258. http://arxiv.org/abs/1805.02258.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Word translation without parallel data", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc'aurelio", |
|
"middle": [], |
|
"last": "Ranzato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ludovic", |
|
"middle": [], |
|
"last": "Denoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Herv\u00e9", |
|
"middle": [], |
|
"last": "J\u00e9gou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample, Alexis Conneau, Marc'Aurelio Ranzato, Ludovic Denoyer, and Herv\u00e9 J\u00e9gou. 2018. Word translation without parallel data. In International Confer- ence on Learning Representations. https://openreview.net/forum?id=H196sainb.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Aspect term extraction with history attention and selective transformation", |
|
"authors": [ |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lidong", |
|
"middle": [], |
|
"last": "Bing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piji", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wai", |
|
"middle": [], |
|
"last": "Lam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhimou", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence, IJCAI-18. International Joint Conferences on Artificial Intelligence Organization", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4194--4200", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.24963/ijcai.2018/583" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xin Li, Lidong Bing, Piji Li, Wai Lam, and Zhimou Yang. 2018. Aspect term extraction with history attention and selective transformation. In Proceedings of the Twenty- Seventh International Joint Conference on Artificial Intelligence, IJCAI-18. Interna- tional Joint Conferences on Artificial Intelligence Organization, pages 4194-4200. https://doi.org/10.24963/ijcai.2018/583.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "langid.py: An off-the-shelf language identification tool", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Lui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the ACL 2012 System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "25--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Lui and Timothy Baldwin. 2012. langid.py: An off-the-shelf language identification tool. In Proceedings of the ACL 2012 System Demonstrations. Association for Computational Linguistics, Jeju Island, Korea, pages 25-30. http://www.aclweb.org/anthology/P12-3005.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Improving aspect term extraction with bidirectional dependency tree representation", |
|
"authors": [ |
|
{ |
|
"first": "Huaishao", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianrui", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Herwig", |
|
"middle": [], |
|
"last": "Unger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huaishao Luo, Tianrui Li, Bing Liu, Bin Wang, and Herwig Unger. 2018. Improving aspect term extraction with bidirectional dependency tree representation. CoRR abs/1805.07889. http://arxiv.org/abs/1805.07889.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "The stanford corenlp natural language processing toolkit", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Bauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jenny", |
|
"middle": [], |
|
"last": "Finkel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Bethard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Mcclosky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Association for Computational Linguistics (ACL) System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "55--60", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher D. Manning, Mihai Surdeanu, John Bauer, Jenny Finkel, Steven J. Bethard, and David McClosky. 2014. The stanford corenlp natural language processing toolkit. In Association for Computational Linguistics (ACL) System Demonstrations. pages 55-60. http://www.aclweb.org/anthology/P/P14/P14-5010.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Building a large annotated corpus of english: The penn treebank", |
|
"authors": [ |
|
{ |
|
"first": "Mitchell", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Marcus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mary", |
|
"middle": [ |
|
"Ann" |
|
], |
|
"last": "Marcinkiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Beatrice", |
|
"middle": [], |
|
"last": "Santorini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Comput. Linguist", |
|
"volume": "19", |
|
"issue": "2", |
|
"pages": "313--330", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mitchell P. Marcus, Mary Ann Marcinkiewicz, and Beatrice Santorini. 1993. Building a large annotated corpus of english: The penn treebank. Comput. Linguist. 19(2):313- 330. http://dl.acm.org/citation.cfm?id=972470.972475.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Efficient estimation of word representations in vector space", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Efficient estimation of word representations in vector space. CoRR abs/1301.3781. http://arxiv.org/abs/1301.3781.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Variational dropout sparsifies deep neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Dmitry", |
|
"middle": [], |
|
"last": "Molchanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arsenii", |
|
"middle": [], |
|
"last": "Ashukha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dmitry", |
|
"middle": [], |
|
"last": "Vetrov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Doina Precup and", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dmitry Molchanov, Arsenii Ashukha, and Dmitry Vetrov. 2017. Varia- tional dropout sparsifies deep neural networks. In Doina Precup and", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Proceedings of the 34th International Conference on Machine Learning. PMLR, International Convention Centre", |
|
"authors": [ |
|
{ |
|
"first": "Yee Whye", |
|
"middle": [], |
|
"last": "Teh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "70", |
|
"issue": "", |
|
"pages": "2498--2507", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yee Whye Teh, editors, Proceedings of the 34th International Conference on Machine Learning. PMLR, International Convention Centre, Sydney, Australia, volume 70 of Proceedings of Machine Learning Research, pages 2498-2507. http://proceedings.mlr.press/v70/molchanov17a.html.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Automatic differentiation in pytorch", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Paszke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soumith", |
|
"middle": [], |
|
"last": "Chintala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gregory", |
|
"middle": [], |
|
"last": "Chanan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [], |
|
"last": "Devito", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeming", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alban", |
|
"middle": [], |
|
"last": "Desmaison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luca", |
|
"middle": [], |
|
"last": "Antiga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lerer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the NIPS 2017 Autodiff Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. 2017. Auto- matic differentiation in pytorch. In Proceedings of the NIPS 2017 Autodiff Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Scikit-learn: Machine learning in python", |
|
"authors": [ |
|
{ |
|
"first": "Fabian", |
|
"middle": [], |
|
"last": "Pedregosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ga\u00ebl", |
|
"middle": [], |
|
"last": "Varoquaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Gramfort", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Michel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bertrand", |
|
"middle": [], |
|
"last": "Thirion", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olivier", |
|
"middle": [], |
|
"last": "Grisel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mathieu", |
|
"middle": [], |
|
"last": "Blondel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Prettenhofer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ron", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Dubourg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jake", |
|
"middle": [], |
|
"last": "Vanderplas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Passos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Cournapeau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthieu", |
|
"middle": [], |
|
"last": "Brucher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthieu", |
|
"middle": [], |
|
"last": "Perrot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00c9douard", |
|
"middle": [], |
|
"last": "Duchesnay", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "The Journal of Machine Learning Research", |
|
"volume": "12", |
|
"issue": "", |
|
"pages": "2825--2830", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fabian Pedregosa, Ga\u00ebl Varoquaux, Alexandre Gramfort, Vincent Michel, Bertrand Thirion, Olivier Grisel, Mathieu Blondel, Peter Prettenhofer, Ron Weiss, Vin- cent Dubourg, Jake Vanderplas, Alexandre Passos, David Cournapeau, Matthieu Brucher, Matthieu Perrot, and \u00c9douard Duchesnay. 2011. Scikit-learn: Machine learning in python. The Journal of Machine Learning Research 12:2825-2830. http://dl.acm.org/citation.cfm?id=1953048.2078195.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. Glove: Global vectors for word representation. In Empirical Methods in Natural Language Process- ing (EMNLP). pages 1532-1543. http://www.aclweb.org/anthology/D14-1162.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Fine-tuning word embeddings for aspectbased sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Duc-Hong", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anh-Cuong", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "International Conference on Text, Speech, and Dialogue", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "500--508", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Duc-Hong Pham, Anh-Cuong Le, et al. 2017. Fine-tuning word embeddings for aspect- based sentiment analysis. In International Conference on Text, Speech, and Dialogue. Springer, pages 500-508.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Efficient neural architecture search via parameters sharing", |
|
"authors": [ |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Pham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melody", |
|
"middle": [], |
|
"last": "Guan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barret", |
|
"middle": [], |
|
"last": "Zoph", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 35th International Conference on Machine Learning. PMLR, Stockholmsm\u00e4ssan", |
|
"volume": "80", |
|
"issue": "", |
|
"pages": "4095--4104", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hieu Pham, Melody Guan, Barret Zoph, Quoc Le, and Jeff Dean. 2018. Efficient neural architecture search via parameters sharing. In Jennifer Dy and Andreas Krause, ed- itors, Proceedings of the 35th International Conference on Machine Learning. PMLR, Stockholmsm\u00e4ssan, Stockholm Sweden, volume 80 of Proceedings of Machine Learn- ing Research, pages 4095-4104. http://proceedings.mlr.press/v80/pham18a.html.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Twelve years of measuring linguistic diversity in the Internet: balance and perspectives. United Nations Educational, Scientific and Cultural Organization Paris", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Pimienta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Prado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00c1lvaro", |
|
"middle": [], |
|
"last": "Blanco", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Pimienta, Daniel Prado, and \u00c1lvaro Blanco. 2009. Twelve years of measuring linguistic diversity in the Internet: balance and perspectives. United Nations Educa- tional, Scientific and Cultural Organization Paris.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Morphological typology of languages for ir", |
|
"authors": [ |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Pirkola", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Journal of Documentation", |
|
"volume": "57", |
|
"issue": "3", |
|
"pages": "330--348", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1108/EUM0000000007085" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ari Pirkola. 2001. Morphological typology of languages for ir. Journal of Documenta- tion 57(3):330-348. https://doi.org/10.1108/EUM0000000007085.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Semeval-2016 task 5: Aspect based sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Pontiki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitris", |
|
"middle": [], |
|
"last": "Galanis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haris", |
|
"middle": [], |
|
"last": "Papageorgiou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ion", |
|
"middle": [], |
|
"last": "Androutsopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suresh", |
|
"middle": [], |
|
"last": "Manandhar", |
|
"suffix": "" |
|
}, |
|
{
"first": "Mohammed",
"middle": [],
"last": "Al-Smadi",
"suffix": ""
},
{
"first": "Mahmoud",
"middle": [],
"last": "Al-Ayyoub",
"suffix": ""
},
{
"first": "Yanyan",
"middle": [],
"last": "Zhao",
"suffix": ""
},
{
"first": "Bing",
"middle": [],
"last": "Qin",
"suffix": ""
},
{
"first": "Orph\u00e9e",
"middle": [],
"last": "De Clercq",
"suffix": ""
},
{
"first": "Veronique",
"middle": [],
"last": "Hoste",
"suffix": ""
},
{
"first": "Marianna",
"middle": [],
"last": "Apidianaki",
"suffix": ""
},
{
"first": "Xavier",
"middle": [],
"last": "Tannier",
"suffix": ""
},
{
"first": "Natalia",
"middle": [],
"last": "Loukachevitch",
"suffix": ""
},
{
"first": "Evgeniy",
"middle": [],
"last": "Kotelnikov",
"suffix": ""
}
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval-2016)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "19--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maria Pontiki, Dimitris Galanis, Haris Papageorgiou, Ion Androutsopoulos, Suresh Manandhar, Mohammed AL-Smadi, Mahmoud Al-Ayyoub, Yanyan Zhao, Bing Qin, Orph\u00e9e De Clercq, Veronique Hoste, Marianna Apidianaki, Xavier Tannier, Na- talia Loukachevitch, Evgeniy Kotelnikov, N\u00faria Bel, Salud Maria Jim\u00e9nez-Zafra, and G\u00fcl\u015fen Eryi\u011fit. 2016. Semeval-2016 task 5: Aspect based sentiment analysis. In Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval- 2016). Association for Computational Linguistics, San Diego, California, pages 19- 30. http://www.aclweb.org/anthology/S16-1002.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Aspect extraction for opinion mining with a deep convolutional neural network. Knowledge-Based Systems", |
|
"authors": [ |
|
{ |
|
"first": "Soujanya", |
|
"middle": [], |
|
"last": "Poria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erik", |
|
"middle": [], |
|
"last": "Cambria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Gelbukh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "New Avenues in Knowledge Bases for Natural Language Processing", |
|
"volume": "108", |
|
"issue": "", |
|
"pages": "42--49", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.knosys.2016.06.009" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Soujanya Poria, Erik Cambria, and Alexander Gelbukh. 2016. Aspect extraction for opinion mining with a deep convolutional neural network. Knowledge-Based Sys- tems 108:42-49. New Avenues in Knowledge Bases for Natural Language Process- ing. https://doi.org/10.1016/j.knosys.2016.06.009.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "V-measure: A conditional entropybased external cluster evaluation measure", |
|
"authors": [ |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Rosenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Hirschberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew Rosenberg and Julia Hirschberg. 2007. V-measure: A conditional entropy- based external cluster evaluation measure. In Proceedings of the 2007 Joint Confer- ence on Empirical Methods in Natural Language Processing and Computational Nat- ural Language Learning (EMNLP-CoNLL). http://www.aclweb.org/anthology/D07- 1043.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "A survey of cross-lingual embedding models", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vuli\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00f8gaard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Ruder, Ivan Vuli\u0107, and Anders S\u00f8gaard. 2017. A survey of cross-lingual embedding models. CoRR abs/1706.04902. http://arxiv.org/abs/1706.04902.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Survey on aspect-level sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Kim", |
|
"middle": [], |
|
"last": "Schouten", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Flavius", |
|
"middle": [], |
|
"last": "Frasincar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "IEEE Trans. on Knowl. and Data Eng", |
|
"volume": "28", |
|
"issue": "3", |
|
"pages": "813--830", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TKDE.2015.2485209" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kim Schouten and Flavius Frasincar. 2016. Survey on aspect-level sen- timent analysis. IEEE Trans. on Knowl. and Data Eng. 28(3):813-830.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "Supervised and unsupervised aspect category detection for sentiment analysis with co-occurrence data", |
|
"authors": [ |
|
{ |
|
"first": "Kim", |
|
"middle": [], |
|
"last": "Schouten", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Onne", |
|
"middle": [], |
|
"last": "Van Der Weijde", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Flavius", |
|
"middle": [], |
|
"last": "Frasincar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rommert", |
|
"middle": [], |
|
"last": "Dekker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "IEEE Transactions on Cybernetics", |
|
"volume": "48", |
|
"issue": "4", |
|
"pages": "1263--1275", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/TCYB.2017.2688801" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kim Schouten, Onne van der Weijde, Flavius Frasincar, and Rommert Dekker. 2018. Supervised and unsupervised aspect category detection for sentiment anal- ysis with co-occurrence data. IEEE Transactions on Cybernetics 48(4):1263-1275. https://doi.org/10.1109/TCYB.2017.2688801.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Word embedding clustering for disease named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "V\u00edctor",
|
"middle": [], |
|
"last": "Su\u00e1rez-Paniagua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabel", |
|
"middle": [], |
|
"last": "Segura-Bedmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paloma", |
|
"middle": [], |
|
"last": "Mart\u00ednez",
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Fifth BioCreative Challenge Evaluation Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "299--304", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "V\u00edctor Su\u00e1rez-Paniagua, Isabel Segura-Bedmar, and Paloma Mart\u00ednez. 2015. Word embedding clustering for disease named entity recognition. In Proceedings of the Fifth BioCreative Challenge Evaluation Workshop. pages 299-304.",
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Cross-lingual word clusters for direct transfer of linguistic structure", |
|
"authors": [ |
|
{ |
|
"first": "Oscar", |
|
"middle": [], |
|
"last": "T\u00e4ckstr\u00f6m", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 2012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "477--487", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oscar T\u00e4ckstr\u00f6m, Ryan McDonald, and Jakob Uszkoreit. 2012. Cross-lingual word clusters for direct transfer of linguistic structure. In Proceedings of the 2012 Conference of the North American Chapter of the Association for Com- putational Linguistics: Human Language Technologies. Association for Compu- tational Linguistics, Stroudsburg, PA, USA, NAACL HLT '12, pages 477-487. http://dl.acm.org/citation.cfm?id=2382029.2382096.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Introduction to the conll-2000 shared task: Chunking", |
|
"authors": [ |
|
{
"first": "Erik",
"middle": [
"F."
],
"last": "Tjong Kim Sang",
"suffix": ""
},
|
{ |
|
"first": "Sabine", |
|
"middle": [], |
|
"last": "Buchholz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of the 2Nd Workshop on Learning Language in Logic and the 4th Conference on Computational Natural Language Learning", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "127--132", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1117601.1117631" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik F. Tjong Kim Sang and Sabine Buchholz. 2000. Introduction to the conll-2000 shared task: Chunking. In Proceedings of the 2Nd Workshop on Learning Language in Logic and the 4th Conference on Computational Natural Language Learning -Volume 7. Association for Computational Linguistics, Stroudsburg, PA, USA, ConLL '00, pages 127-132. https://doi.org/10.3115/1117601.1117631.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "Crosslingual and multilingual construction of syntax-based vector space models", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Utt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Pad\u00f3", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "245--258", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Utt and Sebastian Pad\u00f3. 2014. Crosslingual and multilingual construction of syntax-based vector space models. Transactions of the Association for Computational Linguistics 2:245-258. https://transacl.org/ojs/index.php/tacl/article/view/240.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "Shared task on aspect-based sentiment in social media customer feedback", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Wojatzki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eugen", |
|
"middle": [], |
|
"last": "Ruppert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Holschneider", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Torsten", |
|
"middle": [], |
|
"last": "Zesch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Biemann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the GermEval 2017 -Shared Task on Aspectbased Sentiment in Social Media Customer Feedback", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--12", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Wojatzki, Eugen Ruppert, Sarah Holschneider, Torsten Zesch, and Chris Bie- mann. 2017. Germeval 2017: Shared task on aspect-based sentiment in social media customer feedback. In Proceedings of the GermEval 2017 -Shared Task on Aspect- based Sentiment in Social Media Customer Feedback. Berlin, Germany, pages 1-12.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "A hybrid unsupervised method for aspect term and opinion target extraction. Knowledge-Based Systems", |
|
"authors": [ |
|
{ |
|
"first": "Chuhan", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fangzhao", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sixing", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhigang", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yongfeng", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "148", |
|
"issue": "", |
|
"pages": "66--73", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.knosys.2018.01.019" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chuhan Wu, Fangzhao Wu, Sixing Wu, Zhigang Yuan, and Yongfeng Huang. 2018. A hybrid unsupervised method for aspect term and opinion target extraction. Knowledge-Based Systems 148:66-73. https://doi.org/10.1016/j.knosys.2018.01.019.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "Double embeddings and cnnbased sequence labeling for aspect extraction", |
|
"authors": [ |
|
{ |
|
"first": "Hu", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Shu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hu Xu, Bing Liu, Lei Shu, and Philip S. Yu. 2018. Double embeddings and cnn- based sequence labeling for aspect extraction. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers).", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "Adversarial training for unsupervised bilingual lexicon induction", |
|
"authors": [ |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huanbo", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1959--1970", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1179" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Meng Zhang, Yang Liu, Huanbo Luan, and Maosong Sun. 2017. Adversarial training for unsupervised bilingual lexicon induction. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics, Vancouver, Canada, pages 1959-1970. https://doi.org/10.18653/v1/P17-1179.", |
|
"links": null |
|
}, |
|
"BIBREF57": { |
|
"ref_id": "b57", |
|
"title": "Learning phrase embeddings from paraphrases with grus", |
|
"authors": [ |
|
{ |
|
"first": "Zhihao", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lifu", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heng", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the First Workshop on Curation and Applications of Parallel and Comparable Corpora. Asian Federation of Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "16--23", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhihao Zhou, Lifu Huang, and Heng Ji. 2017. Learning phrase embeddings from para- phrases with grus. In Proceedings of the First Workshop on Curation and Applications of Parallel and Comparable Corpora. Asian Federation of Natural Language Process- ing, Taipei, Taiwan, pages 16-23. http://aclweb.org/anthology/W17-5603.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "Architecture of system components and passed data", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"text": "Multilingual workflow of the system", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"text": "Constituency parse tree", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"text": "10 \u22125 10 \u22124 10 \u22123 10 \u22125 10 \u22124 10 \u22123 10 \u22125 10 \u22124 10 \u2212", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF4": { |
|
"num": null, |
|
"text": "Performance of k-means for different k values. Straight lines: Homogeneity, dotted lines: Completeness k-means We tested clusterings from 8 to 50 clusters in steps of 3.", |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Results for different dropout and learning rate values in Finnish", |
|
"content": "<table/>" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Results for using different learning rates for English", |
|
"content": "<table><tr><td/><td/><td colspan=\"2\">Classifier</td><td/><td/><td colspan=\"2\">Filter</td><td/></tr><tr><td>Min correlation</td><td>0.40</td><td>0.45</td><td>0.50</td><td>0.55</td><td>0.40</td><td>0.45</td><td>0.50</td><td>0.55</td></tr><tr><td>Precision</td><td colspan=\"8\">0.300 0.301 0.299 0.286 0.373 0.373 0.390 0.508</td></tr><tr><td>Recall</td><td colspan=\"8\">0.725 0.712 0.685 0.685 0.473 0.414 0.331 0.241</td></tr><tr><td>F1</td><td colspan=\"8\">0.424 0.423 0.416 0.404 0.417 0.393 0.358 0.327</td></tr></table>" |
|
}, |
|
"TABREF4": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Results for using different correlation value cut-offs for English", |
|
"content": "<table/>" |
|
}, |
|
"TABREF7": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Results for experiments with different minimum aspect weights", |
|
"content": "<table/>" |
|
}, |
|
"TABREF8": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "shows a performance comparison between the three different methods and the baseline.", |
|
"content": "<table><tr><td/><td/><td>English</td><td/><td/><td/><td>Finnish</td><td/><td/></tr><tr><td>Method</td><td colspan=\"8\">Baseline Superv. Hybrid Unsup. Baseline Superv. Hybrid Unsup.</td></tr><tr><td>Precision</td><td>0.375</td><td>0.669</td><td>0.300</td><td>0.415</td><td>0.520</td><td>0.698</td><td>0.678</td><td>0.409</td></tr><tr><td>Recall</td><td>0.563</td><td>0.784</td><td>0.725</td><td>0.555</td><td>0.554</td><td>0.719</td><td>0.471</td><td>0.744</td></tr><tr><td>F1</td><td>0.450</td><td>0.722</td><td>0.424</td><td>0.473</td><td>0.537</td><td>0.707</td><td>0.556</td><td>0.528</td></tr></table>" |
|
}, |
|
"TABREF9": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Summary of the best results for all methods", |
|
"content": "<table/>" |
|
}, |
|
"TABREF12": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Best results for clustering with affinity propagation", |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |