|
{ |
|
"paper_id": "C12-1035", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:23:11.766991Z" |
|
}, |
|
"title": "A Semi-Supervised Bayesian Network Model for Microblog Topic Classification", |
|
"authors": [ |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "State Key Laboratory of Software Development Environment", |
|
"institution": "Beihang University", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China (" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Zhou", |
|
"middle": [], |
|
"last": "Jun Li", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "State Key Laboratory of Software Development Environment", |
|
"institution": "Beihang University", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China (" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Liqiang", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Ie", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National University of Singapore", |
|
"location": { |
|
"country": "Singapore (" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "X Ia", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Arizona State University", |
|
"location": { |
|
"country": "United States" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "X Iang Yu", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National University of Singapore", |
|
"location": { |
|
"country": "Singapore (" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Tat", |
|
"middle": [ |
|
"\u2212" |
|
], |
|
"last": "Seng Chua", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "National University of Singapore", |
|
"location": { |
|
"country": "Singapore (" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "X Iaoming", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "State Key Laboratory of Software Development Environment", |
|
"institution": "Beihang University", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China (" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Microblogging services have brought users to a new era of knowledge dissemination and information seeking. However, the large volume and multi-aspect of messages hinder the ability of users to conveniently locate the specific messages that they are interested in. While many researchers wish to employ traditional text classification approaches to effectively understand messages on microblogging services, the limited length of the messages prevents these approaches from being employed to their full potential. To tackle this problem, we propose a novel semi-supervised learning scheme to seamlessly integrate the external web resources to compensate for the limited message length. Our approach first trains a classifier based on the available labeled data as well as some auxiliary cues mined from the web, and probabilistically predicts the categories for all unlabeled data. It then trains a new classifier using the labels for all messages and the auxiliary cues, and iterates the process to convergence. Our approach not only greatly reduces the time-consuming and labor-intensive labeling process, but also deeply exploits the hidden information from unlabeled data and related text resources. We conducted extensive experiments on two real-world microblogging datasets. The results demonstrate the effectiveness of the proposed approaches which produce promising performance as compared to state-of-the-art methods.", |
|
"pdf_parse": { |
|
"paper_id": "C12-1035", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Microblogging services have brought users to a new era of knowledge dissemination and information seeking. However, the large volume and multi-aspect of messages hinder the ability of users to conveniently locate the specific messages that they are interested in. While many researchers wish to employ traditional text classification approaches to effectively understand messages on microblogging services, the limited length of the messages prevents these approaches from being employed to their full potential. To tackle this problem, we propose a novel semi-supervised learning scheme to seamlessly integrate the external web resources to compensate for the limited message length. Our approach first trains a classifier based on the available labeled data as well as some auxiliary cues mined from the web, and probabilistically predicts the categories for all unlabeled data. It then trains a new classifier using the labels for all messages and the auxiliary cues, and iterates the process to convergence. Our approach not only greatly reduces the time-consuming and labor-intensive labeling process, but also deeply exploits the hidden information from unlabeled data and related text resources. We conducted extensive experiments on two real-world microblogging datasets. The results demonstrate the effectiveness of the proposed approaches which produce promising performance as compared to state-of-the-art methods.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Microblogging services are becoming immensely popular in breaking-news disseminating, information sharing, and events participation. This enables users to express their thoughts and intentions in short textual snippets on a daily and even hourly basis. The most well-known one is Twitter (www.twitter.com), which has more than 140 million active users with 1 billion Tweets every 3 days 1 as of March 2012. Over time, a tremendous number of messages have been accumulated in their repositories, which greatly facilitate general users seeking information by querying their interested topics using the corresponding hashtag.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "However, users often have to browse through large amount of results in order to find the information of their interests. This is due to the ambiguous hashtag and the presentation style. The microblogging platforms mix search results in a ranked list, determined by their relevance to the corresponding hashtag and published time. Unfortunately, most hashtags are very short, ambiguous and even vague, leading to unsatisfactory search results. For example, the returned list for queried hashtag \"#apple\" is extremely messy and diversified, potentially covering several different sub-topics: smartphone, computer, fruit and so on. In this case, users can benefit from overviews of search results based on meaningful and structural categories, such as, grasping at a glance the spread of categories covered by a given search topic and quickly locating the information of their interests with the assistance of the labeled categories. This is especially important for mobile search through handheld devices such as smartphones.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Classifying microblogs into pre-defined subtopic-oriented classes poses new challenges due to the following reasons. First, unlike normal documents, these messages are typically short, consisting of no more than 140 characters. They thus do not provide sufficient word co-occurrences or shared contexts for effective similarity measure (Hu et al., 2009) . The data sparseness hinders general machine learning methods to achieve desirable accuracy. Second, microblogging messages are not well conformed as standard structures of documents. Sometimes they do not even obey grammatical rules (Hu and Liu, 2012b) . Third, microblogs lack label information. It is time and labor consuming to label the huge amounts of messages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 336, |
|
"end": 353, |
|
"text": "(Hu et al., 2009)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 589, |
|
"end": 608, |
|
"text": "(Hu and Liu, 2012b)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Intensive efforts have been made on the classification of short texts utilizing machine learning techniques (Nie et al., 2011) . Some representative research efforts are based on topic model (Ramage et al., 2009) (Zhao et al., 2011) . As these approaches heavily rely on the term co-occurrence information, the sparsity of short and informal messages unduly influence the significant improvement of the performance. Some others explore some traditional supervised learning methods to classify microblogging messages (Lee et al., 2011) (Zubiaga et al., 2011) (Sriram et al., 2010) (Tang et al., 2012) . The sparsity problem again hinders the similarity measurement. Moreover, it is laborious and time consuming to obtain labeled data from microblogging. Consequently, new approaches towards microblog classification are highly desired.", |
|
"cite_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 126, |
|
"text": "(Nie et al., 2011)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 191, |
|
"end": 212, |
|
"text": "(Ramage et al., 2009)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 213, |
|
"end": 232, |
|
"text": "(Zhao et al., 2011)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 516, |
|
"end": 534, |
|
"text": "(Lee et al., 2011)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 535, |
|
"end": 557, |
|
"text": "(Zubiaga et al., 2011)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 558, |
|
"end": 579, |
|
"text": "(Sriram et al., 2010)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 580, |
|
"end": 599, |
|
"text": "(Tang et al., 2012)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose a semi-supervised learning approach to the classification of microblogging messages. We aim to tackle three challenges in this paper. First, to handle the data sparseness problem, our approach submits a query that is related to hashtag and category to Google Search Engine; meanwhile it incorporates the external information provided by search engine results to enrich the short microblogs. Second, to alleviate negative effect brought by informal words in microblogging, we employ linguistic corpus to detect informal words in microblogging messages and correct them into formal expressions. Third, with the integration of hashtag related resources, our model is robust with only a small amount of training data, which greatly reduces the manually labeling costs. Our algorithm alternates between performing an E-step and M-step. Specifically, it first trains a classifier based on the available labeled messages as well as some auxiliary cues mined from the web, and probabilistically predicts the class labels of the unlabeled messages. It then trains a new classifier using all messages and the auxiliary cues, and iterates to convergence. We conduct experiments on the real-world datasets, and demonstrate that our proposed scheme yields significant accuracy in microblogging messages categorization.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The main contributions of this research can be summarized as follows,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 To the best of our knowledge, this work is the first attempt towards microblogs categorization using semi-supervised learning approach, which requires less labeled data and can thus be practically extended to large-scale datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Our approach incorporates external statistical knowledge to enrich the short microblogs, which greatly remedies the data sparseness issue.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Our approach adopts a category-word distribution analysis, which well addresses the broader phenomenon existed in microblogs: non-standard language presentation and abundant spelling errors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The reminder of this paper is organized as follows: we introduce the details of our proposed approach and experimental results in Section 2 and Section 3 respectively. In section 4, we briefly reviews of the related work, followed by concluding remarks in Section 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Before formulating our approach, we first define some notations. A set of messages is collected by a given hashtag t, which are partitioned into two subsets: a labeled set M l = {m 1 , m 2 , . . . , m L } and an unlabeled set M u = {m L+1 , m L+2 , . . . , m L+N }. M l includes only the example messages provided through user interaction, where each instance is associated with a predefined category c i with belonging to C = {c 1 , c 2 . . . c K }; while M u includes all the other messages. We aim to predict the category label for each data point in M u . Here we assume that each tweet belongs to only one category. Similar idea of assigning a single topic or category to a short sequence of words has been used before in (Diao et al., 2012) (Gruber et al., 2007) (Zhao et al., 2011) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 727, |
|
"end": 746, |
|
"text": "(Diao et al., 2012)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 747, |
|
"end": 768, |
|
"text": "(Gruber et al., 2007)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 769, |
|
"end": 788, |
|
"text": "(Zhao et al., 2011)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semi-Supervised Graphical Model for Microblogs Classification", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We now introduce the overview of the whole processing that aims to classify microblogging messages by exploiting the internal and external resources. The workflow consists of three phrases, as shown in Figure 1 . It includes the preprocessing of external resources, preprocessing of microblogging messages, and construction of Semi-Supervised Bayesian Network (SSBN) model. Figure 1 : The General Framework classified into K pre-defined categories. For a given hashtag t (for example, stock), we build K hashtag-category pairs (for example, stock Sports, stock Business, etc.), and consider each pair as a query to return 20 extended documents from Google Search Engine, denoted as S. Comparing with the way that only takes each hashtag as a query, the combination of hashtag and category can find more accurate documents. Next, we assign the tf.idf weight of each word for each category in S. We further use the google search results to estimate the category prior distribution.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 202, |
|
"end": 210, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 374, |
|
"end": 382, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "The General Framework", |
|
"sec_num": "2.1" |
|
}, |
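
{

"text": "As a concrete illustration of Phase 1, the following minimal Python sketch (ours, not the authors' code) builds the hashtag-category queries and the per-category tf-idf weights; search(query, n) is a hypothetical stand-in for the Google Search retrieval step, returning n documents as token lists.\n\nimport math\nfrom collections import Counter\n\ndef phase1_external_stats(hashtag, categories, search, n_docs=20):\n    # One 'hashtag category' query per category; retrieve n_docs documents each.\n    docs_per_cat = {c: search(hashtag + ' ' + c, n_docs) for c in categories}\n    # Term frequency of each word within each category's retrieved pool.\n    tf = {c: Counter(w for doc in docs for w in doc)\n          for c, docs in docs_per_cat.items()}\n    # Document frequency over all retrieved documents, for the idf part.\n    all_docs = [doc for docs in docs_per_cat.values() for doc in docs]\n    df = Counter(w for doc in all_docs for w in set(doc))\n    n = len(all_docs)\n    # tf-idf weight of each word for each category in S.\n    return {c: {w: f * math.log(n / df[w]) for w, f in tf[c].items()}\n            for c in categories}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The General Framework",

"sec_num": "2.1"

},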
|
{ |
|
"text": "Phrase 2: Preprocessing of Microblogging Messages It is worth noting that there is a large amount of misspelled and informal expressions in microblogging messages. This is different from the formal expressions and words used in Google Search results. To handle this mismatch problem, we first construct a microblog dictionary containing all the abbreviate forms of words used in Twitter from some dictionaries, such as Twitternary 2 , twitterforteachers 3 . The dictionary contains 727 words. Giving a microblogging message, we first use this dictionary to detect the informal words, then correct them to the formal words. In this way, we are also able to collect more words related to the predefined categories from the labeled messages to tackle the sparseness problem in microblogging messages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The General Framework", |
|
"sec_num": "2.1" |
|
}, |
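
{

"text": "A minimal sketch of the Phase 2 normalization, assuming the 727-entry microblog dictionary has been loaded as a Python dict (the example entries below are illustrative, not taken from the paper):\n\ndef normalize_message(tokens, informal_dict):\n    # Replace each informal/abbreviated token by its formal expansion,\n    # leaving unknown tokens untouched.\n    return [informal_dict.get(t.lower(), t) for t in tokens]\n\n# Illustrative usage:\n# informal_dict = {'u': 'you', 'gr8': 'great'}\n# normalize_message(['u', 'r', 'gr8'], informal_dict) -> ['you', 'r', 'great']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The General Framework",

"sec_num": "2.1"

},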
|
{

"text": "Phase 3: Construction of the SSBN Model. In order to fully integrate the hashtag-related resources and the unlabeled data into a classifier, we propose a semi-supervised Bayesian network model. The semi-supervised classifier offers a robust solution to microblog topic classification for two reasons. First, it utilizes the labeled microblogging messages with hashtags by training a topic-model-based classifier, which is then used to find the category (label) distribution of the unlabeled messages accurately. Second, it leverages the related external resources to provide valuable context for the microblogging messages. In this way, compared with supervised learning methods, we need only a small amount of labeled data for training. The details of the SSBN model construction are introduced in the next subsection.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The General Framework",

"sec_num": "2.1"

},

{

"text": "Table 1: Important notations used in this paper and their descriptions. \u03b8: the vector indicating category weights for the message data collection. \u03c6: the vector indicating category weights for a specific message. \u03b8\u2032, \u03c6\u2032: the |C| \u00d7 |N| matrices indicating the category-word distributions. \u03bb: the contribution of unlabeled data to the prior probability. \u03b1: the contribution of prior knowledge from \u03b8; 1 \u2212 \u03b1: the contribution of prior knowledge from \u03c6. \u03b2: the contribution of likelihood probability from \u03b8\u2032; 1 \u2212 \u03b2: the contribution of likelihood probability from \u03c6\u2032. \u03b7_d, \u03b7_g: hyperparameters and priors of Dirichlet distributions. C: the category vector. c_j: the j-th category. M: the message collection in the original message data. m: a message. N: the word collection in the original message data. t: the hashtag. w: a word. y: the category label of a message.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The General Framework",

"sec_num": "2.1"

},
|
{ |
|
"text": "The above formulations intuitively reflect that the category prediction task comprises two estimations: coarse-grained category distribution and fined-grained category-word distribution. It is schematically illustrated in Figure 2 , in which the corresponding notations are summarized in Table 1.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 222, |
|
"end": 230, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Probabilistic Graph Model Construction", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "1. Category distribution: There are two kinds of category distribution in the data. Let \u03b8 denotes the category distribution obtained from the original message M , which is a weight vector representing the weight for each category. Similarly, let \u03c6 denotes the category distribution for external resources obtained from the search results S. The category distribution for the total data D is assumed to be a linear combination of \u03b8 and \u03c6. Parameter \u03b1 is employed as the weight to adjust the contributions of different sources. In addition, the original message data also consists of labeled and unlabeled data; and \u03bb is used to denote the contribution of unlabeled data in generating the category distribution for M .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Graph Model Construction", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "2. Category-word distribution: The category-word distribution also has two parts: \u03b8 \u2032 denotes the distribution of different words over different categories in the original messages, which is a |C| \u00d7 |N | matrix. Here, |C| is the number of categories, and |N | is the number of words in the data. Similarly, \u03c6 \u2032 denotes the category-word distribution in the search results. The category-word distribution for data D is again assumed to be a linear combination of \u03b8 \u2032 and \u03c6 \u2032 , where parameter \u03b2 is employed as the weight to adjust the contributions of different sources.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Graph Model Construction", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Our semi-supervised Bayesian Network (SSBN) belongs to probabilistic graphical model, which formally denotes the probability of a message m falling into a category c as,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Graph Model Construction", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P(c|m) = P(c)P(m|c) c P(c)P(m|c)", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Probabilistic Graph Model Construction", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Category-Word Distribution Figure 2 : Probabilistic graphical representation of semi-supervised Bayesian network model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 35, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Probabilistic Graph Model Construction", |
|
"sec_num": "2.2" |
|
}, |
|
|
{ |
|
"text": "where P(c) is the prior probability of category in the message data collection. By assuming the presence of a word w is independent to the presence of any other word in m, we derive", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Probabilistic Graph Model Construction", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P(m|c) = w\u2208m P(w|c)", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Probabilistic Graph Model Construction", |
|
"sec_num": "2.2" |
|
}, |
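
{

"text": "A minimal sketch of Equations (1)-(2), computing P(c|m) in log space for numerical stability; prior[c] stands for P(c) and word_prob[c][w] for P(w|c), both assumed to be available:\n\nimport math\n\ndef posterior(message, categories, prior, word_prob):\n    # log P(c) + sum_w log P(w|c) for each category (Eqs. 1-2).\n    log_scores = {c: math.log(prior[c]) +\n                     sum(math.log(word_prob[c][w]) for w in message)\n                  for c in categories}\n    # Normalize via the log-sum-exp trick to obtain P(c|m).\n    mx = max(log_scores.values())\n    exp_scores = {c: math.exp(s - mx) for c, s in log_scores.items()}\n    z = sum(exp_scores.values())\n    return {c: v / z for c, v in exp_scores.items()}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Probabilistic Graph Model Construction",

"sec_num": "2.2"

},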
|
{ |
|
"text": "In this section, we turn our attention to procedures for parameter inference with EM approach. In the expectation step, the distributions \u03b8 , \u03c6,\u03b8 \u2032 w k c j and\u03c6 \u2032 w k c j , will be estimated. Besides the labeled data and external resource, the parameter estimations also make use of the unlabeled data. Initially we assign category labels to unlabeled data with an uniform distribution, i.e., the probability is 1 |C| for each category. In the following iterations, labels of unlabeled data and SSBN model are alternatively updated and reinforced until convergence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
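
{

"text": "The alternation described above can be sketched as follows (a schematic EM loop under our notation, not a reference implementation); e_step returns P(c|m) for an unlabeled message under the current parameters, and m_step re-estimates all SSBN parameters from the labeled data plus the soft-labeled unlabeled data:\n\ndef em_train(labeled, unlabeled, categories, e_step, m_step,\n             max_iter=50, tol=1e-4):\n    # Initialization: uniform label distribution over the unlabeled data.\n    soft = [{c: 1.0 / len(categories) for c in categories} for _ in unlabeled]\n    params = m_step(labeled, list(zip(unlabeled, soft)))\n    for _ in range(max_iter):\n        # E-step: probabilistically relabel the unlabeled messages.\n        new_soft = [e_step(m, params) for m in unlabeled]\n        change = max(abs(new_soft[i][c] - soft[i][c])\n                     for i in range(len(unlabeled)) for c in categories)\n        soft = new_soft\n        # M-step: retrain the classifier on all messages.\n        params = m_step(labeled, list(zip(unlabeled, soft)))\n        if change < tol:  # stop once the soft labels stabilize\n            break\n    return params, soft",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Parameter Inference",

"sec_num": "2.3"

},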
|
{ |
|
"text": "Estimating \u03b8 : \u03b8 represents the probability of each category in the original message data collection. It is proportional to the expected number of messages that was assigned to this category.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b8 c j \u2261 P(c j |\u03b8 ) = 1 + |M | i=1 \u039b(i)P( y i = c j |m i ) |C| + |M l | + \u03bb|M u |", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "As aforementioned in section 2.2, the message data collection consists of labeled messages M l and unlabeled messages M u . They have different contribution to the category probability estimation. The function \u039b(i), defined as in equation 4, is employed to achieve that goal. The parameter \u03bb", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2208 [0, 1]. \u039b(i) = \u03bb if m i \u2208 M u ; 1 if m i \u2208 M l .", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
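
{

"text": "A direct transcription of Equations (3)-(4), as a sketch under our notation:\n\ndef estimate_theta(labeled, unlabeled_soft, categories, lam):\n    # Numerator of Eq. (3): 1 (smoothing) plus Lambda(i)-weighted\n    # expected category counts; Lambda(i) = 1 for labeled messages\n    # and lambda for unlabeled ones (Eq. 4).\n    counts = {c: 1.0 for c in categories}\n    for _message, label in labeled:\n        counts[label] += 1.0\n    for post in unlabeled_soft:  # post[c] = P(y_i = c | m_i)\n        for c, p in post.items():\n            counts[c] += lam * p\n    denom = len(categories) + len(labeled) + lam * len(unlabeled_soft)\n    return {c: v / denom for c, v in counts.items()}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Parameter Inference",

"sec_num": "2.3"

},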
|
{ |
|
"text": "Estimating \u03c6: \u03c6 denotes the prior category probability distributes over the Google Search results. In this paper, the prior probability of category c j for a hashtag t completely depends on the relationship between the corresponding hashtag t and the predefined category names,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "\u03c6 c j \u2261 P(c j |\u03c6) = 1 N GD(t,c j ) + \u00b5 |C| j=1 1 N GD(t,c j ) + |C|\u00b5 (5)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "where \u00b5 is a smoothing factor and N G D(t, c j ) is the Normalized Google Distance 4 , which is employed to calculate distance between the tag t and the category c j . It can be observed that a smaller value of N G D leads to more contribution of c j for the specific message.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
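
{

"text": "Equation (5) can be transcribed as the following sketch; ngd[c] is assumed to hold the precomputed NGD(t, c) values, and eps implements the footnote's small constant guarding against a zero distance (the concrete values of mu and eps are illustrative):\n\ndef estimate_phi(ngd, categories, mu=0.01, eps=1e-6):\n    # Inverse distances: a smaller NGD yields a larger contribution.\n    inv = {c: 1.0 / (ngd[c] + eps) for c in categories}\n    denom = sum(inv.values()) + len(categories) * mu\n    # Smoothed, normalized prior of Eq. (5).\n    return {c: (inv[c] + mu) / denom for c in categories}",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Parameter Inference",

"sec_num": "2.3"

},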
|
{ |
|
"text": "Estimating \u03b8 \u2032 and \u03c6 \u2032 : \u03b8 \u2032 and \u03c6 \u2032 respectively denote the category-word distributions over original message collection and Google Search results. Both of them are |C| \u00d7 |N | matrices. They can be estimated using the following formulas:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b8 \u2032 w k c j \u2261 P(w k |c j ,\u03b8 \u2032 ) = n d w k c j + \u03b7 d |N | p \u2032 =1 n d w p \u2032 c j + |N |\u03b7 d (6) \u03c6 \u2032 w k c j \u2261 P(w k |c j ,\u03c6 \u2032 ) = n g w k c j + \u03b7 g |N | q \u2032 =1 n g w q \u2032 c j + |N |\u03b7 g", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "n d w k c j and n g w k c j", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "are respectively the number of times that the word w k has occurred in the category c j in message data collection and Google Search results (retrieved by the combination of hashtag t and the name of the j-th category). \u03b7 d and \u03b7 g are hyperparameters with a small value for smoothing purpose to avoid the zero problem.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
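
{

"text": "Since Equations (6) and (7) share one functional form, a single sketch serves both, called once with the message-side counts and eta_d for theta' and once with the search-result counts and eta_g for phi':\n\ndef estimate_category_word(counts, vocab, categories, eta):\n    # counts[c][w]: (possibly fractional) count of word w in category c.\n    dist = {}\n    for c in categories:\n        total = sum(counts[c].get(w, 0.0) for w in vocab)\n        denom = total + len(vocab) * eta  # |N| * eta smoothing term\n        dist[c] = {w: (counts[c].get(w, 0.0) + eta) / denom for w in vocab}\n    return dist",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Parameter Inference",

"sec_num": "2.3"

},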
|
{ |
|
"text": "The maximum likelihood category label for a given message m i is,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "y i = arg max c j P(c j |m i ,\u03b8 ,\u03c6,\u03b8 \u2032 ,\u03c6 \u2032 ) = P(c j |\u03b8 ,\u03c6,\u03b8 \u2032 ,\u03c6 \u2032 )P(m i |c j ,\u03b8 ,\u03c6,\u03b8 \u2032 ,\u03c6 \u2032 ) P(m i |\u03b8 ,\u03c6,\u03b8 \u2032 ,\u03c6 \u2032 )", |
|
"eq_num": "(8)" |
|
} |
|
], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "where P(m i |\u03b8 ,\u03c6,\u03b8 \u2032 ,\u03c6 \u2032 ) is formally written as follows,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P(m i |\u03b8 ,\u03c6,\u03b8 \u2032 ,\u03c6 \u2032 ) = c j P(c j |\u03b8 ,\u03c6,\u03b8 \u2032 ,\u03c6 \u2032 )P(m i |c j ,\u03b8 ,\u03c6,\u03b8 \u2032 ,\u03c6 \u2032 )", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "where the prior probability for category c j is obtained by linearly fusing two estimations on two resources,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P(c j |\u03b8 ,\u03c6,\u03b8 \u2032 ,\u03c6 \u2032 ) = P(c j |\u03b8 ,\u03c6) =\u03b1P(c j |\u03b8 ) + (1 \u2212\u03b1)P(c j |\u03c6)", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "where \u03b1 is a trade-off parameter to balance the contributions between two kinds of category distribution. The maximum likelihood probability for the each message m i can be derived as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P(m i |c j ,\u03b8 ,\u03c6,\u03b8 \u2032 ,\u03c6 \u2032 ) = P(m i |c j ,\u03b8 \u2032 ,\u03c6 \u2032 ) = |m i | k=1 P(w k |c j ,\u03b8 \u2032 ,\u03c6 \u2032 ) = |m i | k=1 {\u03b2 P(w k |c j ,\u03b8 \u2032 ) + (1 \u2212 \u03b2)P(w k |c j ,\u03c6 \u2032 )}", |
|
"eq_num": "(11)" |
|
} |
|
], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Similar to \u03b1, \u03b2 is tuned to control the contribution between the the category-word distribution over two different resources.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parameter Inference", |
|
"sec_num": "2.3" |
|
}, |
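
{

"text": "Putting Equations (8)-(11) together, prediction reduces to the following sketch; the 1e-12 floor for unseen words is an illustrative guard, not part of the model:\n\nimport math\n\ndef predict(message, categories, theta, phi, theta_w, phi_w, alpha, beta):\n    def log_score(c):\n        # Eq. (10): alpha-weighted fusion of the two category priors.\n        s = math.log(alpha * theta[c] + (1 - alpha) * phi[c])\n        for w in message:\n            # Eq. (11): beta-weighted fusion of the category-word models.\n            s += math.log(beta * theta_w[c].get(w, 1e-12)\n                          + (1 - beta) * phi_w[c].get(w, 1e-12))\n        return s\n    # Eq. (8): the denominator of Eq. (9) is constant in c_j,\n    # so the argmax over log scores suffices.\n    return max(categories, key=log_score)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Parameter Inference",

"sec_num": "2.3"

},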
|
{ |
|
"text": "In this section, we first evaluate our proposed model on two real-world datasets, utilizing a range of popular metrics. We then compare our model with the state-of-the-art text classification approaches on microblogs. Also, we study the sensitivity of the training dataset size, convergence analysis followed by the impact analysis on the parameters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In our experiments, two large-scale real-world datasets were constructed:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 Twitter: The Twitter dataset was generated from Trec-Twitter2011 5 . First, we collected 10 hot topics from Google Trends 6 , including NBA, Apple, facebook, etc. For each topic, we manually selected several low-level sub-topics and combined each of them with the highlevel topic. Take the topic \"Apple\" as an example. We extended it with \"Apple stock\", \"Apple ipad\", etc. We manually determine which category the sub-topics belong to. For example, \"stock\" is classified to Business, while \"ipad\" is assigned to science. These pairs are naturally viewed as queries. Then the Twitter dataset was constructed by retrieving all the related messages from Trec-Twitter2011 based on these queries. To validate the robustness of our proposed model on partially noisy data, we deliberately did not provide ground truth for this dataset. Instead, the returned messages under a query are directly considered as belongings to the same category as the sub-topic. The Twitter dataset is in this way labeled semi-automatically based on sub-topics. The ground truth is so-called pseudo ground truth. For example, all the messages searched by \"Apple stock\" are regarded as business category. \u2022 Sina Weibo: Based on selected trending topics of Sina Weibo, we crawled a collection of messages. And then manually assigned each messages into one of 7 predefined categories: sports, politics, science&tech, game, movie, music and others. The messages fallen into \"others\" are removed; and up to 15, 811 unique messages were remained. To build the ground truth, we adopted a manual labeling procedure. We divided 15 people with different background into 3 teams to manually label these messages. Every team labeled the complete dataset. The voting method was employed to combine the label results from different teams. For each message, only one category label with the majority voting was selected as the ground truth label. For the cases that a message received three different categories, a discussion was carried out among the labelers to decide the final ground truths.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "3.1" |
|
}, |
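
{

"text": "The voting step of the Sina Weibo labeling protocol can be sketched as follows (schematic, assuming one label per team per message):\n\nfrom collections import Counter\n\ndef majority_label(team_labels):\n    # team_labels: the three team labels for one message.\n    label, freq = Counter(team_labels).most_common(1)[0]\n    # A majority (at least 2 of 3) decides; three distinct labels\n    # are returned as None and resolved by discussion.\n    return label if freq >= 2 else None",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Settings",

"sec_num": "3.1"

},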
|
{ |
|
"text": "The distributions of different categories over two datasets are displayed in Table 2 . For each dataset, we devise 4 test configurations with different amount of training data: 5%, 20%, 50% and 90% for training respectively, and use the corresponding reminders for testing. The training data is randomly selected.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 77, |
|
"end": 84, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In this work, we utilize several widely-used performance metrics to evaluate our classification task: average accuracy, precision, recall, and F 1 score (Sokolova and Lapalme, 2009) (Rosa et al., 2011) . Average accuracy evaluates the average effectiveness for each category of a classifier. Precision is the fraction of retrieved messages that are relevant to the search, while recall is the percentage of the relevant messages that are successfully retrieved, and F1 measure combines both of recall and precision. Table 3 : Performance of SSBN model on two datasets with 5% training data and 95% testing data, respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 181, |
|
"text": "(Sokolova and Lapalme, 2009)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 182, |
|
"end": 201, |
|
"text": "(Rosa et al., 2011)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 516, |
|
"end": 523, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Settings", |
|
"sec_num": "3.1" |
|
}, |
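
{

"text": "For concreteness, the macro-averaged metrics used here can be computed as in the following sketch (standard definitions, not code from the paper):\n\ndef macro_prf(gold, pred, categories):\n    # Per-category precision/recall/F1, then an unweighted average.\n    ps, rs, fs = [], [], []\n    for c in categories:\n        tp = sum(1 for g, p in zip(gold, pred) if g == c and p == c)\n        fp = sum(1 for g, p in zip(gold, pred) if g != c and p == c)\n        fn = sum(1 for g, p in zip(gold, pred) if g == c and p != c)\n        prec = tp / (tp + fp) if tp + fp else 0.0\n        rec = tp / (tp + fn) if tp + fn else 0.0\n        f1 = 2 * prec * rec / (prec + rec) if prec + rec else 0.0\n        ps.append(prec); rs.append(rec); fs.append(f1)\n    n = len(categories)\n    return sum(ps) / n, sum(rs) / n, sum(fs) / n",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experimental Settings",

"sec_num": "3.1"

},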
|
{ |
|
"text": "We first conducted experiment to evaluate the effectiveness of our proposed SSBN model on two datasets. Table 3 displays the average performance in terms of different metrics. Here the parameters are set as \u03b1 = 0.5, \u03b2 = 0.9, \u03bb = 0.4 for Twitter and \u03b1 = 0.9, \u03b2 = 0.9, \u03bb = 0.3 for Sina Weibo, respectively. The parameters selection will be introduced later.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 111, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "On Classification Performance Analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "It is observed that our proposed scheme achieves promising precision, recall and F 1 scores despite of limited availability of labeled data. For twitter dataset, most of the categories achieve precision score higher than 0.85, and the best precision score is up to 0.93 (sports). Half of the categories obtain good results in terms of recall and F1, higher than 0.94 and 0.83, respectively. Our approach yields significant performance over the dataset with pseudo ground truths. This demonstrates the robustness of our method to noisy data. When it comes to Sina Weibo, all the categories achieve remarkable performance of greater than 0.80 across all evaluating metrics. This observation verifies that our method is more stable in less training data. However, our method fails for certain categories such as the Business and Education categories in Twitter dataset. This poor performance mainly comes from the unreliable pseudo ground truths. \"Business\" and \"Education\" frequently broaden to various sub-topics. Therefore, the messages retrieved by these types of queries are not internal coherent, at least not as strong as others' categories, even they are assumed to belong to the same category. The unreliable pseudo ground truths bring unpredictable noise to our model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "On Classification Performance Analysis", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To demonstrate the effectiveness of our proposed approach, we compare it against the following the state-of-the-art classifying methods (Phyu, 2009) (Kotsiantis, 2007) : \u2022 SVM (Cortes and Vapnik, 1995) is a supervised learning method. In our experiment, we use an open source package LIBSVM 7 with linear kernel function as baseline. \u2022 Naive Bayesian (NB) is a simple probabilistic classifier by applying Bayesian theorem with strong independence assumptions. We use a multi-nomial naive bayesian classifier in our experiment (Yang and Pederson, 1997) . \u2022 K Nearest Neighbors (KNN) clusters objects based on the closest training examples in the feature space (Creecy et al., 1992) . An unlabeled message is assigning the label which is most frequent among the K training samples nearest to the message. \u2022 Rocchio (Schapire et al., 1998) is a variant of the Vector Space Model. The average of the relevant documents is viewed as the centroid of the \"class\". \u2022 Labeled LDA (L-LDA) incorporates supervision by constraining LDA model to use only those topics that correspond to an observed label set (Ramage et al., 2009) . \u2022 Transductive SVM (Trans-SVM) is a semi-supervised SVM method. We extend the binary Transductive SVM in svm-light (Joachims, 1999) to multi-class classifier by incorporating one-against-all strategy. \u2022 Semi-Naive Bayesian classifiers (Semi-NB) is a famous semi-supervised text classification method (Nigam et al., 2000) . We employ it by using only unlabeled microblogging messages as a prior.", |
|
"cite_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 148, |
|
"text": "(Phyu, 2009)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 167, |
|
"text": "(Kotsiantis, 2007)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 176, |
|
"end": 201, |
|
"text": "(Cortes and Vapnik, 1995)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 526, |
|
"end": 551, |
|
"text": "(Yang and Pederson, 1997)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 659, |
|
"end": 680, |
|
"text": "(Creecy et al., 1992)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 813, |
|
"end": 836, |
|
"text": "(Schapire et al., 1998)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1096, |
|
"end": 1117, |
|
"text": "(Ramage et al., 2009)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1235, |
|
"end": 1251, |
|
"text": "(Joachims, 1999)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1420, |
|
"end": 1440, |
|
"text": "(Nigam et al., 2000)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "On Classification Performance Comparison", |
|
"sec_num": "3.3" |
|
}, |
|
|
{ |
|
"text": "For each aforementioned approaches, the involved parameters are carefully tuned, and the parameters with best performance are used to report the final comparison results. In addition, the same underlying features are utilized for approaches learning. To be fair, our proposed SSBN model was trained with up to 90% data compared with supervised methods, while only 5% training data when compared with semi-supervised approaches. Here, the values of the parameters in SSBN model are set as \u03b1 = 0.5, \u03b2 = 0.9, \u03bb = 0.4 for Twitter dataset and \u03b1 = 0.9, \u03b2 = 0.9, \u03bb = 0.3 for Sina Weibo dataset. The comparison results with supervised methods on two datasets are illustrated in Table 4 and Table 5 , respectively. It is observed from the tables that our proposed model in general performs better than SVM, NB and L-LDA, and remarkably better than KNN and Rocchio. Even the performance of our method for M acr oP, M acr oR and M acr oF 1 on Twitter and M acr oP on Sina Weibo does not achieve the best results, they are still comparable and convincing. Table 6 and Table 7 respectively display the comparison results with semi-supervised methods on two datasets, using 5% as training data. It can be observed that our proposed approach are consistently and significantly better than the current publicly disclosed the state-of-the-art semi-supervised algorithms, across various evaluating metrics. This comprehensive improvements are due to the facts that the integrated external knowledge enriches the message representation and the leveraging intrinsic information detected from abundant unlabeled data enhances the prediction accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 670, |
|
"end": 677, |
|
"text": "Table 4", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 682, |
|
"end": 689, |
|
"text": "Table 5", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 1044, |
|
"end": 1064, |
|
"text": "Table 6 and Table 7", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "On Classification Performance Comparison", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In this section, we conduct experiments to investigate the influence of training data size on the overall performance. We progressively increase the size of training corpus at step size of 10%. The experimental results on Twitter and Sina Weibo are respectively illustrated in Figures 3a and 3b . It is observed that the overall trend is upwards along with increasing training set. This is coherent and consistent with our common sense. Also, it is observed that a smaller training set size still produces a robust model on less noisy dataset, with greater than 87% on Sina Weibo.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 277, |
|
"end": 294, |
|
"text": "Figures 3a and 3b", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "On the Sensitivity of Training Data Size and Convergence Analysis", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "Perplexity, which is widely used in the topic modeling fields to analyze the convergence of a model (Blei et al., 2003) (Zhao et al., 2010) . We do perplexity comparison of SSBN and L-LDA on the testing data when parameters in SSBN model are set as \u03b1 = 0.5, \u03b2 = 0.9, \u03bb = 0.4 for Twitter and \u03b1 = 0.9, \u03b2 = 0.9, \u03bb = 0.3 for Sina Weibo dataset. Compared with L-LDA model, SSBN model has a lower perplexity value, which means that the words are less surprising to SSBN model, and SSBN model has a powerful predication than L-LDA model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 100, |
|
"end": 119, |
|
"text": "(Blei et al., 2003)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 120, |
|
"end": 139, |
|
"text": "(Zhao et al., 2010)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "On the Sensitivity of Training Data Size and Convergence Analysis", |
|
"sec_num": "3.4" |
|
}, |
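
{

"text": "As a reminder of the measure, per-word perplexity can be computed as in this sketch; log_prob(m) is assumed to return a model's log-likelihood of message m:\n\nimport math\n\ndef perplexity(messages, log_prob):\n    # exp of the negative average per-word log-likelihood:\n    # lower values mean the held-out words are less surprising.\n    total_ll = sum(log_prob(m) for m in messages)\n    n_words = sum(len(m) for m in messages)\n    return math.exp(-total_ll / n_words)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "On the Sensitivity of Training Data Size and Convergence Analysis",

"sec_num": "3.4"

},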
|
{ |
|
"text": "Parameters of \u03b1, \u03b2 and \u03bb are important in our method. In this subsection, we further conduct experiments to study the effect of these parameters. A grid search is performed to select the optimal parameter values. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "On the Sensitivity of Parameters", |
|
"sec_num": "3.5" |
|
}, |
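
{

"text": "The grid search over \u03b1, \u03b2, and \u03bb can be sketched as follows; the 0.1 step and the train/eval callbacks are illustrative assumptions, not details given in the paper:\n\nimport itertools\n\ndef grid_search(train_fn, eval_fn, step=0.1):\n    # train_fn(alpha, beta, lam) trains an SSBN model;\n    # eval_fn(model) returns average accuracy on held-out data.\n    grid = [round(i * step, 2) for i in range(int(1 / step) + 1)]\n    best, best_acc = None, -1.0\n    for a, b, l in itertools.product(grid, grid, grid):\n        acc = eval_fn(train_fn(a, b, l))\n        if acc > best_acc:\n            best, best_acc = (a, b, l), acc\n    return best, best_acc",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "On the Sensitivity of Parameters",

"sec_num": "3.5"

},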
|
{ |
|
"text": "The trade-off parameter \u03b1 is used to balance the effects of two kinds of prior knowledge at category level: microblogging data collection and external resources. A larger \u03b1 indicates that more information is preserved from our data collection into the category distribution. A smaller \u03b1 means that the cues mined from external resources play a dominant role in our model. Figure 4 illustrates the average performance with various \u03b1 and training collection size on two different datasets. It is observed that the performance increases with the gradual increase of \u03b1, and arrives at a peak at certain \u03b1, then the performance decreases. This result reflects that an optimal performance comes from an appropriate combination of external and internal resources, rather than pure individual knowledge. Also it verifies that the incorporation of Google resources has been proven useful. Empirical optimal value of \u03b1 is within [0.5, 1].", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 372, |
|
"end": 380, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Effect of Parameter \u03b1", |
|
"sec_num": "3.5.1" |
|
}, |
|
{ |
|
"text": "There are two category-word distributions, \u03b8 \u2032 and \u03c6 \u2032 , which are respectively generated from our data collection and google search results; and parameter \u03b2 is utilized to adjust the contribution between these two different resources in category-word level. Larger \u03b2 implies larger likelihood a word is generated from \u03b8 \u2032 . The effects of parameter \u03b2 on Twitter and Sina Weibo are shown in Figure 5 . It is clearly observed that larger values of \u03b2 frequently lead to higher accuracies with different training set sizes, and the accuracy reaches peak value when \u03b2 locates at 0.9. However, when \u03b2 trends to 1, the performance slightly decreases. Empirical optimal value of \u03b2 is within [0.5, 1]. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 391, |
|
"end": 399, |
|
"text": "Figure 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Effect of Parameter \u03b2", |
|
"sec_num": "3.5.2" |
|
}, |
|
{ |
|
"text": "\u03bb indicates the contribution from unlabeled data points, between 0 and 1. When \u03bb is close to 1, knowledge from unlabeled data is considered as important as labeled data. On the other hand, when \u03bb at near-zero value, our model approaches a supervised learning algorithm. The results are illustrated in Figure 6 , from which we observe some insights: (1) varying \u03bb has little impact on average accuracy for a large training set, such as 50 percent as training set, especially for 90 percent as training set;", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 301, |
|
"end": 309, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Effect of Parameter \u03bb", |
|
"sec_num": "3.5.3" |
|
}, |
|
{ |
|
"text": "(2) the best accuracy occurs at \u03bb = 0.4 and \u03bb = 0.3 respectively for Twitter and Sina Weibo, and then drops down quickly, which illustrates unlabeled data could give some feedback to improve classification performance. Empirical optimal value of \u03bb is within [0.3, 0.5].", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Effect of Parameter \u03bb", |
|
"sec_num": "3.5.3" |
|
}, |
|
{ |
|
"text": "The task of topic classification of microblogging messages is to assign the pre-defined class labels to unlabeled messages given a collection of messages. It has been demonstrated to be a fundamental task for many applications, such as query disambiguation (Teevan et al., 2011) , location prediction (Gao et al., 2012) and hot topic tracking (Weng and Lee, 2011) , etc. To the best of our knowledge, our work is the first attempt to utilize semi-supervised learning methods to classify microblogging messages. There are, however, several lines of related work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 257, |
|
"end": 278, |
|
"text": "(Teevan et al., 2011)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 301, |
|
"end": 319, |
|
"text": "(Gao et al., 2012)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 343, |
|
"end": 363, |
|
"text": "(Weng and Lee, 2011)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The significance of topic models has been exploited in microblog clustering and classification. A representative work was proposed in 2010 (Hong and Davison, 2010) , where latent dirichlet allocation (LDA) (Blei et al., 2003) and author-topic model (Rosen-Zvi et al., 2010) were deeply investigated to automatically find hidden topic structures on Twitter. Following that, Zhao et al. (2011) performed content analysis through Twitter-LDA modeling on a Twitter corpus collected within a three month span. Several variants of LDA to incorporate supervision have been proposed by Ramage et al. (2009 Ramage et al. ( , 2010 , and have been shown to be competitive with strong baselines in the microblogging environment. Although these LDA-based topic model greatly save cognitive and physical effort required from user interaction, their performances are usually not very satisfactory. The main reason is due to the sparsity of short informal messages that makes similarity comparison difficult. Different from previous models, we employed a two-step pre-processing: detecting informal words using dictionary and correcting the words into formal ones. This helps to alleviate the negative effects brought by short message sparsity to some extent. Lee et al. (2011) classified tweets into pre-defined categories such as sports, technology, politics, etc. Instead of topic models, they constructed word vectors with tf-idf weights and utilized a Naive Bayesian Multinomial classifier to classify tweets. Further, Support Vector Machines achieved good performance to classify Twitter messages, as reported by Zubiaga et al. (2011) . Sriram et al. (2010) proposed to use a small set of domain-specific features extracted from the author's profile and text to represent short messages. Their method, however, requires extensive pre-processing to conduct effectively feature analysis, which was impractical to as a general solution for classification of microblogging messages. The performance improvement of the supervised methods mainly depend on a large scale of labeled training data, which is laborious and time consuming. Further, the sparsity problem hinders significant performance improvement. To break the current impasse between annotation cost and effectiveness, we proposed to utilize semi-supervised learning methods. We trained a semi-supervised classifier by using the large amount of unlabeled data, together with labeled data. In addition, our work is novel in that we mined the information cues from Google Search Engine and seamlessly fused them with informal microblogging messages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 139, |
|
"end": 163, |
|
"text": "(Hong and Davison, 2010)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 206, |
|
"end": 225, |
|
"text": "(Blei et al., 2003)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 249, |
|
"end": 273, |
|
"text": "(Rosen-Zvi et al., 2010)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 373, |
|
"end": 391, |
|
"text": "Zhao et al. (2011)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 578, |
|
"end": 597, |
|
"text": "Ramage et al. (2009", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 598, |
|
"end": 620, |
|
"text": "Ramage et al. ( , 2010", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 1244, |
|
"end": 1261, |
|
"text": "Lee et al. (2011)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1603, |
|
"end": 1624, |
|
"text": "Zubiaga et al. (2011)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 1627, |
|
"end": 1647, |
|
"text": "Sriram et al. (2010)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In this paper, we proposed a novel scheme to classify microblogging messages, which addresses three concerns in microblog classifications. First, the incorporation of external resources to supplement the short microblogs well compensates the data sparseness issue. Second, the semi-supervised classifier seamlessly fuse labeled data structure and external resources into the training process, which reduced the requirement for manually labeling to a certain degree. Third, we model the category probability of a given message based on the category-word distribution, and this successfully avoided the difficulty brought about by the spelling errors that are common in microblogging messages. We proposed a semi-supervised learning approach to classify microblogging messages, and the experimental results demonstrated its effectiveness as compared to existing the state-of-the-art methods, as well as practically extension to large-scale dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "This work suggests some interesting directions for further exploration. It is interesting to explore whether: (1) the incorporation of social network structure can improve the performance of microblogging classification (Hu and Liu, 2012a) ; (2) the use of external resources such as Wikipedia and WordNet might be valuable for understanding microblogging messages; and (3) the provision of category summarization can help to organize microblogging messages.", |
|
"cite_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 239, |
|
"text": "(Hu and Liu, 2012a)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "http://blog.twitter.com/2012/03/twitter-turns-six.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.twittonary.com/ 3 http://twitterforteachers.wetpaint.com/page/Twitter+Dictionary", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://en.wikipedia.org/wiki/Normalized_Google_distance, here in case of N G D not equal to zero, we add a small constant closing to zero.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://trec.nist.gov/data/tweets/ 6 http://www.google.com/trends/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.csie.ntu.edu.tw/~cjlin/libsvm/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This research was supported by the National Natural Science Foundation of China (grant number 61170189, 60973105), the National Natural Science Fund for Young Scholar (grant number 61202239), the Research Fund for the Doctoral Program of Higher Education (grant number 20111102130003), and the Fund of the State Key Laboratory of Software Development Environment (grant number SKLSDE-2011ZX-03),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Latent dirichlet allocation", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Blei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{

"first": "M",

"middle": [

"I"

],

"last": "Jordan",

"suffix": ""

}
|
], |
|
"year": 2003, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "993--1022", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Blei, D. M., Ng, A. Y., and Jordan, M. I. (2003). Latent dirichlet allocation. Journal of Machine Learning Research, 3:993-1022.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Support-vector networks", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Cortes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Vapnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Machine Learning", |
|
"volume": "20", |
|
"issue": "", |
|
"pages": "273--297", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cortes, C. and Vapnik, V. (1995). Support-vector networks. Machine Learning, 20:273-297.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Trading mips and memory for knowledge engineering", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Creecy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Masand", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Waltz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "Communication of the ACM", |
|
"volume": "35", |
|
"issue": "", |
|
"pages": "48--64", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Creecy, R. H., Masand, B. M., Smith, S. J., and Waltz, D. L. (1992). Trading mips and memory for knowledge engineering. In Communication of the ACM, volume 35, pages 48-64.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Finding bursty topics from microblogs", |
|
"authors": [ |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Diao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E.-P", |
|
"middle": [], |
|
"last": "Lim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diao, Q., Jiang, J., Zhu, F., and Lim, E.-P. (2012). Finding bursty topics from microblogs. In Proceedings of Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Exploring social-historical ties on location-based social networks", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of International AAAI Conference on Weblogs and Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gao, H., Tang, J., and Liu, H. (2012). Exploring social-historical ties on location-based social networks. In Proceedings of International AAAI Conference on Weblogs and Social Media.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Hidden topic markov model", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Gruber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Rosen-Zvi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Weiss", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of International Conference on Artificial Intelligence and Statistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gruber, A., Rosen-Zvi, M., and Weiss, Y. (2007). Hidden topic markov model. In Proceedings of International Conference on Artificial Intelligence and Statistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Empirical study of topic modeling in twitter", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Hong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Davison", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of KDD Workshop on Social Media Analytics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hong, L. and Davison, B. D. (2010). Empirical study of topic modeling in twitter. In Proceedings of KDD Workshop on Social Media Analytics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Social status and role analysis of palin's email network", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the international conference companion on World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hu, X. and Liu, H. (2012a). Social status and role analysis of palin's email network. In Proceed- ings of the international conference companion on World Wide Web.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Text analytics in social media. Mining Text Data", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "385--414", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hu, X. and Liu, H. (2012b). Text analytics in social media. Mining Text Data, pages 385-414.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Exploiting internal and external semantics for the clustering of short texts using world knowledge", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T.-S", |
|
"middle": [], |
|
"last": "Chua", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the ACM conference on Information and knowledge management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hu, X., Sun, N., Zhang, C., and Chua, T.-S. (2009). Exploiting internal and external semantics for the clustering of short texts using world knowledge. In Proceedings of the ACM conference on Information and knowledge management.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Transductive inference for text classification using support vector machines", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Joachims", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proceedings of International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joachims, T. (1999). Transductive inference for text classification using support vector machines. In Proceedings of International Conference on Machine Learning.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Supervised machine learning: A review of classification techniques", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Kotsiantis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Informatica", |
|
"volume": "31", |
|
"issue": "", |
|
"pages": "249--268", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kotsiantis, S. B. (2007). Supervised machine learning: A review of classification techniques. Informatica, 31:249-268.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Twitter trending topic classification", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Proceedings of ICDM Workshop on Optimization Based Methods for Emerging Data Mining Problems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Twitter trending topic classification. In Proceedings of ICDM Workshop on Optimization Based Methods for Emerging Data Mining Problems.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Multimedia answering: enriching text qa with media information", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Zha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T.-S", |
|
"middle": [], |
|
"last": "Chua", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of Annual ACM Conference on Special Interest Group on Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nie, L., Wang, M., Zha, Z.-j., Li, G., and Chua, T.-S. (2011). Multimedia answering: enriching text qa with media information. In Proceedings of Annual ACM Conference on Special Interest Group on Information Retrieval.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Text classification from labeled and unlabeled documents using EM", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Nigam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"K" |
|
], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Thrun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Mitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Machine Learning -Special issue on information retrieval", |
|
"volume": "39", |
|
"issue": "", |
|
"pages": "103--134", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nigam, K., Mccallum, A. K., Thrun, S., and Mitchell, T. (2000). Text classification from labeled and unlabeled documents using EM. In Machine Learning -Special issue on information retrieval, volume 39, pages 103-134.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Survey of classification techniques in data mining", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Phyu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of International MultiConference of Engineers and Computer Scientists", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Phyu, N. P. (2009). Survey of classification techniques in data mining. In Proceedings of Interna- tional MultiConference of Engineers and Computer Scientists.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Charaterizing microblog with topic models", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Ramage", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Dumais", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Liebling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of International AAAI Conference on Weblogs and Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramage, D., Dumais, S., and Liebling, D. (2010). Charaterizing microblog with topic models. In Proceedings of International AAAI Conference on Weblogs and Social Media.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Labeled LDA: a supervised topic model for credit attribution in multi-labeled corpora", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Ramage", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Nallapati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of International Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ramage, D., Hall, D., Nallapati, R., and Manning, C. D. (2009). Labeled LDA: a supervised topic model for credit attribution in multi-labeled corpora. In Proceedings of International Conference on Empirical Methods in Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Topical clustering of tweets", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Rosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Gershman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Frederking", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of SIGIR Workshop on Social Web Search and Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rosa, K. D., Shah, R., Lin, B., Gershman, A., and Frederking, R. (2011). Topical clustering of tweets. In Proceedings of SIGIR Workshop on Social Web Search and Mining.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Learning author-topic models from text corpora", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Rosen-Zvi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Chemudugunta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Griffiths", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Smyth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Steyvers", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "ACM Transactions on Information Systems", |
|
"volume": "28", |
|
"issue": "", |
|
"pages": "1--38", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rosen-Zvi, M., Chemudugunta, C., Griffiths, T., Smyth, P., and Steyvers, M. (2010). Learning author-topic models from text corpora. ACM Transactions on Information Systems, 28:1-38.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Boosting and Rocchio applied to text filtering", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Schapire", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Singer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Singhal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "Proceedings of Annual ACM Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "215--223", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Schapire, R. E., Singer, Y., and Singhal, A. (1998). Boosting and Rocchio applied to text filter- ing. In Proceedings of Annual ACM Conference on Research and Development in Information Retrieval, pages 215-223.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "A systematic analysis of performance measures for classification tasks", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Sokolova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Lapalme", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Information Processing and Management", |
|
"volume": "45", |
|
"issue": "", |
|
"pages": "427--437", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sokolova, M. and Lapalme, G. (2009). A systematic analysis of performance measures for classi- fication tasks. Information Processing and Management, 45:427-437.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Short text classification in twitter to improve information filtering", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Sriram", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Fuhry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Demir", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Ferhatosmanoglu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Demirbas", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of Annual ACM Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sriram, B., Fuhry, D., Demir, E., Ferhatosmanoglu, H., and Demirbas, M. (2010). Short text clas- sification in twitter to improve information filtering. In Proceedings of Annual ACM Conference on Research and Development in Information Retrieval.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Enriching short text representation in microblog for clustering", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Frontiers of Computer Science in China", |
|
"volume": "6", |
|
"issue": "1", |
|
"pages": "88--101", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tang, J., Wang, X., Gao, H., Hu, X., and Liu, H. (2012). Enriching short text representation in microblog for clustering. Frontiers of Computer Science in China, 6(1):88-101.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "#twittersearch: a comparison of microblog search and web search", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Teevan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Ramage", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Morris", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of ACM Conference on Web Search and Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Teevan, J., Ramage, D., and Morris, M. R. (2011). #twittersearch: a comparison of microblog search and web search. In Proceedings of ACM Conference on Web Search and Data Mining.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Event detection in twitter", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Weng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B.-S", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of Association for the Advancement of Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Weng, J. and Lee, B.-S. (2011). Event detection in twitter. In Proceedings of Association for the Advancement of Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Feature selection in statistical learning of text categorization", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Pederson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proceedings of International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang, Y. and Pederson, J. (1997). Feature selection in statistical learning of text categorization. In Proceedings of International Conference on Machine Learning.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "User-sentiment topic model: refining user's topics with sentiment information", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Ding", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proceedings of ACM SIGKDD Workshop on Mining Data Semantics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhao, T., Li, C., Ding, Q., and Li, L. (2010). User-sentiment topic model: refining user's top- ics with sentiment information. In Proceedings of ACM SIGKDD Workshop on Mining Data Semantics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Comparing twitter and traditional media using topic models", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"X" |
|
], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Weng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E.-P", |
|
"middle": [], |
|
"last": "Lim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of European Conference on IR Research", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhao, W. X., Jiang, J., Weng, J., He, J., Lim, E.-P., Yan, H., and Li, X. (2011). Comparing twitter and traditional media using topic models. In Proceedings of European Conference on IR Research.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Classfying trending topics: A typology of conversation triggers on twitter", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Zubiaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Spina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V", |
|
"middle": [], |
|
"last": "Fresno", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Martinez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of ACM Conference on Information and Knowledge Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zubiaga, A., Spina, D., Fresno, V., and Martinez, R. (2011). Classfying trending topics: A typology of conversation triggers on twitter. In Proceedings of ACM Conference on Information and Knowledge Management.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "\u03b8", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"text": "Performance sensitivity of training set size on Twitter and Sina Weibo.", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"text": "The Performance with varying \u03b1 and training data size when other parameters are fixed.", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"text": "The Performance with varying \u03b2 and training data size when other parameters are fixed. The Performance with varying \u03bb and training data size when other parameters are fixed.", |
|
"type_str": "figure", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"text": "For some cases, we also provide the macr o\u2212 and micr o\u2212 values. The micr o\u2212 assigns equal weight to each message, while macr o\u2212 treats each category equally.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td>Twitter</td><td/><td>Sina Weibo</td><td/></tr><tr><td>Total</td><td>16935</td><td>Total</td><td>15811</td></tr><tr><td>Sports</td><td>2720</td><td>Sports</td><td>2602</td></tr><tr><td>Entertainment</td><td>2816</td><td>Movies</td><td>2694</td></tr><tr><td>Business</td><td>2912</td><td>Games</td><td>2605</td></tr><tr><td>Science&Tech</td><td>2827</td><td>Science&Tech</td><td>2647</td></tr><tr><td>Politics</td><td>2937</td><td>Politics</td><td>2654</td></tr><tr><td>Education</td><td>2723</td><td>Music</td><td>2609</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"text": "The distribution of different categories over two datasets.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td/><td>Twitter</td><td/><td/><td/><td>Sina Weibo</td></tr><tr><td>C at eg or y</td><td colspan=\"2\">P r ecision Recal l</td><td>F 1</td><td>C at eg or y</td><td colspan=\"2\">P r ecision Recal l</td><td>F 1</td></tr><tr><td>Sports</td><td>0.9322</td><td colspan=\"2\">0.9483 0.9402</td><td>Sports</td><td>0.9318</td><td>0.8747 0.9023</td></tr><tr><td>Entertainment</td><td>0.9000</td><td colspan=\"2\">0.5625 0.6923</td><td>Movies</td><td>0.8848</td><td>0.8207 0.8515</td></tr><tr><td>Business</td><td>0.8043</td><td colspan=\"2\">0.5323 0.6382</td><td>Games</td><td>0.8090</td><td>0.9283 0.8646</td></tr><tr><td>Science&Tech</td><td>0.6937</td><td colspan=\"2\">0.9801 0.8124</td><td>Science&Tech</td><td>0.8688</td><td>0.8323 0.8502</td></tr><tr><td>Politics</td><td>0.9096</td><td colspan=\"2\">0.9640 0.9360</td><td>Politics</td><td>0.8661</td><td>0.9324 0.8980</td></tr><tr><td>Education</td><td>0.5000</td><td colspan=\"2\">0.5519 0.5165</td><td>Music</td><td>0.8819</td><td>0.8699 0.8759</td></tr><tr><td>Micro-average</td><td>0.7979</td><td colspan=\"2\">0.7979 0.7979</td><td>Micro-average</td><td>0.8798</td><td>0.8798 0.8798</td></tr><tr><td>Macro-average</td><td>0.7934</td><td colspan=\"2\">0.6043 0.6128</td><td>Macro-average</td><td>0.8737</td><td>0.8764 0.8738</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF4": { |
|
"text": "Performance comparison among SSBN and other supervised baseline methods on twitter with 90% training data.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td colspan=\"2\">C l assi f ier Accur ac y</td><td colspan=\"6\">M ic r oP M ic r oR M ic r oF 1 M ac r oP M ac r oR M ac r oF 1</td></tr><tr><td>SSBN</td><td>0.9020</td><td>0.9020</td><td>0.9020</td><td>0.9020</td><td>0.8976</td><td>0.9045</td><td>0.9004</td></tr><tr><td>SVM</td><td>0.8991</td><td>0.8991</td><td>0.8991</td><td>0.8991</td><td>0.9017</td><td>0.8971</td><td>0.8991</td></tr><tr><td>NB</td><td>0.9015</td><td>0.9015</td><td>0.9015</td><td>0.9015</td><td>0.8990</td><td>0.9024</td><td>0.9003</td></tr><tr><td>KNN</td><td>0.8565</td><td>0.8565</td><td>0.8565</td><td>0.8565</td><td>0.8589</td><td>0.8486</td><td>0.8526</td></tr><tr><td>Rocchio</td><td>0.8802</td><td>0.8803</td><td>0.8802</td><td>0.8802</td><td>0.8769</td><td>0.8832</td><td>0.8781</td></tr><tr><td>L-LDA</td><td>0.8905</td><td>0.8905</td><td>0.8905</td><td>0.8905</td><td>0.8876</td><td>0.8989</td><td>0.8932</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"text": "Performance comparison among SSBN and other supervised baseline methods on SinaWeibo with 90% training data.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null |
|
}, |
|
"TABREF7": { |
|
"text": "Performance comparison among SSBN and other semi-supervised baseline methods on Twitter with 5% training data.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table><tr><td colspan=\"2\">C l assi f ier Accur ac y</td><td colspan=\"6\">M ic r oP M ic r oR M ic r oF 1 M ac r oP M ac r oR M ac r oF 1</td></tr><tr><td>SSBN</td><td>0.8798</td><td>0.8798</td><td>0.8798</td><td>0.8798</td><td>0.8737</td><td>0.8764</td><td>0.8738</td></tr><tr><td>Trans-SVM</td><td>0.8084</td><td>0.8084</td><td>0.8084</td><td>0.8084</td><td>0.8049</td><td>0.8085</td><td>0.8052</td></tr><tr><td>Semi-NB</td><td>0.8198</td><td>0.8198</td><td>0.8198</td><td>0.8198</td><td>0.8225</td><td>0.8217</td><td>0.8204</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF8": { |
|
"text": "Performance comparison among SSBN and other semi-supervised baseline methods on Sina Weibo with 5% training data.", |
|
"type_str": "table", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |