|
{ |
|
"paper_id": "C10-1034", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:56:32.951439Z" |
|
}, |
|
"title": "An Empirical Study on Learning to Rank of Tweets", |
|
"authors": [ |
|
{ |
|
"first": "Yajuan", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Science", |
|
"location": { |
|
"country": "Technology of China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Long", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Research Asia", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Qin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Research Asia", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Microsoft Research Asia", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Heung-Yeung", |
|
"middle": [], |
|
"last": "Shum", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Twitter, as one of the most popular micro-blogging services, provides large quantities of fresh information including real-time news, comments, conversation, pointless babble and advertisements. Twitter presents tweets in chronological order. Recently, Twitter introduced a new ranking strategy that considers popularity of tweets in terms of number of retweets. This ranking method, however, has not taken into account content relevance or the twitter account. Therefore a large amount of pointless tweets inevitably flood the relevant tweets. This paper proposes a new ranking strategy which uses not only the content relevance of a tweet, but also the account authority and tweet-specific features such as whether a URL link is included in the tweet. We employ learning to rank algorithms to determine the best set of features with a series of experiments. It is demonstrated that whether a tweet contains URL or not, length of tweet and account authority are the best conjunction. 1", |
|
"pdf_parse": { |
|
"paper_id": "C10-1034", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Twitter, as one of the most popular micro-blogging services, provides large quantities of fresh information including real-time news, comments, conversation, pointless babble and advertisements. Twitter presents tweets in chronological order. Recently, Twitter introduced a new ranking strategy that considers popularity of tweets in terms of number of retweets. This ranking method, however, has not taken into account content relevance or the twitter account. Therefore a large amount of pointless tweets inevitably flood the relevant tweets. This paper proposes a new ranking strategy which uses not only the content relevance of a tweet, but also the account authority and tweet-specific features such as whether a URL link is included in the tweet. We employ learning to rank algorithms to determine the best set of features with a series of experiments. It is demonstrated that whether a tweet contains URL or not, length of tweet and account authority are the best conjunction. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Twitter provides a platform to allow users to post text messages known as tweets to update their followers with their findings, thinking and comments on some topics (Java et al., 2007) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 165, |
|
"end": 184, |
|
"text": "(Java et al., 2007)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The searched tweets are presented by Twitter in chronological order except the first three, which are ranked by considering popularity of tweets in terms of the number of retweets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This ranking method, however, has not taken into account the content relevance and twitter account; inevitably, a large amount of pointless tweets (Pear Analytics, 2009) may flood the relevant tweets. Although this ranking method can provide fresh information to tweet users, users frequently expect to search relevant tweets to the search queries. For example, consider someone researching consumer responses toward the iPad. He or she would like to find tweets with appropriate comments such as iPad is great or you can find many useful features of iPad, rather than tweets with irrelevant comment, even if they are most recent or popular.", |
|
"cite_spans": [ |
|
{ |
|
"start": 147, |
|
"end": 169, |
|
"text": "(Pear Analytics, 2009)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Moreover, neither Twitter's current chronological order based ranking nor the recently introduced popularity based ranking can avoid spam. A developer can accumulate hundreds of thousands of followers in a day or so. At the same time, it is not difficult for spammers to create large quantities of retweets. By contrast, content relevance ranking can effectively prevent spammers from cheating. Different from ranking tweets through chronological order and popularity, a content relevance strategy considers many characteristics of a tweet to determine its ranking level. Thus it is difficult for spammers to break the ranking system by simple methods such as increasing retweet count or number of followers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose a method to rank the tweets which outputs the matched tweets based on their content relevance to the query. We investigate the effects of content features and non-content features and produce a ranking system by a learning to rank approach.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "With a series of experiments, we determined the best set of features and analyzed the effects of each of individual feature. We provide empirical evidence supporting the following claims, \uf0b7 Account authority, length of tweet and whether a tweet contains a URL are the top three effective features for tweet ranking, where containing a URL is the most effective feature. \uf0b7 We find an effective representation of account authority: the number of times the author was listed by other users. We find through experiments that this representation is better than the widely adopted number of followers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "At present, a number of web sites offer the so-called real-time search service which mainly returns real-time posts or shared links, videos and images obtained from micro-blogging systems or other medium according to the user's query. We investigate the ranking method used by these web sites. From their self-introduction page, we find four main criteria for ranking real-time posts. They are posting time, account authority, topic popularity and content relevance. Specifically, Twitter maintains a specialized search engine which ranks tweets according to posting time and topic popularity. In addition, Google, Twazzup 2 and Chirrps 3 rank real-time tweets by posting time. While the last one also ranks tweets by popularity, which is measured by retweet count.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Real-time Search", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Tweefind 4 ranks search result according to authority of authors which depends on how popular, relevant, and active the author is. Additionally, Twitority 5 rank tweets by author authority as well.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Real-time Search", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Bing and CrowdEye 6 rank tweets by posting time or content relevance. Bing takes authors authority, retweet count and freshness into consideration while measuring the relevance. To determine the relevance of a tweet, CrowdEye considers a number of factors including content relevance and author influence which appears to rely heavily on the number of followers an author has. It turns out that the number of followers is not a very reasonable measure of the influence of an account according to our experimental results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Real-time Search", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Besides tweet search, recently some researchers have focused on twitter recommendation system. Chen et al. (2010) presented an approach to recommend URLs on Twitter as a means to better direct user attention in information streams. They designed the recommender taking three separate dimensions into consideration: content source, topic interest and social voting.", |
|
"cite_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 113, |
|
"text": "Chen et al. (2010)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Twitter Recommendation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Sun et al. 2009proposed a diffusion-based micro-blogging recommendation framework aiming to recommend micro-blogs during critical events via optimizing story coverage, reading effort and delay time of a story. The key point of this method is to construct an exact diffusion graph for micro-blogging, which is difficult due to the presence of extensive irrelevant personal messages and spam.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Twitter Recommendation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Another related topic is blog search and forum search. Recently, many approaches for blog search and forum search have been developed, which include learning to rank methods and link-based method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Blog Search and Forum Search", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Learning to rank approach Xi et al. (2004) used features from the thread trees of forums, authors, and lexical distribution within a message thread and then applied Linear Regression and Support Vector Machine (SVM) to train the ranking function. Fujimura et al. (2005) exploited provisioning link and evaluation link between bloggers and blog entries, and scored each blog entry by weighting the hub and authority scores of the bloggers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 42, |
|
"text": "(2004)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 269, |
|
"text": "Fujimura et al. (2005)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Blog Search and Forum Search", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Link-Based approach Kritikopoulos et al. (2006) introduced similarities among bloggers and blogs into blog ranking. This method enabled the assignment of a higher score to the blog entry published by a blogger who has already accepted a lot of attention. Xu and Ma (2006) built a topic hierarchy structure through content similarity. Liu et al. (2007) presented a newsgroup structure-based approach PostRank which built posting trees according to response relationship between postings.", |
|
"cite_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 47, |
|
"text": "Kritikopoulos et al. (2006)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 255, |
|
"end": 271, |
|
"text": "Xu and Ma (2006)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 334, |
|
"end": 351, |
|
"text": "Liu et al. (2007)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Blog Search and Forum Search", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Chen et al. 2008proposed a posting rank algorithm which built link graphs according to co-replier relationships. This kind of method exploits different types of structures among postings and improved the performance of traditional link-based ranking algorithm for forum search. However, it is difficult to rank postings which only have a few words simply based on content by using FGRank algorithm. And PostingRank approach relies too much on reply relations which are more likely to suffer from topic excursion.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Blog Search and Forum Search", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Although approaches proposed above perform effectively in forum search and blog search, they are not appropriate for twitter search because tweets are usually shorter and more informal than blogs. Furthermore, it does not have the explicit hierarchy structure of newsgroup messages on forums. In addition, tweets possess many particular characteristics that blog and forum do not have.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Blog Search and Forum Search", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "To generate a good ranking function which provides relevant search results and prevents spammers' cheating activities, we analyze both content features and authority features of tweets and determine effective features. We adopt learning to rank algorithms which have demonstrated excellent power in addressing various ranking problems of search engines.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview of Our Approach", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Learning to Rank is a data-driven approach which integrates a bag of features in the model effectively. Figure 1 shows the paradigm of learning for tweet ranking.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 112, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Learning to Rank Framework", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "At the first step, we prepare the training and test corpus as described in Section 5. Then we extract features from the training corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning to Rank Framework", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "RankSVM algorithm (Joachims Thorsten, 1999) is used to train a ranking model from the training corpus. Finally, the model is evaluated by the test corpus. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 43, |
|
"text": "Thorsten, 1999)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning to Rank Framework", |
|
"sec_num": "3.1" |
|
}, |
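To make the training step concrete, the following is a minimal sketch of the pairwise idea behind RankSVM. The paper uses Joachims' svm_struct toolkit; this sketch instead emulates the pairwise transform with scikit-learn's LinearSVC, and the function name and data layout are illustrative assumptions rather than the paper's actual setup.

```python
import numpy as np
from sklearn.svm import LinearSVC

def ranksvm_fit(X, y, query_ids):
    """Learn a linear scoring function from pairwise preferences.

    X: feature matrix (n_tweets x n_features); y: relevance grades;
    query_ids: the query each tweet belongs to. Pairs are formed only
    within the same query, as in query-level ranking."""
    diffs, signs = [], []
    for i in range(len(y)):
        for j in range(i + 1, len(y)):
            if query_ids[i] == query_ids[j] and y[i] != y[j]:
                diffs.append(X[i] - X[j])           # feature difference of the pair
                signs.append(np.sign(y[i] - y[j]))  # +1 if tweet i should rank higher
    model = LinearSVC().fit(np.asarray(diffs), np.asarray(signs))
    return model.coef_.ravel()                      # score a tweet as w . x
```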
|
{ |
|
"text": "One of the most important tasks of a learning to rank system is the selection of a feature set. We exploit three types of features for tweet ranking. 1) Content relevance features refer to those features which describe the content relevance between queries and tweets. 2) Twitter specific features refer to those features which represent the particular characteristics of tweets, such as retweet count and URLs shared in tweet. 3) Account authority features refer to those features which represent the influence of authors of the tweets in Twitter (Leavitt et al., 2009 ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 548, |
|
"end": 569, |
|
"text": "(Leavitt et al., 2009", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features for Tweets Ranking", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In the next section, we will describe these three types of features in detail.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Features for Tweets Ranking", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We used three content relevance features, Okapi BM25 (Robertson et al., 1998) , similarity of contents and length of tweet.", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 77, |
|
"text": "(Robertson et al., 1998)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Content Relevance Features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Okapi BM25 score measures the content relevance between query Q and tweet T. The standard BM25 weighting function is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Content Relevance Features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "(1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Content Relevance Features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "where Length(T) denotes the length of T and represents average length of tweet in corpus. IDF( ) is Inverse Document Frequency.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Content Relevance Features", |
|
"sec_num": "4.1" |
|
}, |
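As a concrete illustration of formula (1), the sketch below computes the BM25 feature in Python. The tokenization, the smoothing inside the IDF, and the parameter values k1 = 1.2 and b = 0.75 are common defaults assumed here; the paper does not state its settings.

```python
import math
from collections import Counter

def bm25(query_terms, tweet_terms, doc_freq, n_tweets, avg_len, k1=1.2, b=0.75):
    """Okapi BM25 score of a tweet for a query, as in formula (1).

    doc_freq maps a term to the number of tweets containing it;
    avg_len is the average tweet length in the corpus."""
    tf = Counter(tweet_terms)
    score = 0.0
    for q in set(query_terms):
        if tf[q] == 0:
            continue
        df = doc_freq.get(q, 0)
        idf = math.log(1 + (n_tweets - df + 0.5) / (df + 0.5))  # smoothed IDF(q_i)
        norm = k1 * (1 - b + b * len(tweet_terms) / avg_len)    # length normalization
        score += idf * tf[q] * (k1 + 1) / (tf[q] + norm)
    return score
```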
|
{ |
|
"text": "Similarity of contents estimates the popularity of documents in the corpus (Song et al., 2008) . In our case, it measures how many tweets of the query are similar in content with the current tweet. We calculate a cosine similarity score for every pair of tweets, and the final similarity score for tweet in is computed by the following formula:", |
|
"cite_spans": [ |
|
{ |
|
"start": 75, |
|
"end": 94, |
|
"text": "(Song et al., 2008)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Content Relevance Features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "(2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Content Relevance Features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "represents the TFIDF vector of and refers to tweets collection of query . Length is measured by the number of words that a tweet contains. Intuitively, a long sentence is apt to contain more information than a short one. We use length of tweet as a measure of the information richness of a tweet.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Where", |
|
"sec_num": null |
|
}, |
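A short sketch of the similarity-of-contents feature (formula (2)), assuming scikit-learn for the TFIDF vectors and cosine computation; the library choice and the exclusion of each tweet's similarity with itself are our assumptions.

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def similarity_of_contents(tweets):
    """For each tweet t_i in a query's collection C_q, sum the cosine
    similarity between its TFIDF vector and that of every other tweet."""
    vectors = TfidfVectorizer().fit_transform(tweets)  # V(t_i) for each tweet
    sims = cosine_similarity(vectors)                  # all pairwise cosines
    return sims.sum(axis=1) - 1.0                      # drop cos(V(t_i), V(t_i)) = 1
```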
|
{ |
|
"text": "Tweets have many special characteristics. We exploit these characteristics and extract six twitter specific features as listed in Table 1 . Figure 2 contains URL http://myloc.me/43tPj which leads to a map indicating where the publisher located.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 130, |
|
"end": 137, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 140, |
|
"end": 148, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Twitter's Specific Features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "URL is a binary feature. It is assigned 1 when a tweet contains at least one URL, otherwise 0. URL Count estimates the number of times that the URL appears in the tweet corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Twitter's Specific Features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Retweet Count: Twitter users can forward a tweet to his or her followers with or without modification on the forwarded tweets, which is called retweet on Twitter. A retweeted tweet usually includes an RT tag. Generally, sentences before RT are comments of the retweeter and sentences after RT are the original content, perhaps with some modifications. Here we only consider tweets including RT with the original content unmodified. Retweet count is defined as the number of times a tweet is retweeted. In Figure 2 , original tweet Satu-slank #nowplaying !! http://myloc.me/43tPj is retweeted once.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 505, |
|
"end": 514, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Twitter's Specific Features", |
|
"sec_num": "4.2" |
|
}, |
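The URL, URL Count and retweet features above reduce to simple string tests. A minimal sketch follows; the regular expressions and the url_counts lookup are our own illustrative assumptions, not details given in the paper.

```python
import re

URL_RE = re.compile(r'https?://\S+')

def url_features(tweet, url_counts):
    """Binary URL feature and URL Count; url_counts maps a URL to the
    number of times it appears in the tweet corpus (assumed precomputed)."""
    urls = URL_RE.findall(tweet)
    return {'url': 1 if urls else 0,
            'url_count': sum(url_counts.get(u, 0) for u in urls)}

def is_unmodified_retweet(tweet, original):
    """True if the tweet retweets `original` without modifying it;
    only such retweets contribute to the retweet count."""
    match = re.search(r'\bRT\b[:\s]*(.*)', tweet, re.DOTALL)
    return bool(match) and match.group(1).strip() == original.strip()
```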
|
{ |
|
"text": "Hash Tag Score: Publishers are allowed to insert hash tags into their tweets to indicate the topic. In Figure 2 , #nowplaying is a hash tag. We collect hash tags appearing in the tweets of every query and sort them in descending order according to frequency. Tag frequency for tweet of query is computed from normalized frequency of top-n tags.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 111, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Twitter's Specific Features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "(3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Twitter's Specific Features", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "is the normalization factor. represents the frequent of in corpus. And denotes the tag collection extracted from . Reply: This is a binary feature. It is 1 when the tweet is a reply and 0 otherwise. A tweet starting with a twitter account is regarded as a reply tweet in our experiment. Figure 3 shows an example.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 287, |
|
"end": 295, |
|
"text": "Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Where", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "OOV: This feature is used to roughly approximate the language quality of tweets. Words out of vocabulary in Twitter include spelling errors and named entities. According to a small-scale investigation, spelling errors account for more than 90% of OOVs excluding capitalized words, tags, mentions of users and URLs. We use a dictionary with 0.5 million entries to compute the ratio of OOVs in a tweet.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figure 3. Reply Tweet", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Figure 3. Reply Tweet", |
|
"sec_num": null |
|
}, |
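One plausible implementation of the OOV ratio of formula (4), excluding capitalized words, hash tags, user mentions and URLs as discussed above; the exact token filtering is our assumption.

```python
def oov_ratio(tokens, dictionary):
    """Ratio of out-of-vocabulary words in a tweet (formula (4)).

    dictionary is a set of known words (about 0.5 million entries
    in the paper's setting)."""
    candidates = [t for t in tokens
                  if t and not t[0].isupper()          # skip capitalized words
                  and not t.startswith(('#', '@'))     # skip tags and mentions
                  and not t.startswith('http')]        # skip URLs
    if not candidates:
        return 0.0
    oov = sum(1 for t in candidates if t.lower() not in dictionary)
    return oov / len(candidates)
```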
|
{ |
|
"text": "There are three important relations between users in Twitter: follow, retweet, and mention. Additionally, users are allowed to classify their followings into several lists based on topics. We measured the influence of users' authorities on tweets based on the following assumptions: \uf0b7 Users who have more followers and have been mentioned in more tweets, listed in more lists and retweeted by more important users are thought to be more authoritative. \uf0b7 A tweet is more likely to be an informative tweet rather than pointless babble if it is posted or retweeted by authoritative users. In order to distinguish the effect of the three relations, we computed four scores for each user representing the authority independently. \uf0b7 Follower Score: number of followers a user has. \uf0b7 Mention Score: number of times a user is referred to in tweets. \uf0b7 List Score: number of lists a user appears in. \uf0b7 Popularity Score: computed by PageRank algorithm (Page et al., 1999) based on retweet relations. Following the retweet relationship among users, we construct a directed graph G (V, E). In our experiments, G is built from a tweet collection including about 1.1 million tweets. V denotes twitter users that appear in training examples. E is a set of directed edges. If author published the tweet , and author retweeted after , there exists an edge from to . We call original author and retweeter. Figure 4 shows the PageRank algorithm for calculating popularity scores for twitter users. In our experiment, damping factor e was set to 0.8. Like Dong et al. (2010) did, we define three subtypes for each account authority score. Table 2 presents features of account authority we use. The highest follower score of the user who published or retweeted the tweet Important_ popularity", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 1387, |
|
"end": 1395, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 1618, |
|
"end": 1625, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Account Authority Features", |
|
"sec_num": "4.3" |
|
}, |
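A compact sketch of the Popularity Score: PageRank over the retweet graph, with an edge from each retweeter to the original author and damping factor 0.8 as stated above. The uniform initialization and convergence tolerance are our assumptions.

```python
def popularity_scores(retweets, damping=0.8, tol=1e-8):
    """PageRank over the retweet graph G(V, E) of Figure 4.

    retweets maps a retweeter v to the list of original authors it
    retweeted, one entry per retweet (repeats weight the edge)."""
    nodes = set(retweets) | {u for us in retweets.values() for u in us}
    score = {n: 1.0 / len(nodes) for n in nodes}       # Step 1: initialize
    while True:
        new = {}
        for n in nodes:                                # Step 2: update each user
            # v passes score[v] * (retweets of n) / (v's total retweets) to n.
            rank_in = sum(score[v] * us.count(n) / len(us)
                          for v, us in retweets.items())
            new[n] = (1 - damping) / len(nodes) + damping * rank_in
        if max(abs(new[n] - score[n]) for n in nodes) < tol:
            return new                                 # Step 3: converged
        score = new
```

For instance, popularity_scores({'v': ['u', 'u', 'w']}) gives u a larger share of v's score than w, since v retweeted u twice.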
|
{ |
|
"text": "The highest popularity score of the user who published or retweeted the tweet Important_ mention", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Account Authority Features", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The highest mention score of the user who published or retweeted the tweet Important_l ist", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Account Authority Features", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The highest list score of the user who published or retweeted the tweet Table 2 . Account Authority Features for tweet", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 72, |
|
"end": 79, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Account Authority Features", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We introduce the data we used in experiment and the evaluation metrics in this section.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiment Data and Evaluation", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We analyze 140 hot searches on CrowdEye within a week. They consist of big events, PageRank algorithm for calculating popularity score for users. Input: Directed Graph G of retweet relationship Damping factor e. Output: popularity score for each user Procedure:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Step 1: popularity score of all users are initialized as .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Step 2: update the popularity score for users. denotes the collection of users who retweeted 's tweet.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "is the number of times has been retweeted by .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "is the number of users whose tweets has retweeted.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Step 3: Repeat the second step until all popularity scores will never change. famous persons, new products, festivals, movies and so on. The most frequent types of hot searches, which account for more than 81% of all hot searches, are as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "\uf0b7 News: news about public figures and news related to some places. \uf0b7 Products: character description, promotion information and comments about products. \uf0b7 Entertainment: mainly about movies, including film reviews and introductions about plots. We select 20 query terms as shown in Table 3 Table 3 . 20 Query Terms", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 282, |
|
"end": 289, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Retweets are forwardings of corresponding original tweets, sometimes with comments of retweeters. They are supposed to contain no more information than the original tweets, therefore they drops out of ranking in this paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We sample 500 tweets for each query from its original tweets collection and ask a human editor to label them with a relevance grade. In order to ensure the annotation is reasonable, we set multiple search intentions for each query referring to the topics arising in the tweets about the query in the corpus. Specifically, for Locations, tweets describing news related to the location are relevant. For people, what they have done and the comments about them are regarded as relevant information. For products, tweets including feature description, promotion and comments are considered relevant. And for movies, tweets about comment on the movies, show time and tickets information are preferred. We apply four judgment grades on query-tweet pairs: excellent, good, fair and bad. According to the statistics, about half of the tweets in the experiment data are labeled as bad. Table 4 presents the distribution for all grades. Table 4 . Tweet Distribution of Each Grade", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 877, |
|
"end": 884, |
|
"text": "Table 4", |
|
"ref_id": null |
|
}
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "There are several metrics that are often used to measure the quality of rankings. In this paper, we use Normalized Discount Cumulative Gain (NDCG) which can handle multiple levels of relevance as the evaluation metrics (Jarvelin and Kekalainen, 2002).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Metrics", |
|
"sec_num": "5.2" |
|
}, |
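For reference, a minimal NDCG@k over the four grades (e.g. bad=0, fair=1, good=2, excellent=3); the exponential gain and log2 discount follow the standard Jarvelin and Kekalainen (2002) formulation, and the grade-to-integer mapping is our assumption.

```python
import math

def dcg(grades, k):
    """Discounted cumulative gain of a ranked list of relevance grades."""
    return sum((2 ** g - 1) / math.log2(rank + 2)
               for rank, g in enumerate(grades[:k]))

def ndcg(grades, k=10):
    """NDCG@k: DCG of the ranking divided by the DCG of the ideal ranking."""
    ideal = dcg(sorted(grades, reverse=True), k)
    return dcg(grades, k) / ideal if ideal > 0 else 0.0
```

For example, ndcg([3, 0, 2, 1], k=10) compares the produced ordering against the ideal ordering [3, 2, 1, 0].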
|
{ |
|
"text": "Five-fold cross-validation was used in our experiments. We choose tweets of sixteen queries (four from each query type) as the training data. The remaining tweets are divided into evaluation data and validation data equally.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We learn a ranking model by using a RankSVM algorithm based on all features we extracted, which is denoted as RankSVM_Full. In the experiment, a toolkit named svm struct 7 implemented by Thorsten Joachims is used. Figure 5 shows the comparison between our method which integrates three types of features and ranking through chronological order, account authority, and content relevance individually.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 214, |
|
"end": 222, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Learning to Rank for Tweet Ranking", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "In this experiment, Content Relevance is measured by BM25 score. And Account Authority is approximated by the number of followers of the user. Figure 5 illustrates that ranking through content relevance is not as effective as other methods. This is because our work is essentially re-ranking on the result of Twitter Search. Hence almost all tweets include the query term which makes it difficult to distinguish them by BM25 score. Figure 5 also reveals that account authority is useful for ranking tweet relevance; it outperforms ranking through chronological order and is competitive to our model trained from all features. This agrees with the assumption we made about the influence of user authorities on tweets. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 151, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF3" |
|
}, |
|
{ |
|
"start": 432, |
|
"end": 440, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Learning to Rank for Tweet Ranking", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "As the RankSVM_Full underperforms against some models trained from subsets of features, we use an advanced greedy feature selection method and find the best feature conjunction to improve the performance of RankSVM_full. Figure 6 shows the feature selection approach.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 229, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Feature Selection", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Although greedy feature selection approach is commonly used in many problems, it does not work efficiently in addressing this problem partly for data sparseness. It is always blocked by a local optimum feature set. In order to resolve this problem, we first generate several feature sets randomly and run the greedy selection algorithm based the best one among them. Finally, we find the best feature conjunction composed by URL, Sum_mention, First_List, Length, and Important_follower, from which a model is learnt denoted as RankSVM_Best. Figure 7 illustrates that this model outperforms RankSVM_Full by about 15.3% on NDCG@10. We conduct a paired t-test between RankSVM_Best and each of other four ranking methods on NDCG@10 of ten test queries. The results demonstrate that RankSVM_Best outperforms ranking through time, account authority and content relevance respectively with a significance level of 0.01, and RankSVM_Full with a level of 0.05.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 541, |
|
"end": 549, |
|
"text": "Figure 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Feature Selection", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We are interested in which features in particular are highly valued by our model for tweet ranking. We evaluate the importance of each feature by the decrement of performance when removing the feature measured from RankSVM_Best. Figure 8 reveals the importance of each feature in our model. An advanced greedy feature selection algorithm. Input: All features we extracted. Output: the best feature conjunction BFC Procedure:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 229, |
|
"end": 237, |
|
"text": "Figure 8", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Feature Analysis", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "Step1: Randomly generate 80 feature set F.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Analysis", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "Step 2: Evaluate every feature set in F and select the best one denoted by RBF. Features excluded those in RBF are denoted as EX_RBF", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Analysis", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "Step 3: t = 0,BFC(t)=RBF;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Analysis", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "Repeat Foreach feature in EX_RBF If Evaluation(BFC) < Evaluation(BFC, feature) BFC(t+1) = {BFC(t), feature} EX_RBF(t+1) = EX_RBF(t) -{feature} While BFC(t+1) \u2260 BFC(t) Note: Evaluation(BFC) refers to the performance of ranking function trained from features in BFC on validation data. We observe from Figure 8 that URL is very important for our model; without it the performance declines seriously (with a significance level of 0.001). The reason may be that URLs shared in tweets, which provide more detailed information beyond the tweet's 140 characters, may be relevant to the query at a high probability.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 300, |
|
"end": 308, |
|
"text": "Figure 8", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Feature Analysis", |
|
"sec_num": "6.3" |
|
}, |
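The procedure in Figure 6 can be sketched as follows. Here `evaluate` stands for training a ranker on a candidate feature set and measuring NDCG on the validation data; the random-restart size of 80 follows Step 1, while the seeding and the random subset sizes are our assumptions.

```python
import random

def select_features(all_features, evaluate, n_random=80, seed=0):
    """Advanced greedy feature selection (Figure 6): start from the best of
    n_random random feature sets, then greedily add features that improve
    the validation score, until no addition helps."""
    rng = random.Random(seed)
    candidates = [frozenset(rng.sample(all_features,
                                       rng.randint(1, len(all_features))))
                  for _ in range(n_random)]
    best = set(max(candidates, key=evaluate))      # Steps 1-2: best random set (RBF)
    remaining = set(all_features) - best           # EX_RBF
    improved = True
    while improved:                                # Step 3: greedy expansion
        improved = False
        for feature in sorted(remaining):
            if evaluate(best | {feature}) > evaluate(best):
                best.add(feature)
                remaining.discard(feature)
                improved = True
    return best
```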
|
{ |
|
"text": "Another useful feature is the number of lists that the author of the tweet has been listed in. The performance of ranking decreases with a significance level of 0.05 when removing it from the best feature combination. However, other features do not show significant contribution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Feature Analysis", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "Our experiment in section 6.2 demonstrates that features such as Hash tag Score and Retweet Count are not as effective as expected. This may be due to the small size of training data. We present an approach to learn an effective tweets ranker in a small dataset through feature selection. However, 20 queries are not sufficient to train a powerful ranker for Twitter.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "In this study, to minimize the annotation effort, for each test query, we only annotate the tweets containing the query (returned by Twitter Search) and then used them for evaluation. With this kind of evaluation, it is hard to completely evaluate the significance of some features, such as content relevance features. In the future, we will select more queries including both hot searches and long tail searches, and select tweets for annotation directly from the twitter firehose.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "There is also an opportunity for more accurate retweet relation detection in our work. At present, we just identify the retweet whose original tweet has not been modified, which leaves out a fair amount of retweet information. We would need to develop a more precise retweet relation detection method.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "In this paper, we study three types of tweet features and propose a tweet ranking strategy by applying learning to rank algorithm. We find a set of most effective features for tweet ranking. The results of experiments demonstrate that the system using Sum_mention, First_list, Important_follower, length and URL performs best. In particular, whether a tweet contains a URL is the most effective feature. Additionally, we find in the experiments that the number of times the account is listed by other users is an effective representation of account authority and performs better than the number of followers that is widely used in previous work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "There are many aspects we would like to explore in the future. First, this research is based on the search results returned from Twitter which contains the input query. The tweets not containing the queries are not returned. We will explore query expansion approaches to improve the recall of the search results. We did not consider spam issues in the ranking process. However, spam filtering is important to all types of search engines. We will explore the impacts of spam and work out a spam filtering approach.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Twazzup: http://www.twazzup.com/ 3 Chirrps: http://chirrps.com/ 4 Tweefind: http://www.tweefind.com/ 5 Twitority: http://www.twitority.com/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "CrowdEye: http://www.crowdeye.com/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "SVM struct : http://svmlight.joachims.org/svm_struct.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Short and Tweet: Experiments on Recommending Content from Information Streams", |
|
"authors": [ |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Jilin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rowan", |
|
"middle": [], |
|
"last": "Nairn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Les", |
|
"middle": [], |
|
"last": "Nelson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Bernstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ed", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Chi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "the Proceedings of the 28th International conference on Human Factors in Computing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1185--1194", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chen Jilin, Rowan Nairn, Les Nelson, Michael Bernstein, and Ed H. Chi. 2010. Short and Tweet: Experiments on Recommending Content from Information Streams. In the Proceedings of the 28th International conference on Human Factors in Computing Systems, Pages: 1185-1194.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "PostingRank: Bringing Order to Web Forum Postings", |
|
"authors": [ |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Zhi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weihua", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "the proceedings of the 4th Asia Information Retrieval Symposium", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "377--384", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chen Zhi, Li Zhang, Weihua Wang. 2008. PostingRank: Bringing Order to Web Forum Postings. In the proceedings of the 4th Asia Information Retrieval Symposium, Pages: 377-384.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Time of the essence: improving recency ranking using Twitter data", |
|
"authors": [ |
|
{ |
|
"first": "Dong", |
|
"middle": [], |
|
"last": "Anlei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruiqiang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranam", |
|
"middle": [], |
|
"last": "Kolari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Bai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Diaz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhaohui", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongyuan", |
|
"middle": [], |
|
"last": "Zha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "the proceedings of the 19 th International Conference on World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "331--340", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dong Anlei, Ruiqiang Zhang, Pranam Kolari, Jing Bai, Fernando Diaz, Yi Chang, Zhaohui Zheng, and Hongyuan Zha. 2010. Time of the essence: improving recency ranking using Twitter data. In the proceedings of the 19 th International Conference on World Wide Web, Pages: 331-340.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "The EigenRumor Algorithm for Ranking Blogs", |
|
"authors": [ |
|
{ |
|
"first": "Fujimura", |
|
"middle": [], |
|
"last": "Ko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Takafumi", |
|
"middle": [], |
|
"last": "Inoue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Masayuki", |
|
"middle": [], |
|
"last": "Sugisaki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "the proceedings of the 2 nd Annual Workshop on the Weblogging Ecosystem: Aggregation, Analysis and Dynamics, World Wide Web", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fujimura Ko, Takafumi Inoue, and Masayuki Sugisaki. 2005. The EigenRumor Algorithm for Ranking Blogs. In the proceedings of the 2 nd Annual Workshop on the Weblogging Ecosystem: Aggregation, Analysis and Dynamics, World Wide Web.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Cumulated gain-based evaluation of IR techniques", |
|
"authors": [ |
|
{ |
|
"first": "Jarvelin", |
|
"middle": [], |
|
"last": "Kalervo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaana", |
|
"middle": [], |
|
"last": "Kekalainen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "ACM Transactions on Information Systems", |
|
"volume": "20", |
|
"issue": "", |
|
"pages": "422--446", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jarvelin Kalervo, and Jaana Kekalainen. 2002. Cumulated gain-based evaluation of IR techniques. ACM Transactions on Information Systems, Volume 20, Pages: 422-446.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Why we twitter: Understanding Microblogging Usage and Communities", |
|
"authors": [ |
|
{ |
|
"first": "Java", |
|
"middle": [], |
|
"last": "Akshay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodan", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Finin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Belle", |
|
"middle": [], |
|
"last": "Tseng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "the proceedings of the 9 th International Workshop on Knowledge Discovery on the Web and the 1st International Workshop on Social Networks Analysis", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "118--138", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Java Akshay, Xiaodan Song, Tim Finin, and Belle Tseng. 2007. Why we twitter: Understanding Microblogging Usage and Communities. In the proceedings of the 9 th International Workshop on Knowledge Discovery on the Web and the 1st International Workshop on Social Networks Analysis. Pages: 118-138.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Making Large-Scale SVM Learning Practical", |
|
"authors": [ |
|
{ |
|
"first": "Joachims", |
|
"middle": [], |
|
"last": "Thorsten", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Advances in Kernel Methods: Support Vector Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "169--184", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joachims Thorsten. 1999. Making Large-Scale SVM Learning Practical. Advances in Kernel Methods: Support Vector Learning, Pages: 169-184.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Pear Analytics", |
|
"authors": [], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pear Analytics. 2009. Twitter Study-August 2009.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "BlogRank: Ranking Weblogs Based on Connectivity and Similarity Features", |
|
"authors": [ |
|
{ |
|
"first": "Kritikopoulos", |
|
"middle": [], |
|
"last": "Apostolos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martha", |
|
"middle": [], |
|
"last": "Sideri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iraklis", |
|
"middle": [], |
|
"last": "Varlamis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "the proceedings of the 2 nd International Workshop on Advanced Architectures and Algorithms for Internet Delivery and Applications", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kritikopoulos Apostolos, Martha Sideri, and Iraklis Varlamis. 2006. BlogRank: Ranking Weblogs Based on Connectivity and Similarity Features. In the proceedings of the 2 nd International Workshop on Advanced Architectures and Algorithms for Internet Delivery and Applications.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "The Influentials: New Approaches for Analyzing Influence on Twitter. A publication of the Web Ecology Project", |
|
"authors": [ |
|
{ |
|
"first": "Evan", |
|
"middle": [], |
|
"last": "Leavitt Alex", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Burchard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Fisher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gilbert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leavitt Alex, Evan Burchard, David Fisher, and Sam Gilbert. 2009. The Influentials: New Approaches for Analyzing Influence on Twitter. A publication of the Web Ecology Project.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "A Link-Based Rank of Postings in Newsgroup", |
|
"authors": [ |
|
{ |
|
"first": "Liu", |
|
"middle": [], |
|
"last": "Hongbo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiahai", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiaxin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "the proceedings of the 5 th International Conference on Machine Learning and Data Mining in Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "392--403", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liu Hongbo, Jiahai Yang, Jiaxin Wang, Yu Zhang. 2007. A Link-Based Rank of Postings in Newsgroup. In the proceedings of the 5 th International Conference on Machine Learning and Data Mining in Pattern Recognition, Pages: 392-403.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "The PageRank Citation Ranking: Bring Order to the Web", |
|
"authors": [ |
|
{ |
|
"first": "Page", |
|
"middle": [], |
|
"last": "Lawrence", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Brin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajeev", |
|
"middle": [], |
|
"last": "Motwani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Terry", |
|
"middle": [], |
|
"last": "Winograd", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Page Lawrence, Sergey Brin, Rajeev Motwani, and Terry Winograd. 1999. The PageRank Citation Ranking: Bring Order to the Web. Technical report, Stanford University.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Okapi at TREC-7: Automatic Ad Hoc, Filtering, VLC and Interactive", |
|
"authors": [ |
|
{ |
|
"first": "Robertson", |
|
"middle": [], |
|
"last": "Stephen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Walker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Micheline", |
|
"middle": [], |
|
"last": "Hancock-Beaulieu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "the Proceedings of the 7 th Text Retrieval Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "199--210", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robertson Stephen E., Steve Walker, and Micheline Hancock-Beaulieu. 1998. Okapi at TREC-7: Automatic Ad Hoc, Filtering, VLC and Interactive. In the Proceedings of the 7 th Text Retrieval Conference. Pages: 199-210", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Question Utility: A Novel Static Ranking of Question Search", |
|
"authors": [ |
|
{ |
|
"first": "Song", |
|
"middle": [], |
|
"last": "Young-In", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yunbo", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hae-Chang", |
|
"middle": [], |
|
"last": "Rim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "the Proceedings of the 23 rd AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1231--1236", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Song Young-In, Chin-Yew Lin, Yunbo Cao, and Hae-Chang Rim. 2008. Question Utility: A Novel Static Ranking of Question Search. In the Proceedings of the 23 rd AAAI Conference on Artificial Intelligence. Pages: 1231-1236", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "A Novel Recommendation Framework for Micro-blogging based on Information Diffusion", |
|
"authors": [ |
|
{ |
|
"first": "Sun", |
|
"middle": [], |
|
"last": "Aaron", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiesi", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Zeng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "the proceedings of the 19 th Workshop on Information Technologies and Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sun Aaron R., Jiesi Cheng, and Daniel D. Zeng. 2009. A Novel Recommendation Framework for Micro-blogging based on Information Diffusion. In the proceedings of the 19 th Workshop on Information Technologies and Systems.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Learning effective ranking functions for newsgroup search", |
|
"authors": [ |
|
{ |
|
"first": "Xi", |
|
"middle": [], |
|
"last": "Wensi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jesper", |
|
"middle": [], |
|
"last": "Lind", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Brill", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "the proceedings of the 27 th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "394--401", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xi Wensi, Jesper Lind, and Eric Brill. 2004. Learning effective ranking functions for newsgroup search. In the proceedings of the 27 th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, Pages: 394-401", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Building Implicit Links from Content for Forum Search", |
|
"authors": [ |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ma", |
|
"middle": [], |
|
"last": "Wei-Ying", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "the proceedings of the 29 th International ACM SIGIR Conference on Research and Development in Information Retrieval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "300--307", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xu Gu, and Ma Wei-Ying. 2006. Building Implicit Links from Content for Forum Search. In the proceedings of the 29 th International ACM SIGIR Conference on Research and Development in Information Retrieval. Pages: 300-307.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "General Paradigm of Learning for Tweets Ranking" |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "PageRank Algorithm for Calculating Popularity Score for Users" |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Performance of Four Ranking Methods" |
|
}, |
|
"FIGREF4": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Figure 6. Advanced Greedy Feature Selection Algorithm" |
|
}, |
|
"FIGREF5": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Importance of Each Feature" |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Twitter Specific Features</td></tr><tr><td>Figure 2. A Tweet Example</td></tr><tr><td>URL & URL Count: Twitter allows users to</td></tr><tr><td>include URL as a supplement in their tweets.</td></tr><tr><td>The tweet in</td></tr></table>", |
|
"text": "" |
|
} |
|
} |
|
} |
|
} |