|
{ |
|
"paper_id": "D13-1016", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T16:41:44.074376Z" |
|
}, |
|
"title": "Learning Latent Word Representations for Domain Adaptation using Supervised Word Clustering", |
|
"authors": [ |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Temple University Philadelphia", |
|
"location": { |
|
"postCode": "19122", |
|
"region": "PA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Feipeng", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Temple University Philadelphia", |
|
"location": { |
|
"postCode": "19122", |
|
"region": "PA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yuhong", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Temple University Philadelphia", |
|
"location": { |
|
"postCode": "19122", |
|
"region": "PA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Domain adaptation has been popularly studied on exploiting labeled information from a source domain to learn a prediction model in a target domain. In this paper, we develop a novel representation learning approach to address domain adaptation for text classification with automatically induced discriminative latent features, which are generalizable across domains while informative to the prediction task. Specifically, we propose a hierarchical multinomial Naive Bayes model with latent variables to conduct supervised word clustering on labeled documents from both source and target domains, and then use the produced cluster distribution of each word as its latent feature representation for domain adaptation. We train this latent graphical model using a simple expectation-maximization (EM) algorithm. We empirically evaluate the proposed method with both cross-domain document categorization tasks on Reuters-21578 dataset and cross-domain sentiment classification tasks on Amazon product review dataset. The experimental results demonstrate that our proposed approach achieves superior performance compared with alternative methods.", |
|
"pdf_parse": { |
|
"paper_id": "D13-1016", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Domain adaptation has been popularly studied on exploiting labeled information from a source domain to learn a prediction model in a target domain. In this paper, we develop a novel representation learning approach to address domain adaptation for text classification with automatically induced discriminative latent features, which are generalizable across domains while informative to the prediction task. Specifically, we propose a hierarchical multinomial Naive Bayes model with latent variables to conduct supervised word clustering on labeled documents from both source and target domains, and then use the produced cluster distribution of each word as its latent feature representation for domain adaptation. We train this latent graphical model using a simple expectation-maximization (EM) algorithm. We empirically evaluate the proposed method with both cross-domain document categorization tasks on Reuters-21578 dataset and cross-domain sentiment classification tasks on Amazon product review dataset. The experimental results demonstrate that our proposed approach achieves superior performance compared with alternative methods.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Supervised prediction models typically require a large amount of labeled data for training. However, manually collecting data annotations is expensive in many real-world applications such as document categorization or sentiment classification. Recently, domain adaptation has been proposed to exploit existing labeled data in a related source domain to assist the prediction model training in the target domain (Ben-David et al., 2006; Daum\u00e9 III, 2007; Chen et al., 2012) . As an effective tool to reduce annotation effort, domain adaptation has achieved success in various crossdomain natural language processing (NLP) systems such as document categorization (Dai et al., 2007) , sentiment classification (Blitzer et al., 2007; Chen et al., 2012; Mejova and Srinivasan, 2012; Chen et al., 2011) , email spam detection (Jiang and Zhai, 2007) , and a number of other NLP tasks Daum\u00e9 III, 2007) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 411, |
|
"end": 435, |
|
"text": "(Ben-David et al., 2006;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 436, |
|
"end": 452, |
|
"text": "Daum\u00e9 III, 2007;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 453, |
|
"end": 471, |
|
"text": "Chen et al., 2012)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 660, |
|
"end": 678, |
|
"text": "(Dai et al., 2007)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 706, |
|
"end": 728, |
|
"text": "(Blitzer et al., 2007;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 729, |
|
"end": 747, |
|
"text": "Chen et al., 2012;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 748, |
|
"end": 776, |
|
"text": "Mejova and Srinivasan, 2012;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 777, |
|
"end": 795, |
|
"text": "Chen et al., 2011)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 819, |
|
"end": 841, |
|
"text": "(Jiang and Zhai, 2007)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 876, |
|
"end": 892, |
|
"text": "Daum\u00e9 III, 2007)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "One primary challenge of domain adaptation lies in the distribution divergence of the two domains in the original feature representation space. For example, documents about books may contain very different high-frequency words and discriminative words from documents about kitchen. A good crossdomain feature representation thus has been viewed as critical for bridging the domain divergence gap and facilitating domain adaptation in the NLP area (Ben-David et al., 2006 . Many domain adaptation works have been proposed to learn new cross-domain feature representations . Though demonstrated good performance on certain problems, these works mostly induce new feature representations in an unsupervised way, without taking the valuable label information into account.", |
|
"cite_spans": [ |
|
{ |
|
"start": 447, |
|
"end": 470, |
|
"text": "(Ben-David et al., 2006", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we present a novel supervised representation learning approach to discover a latent representation of words which is not only generalizable across domains but also informative to the classification task. Specifically, we propose a hier-archical multinomial Naive Bayes model with latent word cluster variables to perform supervised word clustering on labeled documents from both domains. Our model directly models the relationships between the observed document label variables and the latent word cluster variables. The induced cluster representation of each word thus will be informative for the classification labels, and hence discriminative for the target classification task. We train this directed graphical model using an expectationmaximization (EM) algorithm, which maximizes the log-likelihood of the observations of labeled documents. The induced cluster distribution of each word can then be used as its generalizable representation to construct new cluster-based representation of each document. For domain adaptation, we train a supervised learning system with labeled data from both domains in the new representation space and apply it to categorize test documents in the target domain. In order to evaluate the proposed technique, we conduct extensive experiments on the Reuters-21578 dataset for cross-domain document categorization and on Amazon product review dataset for cross-domain sentiment classification. The experimental results show the proposed approach can produce more effective representations than the comparison domain adaptation methods.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Domain adaptation has recently been popularly studied in natural language processing and a variety of domain adaptation approaches have been developed, including instance weighting adaptation methods and feature representation learning methods.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Instance weighting adaptation methods improve the transferability of a prediction model by training an instance weighted learning system. Much work in this category has been developed to address different weighting schemas (Sugiyama et al., 2007; Wan et al., 2011) . Jiang and Zhai (2007) applied instance weighting algorithms to tackle cross-domain NLP tasks and proposed to remove misleading source training data and assign less weights to labeled data from the source domain than labeled data from the target domain. Dai et al. (2007) proposed to increase the weights of mistakenly predicted instances from the target domain and decrease the weights of incor-rectly predicted instances from the source domain during an iterative training process.", |
|
"cite_spans": [ |
|
{ |
|
"start": 223, |
|
"end": 246, |
|
"text": "(Sugiyama et al., 2007;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 264, |
|
"text": "Wan et al., 2011)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 267, |
|
"end": 288, |
|
"text": "Jiang and Zhai (2007)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 520, |
|
"end": 537, |
|
"text": "Dai et al. (2007)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Representation learning methods bridge domain divergence either by differentiating domaininvariant features from domain-specific features (Daum\u00e9 III, 2007; Daum\u00e9 III et al., 2010; Finkel and Manning, 2009) or seeking generalizable latent features across domains (Blitzer et al., , 2007 Prettenhofer and Stein, 2010) . Daum\u00e9 III (2007) ; Daum\u00e9 III et al. (2010) proposed a simple heuristic feature replication method to represent common, source specific and target specific features. Finkel and Manning (2009) proposed a former version of it based on the use of a hierarchical Bayesian prior. proposed a coupled subspace learning method, which learns two projectors, one for each domain, to project the original features into domain-sharing and domainspecific features. proposed a structural correspondence learning (SCL) method to model the correlation between pivot features and non-pivot features. It uses the correlation to induce latent domain-invariant features as augmenting features for supervised learning. Extensions of this work include improving pivot feature selection (Blitzer et al., 2007; Prettenhofer and Stein, 2010) , and improving the correlation modeling between pivot and non-pivot features (Tan, 2009) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 138, |
|
"end": 155, |
|
"text": "(Daum\u00e9 III, 2007;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 156, |
|
"end": 179, |
|
"text": "Daum\u00e9 III et al., 2010;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 180, |
|
"end": 205, |
|
"text": "Finkel and Manning, 2009)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 262, |
|
"end": 285, |
|
"text": "(Blitzer et al., , 2007", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 286, |
|
"end": 315, |
|
"text": "Prettenhofer and Stein, 2010)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 318, |
|
"end": 334, |
|
"text": "Daum\u00e9 III (2007)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 337, |
|
"end": 360, |
|
"text": "Daum\u00e9 III et al. (2010)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 483, |
|
"end": 508, |
|
"text": "Finkel and Manning (2009)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 1081, |
|
"end": 1103, |
|
"text": "(Blitzer et al., 2007;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 1104, |
|
"end": 1133, |
|
"text": "Prettenhofer and Stein, 2010)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1212, |
|
"end": 1223, |
|
"text": "(Tan, 2009)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The proposed approach in this paper belongs to representation learning methods. However, unlike the unsupervised representation learning methods reviewed above, our proposed approach learns generalizable feature representations of words by exploiting data labels from the two domains.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this paper, we address domain adaptation for text classification. Given a source domain D S with plenty of labeled documents, and a target domain D T with a very few labeled documents, the task is to learn a classifier from the labeled documents in both domains, and use it to classify the unlabeled documents in the target domain. The documents in the two domains share the same universal vocabu-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Latent Word Representations using Supervised Word Clustering", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "lary V = {w 1 , w 2 , \u2022 \u2022 \u2022 , w n }", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Latent Word Representations using Supervised Word Clustering", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": ", but the word distributions in the two domains are typically different.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Latent Word Representations using Supervised Word Clustering", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Therefore, training the classification model directly from the original word feature space V may not generalize well in the target domain. We propose to address this problem by first learning a supervised mapping function \u03c6 : V \u2212\u2192 Z from the labeled documents in both domains, which maps the input word features in the large vocabulary set V into a low dimensional latent feature space Z. By filtering out unimportant details and noises, we expect the low dimensional mapping can capture the intrinsic structure of the input data that is discriminative for the classification task and generalizable across domains. In particular, we learn such a mapping function by conducting supervised word clustering on the labeled documents using a hierarchical multinomial Naive Bayes model. Below, we will first introduce this supervised word clustering model and then use the mapping function produced to transform documents in different domains into the same low-dimensional space for training cross domain text classification systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Latent Word Representations using Supervised Word Clustering", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Given all labeled documents from the source and target domains,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised Word Clustering", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "D = {(w t , y t )} T t=1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised Word Clustering", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": ", where the t-th labeled document is expressed as a bag of words, w t = {w t1 , w t2 , \u2022 \u2022 \u2022 , w tNt }, and its label value is y t \u2208 Y for Y = {1, \u2022 \u2022 \u2022 , K}, we propose to perform supervised word clustering by modeling the document-label pair distribution using a hierarchical multinomial Naive Bayes model given in Figure 1 , which has a middle layer of latent cluster variables.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 317, |
|
"end": 325, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Supervised Word Clustering", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In this plate model, the variable Y t denotes the observed class label for the t-th document, and all the label variables, {Y t } T t=1 , share the same multinomial distribution \u03b8 Y across documents. The latent variable C t,i denotes the cluster membership of the word W t,i , and all the cluster variables, {C t,i } T,Nt t=1,i=1 , share the same set of conditional distributions {\u03b8 C|y } K y=1 across documents and words. The variable W t,i denotes the i-th observed word in the t-th document, and all the word variables, {W t,i } T,Nt t=1,i=1 , share the same set of conditional distributions {\u03b8 W |c } m c=1 . Here we assume the number of word clusters is m. For simplicity, we do not show the distribution parameter variables in the Figure. Following the Markov property of directed graph- ical models, we can see that given the cluster variable values, the document label variables will be completely independent of the word variables. By learning this latent directed graphical model, we thus expect the important classification information expressed in the input observation words can be effectively summarized into the latent cluster variables. This latent model is much simpler than the supervised topic models (Blei and McAuliffe, 2007) , but we will show later that it can suitably produce a generalizable feature mapping function for domain adaptation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1220, |
|
"end": 1246, |
|
"text": "(Blei and McAuliffe, 2007)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 737, |
|
"end": 744, |
|
"text": "Figure.", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Supervised Word Clustering", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "To train the latent graphical model in Figure 1 on labeled documents D, we use a standard expectation-maximization (EM) algorithm (Dempster et al., 1977) to maximize the marginal loglikelihood of the observations:", |
|
"cite_spans": [ |
|
{ |
|
"start": 130, |
|
"end": 153, |
|
"text": "(Dempster et al., 1977)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised Word Clustering", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "LL(D; \u03b8) = t log P (y t , w t |\u03b8)", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Supervised Word Clustering", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The EM algorithm is an iterative procedure. In each iteration, it takes an alternative E-step and M-step to maximize the lower bound of the marginal loglikelihood function. In our experiments, we start from a random initialization of the model parameters and the latent variable values, and then perform iterative EM updates until converge to a local optimal solution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Supervised Word Clustering", |
|
"sec_num": "3.1" |
|
}, |
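A minimal sketch of the EM procedure described above, assuming NumPy and a toy bag-of-words corpus; the names (train_swc, docs, labels, n_clusters) are illustrative rather than the authors' implementation:

```python
import numpy as np

def train_swc(docs, labels, n_words, n_labels, n_clusters, n_iter=50, seed=0):
    """EM for the hierarchical multinomial Naive Bayes model with latent word clusters.

    docs   : list of documents, each a list of word ids (bag of words)
    labels : list of observed document label ids in {0, ..., n_labels - 1}
    Returns (theta_y, theta_c_given_y, theta_w_given_c).
    """
    rng = np.random.default_rng(seed)
    # The label distribution theta_Y is observed, so it is the empirical frequency.
    theta_y = np.bincount(labels, minlength=n_labels).astype(float)
    theta_y /= theta_y.sum()
    # Random initialization of the conditional multinomials theta_{C|y} and theta_{W|c}.
    theta_cy = rng.random((n_labels, n_clusters))
    theta_cy /= theta_cy.sum(axis=1, keepdims=True)
    theta_wc = rng.random((n_clusters, n_words))
    theta_wc /= theta_wc.sum(axis=1, keepdims=True)

    for _ in range(n_iter):
        cy_counts = np.zeros_like(theta_cy)
        wc_counts = np.zeros_like(theta_wc)
        for words, y in zip(docs, labels):
            for w in words:
                # E-step: posterior over the latent cluster of this word occurrence,
                # q(c) proportional to P(c|y) * P(w|c).
                q = theta_cy[y] * theta_wc[:, w]
                q /= q.sum()
                # Accumulate expected counts for the M-step.
                cy_counts[y] += q
                wc_counts[:, w] += q
        # M-step: re-estimate the shared multinomials (tiny smoothing for stability).
        theta_cy = (cy_counts + 1e-6) / (cy_counts + 1e-6).sum(axis=1, keepdims=True)
        theta_wc = (wc_counts + 1e-6) / (wc_counts + 1e-6).sum(axis=1, keepdims=True)
    return theta_y, theta_cy, theta_wc
```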
|
{ |
|
"text": "After training the supervised clustering model using EM algorithm, a set of local optimal model parameters \u03b8 * will be returned, which define a joint distribution over the three groups of variables in the directed graphical model. Next we define a supervised latent feature mapping function \u03c6 from this trained model to map each word w in the vocabulary V into a conditional distribution vector over the word cluster variable, such as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Induced Word Representation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u03c6(w) = [P (c = 1|w, \u03b8 * ), \u2022 \u2022 \u2022 , P (c = m|w, \u03b8 * )]. (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Induced Word Representation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The conditional distributions involved in this mapping function can be computed as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Induced Word Representation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P (c|w, \u03b8 * ) = y\u2208Y P (w|c, \u03b8 * )P (c|y, \u03b8 * )P (y|\u03b8 * ) P (w)", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Induced Word Representation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where P (w|c, \u03b8 * ) = \u03b8 * w|c P (c|y, \u03b8 * ) = \u03b8 * c|y and P (y|\u03b8 * ) = \u03b8 * y can be determined from the model parameters directly, and p(w) can be computed as the empirical frequency of word w among all the other words in all the training documents.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Induced Word Representation", |
|
"sec_num": "3.2" |
|
}, |
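A hedged sketch of Eq. (3), assuming the parameters come from the training routine sketched above; the names word_cluster_posteriors and p_w are illustrative:

```python
import numpy as np

def word_cluster_posteriors(theta_y, theta_cy, theta_wc, p_w):
    """Return an (n_words x n_clusters) matrix Pi with Pi[w, c] = P(c | w, theta*)."""
    # Numerator of Eq. (3): sum_y P(w|c) P(c|y) P(y) = P(w|c) * P(c),
    # where P(c) = sum_y P(y) P(c|y).
    p_c = theta_y @ theta_cy                   # marginal cluster distribution, shape (n_clusters,)
    numer = theta_wc.T * p_c                   # shape (n_words, n_clusters)
    pi = numer / p_w[:, None]                  # divide by the empirical word frequency P(w)
    return pi / pi.sum(axis=1, keepdims=True)  # renormalize rows for numerical safety
```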
|
{ |
|
"text": "We then define a transformation matrix \u03a0 \u2208 R n\u00d7m based on the mapping function \u03c6 defined in Eq. 2, such that \u03a0 i: = \u03c6(w i ) where w i is the i-th word in the vocabulary V. That is, each row of \u03a0 is the induced representation vector for one word. \u03a0 can be viewed as a soft word clustering matrix, and \u03a0 i,j denotes the probability of word w i belongs to the j-th cluster. Given the original document-word frequency matrix X tr \u2208 R T \u00d7n for the labeled training documents from the two domains, we can construct its representations Z tr \u2208 R T \u00d7m in the predictive latent clustering space by performing the following transformation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Induced Word Representation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "Z tr = X tr \u03a0.", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Induced Word Representation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Similarly, we can construct the new representation matrix Z ts for the test data X ts in the target domain.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Induced Word Representation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "We then train a classification model on the labeled data Z tr and apply it to classify the test data Z ts .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Induced Word Representation", |
|
"sec_num": "3.2" |
|
}, |
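A brief end-to-end sketch of the adaptation pipeline in this subsection together with the SVM setup of Section 4.1, assuming scikit-learn (whose SVC wraps LIBSVM) rather than the authors' exact tooling; X_tr, y_tr, X_ts and Pi are placeholders for the term-frequency matrices, training labels and the soft clustering matrix:

```python
import numpy as np
from sklearn.svm import SVC

def adapt_and_classify(X_tr, y_tr, X_ts, Pi):
    """Map documents through the soft word clustering and classify with a linear SVM."""
    Z_tr = X_tr @ Pi                 # Eq. (4): labeled documents from both domains
    Z_ts = X_ts @ Pi                 # unlabeled test documents from the target domain
    clf = SVC(kernel="linear")       # default-parameter linear SVM, as in Section 4.1
    clf.fit(Z_tr, y_tr)
    return clf.predict(Z_ts)
```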
|
{ |
|
"text": "We evaluate the proposed approach with experiments on cross domain document categorization of Reuters data and cross domain sentiment classification of Amazon product reviews, comparing to a number of baseline and existing domain adaptation methods. In this section, we report the experimental setting and results on these two data sets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We compared our proposed supervised word clustering approach (SWC) with the following five comparison methods for domain adaptation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approaches", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "(1) BOW: This is a bag-of-word baseline method, which trains a SVM classifier with labeled data from both domains using the original bag-ofword features.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approaches", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "(2) PLSA: This is an unsupervised word clustering method, which first applies the probabilistic latent semantic analysis (PLSA) (Hofmann, 1999) to obtain word clusterings with both labeled and unlabeled data from the two domains and then uses the soft word clusterings as augmenting features to train SVM classifiers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 143, |
|
"text": "(Hofmann, 1999)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approaches", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "(3) FDLDA: This is an alternative supervised word clustering method we built by training the Fast-Discriminative Latent Dirichlet Allocation model (Shan et al., 2009) with all labeled data from the two domains. After training the model, we used the learned topic distribution p(z) and the conditional word distributions p(w|z) to compute the conditional distribution over topics p(z|w) for each word as the soft clustering of the word. We then used the soft word clusterings as augmenting features to train SVM classifiers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 147, |
|
"end": 166, |
|
"text": "(Shan et al., 2009)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approaches", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "(4) SCL: This is the structural correspondence learning based domain adaptation method . It first induces generalizable features with all data from both domains by modeling the correlations between pivot features and non-pivot features, and then uses the produced generalizable features as augmenting features to train SVM classifiers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approaches", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "(5) CPSP: This is coupled subspace learning based domain adaptation method . It first learns two domain projectors using all data from the two domains by approximating multi-view dimensionality reduction, and then projects the labeled data to low dimensional latent feature space to train SVM Classifiers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approaches", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We used the LIBSVM package (Chang and Lin, 2011) with its default parameter setting to train linear SVM classifiers as the base classification model for all comparison methods. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 48, |
|
"text": "(Chang and Lin, 2011)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Approaches", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We used the popularly studied Reuters-21578 dataset (Dai et al., 2007) , which contains three crossdomain document categorization tasks, Orgs vs People, Orgs vs Places, People vs Places. The source and target domains of each task contain documents sampled from different non-overlapping subcategories. From example, the task of Orgs vs People assigns a document into one of the two top categories (Orgs, People), and the source domain documents and the target domain documents are sampled from different subcategories of Orgs and People. There are 1237 source documents and 1208 target documents for the task of Orgs vs People, 1016 source documents and 1043 target documents for the task of Orgs vs Places, and 1077 source documents and 1077 target documents for the task ofPeople vs Places. For each task, we built a unigram vocabulary based on all the documents from the two domains and represented each document as a feature vector containing term frequency values.", |
|
"cite_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 70, |
|
"text": "(Dai et al., 2007)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments on Reuters Data Set", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Document Categorization For each of the three cross-domain document categorization tasks on Reuters-21578 dataset, we used all the source documents as labeled training data while randomly selecting 100 target documents as labeled training data and setting the rest as unlabeled test data. For the BOW baseline method, we used the term-frequency features. The other five approaches are based on representation learning, and we selected the dimension size of the representation learning, i.e., the cluster number in our proposed approach, from {5, 10, 20, 50, 100} according to the average classification results over 3 runs on the task of Orgs vs People. The dimension sizes of the induced representations for the five approaches, PLSA, FDLDA, SCL, CPSP and SWC are 20, 20, 100, 100 and 20 respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Results for Cross-Domain", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "We then repeated each experiment 10 times on each task with different random selections of the 100 labeled target documents to compare the six comparison approaches. The average classification results in terms of accuracy and standard deviations are reported in Table 1 . We can see that by simply combining labeled documents from the two domains without adaptation, the BOW method performs poorly across the three tasks. The PLSA method outperforms the BOW method over all the three tasks with small improvements. The supervised word clustering method FDLDA, though performing slightly better than the unsupervised clustering method PLSA, produces poor performance comparing to the proposed SWC method. One possible reason is that the FDLDA model is not specialized for supervised word clustering, and it uses a logistic regression model to predict the labels from the word topics, while the final soft word clustering is computed from the learned distribution p(z) and p(w|z). That is, in the FDLDA model the labels only influence the word clusterings indirectly and hence its influence can be much smaller than the influence of labels as direct parent variables of the word cluster variables in the SWC model. The two domain adaptation approaches, SCL and CPSP, both produce significant improvements over BOW, PLSA and FDLDA on the two tasks of Orgs vs People and Orgs vs Places, while the CPSP method produces slightly inferior performance than PLSA and FDLDA on the task of People vs Places. The proposed method SWC on the other hand consistently and significantly outperforms all the other comparison methods across all the three tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 262, |
|
"end": 269, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results for Cross-Domain", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "We also studied the sensitivity of the proposed approach with respect to the number of clusters, i.e., the dimension size of the learned representation. We experimented with a set of different values m \u2208 {5, 10, 20, 50, 100} as the number of clusters. For each m value, we used the same experimental setting as above and repeated the experiments 10 times to obtain the average comparison results. The classification accuracy results on the three tasks are reported in Figure 2 . We can see that the proposed method is not very sensitive to the number of clusters, across the set of increasing values we considered, and its performance becomes very stable after the cluster number reaches 20.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 468, |
|
"end": 476, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results for Cross-Domain", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "We next conducted experiments to compare the six approaches by varying the amount of the labeled data from the target domain. We tested a set of different values, s \u2208 {100, 200, 300, 400, 500}, as the number of labeled documents from the target domain. For each different s value, we repeated the experiments 10 times by randomly selecting s labeled documents from the target domain using the same experimental setting as before. The comparison results across the set of s values are plotted in Figure 3 . We can see that in general the performance of each method improves with the increase of the number of labeled documents from the target domain. The proposed method SWC and the domain adaptation method SCL clearly outperform the other four methods. Moreover, the proposed method SWC not only maintains consistent and significant advantages over all other methods across the range of different s values, its performance with 300 labeled target instances is even superior to the other methods with 500 labeled target instances. All these results suggest the proposed approach is very effective for adapting data across domains.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 495, |
|
"end": 503, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Document Categorization Accuracy vs Label Complexity in Target Domain", |
|
"sec_num": "4.2.2" |
|
}, |
|
{ |
|
"text": "We conducted cross-domain sentiment classification on the widely used Amazon product reviews (Blitzer et al., 2007) , which contains review documents distributed in four categories: Books(B), DVD(D), Electronics(E) and Kitchen(K). Each category contains 1000 positive and 1000 negative reviews. We constructed 12 cross-domain sentiment classification tasks, one for each source-target domain pair, B2D, B2E, B2K, D2B, D2E, D2K, E2B, E2D, E2K, K2B, K2D, K2E . For example, the task B2D means that we use the Books reviews as the source domain and the DVD reviews as the target domain. For each pair of domains, we built a vocabulary with both unigram and bigram features extracted from all the documents of the two domains, and then represented each review document as a feature vector with term frequency values.", |
|
"cite_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 115, |
|
"text": "(Blitzer et al., 2007)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 200, |
|
"end": 214, |
|
"text": "Electronics(E)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 403, |
|
"end": 456, |
|
"text": "B2E, B2K, D2B, D2E, D2K, E2B, E2D, E2K, K2B, K2D, K2E", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments on Amazon Product Reviews", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "For each of the twelve cross-domain sentiment classification tasks on Amazon product reviews, we used all the source reviews as labeled data and randomly selected 100 target reviews as labeled data while treating the rest as unlabeled test data. For the baseline method BOW, we used binary indicator values as features, which has been shown to work better than the term-frequency features for sentiment classification tasks (Pang et al., 2002; Na et al., 2004) . For all the other representation learning based methods, we selected the dimension size of learned representation according to the average results over 3 runs on the B2D task. The dimension sizes selected for the methods PLSA, FDLDA, SCL, CPSP, and SWC are 10, 50, 50, 100 and 10, respectively. 1 We then repeated each experiment 10 times based on different random selections of 100 labeled reviews from the target domain to compare the six methods on the twelve tasks. The average classification results are reported in Table 2 . We can see that the PLSA and FDLDA methods do not show much advantage over the baseline method BOW. CPSP performs better than PLSA and BOW on many of the twelve tasks, but with small advantages, while SCL outperforms CPSP on most tasks. The proposed method SWC however demonstrates a clear advantage over all the other methods and produces the best results on all the twelve tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 424, |
|
"end": 443, |
|
"text": "(Pang et al., 2002;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 444, |
|
"end": 460, |
|
"text": "Na et al., 2004)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 984, |
|
"end": 991, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Results for Cross-Domain Sentiment Classification", |
|
"sec_num": "4.3.1" |
|
}, |
|
{ |
|
"text": "We also conducted sensitivity analysis over the proposed approach regarding the number of clusters on the twelve cross-domain sentiment classification tasks, by testing a set of cluster number values m = {5, 10, 20, 50, 100}. The average results are plotted in Figure 5 . Similar as before, we can see the proposed approach has stable performance across the set of different cluster numbers. Moreover, these results also clearly show that domain adaptation is not a symmetric process, as we can see it is easier to conduct domain adaptation from the source domain Books to the target domain Kitchen (with an accuracy around 82%), but it is more difficult to make domain adaptation from the source domain Kitchen to the target domain Books (with an ac- curacy around 75%). It also shows that the degree of relatedness of the two domains is an important factor for the effectiveness of knowledge adaptation. For example, one can see that it is much easier to conduct domain adaptation from Kitchen to Electronics (with an accuracy around 84%) than from Kitchen to Books (with an accuracy around 75%), as Kitchen is more closely related to Electronics than Books.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 261, |
|
"end": 269, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "100", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Similar as before, we tested the proposed approach using a set of different values s \u2208 {100, 200, 300, 400, 500} as the number of labeled reviews from the target domain. For each given s value, we conducted the comparison experiments using the same setting above. The average results are reported in Figure 4 . We can see that the performance of each approach in general improves with the increase of the number of labeled reviews from the target domain. The proposed approach maintains a clear advantage over all the other methods on all the twelve tasks across different label complexities. All those empirical results demonstrate the effectiveness of the proposed approach for cross-domain sentiment classification.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 300, |
|
"end": 308, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sentiment Classification Accuracy vs Label Complexity in Target Domain", |
|
"sec_num": "4.3.2" |
|
}, |
|
{ |
|
"text": "Finally, we would also like to demonstrate the hard word clusters produced by the proposed supervised word clustering method. We assign a word into the cluster it most likely belongs to according to its soft clustering representation, such as c * = arg max c P (c|w, \u03b8 * ). Table 3 presents the top repre-sentative words (i.e., the most frequent words) of the 10 word clusters produced on the task of B2K. We can see that the first three clusters (C1, C2, and C3) contain words with positive sentiment polarity in different degrees. The two clusters (C4 and C5) contain words used to express the degree of opinions. The next four clusters (C6, C7, C8, and C9) contain content words related to Books or Kitchen. The last cluster (C10) contains words of negative sentiment polarity. These results demonstrate that the proposed supervised word clustering can produce task meaningful word clusters and hence label-informative latent features, which justifies its effectiveness.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 274, |
|
"end": 281, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Illustration of the Word Clusters", |
|
"sec_num": "4.3.3" |
|
}, |
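A small illustrative sketch of the hard assignment used for this analysis, assuming the soft clustering matrix Pi from the earlier sketches; hard_clusters and vocab are illustrative names:

```python
import numpy as np

def hard_clusters(Pi, vocab):
    """Group vocabulary words by their most probable cluster, c* = argmax_c P(c|w)."""
    assign = Pi.argmax(axis=1)
    return {c: [w for w, a in zip(vocab, assign) if a == c]
            for c in range(Pi.shape[1])}
```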
|
{ |
|
"text": "In this paper, we proposed a novel supervised representation learning method to tackle domain adaptation by inducing predictive latent features based on supervised word clustering. With the soft word clustering produced, we can transform all documents from the two domains into a unified lowdimensional feature space for effective training of cross-domain NLP prediction system. We conducted extensive experiments on cross-domain document categorization tasks on Reuters-21578 dataset and cross-domain sentiment classification tasks on Amazon product reviews. Our empirical results demonstrated the efficacy of the proposed approach. Table 3 : Clustering illustration for the task of B2K on Amazon product reviews.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 634, |
|
"end": 641, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "50 and 100 are also the suggested values for SCL(Blitzer et al., 2007) and CPSP respectively on this cross-domain sentiment classification dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Analysis of representations for domain adaptakind of basically is only half of first of as if and still anything about have some C5 ever may still going maybe either at least of all totally sort of are very C6 life work machine size design bottom business picture hand hook gas sink turner shelves C7 way coffee pan keep cooking maker heat job working children handle meet core wine C8 people us world come fact man place stars during example went short bathroom apple price C9 pot friends daily light fire tells knew holds keep the continued meal hooked silver wind C10 disappointed waste unfortunately worse poorly sorry weak not worth stupid fails awful useless tion", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Ben-David", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Blitzer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Crammer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Advances in Neural Information Processing Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Ben-David, J. Blitzer, K. Crammer, and F. Pereira. Analysis of representations for domain adapta- kind of basically is only half of first of as if and still anything about have some C5 ever may still going maybe either at least of all totally sort of are very C6 life work machine size design bottom business picture hand hook gas sink turner shelves C7 way coffee pan keep cooking maker heat job working children handle meet core wine C8 people us world come fact man place stars during example went short bathroom apple price C9 pot friends daily light fire tells knew holds keep the continued meal hooked silver wind C10 disappointed waste unfortunately worse poorly sorry weak not worth stupid fails awful useless tion. In Advances in Neural Information Process- ing Systems (NIPS), 2006.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "A theory of learning from different domains", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Ben-David", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Blitzer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Crammer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Kulesza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Vaughan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Machine Learng", |
|
"volume": "79", |
|
"issue": "1-2", |
|
"pages": "151--175", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Ben-David, J. Blitzer, K. Crammer, A. Kulesza, F. Pereira, and J. Vaughan. A theory of learning from different domains. Machine Learng, 79(1- 2):151-175, 2010.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Supervised topic models", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Blei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Mcauliffe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Advances in Neural Information Processing Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Blei and J. McAuliffe. Supervised topic mod- els. In Advances in Neural Information Process- ing Systems (NIPS), 2007.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Domain adaptation with structural correspondence learning", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Blitzer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proc. of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Blitzer, R. McDonald, and F. Pereira. Domain adaptation with structural correspondence learn- ing. In Proc. of the Conference on Empir- ical Methods in Natural Language Processing (EMNLP), 2006.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Biographies, bollywood, boom-boxes and blenders: Domain adaptation for sentiment classification", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Blitzer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Dredze", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of the Annual Meeting of the Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Blitzer, M. Dredze, and F. Pereira. Biographies, bollywood, boom-boxes and blenders: Domain adaptation for sentiment classification. In Proc. of the Annual Meeting of the Association for Com- putational Linguistics (ACL), 2007.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Domain adaptation with coupled subspaces", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Blitzer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Foster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Kakade", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of the International Conference on Artificial Intelligence and Statistics (AISTATS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Blitzer, D. Foster, and S. Kakade. Domain adapta- tion with coupled subspaces. In Proc. of the Inter- national Conference on Artificial Intelligence and Statistics (AISTATS), 2011.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "LIBSVM: A library for support vector machines", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "ACM Transactions on Intelligent Systems and Technology", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Chang and C. Lin. LIBSVM: A library for sup- port vector machines. ACM Transactions on In- telligent Systems and Technology, 2:27:1-27:27, 2011.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Co-training for domain adaptation", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Weinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Blitzer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Advances in Neural Inform. Process. Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Chen, K. Weinberger, and J. Blitzer. Co-training for domain adaptation. In Advances in Neural In- form. Process. Systems (NIPS), 2011.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Marginalized denoising autoencoders for domain adaptation", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Weinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Sha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. of the International Conf. on Machine Learning (ICML)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Chen, Z. Xu, K. Weinberger, and F. Sha. Marginalized denoising autoencoders for domain adaptation. In Proc. of the International Conf. on Machine Learning (ICML), 2012.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Boosting for transfer learning", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of the International Conf. on Machine Learning (ICML)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "W. Dai, Q. Yang, G. Xue, and Y. Yu. Boosting for transfer learning. In Proc. of the International Conf. on Machine Learning (ICML), 2007.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Frustratingly easy domain adaptation", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of the Annual Meeting of the Association for Comput. Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. Daum\u00e9 III. Frustratingly easy domain adaptation. In Proc. of the Annual Meeting of the Association for Comput. Linguistics (ACL), 2007.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Coregularization based semi-supervised domain adaptation", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Daum\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Saha", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Advances in Neural Information Processing Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. Daum\u00e9 III, A. Kumar, and A. Saha. Co- regularization based semi-supervised domain adaptation. In Advances in Neural Information Processing Systems (NIPS), 2010.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Maximum likelihood from incomplete data via the em algorithm", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Dempster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Laird", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Rubin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1977, |
|
"venue": "Journal of the royal statistical society", |
|
"volume": "39", |
|
"issue": "1", |
|
"pages": "1--38", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Dempster, N. Laird, and D. Rubin. Maximum likelihood from incomplete data via the em algo- rithm. Journal of the royal statistical society, 39 (1):1-38, 1977.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Hierarchical bayesian domain adaptation", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Finkel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of the Conference of the North American Chapter", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Finkel and C. Manning. Hierarchical bayesian domain adaptation. In Proc. of the Conference of the North American Chapter of the Association for Computational Linguistics (NAACL), 2009.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Probabilistic latent semantic analysis", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Hofmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proc. of the Conference on Uncertainty in Artificial Intelligence (UAI)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "T. Hofmann. Probabilistic latent semantic analysis. In Proc. of the Conference on Uncertainty in Ar- tificial Intelligence (UAI), 1999.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Instance weighting for domain adaptation in nlp", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Zhai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proc. of the Annual Meeting of the Association for Computational Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Jiang and C. Zhai. Instance weighting for domain adaptation in nlp. In Proc. of the Annual Meeting of the Association for Computational Linguistics (ACL), 2007.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Crossing media streams with sentiment: Domain adaptation in blogs, reviews and twitter", |
|
"authors": [ |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Mejova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Srinivasan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. of the International AAAI Conference on Weblogs and Social Media (ICWSM)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Y. Mejova and P. Srinivasan. Crossing media streams with sentiment: Domain adaptation in blogs, reviews and twitter. In Proc. of the Inter- national AAAI Conference on Weblogs and Social Media (ICWSM), 2012.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Effectiveness of simple linguistic processing in automatic sentiment classification of product reviews", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Na", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Sui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Khoo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Chan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proc. of the Conf. of the Inter. Society for Knowledge Organization", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Na, H. Sui, C. Khoo, S. Chan, and Y. Zhou. Effec- tiveness of simple linguistic processing in auto- matic sentiment classification of product reviews. In Proc. of the Conf. of the Inter. Society for Knowledge Organization, 2004.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Thumbs up?: sentiment classification using machine learning techniques", |
|
"authors": [ |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Pang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Vaithyanathan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proc. of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "B. Pang, L. Lee, and S. Vaithyanathan. Thumbs up?: sentiment classification using machine learn- ing techniques. In Proc. of the Conference on Em- pirical Methods in Natural Language Processing (EMNLP), 2002.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Cross-language text classification using structural correspondence learning", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Prettenhofer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Stein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Proc. of the Annual Meeting of the Association for Comput. Linguistics (ACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Prettenhofer and B. Stein. Cross-language text classification using structural correspondence learning. In Proc. of the Annual Meeting of the Association for Comput. Linguistics (ACL), 2010.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Discriminative mixed-membership models", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Shan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Banerjee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Oza", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of the IEEE Inter. Conference on Data Mining (ICDM)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. Shan, A. Banerjee, and N. Oza. Discriminative mixed-membership models. In Proc. of the IEEE Inter. Conference on Data Mining (ICDM), 2009.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Direct importance estimation with model selection and its application to covariate shift adaptation", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Sugiyama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Nakajima", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Kashima", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "B\u00fcnau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Kawanabe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Advances in Neural Information Processing Systems (NIPS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Sugiyama, S. Nakajima, H. Kashima, P. von B\u00fcnau, and M. Kawanabe. Direct importance es- timation with model selection and its application to covariate shift adaptation. In Advances in Neu- ral Information Processing Systems (NIPS), 2007.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Improving scl model for sentiment-transfer learning", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proc. of the Conference of the North American Chapter of the Association for Computational Linguistics (NAACL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Tan. Improving scl model for sentiment-transfer learning. In Proc. of the Conference of the North American Chapter of the Association for Compu- tational Linguistics (NAACL), 2009.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Bi-weighting domain adaptation for cross-language text classification", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. of the International Joint Conference on Artificial Intelligence (IJCAI)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "C. Wan, R. Pan, and J. Li. Bi-weighting domain adaptation for cross-language text classification. In Proc. of the International Joint Conference on Artificial Intelligence (IJCAI), 2011.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Supervised word clustering model." |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Sensitivity analysis of the proposed approach w.r.t. the number of clusters for the three cross-domain document categorization tasks on Reuters-21578 dataset." |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Average classification results for three cross-domain document categorization tasks on Reuters-21578 dataset by varying the amount of labeled training data from the target domain." |
|
}, |
|
"FIGREF4": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Average results (accuracy\u00b1standard deviation) for the 12 cross-domain sentiment classification tasks on Amazon product reviews with different numbers of labeled training data from the target domain." |
|
}, |
|
"FIGREF5": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Sensitivity analysis of the proposed approach wrt the number of clusters for the twelve cross-domain sentiment classification tasks. Each figure shows experimental results for three tasks with the same source domain." |
|
}, |
|
"TABREF0": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Task</td><td>BOW</td><td>PLSA</td><td>FDLDA</td><td>SCL</td><td>CPSP</td><td>SWC</td></tr><tr><td>Orgs vs</td><td/><td/><td/><td/><td/><td/></tr></table>", |
|
"text": "Average results (accuracy\u00b1standard deviation) for three cross-domain document categorization tasks on Reuters-21578 dataset. People 76.07\u00b10.39 76.50\u00b10.10 76.95\u00b10.23 78.71\u00b10.20 77.58\u00b10.21 81.27\u00b10.23 Orgs vs Places 73.88\u00b10.58 74.68\u00b10.20 74.87\u00b10.29 76.71\u00b10.23 75.76\u00b10.28 78.33\u00b10.64 People vs Places 61.80\u00b10.44 63.36\u00b10.40 63.46\u00b10.40 64.65\u00b10.40 62.73\u00b10.53" |
|
}, |
|
"TABREF2": { |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Task</td><td>BOW</td><td>PLSA</td><td>FDLDA</td><td>SCL</td><td>CPSP</td><td>SWC</td></tr><tr><td colspan=\"2\">B2D 76.</td><td/><td/><td/><td/><td/></tr></table>", |
|
"text": "Average results (accuracy\u00b1standard deviation) for twelve cross-domain sentiment classification tasks on Amazon product reviews. 58\u00b10.14 76.01\u00b10.10 75.95\u00b10.16 80.17\u00b10.16 77.53\u00b10.14 81.66\u00b10.23 B2K 75.48\u00b10.34 74.68\u00b10.20 74.87\u00b10.15 78.13\u00b10.21 76.38\u00b10.15 82.26\u00b10.20 B2E 72.92\u00b10.37 73.36\u00b10.19 73.46\u00b10.21 74.79\u00b10.19 73.31\u00b10.17 77.04\u00b10.64 D2B 74.10\u00b10.29 74.04\u00b10.20 74.08\u00b10.18 78.73\u00b10.23 77.07\u00b10.15 79.95\u00b10.25 D2K 75.19\u00b10.33 75.37\u00b10.31 75.44\u00b10.31 76.98\u00b10.19 76.77\u00b10.10 82.13\u00b10.20 D2E 73.01\u00b10.34 74.21\u00b10.30 74.09\u00b10.31 75.69\u00b10.25 73.83\u00b10.21 76.98\u00b10.54 E2B 67.58\u00b10.24 68.48\u00b10.15 68.44\u00b10.17 70.21\u00b10.16 70.47\u00b10.16 72.11\u00b10.46 E2D 70.15\u00b10.27 70.16\u00b10.23 70.06\u00b10.22 72.83\u00b10.25 71.76\u00b10.20 73.81\u00b10.59 E2K 82.23\u00b10.12 82.24\u00b10.18 82.26\u00b10.19 84.69\u00b10.11 81.31\u00b10.14 85.33\u00b10.16 K2B 70.67\u00b10.18 72.18\u00b10.21 72.18\u00b10.16 73.91\u00b10.21 72.18\u00b10.19 75.78\u00b10.55 K2D 71.51\u00b10.26 72.00\u00b10.18 72.05\u00b10.19 74.82\u00b10.26 72.59\u00b10.18 76.88\u00b10.49 K2E 80.81\u00b10.12 80.39\u00b10.18 80.46\u00b10.18 82.96\u00b10.11 80.81\u00b10.14" |
|
} |
|
} |
|
} |
|
} |