|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T02:13:10.643251Z" |
|
}, |
|
"title": "Unsupervised Aspect-Level Sentiment Controllable Style Transfer", |
|
"authors": [ |
|
{ |
|
"first": "Zishan", |
|
"middle": [], |
|
"last": "Ahmad", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology Patna Bihar", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Asif", |
|
"middle": [], |
|
"last": "Ekbal", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology Patna Bihar", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharyya", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology Patna Bihar", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Unsupervised style transfer in text has previously been explored through the sentiment transfer task. The task entails inverting the overall sentiment polarity in a given input sentence, while preserving its content. From the Aspect-Based Sentiment Analysis (ABSA) task, we know that multiple sentiment polarities can often be present together in a sentence with multiple aspects. In this paper, the task of aspect-level sentiment controllable style transfer is introduced, where each of the aspect-level sentiments can individually be controlled at the output. To achieve this goal, a BERT-based encoder-decoder architecture with saliency weighted polarity injection is proposed, with unsupervised training strategies, such as ABSA masked-languagemodelling. Through both automatic and manual evaluation, we show that the system is successful in controlling aspect-level sentiments.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Unsupervised style transfer in text has previously been explored through the sentiment transfer task. The task entails inverting the overall sentiment polarity in a given input sentence, while preserving its content. From the Aspect-Based Sentiment Analysis (ABSA) task, we know that multiple sentiment polarities can often be present together in a sentence with multiple aspects. In this paper, the task of aspect-level sentiment controllable style transfer is introduced, where each of the aspect-level sentiments can individually be controlled at the output. To achieve this goal, a BERT-based encoder-decoder architecture with saliency weighted polarity injection is proposed, with unsupervised training strategies, such as ABSA masked-languagemodelling. Through both automatic and manual evaluation, we show that the system is successful in controlling aspect-level sentiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "With a rapid increase in the quality of generated text, due to the rise of neural text generation models (Kalchbrenner and Blunsom, 2013; Cho et al., 2014; Sutskever et al., 2014; Vaswani et al., 2017) , controllable text generation is quickly becoming the next frontier in the field of text generation. Controllable text generation is the task of generating realistic sentences whose attributes can be controlled. The attributes to control can be: (i). Stylistic: Like politeness, sentiment, formality etc, (ii). Content: Like information, entities, keywords etc. or (iii).", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 137, |
|
"text": "(Kalchbrenner and Blunsom, 2013;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 155, |
|
"text": "Cho et al., 2014;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 156, |
|
"end": 179, |
|
"text": "Sutskever et al., 2014;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 180, |
|
"end": 201, |
|
"text": "Vaswani et al., 2017)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
|
{ |
|
"text": "Controlling sentence level polarity has been well explored as a style transfer task. used unsupervised machine translation techniques for polarity transfer in sentences. Yang et al. ( ", |
|
"cite_spans": [ |
|
{ |
|
"start": 170, |
|
"end": 183, |
|
"text": "Yang et al. (", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{

"text": "Figure 1: An example of the proposed aspect-level sentiment style transfer task. Input: 'The service was speedy and the salads were great, but the chicken was bland and stale.' Output: 'The service was slow, but the salads were great and the chicken was tasty and fresh.'",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},

{

"text": "Li et al. (2018a) proposed a simpler method where they deleted the attribute markers and devised a method to replace or generate the target attribute key-phrases in the sentence.",

"cite_spans": [

{

"start": 0,

"end": 17,

"text": "Li et al. (2018a)",

"ref_id": null

}

],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},
|
{ |
|
"text": "In this paper we explore a more fine-grained style transfer task, where each aspect's polarities can be changed individually. Recent interest in Aspect-Based Sentiment Analysis (ABSA) (Pontiki et al., 2014) has shown that sentiment information can vary within a sentence, with differing sentiments expressed towards different aspect terms of target entities (e.g. 'food', 'service' in a restaurant domain). We introduce the task of aspect-level sentiment transfer -the task of rewriting sentences to transfer them from a given set of aspect-term polarities (such as 'positive sentiment' towards the service of a restaurant and a 'positive sentiment' towards the taste of the food) to a different set of aspect-term polarities (such as 'negative sentiment' towards the service of a restaurant and a 'positive' sentiment towards the taste of the food). This is a more challenging task than regular style transfer as the style attributes here are not the overall attributes for the whole sentence, but are localized to specific parts of the sentence, and multiple opposing at-tributes could be present within the same sentence. The target of the transformation made needs to be localized and the other content expressed in the rest of the sentence need to be preserved at the output. An example of the task is shown in Figure 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 184, |
|
"end": 206, |
|
"text": "(Pontiki et al., 2014)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1316, |
|
"end": 1324, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "2018) \u00a7 equal contribution", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For successful manipulation of the generated sentences, a few challenges need to be addressed: (i). The model should learn to associate the right polarities with the right aspects. (ii). The model needs to be able to correctly process the aspectpolarity query and accordingly delete, replace and generate text sequence to satisfy the query. (iii). The polarities of the aspects not in the query should not be affected. (iv). The non-attribute content and fluency of the text should be preserved.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "2018) \u00a7 equal contribution", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We explore this task in an unsupervised setting (as is common with most style-transfer tasks due to the lack of an aligned parallel corpus) using only monolingual unaligned corpora. In this work, a novel encoder-decoder architecture is proposed to perform unsupervised aspect-level sentiment transfer. A BERT (Devlin et al., 2019) based encoder is used that is trained to understand aspect-specific polarity information. We also propose using a 'polarity injection' method, where saliency-weighted aspect-specific polarity information is added to the hidden representations from the encoder to complete the query for the decoder.", |
|
"cite_spans": [ |
|
{ |
|
"start": 309, |
|
"end": 330, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "2018) \u00a7 equal contribution", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The Aspect-Based Sentiment Analysis (ABSA) task shows that differing sentiments can be present within the same sentence, localized to different entities or parts of the text. The notion of styles in natural language can be used to refer to the attributes, such as sentiment, formality in content, emotion, sarcasm, etc. Similar to the sentiment, these other attributes can also be present localized to different entities taking differing values at each location. If we consider the style 'emotion' with the example \"Although Alice infuriates me with her prattle and Bob scares me, I am quite happy about how things are turning out.\" -A single piece of text (such as a single sentence) can express an emotion, such as 'happiness' about an event while expressing 'fear' towards some entity and 'anger' towards a second entity. This shows that style transfer in language needs a more nuanced understanding. Especially when generating larger pieces of text, multiple such styles could intermingle, and differing styles can often be present together when discussing different topics and entities. Our work intends to take the first step towards a more controllable form of finegrained style transfer with the task of aspect-level sentiment style transfer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Motivation", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "In this section we present an overview of the related literature.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To the best of our knowledge, our current work is the first to tackle aspect-level sentiment transfer. Most of the previous works involving sentiment transfer (Li et al., 2018b; Yang et al., 2018; Shen et al., 2017; Xu et al., 2018; Prabhumoye et al., 2018; Wu et al., 2019) consider the style that is present throughout the sentence and seek to transfer only the overall sentiment polarities expressed. Tian et al. (2018) proposed a new training objective for content preservation during style transfer. They used Part-of-Speech (PoS) tagging to collect nouns at inputs, and expect them to be present at the output for content preservation. To achieve this, they proposed a PoS preservation constraint and 'Content Conditional Language Modelling'. They tested their system on sentiment style transfer task. Wang et al. (2019) proposed a method that can also control the degree of polarity transfer in a sentence with multiple aspect categories present in it. Unlike their task which deals with predefined aspect categories, our task deals with opinion target expressions. Aspect categories are coarse entities that are few in number and predefined for a certain domain, while aspect-terms or opinion target expressions are fine-grained entities that are present in the text. They also did not investigate selectively transferring the polarity over a subset of aspects with multiple differing polarities at the output and only invert the overall polarity expressed by the sentence. Our method works across thousands of unique opinion target expressions ( Table 1 shows the number unique target aspects present in each of our datasets).Our method also does not need these to be predefined, and so could be used to control the polarities of previously unseen target expressions as well.", |
|
"cite_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 177, |
|
"text": "(Li et al., 2018b;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 178, |
|
"end": 196, |
|
"text": "Yang et al., 2018;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 197, |
|
"end": 215, |
|
"text": "Shen et al., 2017;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 216, |
|
"end": 232, |
|
"text": "Xu et al., 2018;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 233, |
|
"end": 257, |
|
"text": "Prabhumoye et al., 2018;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 258, |
|
"end": 274, |
|
"text": "Wu et al., 2019)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 404, |
|
"end": 422, |
|
"text": "Tian et al. (2018)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 808, |
|
"end": 826, |
|
"text": "Wang et al. (2019)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1555, |
|
"end": 1562, |
|
"text": "Table 1", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sentiment Transfer", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Previous works in unsupervised neural machine translation (Artetxe et al., 2017) and unsupervised style transfer have shown that, with only monolingual data, using a denoising autoencoder loss and an on-the-fly back-translation loss can be very successful in achieving transfer. Both of these training steps are used as part of our method to train the network in an unsupervised fashion.", |
|
"cite_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 80, |
|
"text": "(Artetxe et al., 2017)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised Machine Translation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Lai et al. 2019proposed an adversarial training mechanism for Gated Recurrent Unit (GRU) based encoder-decoder model for sentiment polarity transfer and multiple-attribute transfer tasks. They split the training mechanism of their model into two phases, viz. (i). Style transfer phase and (ii). Reconstruction phase. Pryzant et al. (2020) proposed a method to remove subjective bias in the sentences. They proposed adding a 'join-embedding' weighted by a word subjective-bias probability to automatically edit the hidden states from the encoder. We adapt this 'join-embedding' method to inject weighted polarities into our encoder outputs as described in Section 3.5.", |
|
"cite_spans": [ |
|
{ |
|
"start": 317, |
|
"end": 338, |
|
"text": "Pryzant et al. (2020)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Natural Language Generation Architecture", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Aspect based sentiment analysis (ABSA) has been explored in a series of SemEval shared tasks. The task consists of both aspect term extraction and aspect sentiment prediction. Tay et al. (2018) proposed 'Aspect Fusion LSTM' to attend on the associative relationships between sentence words and aspect words to classify aspect polarities. Xu et al. (2019) proposed BERT based models for aspect term extraction and aspect-polarity classification tasks. We build similar BERT based aspect termextraction and aspect-polarity classification models and use them to label Yelp reviews dataset. This dataset is then used for aspect-level sentiment controllable style transfer task in this paper.", |
|
"cite_spans": [ |
|
{ |
|
"start": 176, |
|
"end": 193, |
|
"text": "Tay et al. (2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 338, |
|
"end": 354, |
|
"text": "Xu et al. (2019)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Aspect Based Sentiment Analysis", |
|
"sec_num": "2.4" |
|
}, |
|
{

"text": "Let us assume we have access to a corpus of labelled sentences $D = \\{(x_1, l_1), \\ldots, (x_n, l_n)\\}$, where $x_i$ is a sentence and $l_i = \\{(t_{i1}, p_{i1}), \\ldots, (t_{im}, p_{im})\\}$.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Problem Statement",

"sec_num": "3.1"

},
|
{ |
|
"text": "Here, t ij is an aspect-target or 'Opinion Target Expression' (Pontiki et al., 2014) , and p ij is the corresponding sentiment-polarity expressed towards t ij , where p ij \u2208 {\"positive\", \"negative\"}. A model is to be learned that takes as input (x, l tgt ) where x is the source sentence expressing some aspectpolarity set l src , and outputs y that retain all the non-polarity content in x while expressing the aspect-polarity set l tgt . This is to be performed in an unsupervised manner, where we do not assume access to an aligned set of parallel sentences with the same content but different aspect-polarities.", |
|
"cite_spans": [ |
|
{ |
|
"start": 62, |
|
"end": 84, |
|
"text": "(Pontiki et al., 2014)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "3.1" |
|
}, |
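{

"text": "For concreteness, the following is a minimal Python sketch of the data layout described above; the class and field names are our own illustration, not from any released code.\n\nfrom dataclasses import dataclass\nfrom typing import List, Tuple\n\nPolarity = str  # 'positive' or 'negative'\n\n@dataclass\nclass LabelledSentence:\n    text: str                            # x_i: the sentence\n    aspects: List[Tuple[str, Polarity]]  # l_i: (aspect-term t_ij, polarity p_ij) pairs\n\n# The running example from Figure 1:\nexample = LabelledSentence(\n    text='The service was speedy and the salads were great, but the chicken was bland and stale.',\n    aspects=[('service', 'positive'), ('salads', 'positive'), ('chicken', 'negative')],\n)\n\n# A transfer query keeps the sentence and supplies the desired polarities l_tgt:\nl_tgt = [('service', 'negative'), ('salads', 'positive'), ('chicken', 'positive')]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Problem Statement",

"sec_num": null

},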
|
{ |
|
"text": "The overall architecture consists of a Transformer (Vaswani et al., 2017) encoder-decoder neural network, where the encoder is BERT (Devlin et al., 2019) . In this section, we describe the architecture and the training methodology used. The inputs provided to the model are the sentence, a list of aspects, their corresponding desired target polarities l tgt and their corresponding per-token weights (explained in Section 3.5).", |
|
"cite_spans": [ |
|
{ |
|
"start": 51, |
|
"end": 73, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 132, |
|
"end": 153, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The BERT model (Devlin et al., 2019) was originally trained with two objectives: (i). A cloze objective where the classifier predicts missing words in a sequence, and (ii). A sentence-pair classification objective. For the sentence-pair objective, BERT was trained to take inputs as segment-pairs, where each segment has a different embedding added to it and are separated by a [SEP ] token. For our input representation, we construct such segment-pairs. The first segment consists of an aspect-polarity sequence", |
|
"cite_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 36, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "ABSA Input Representation", |
|
"sec_num": "3.2" |
|
}, |
|
{

"text": "$SEG_A = T_1 \\, P_1 \\, [SEP_{ASP}] \\ldots T_k \\, P_k \\, [SEP_{ASP}]$, where $T_i$ is the tokenized target aspect term and $P_i \\in \\{[POS], [NEG]\\}$ is the polarity corresponding to it. $[SEP_{ASP}]$ is a separator token. $[POS]$ and $[NEG]$ are the special tokens corresponding to the 'positive' and 'negative' sentiments, for which unused tokens from the BERT vocabulary were used. The second segment $SEG_B$ consists of a sentence expressing some sentiment towards these targets.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "ABSA Input Representation",

"sec_num": "3.2"

},
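{

"text": "A minimal sketch of this input construction follows; '[SEP_ASP]', '[POS]' and '[NEG]' are written as literal strings here for illustration, whereas the paper repurposes unused entries of the BERT vocabulary for them.\n\ndef build_absa_input(sentence, aspect_polarities):\n    # SEG_A: 'T_1 P_1 [SEP_ASP] ... T_k P_k [SEP_ASP]'\n    seg_a_parts = []\n    for term, polarity in aspect_polarities:\n        pol_tok = '[POS]' if polarity == 'positive' else '[NEG]'\n        seg_a_parts.extend([term, pol_tok, '[SEP_ASP]'])\n    seg_a = ' '.join(seg_a_parts)\n    # Standard BERT segment-pair packing: [CLS] SEG_A [SEP] SEG_B [SEP]\n    return f'[CLS] {seg_a} [SEP] {sentence} [SEP]'\n\nprint(build_absa_input('the staff was personable and the food was delicious',\n                       [('staff', 'positive'), ('food', 'positive')]))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "ABSA Input Representation",

"sec_num": null

},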
|
{ |
|
"text": "We precondition the BERT encoder to better understand the ABSA task and to learn the tokenembeddings for [P OS] and [N EG] with MLM pre-training (a cloze objective) (c.f. Figure 2 ). For each data instance, with an equal probability, we randomly mask out either (i). all the polarity tokens from aspect-polarity sequence (SEG A ), or (ii). random tokens from the sentence (SEG B ) and train the encoder to correctly predict the masked- out tokens. When the polarities get masked, the encoder learns to correctly understand the aspectlevel sentiment polarities from a sentence. When the words from the sentence get masked, the encoder also learns to correctly predict attribute markers corresponding to a given aspect and a sentiment. For example, it would learn associations between the markers, such as 'personable' (or 'rude') when given an aspect-term, such as 'staff' with a polarity '[POS]' (or '[NEG]') as opposed to an aspectmarker, such as 'delicious', which cannot be used with 'staff'.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 171, |
|
"end": 179, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Preconditioning the BERT-encoder for ABSA Input Representations", |
|
"sec_num": "3.3" |
|
}, |
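{

"text": "The masking scheme can be sketched as below (an illustration of the described procedure under our own naming, operating on already-tokenized segments; the masking probability follows Section 4.2):\n\nimport random\n\ndef absa_mlm_mask(seg_a_tokens, seg_b_tokens, mask_prob=0.25):\n    # With equal probability, either (i) mask every polarity token in SEG_A,\n    # or (ii) mask random tokens in the sentence SEG_B; the encoder is then\n    # trained to predict the masked-out tokens.\n    a, b = list(seg_a_tokens), list(seg_b_tokens)\n    if random.random() < 0.5:\n        a = ['[MASK]' if t in ('[POS]', '[NEG]') else t for t in a]\n    else:\n        b = ['[MASK]' if random.random() < mask_prob else t for t in b]\n    return a, b",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Preconditioning the BERT-encoder for ABSA Input Representations",

"sec_num": null

},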
|
|
{ |
|
"text": "To convert a sentence from one set of aspectlevel polarities to another, the input to the encoder consists of the target aspect-level polarities l tgt as SEG A with the source sentence x passed as SEG B . The full architecture is shown in Figure 3 . The source sentence x expresses some source aspect-level polarities l src which is not provided to the model. The polarity-injection (explained in Section 3.5) adds the weighted target polarities l tgt into the hidden-representation H from the BERTencoder to obtain H which is passed to the decoder. The decoder is trained to output the target sentence y which consists of the same content as present in x but expressing the target aspect-level polarities l tgt . This architecture is trained in an unsupervised fashion as explained in Section 3.6.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 239, |
|
"end": 248, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Encoder-Decoder Architecture", |
|
"sec_num": "3.4" |
|
}, |
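{

"text": "A schematic of the forward pass in PyTorch (a sketch under our own naming, not the authors' released code; the encoder and decoder are passed in as modules, and the tensor shapes and decoder interface are assumptions):\n\nimport torch\nimport torch.nn as nn\n\nclass PolarityInjectionSeq2Seq(nn.Module):\n    def __init__(self, encoder, decoder, hidden_size):\n        super().__init__()\n        self.encoder = encoder  # BERT\n        self.decoder = decoder  # Transformer decoder\n        # v_pos / v_neg: learned polarity embeddings (Section 3.5)\n        self.v_pos = nn.Parameter(torch.randn(hidden_size))\n        self.v_neg = nn.Parameter(torch.randn(hidden_size))\n\n    def forward(self, input_ids, polarity_weights, polarity_signs, tgt_ids):\n        H = self.encoder(input_ids)  # (batch, seq_len, hidden)\n        # polarity_weights: (batch, n_aspects, seq_len) saliency weights p_ij\n        # polarity_signs:   (batch, n_aspects), +1 for [POS] and -1 for [NEG]\n        v = torch.where(polarity_signs.unsqueeze(-1) > 0, self.v_pos, self.v_neg)\n        # Equation 1: H'_j = H_j + sum_i p_ij * v_i\n        H_prime = H + torch.einsum('bis,bih->bsh', polarity_weights, v)\n        return self.decoder(tgt_ids, memory=H_prime)  # placeholder decoder call",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Encoder-Decoder Architecture",

"sec_num": null

},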
|
{ |
|
"text": "In Pryzant et al. (2020) , authors showed that the hidden states of the encoder can be edited by adding weighted vectors to indicate subjective-bias, before being input to the decoder. They proposed this as a method to join the results from two sub-modules in their system. Here, we extend this to cover multiple attributes -the 'positive' and 'negative' sentiments, and substitute the supervised model they train with saliency-based weights. We inject (add) weighted amounts of two vectors corresponding to these two attributes to edit the hidden states output by the encoder. For each aspect, the vector added corresponds to the desired target polarity of this aspect, and the amount added to a given token depends on the saliency-based weight for this token calculated from the gradient for this aspect's polarity from a classification model (described in Section 4.1.1). More formally, the polarity injection is calculated from equation 1. H = [h 1 , h 2 , . . . h k ] that denotes the hidden-state output from the encoder, and H = [h 1 , h 2 , . . . h k ] are the new hidden-states calculated after polarity-injection. The number p ij denotes the saliency-based weightage for token j with respect to aspect i. Figure 3 shows the polarityinjection architecture. v pos and v neg are the special vector-embeddings, which have the same size as the hidden dimension, and trained to denote the positive and negative sentiment, respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 24, |
|
"text": "Pryzant et al. (2020)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1215, |
|
"end": 1223, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Polarity Injection", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "h j = h j + k i=1 p ij \u2022 v i (1) v i = v pos if pol i desired is positive v neg if pol i desired is negative (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Polarity Injection", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "where pol i is the target (desired) polarity from l tgt for the i th aspect-term. For calculating p ij , saliencymaps obtained for each aspect from the polarity classifier described in 4.1 are used. Saliency-maps (Simonyan et al., 2014) are calculated with the gradient of the loss at the input, as given in equation 3. The s tok values for all the tokens tok in the sentence x are normalized between 0 and 1 for each (target t, sentence x) pair to obtain the p ij values.", |
|
"cite_spans": [ |
|
{ |
|
"start": 213, |
|
"end": 236, |
|
"text": "(Simonyan et al., 2014)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Polarity Injection", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "Since the saliency-maps produce high values for the tokens that are important in calculating the sentiment's polarity, adding the 'positive' or 'negative' embedding weighted by these probabilities would provide hints to the decoder about the important words to be rewritten with the required sentiment.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Polarity Injection", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "The p ij values over Segment-A is set to 1 over all the tokens corresponding to the i th aspect term (T i P i ) (see Section 3.2) and 0 over the other tokens.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Polarity Injection", |
|
"sec_num": "3.5" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "s tok = \u2202L(y t ; x, t, \u03b8) \u2202emb tok ; \u2200tok \u2208 x", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Polarity Injection", |
|
"sec_num": "3.5" |
|
}, |
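{

"text": "A sketch of the saliency computation (assuming a HuggingFace-style classifier that accepts inputs_embeds; reducing the per-token gradient vector to a single scalar via its norm is our assumption, as the paper only states that the s_tok values are normalized):\n\nimport torch\nimport torch.nn.functional as F\n\ndef saliency_weights(classifier, embed_layer, input_ids, target_label):\n    # Equation 3: gradient of the classification loss at the input embeddings,\n    # computed per (target, sentence) pair, i.e. batch size 1 here.\n    emb = embed_layer(input_ids).detach().requires_grad_(True)\n    logits = classifier(inputs_embeds=emb)\n    loss = F.cross_entropy(logits, target_label)\n    loss.backward()\n    s = emb.grad.norm(dim=-1)  # one score s_tok per token\n    # Normalize to [0, 1] to obtain the p_ij values for this aspect.\n    return (s - s.min()) / (s.max() - s.min() + 1e-9)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Polarity Injection",

"sec_num": null

},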
|
{ |
|
"text": "One-Zero Alternative to Saliency: To test for performance in the absence of any saliency information, we also propose using a one-zero setup. Here, p ij is set to 1 over the tokens corresponding to the i th aspect-term and 0 elsewhere. So in this setup, v pos gets added to the tokens corresponding to the positive aspects and v neg gets added to the tokens corresponding to the negative aspects. For example, in Figure 1 , v pos gets added to the subword tokens corresponding to the word 'salads' and 'chicken', while v neg gets added to the sub-word tokens corresponding to the word 'service'.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 413, |
|
"end": 421, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Polarity Injection", |
|
"sec_num": "3.5" |
|
}, |
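{

"text": "A minimal sketch of the one-zero weighting (the span indices are assumed to come from matching the aspect term's sub-word tokens in the input):\n\ndef one_zero_weights(num_aspects, seq_len, aspect_token_spans):\n    # p_ij = 1 on the tokens of the i-th aspect term, 0 elsewhere.\n    # aspect_token_spans: list of (start, end) token indices, one per aspect.\n    weights = [[0.0] * seq_len for _ in range(num_aspects)]\n    for i, (start, end) in enumerate(aspect_token_spans):\n        for j in range(start, end):\n            weights[i][j] = 1.0\n    return weights",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Polarity Injection",

"sec_num": null

},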
|
{ |
|
"text": "For training the model in an unsupervised setting, we alternate training steps between a denoising auto-encoding objective and a back-translation objective. During the denoising step, we add random noise to the sentence part of the input SEG B . We also randomly mask the polarities in the aspectpolarity sequence in SEG A with a small probability to ensure the model learns to generate outputs using the polarity injection clues. During the backtranslation step, a random query l tgt aspect-polarity sequence is used to produce an intermediate translation (using the model), and the same model is trained to regenerate the original input when provided the aspect-polarity sequence from the original input sentence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised Training", |
|
"sec_num": "3.6" |
|
}, |
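{

"text": "The alternation between the two objectives can be sketched as follows (model.loss and model.generate are placeholder interfaces of our own; the noise parameters follow Section 4.2):\n\nimport random\nimport torch\n\ndef add_noise(tokens, drop_prob=0.25, shuffle_window=3):\n    # Denoising corruption: random word dropping plus local shuffling.\n    kept = [t for t in tokens if random.random() > drop_prob]\n    keyed = [(i + random.uniform(0, shuffle_window), t) for i, t in enumerate(kept)]\n    return [t for _, t in sorted(keyed)]\n\ndef random_query(l_src):\n    # Flip a random non-empty subset of the source polarities to build l_tgt.\n    flip = {'positive': 'negative', 'negative': 'positive'}\n    idx = set(random.sample(range(len(l_src)), random.randint(1, len(l_src))))\n    return [(t, flip[p] if i in idx else p) for i, (t, p) in enumerate(l_src)]\n\ndef training_step(model, sentence_tokens, l_src, step):\n    if step % 2 == 0:\n        # Denoising auto-encoding: reconstruct the clean sentence from noise.\n        return model.loss(l_src, add_noise(sentence_tokens), target=sentence_tokens)\n    # On-the-fly back-translation: transfer to a random query, then recover\n    # the original sentence under its original aspect-polarity sequence l_src.\n    l_tgt = random_query(l_src)\n    with torch.no_grad():\n        intermediate = model.generate(l_tgt, sentence_tokens)\n    return model.loss(l_src, intermediate, target=sentence_tokens)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Unsupervised Training",

"sec_num": null

},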
|
{ |
|
"text": "In this section we report the datasets used for the experiments and the implementation details.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Text generation tasks require huge amounts of data, however there are no aspect-sentiment annotated datasets that are large enough for our task. Fortunately, aspect extraction and aspect-sentiment classification tasks have been well explored and have several publicly available datasets. We used datasets (only restaurant domain) from SemEval 2014, 2015 and 2016 (ABSA task) to train BERT based aspect extraction and aspect-sentiment classification systems. We only consider positive and negative polarities for our experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For the task of aspect-level sentiment style transfer, we use Yelp dataset. Since this dataset does not contain aspect-level polarity information or the target-aspects extracted, we use our BERT-based target-extraction model and BERT-based polarity classification model which were trained on the Se-mEval ABSA training data, to generate aspect-level sentiment data from the Yelp reviews dataset. Table 1 shows some statistics from the datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 396, |
|
"end": 404, |
|
"text": "Table 1", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "A pipeline of BERT-based models was trained for target-extraction and aspect-level polarity classification over the SemEval dataset. These are the models used to extract target-aspects and their polarities from the Yelp dataset. The target extraction task was posed as a sequential token classification problem with BERT using the IOB2 format (SANG, 1999 ). This BERT model was fed the whole sentence as the input segment and it obtained an F1-score of 0.8012 (evaluation carried out similar to Sang and Buchholz (2000) ). The sentiment-polarity prediction task is posed as a sentence-pair classification problem using BERT, with the sentence provided as the first segment and the aspect-term as the second segment. This model obtained an F1-score of 0.9080 for the positive po- Table 3 : Results of manual evaluation. Here, 'Att' stands for attribute match, 'Con' stands for content preservation and 'Gra' stands for grammaticality or fluency. Manual evaluation is performed on a subset of 100 queries from all the test set queries, and averaged scores are shown. larity and 0.8239 for the negative polarity on the ABSA restaurant dataset. Using this classifier, for each (Sentence, Target) pair the gradient of the loss was taken at the input token embeddings and normalized to obtain the saliency-based weights used for polarity-injection.", |
|
"cite_spans": [ |
|
{ |
|
"start": 343, |
|
"end": 354, |
|
"text": "(SANG, 1999", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 504, |
|
"end": 519, |
|
"text": "Buchholz (2000)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 779, |
|
"end": 786, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Aspect based Sentiment Analysis with BERT", |
|
"sec_num": "4.1.1" |
|
}, |
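{

"text": "For illustration, decoding the IOB2 tag sequence into aspect-term spans can be done as below (a sketch; the tag names are our assumption):\n\ndef iob2_to_spans(tokens, tags):\n    # Decode IOB2 tags ('B-ASP', 'I-ASP', 'O') into aspect-term strings.\n    spans, start = [], None\n    for i, tag in enumerate(tags):\n        if tag.startswith('B'):\n            if start is not None:\n                spans.append(' '.join(tokens[start:i]))\n            start = i\n        elif tag == 'O' and start is not None:\n            spans.append(' '.join(tokens[start:i]))\n            start = None\n    if start is not None:\n        spans.append(' '.join(tokens[start:]))\n    return spans\n\nprint(iob2_to_spans(['the', 'spicy', 'tuna', 'roll', 'was', 'great'],\n                    ['O', 'B-ASP', 'I-ASP', 'I-ASP', 'O', 'O']))  # ['spicy tuna roll']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Aspect based Sentiment Analysis with BERT",

"sec_num": null

},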
|
{ |
|
"text": "All the models were implemented using PyTorch (Paszke et al., 2017 ). The BERT model was implemented using the transformers library (Wolf et al., 2019) . Models are trained with an initial learning rate of 1e-4 with a linear schedule and a warmup (Vaswani et al., 2017) , using the Adam Optimizer (Kingma and Ba, 2019). Mini-batches of size 32 were used during training. A linear schedule was used for the weight of the loss from the denoising auto-encoding step, which was set to decrease from 1 to 0.1 for the first 30,000 optimization steps and then decrease linearly to 0 over the next 70,000 steps. The models were each trained for 8 epochs on the Yelp dataset. The random masking probability used during pre-training was 0.25. During the denoising step, a probability of 0.25 was used for dropping words, and words were shuffled with a window-size of 3.", |
|
"cite_spans": [ |
|
{ |
|
"start": 46, |
|
"end": 66, |
|
"text": "(Paszke et al., 2017", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 132, |
|
"end": 151, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 247, |
|
"end": 269, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": "4.2" |
|
}, |
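{

"text": "The stated schedule for the denoising auto-encoding loss weight corresponds to the following (a direct transcription of the numbers above):\n\ndef denoising_loss_weight(step):\n    # 1.0 -> 0.1 over the first 30k optimization steps,\n    # then 0.1 -> 0.0 linearly over the next 70k steps.\n    if step < 30_000:\n        return 1.0 - 0.9 * step / 30_000\n    if step < 100_000:\n        return 0.1 * (1.0 - (step - 30_000) / 70_000)\n    return 0.0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Implementation Details",

"sec_num": null

},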
|
{ |
|
"text": "The evaluation metrics we use are an extension of the metrics used for evaluating the sentiment transfer task by previous work (such as Li et al. (2018b) ; Wang et al. (2019) ). The evaluation was done with the SemEval test dataset. Queries were generated from this data by randomly inverting a subset (non-null, improper subset) of the polarities expressed at the input. For queries with 2 or more aspects, as many queries were generated as there were aspects in the sentence with different random inversions, resulting in a total of 513 evaluation queries. A sample consisting of 100 queries from the test set was used for manual evaluation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 153, |
|
"text": "Li et al. (2018b)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 156, |
|
"end": 174, |
|
"text": "Wang et al. (2019)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "For automatic evaluation, we use a classifier score and a BLEU score. The results for automatic evaluation are shown in Table 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 120, |
|
"end": 127, |
|
"text": "Table 2", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Automatic Evaluation", |
|
"sec_num": "5.1.1" |
|
}, |
|
{ |
|
"text": "Classifier Score: We use an aspect-level sentiment polarity classifier to measure how many of the outputs express the necessary target polarities (Li et al., 2018b) . We use the classifier described in 4.1.1 for the polarity prediction. We define the classifier score to be the fraction of aspect-level sentiment polarities (predicted by the classifier from the output) that match with the desired aspect-level polarity (from the query). While averaging, each the waiter was attentive , the food was delicious and the views of the city were great waiter -negative food -positive views of the city -positive the waiter was inattentive , the food was delicious and the views of the city were great . waiter -positive food -negative views of the city -positive the waiter was attentive , the food was disappointing but the views of the city were great . waiter -positive food -negative views of the city -negative the waiter was attentive , the food was disappointing but the views of the city were terrible . aspect-level sentiment in a query was treated as a separate instance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 146, |
|
"end": 164, |
|
"text": "(Li et al., 2018b)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automatic Evaluation", |
|
"sec_num": "5.1.1" |
|
}, |
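{

"text": "A minimal sketch of the classifier-score computation (representing queries and classifier predictions as aspect-to-polarity dictionaries is our assumption for illustration):\n\ndef classifier_score(queries, predictions):\n    # Fraction of aspect-level polarities (as predicted by the ABSA classifier\n    # on the output) that match the desired polarities from the query; each\n    # aspect-level sentiment in a query counts as a separate instance.\n    total = matched = 0\n    for desired, predicted in zip(queries, predictions):\n        for aspect, polarity in desired.items():\n            total += 1\n            matched += int(predicted.get(aspect) == polarity)\n    return matched / max(total, 1)\n\nprint(classifier_score([{'service': 'negative', 'food': 'positive'}],\n                       [{'service': 'negative', 'food': 'negative'}]))  # 0.5",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Automatic Evaluation",

"sec_num": null

},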
|
{ |
|
"text": "BLEU Scores: Like in Li et al. (2018b) ; Gan et al. (2017) , human reference outputs were written for 100 of the queries. Three Human experts were asked to rewrite the reviews with as much content preserved as possible, without compromising fluency. These experts had good language abilities and having satisfactory knowledge in the relevant area. We report BLEU scores for the models against these references. A BLEU score could be treated as a measure of content preservation from the input or the output fluency.", |
|
"cite_spans": [ |
|
{ |
|
"start": 21, |
|
"end": 38, |
|
"text": "Li et al. (2018b)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 41, |
|
"end": 58, |
|
"text": "Gan et al. (2017)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automatic Evaluation", |
|
"sec_num": "5.1.1" |
|
}, |
|
{ |
|
"text": "Following the previous methods (Li et al., 2018b; Wang et al., 2019) for manual evaluation of style transfer, workers were asked to rate the output sentences on the Likert-scale (1 to 5) for three criteria -Attribute match to the query set of aspect-level polarities (Att), Fluency (Gra) measuring the naturalness of the output and Content preservation (Con). They were shown the source sentence, the query aspect-level polarities and the model output. The results of manual evaluation are shown in Table 3 1", |
|
"cite_spans": [ |
|
{ |
|
"start": 31, |
|
"end": 49, |
|
"text": "(Li et al., 2018b;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 50, |
|
"end": 68, |
|
"text": "Wang et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 499, |
|
"end": 507, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Manual Evaluation", |
|
"sec_num": "5.1.2" |
|
}, |
|
{ |
|
"text": "The importance of each component in our model is shown through an ablation study in Table 2 and Table 3. From the classifier-based score, we see that the full model with saliency-based polarity injection is the most successful in transferring sentimentlevel polarities. Polarity injection, even without saliency information is seen to be useful. The models with polarity injection are especially better at transferring sentiments when three or more aspects are present, showing that the polarity signals are useful in localizing the style attributes with multiple targets present. The model using saliencybased weighting for the polarity injection has a significantly higher classifier score. This could be because of the saliency information acting like an adversarial white-box attack on the classifier, making it easier to obtain higher classifier scores.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 84, |
|
"end": 91, |
|
"text": "Table 2", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The Content preservation (Con) scores and BLEU scores for the baseline models are significantly high, but these models also show poor Attribute match (Att) scores. This means that many of the sentiments at the output were left untransferred resulting in the poor Att score, while large parts of the input text were copied over to the output resulting in the larger BLEU and Content preservation scores. The improved Content preservation (Con) scores and the Fluency (Gra) from the model without saliency information to the model with saliencybased weighting shows that the attribute transfer with saliency-based info is more successful in inverting the correct polarities, while maintaining the content and fluency, due to the added information about the words to be edited.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The Table 5 shows how the model outputs change with different components of the model are ablated. With queries involving mostly positive or negative attributes, the saliency-based polarity injection supports the localized inversion of sentiment in the output. Outputs also show how polarity injection helps produce the required change with more content and fluency preserved, by selectively editing the correct words.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 4, |
|
"end": 11, |
|
"text": "Table 5", |
|
"ref_id": "TABREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Error Analysis", |
|
"sec_num": "5.2" |
|
}, |
|
{

"text": "[Table 5: Example outputs from the ablated models. (1) Input: 'the veal and the mushrooms were cooked perfectly.' Query: veal - positive, mushrooms - negative. BERT-Baseline (BB): 'the veal and the mushrooms were not cooked perfectly.' BB + MLM pretraining (BB-MLM): 'the veal and the mushrooms were over cooked perfectly.' BB-MLM + one-zero polarity injection: 'the veal was gross and the mushrooms were over cooked.' BB-MLM + saliency-based polarity injection: 'loved the veal and the mushrooms were over cooked.' (2) Input: 'the waiter was attentive, the food was delicious and the views of the city were great.' Query: waiter - negative, food - positive, views of the city - positive. BB: 'the waiter was attentive, the food was delicious and the city were great.' BB-MLM: 'the waiter was attentive, the food was delicious and the views of the city were great.' BB-MLM + one-zero polarity injection: 'the waiter was attentive, the food was delicious and the views of the city were great.' BB-MLM + saliency-based polarity injection: 'the waiter was inattentive, the food was delicious and the views of the city were great.' (3) Input: 'for 7 years they have put out the most tasty, most delicious food and kept it that way...' Query: food - negative. BB: 'for 7 years they have put out the most tasty food and kept it that way.' BB-MLM: 'for 7 years they have put out the most greasy food and bland food that way...' BB-MLM + one-zero polarity injection: 'for 6 years they have put out the most bland food and kept it that way...' BB-MLM + saliency-based polarity injection: 'for 7 years they have put out the most bland food and kept it that way...']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": null,

"sec_num": null

},
|
{

"text": "[Table 6: Examples of failed outputs. (1) Input: 'the space is limited so be prepared to wait up to 45 minutes - 1 hour, but be richly rewarded when you savor the delicious indo-chinese food.' Query: space - positive, indo-chinese food - positive. Output: 'the space is extensive so be prepared to 10 - 15 - 20 + minutes, but delicious chinese food.' Comment: disfluency and dropped content due to the length of the input and the negative sentiment implied through the word 'waiting'. (2) Input: 'i'd be horrified if my staff were turning away customers so early and so rudely!' Query: staff - positive. Output: 'i'd be delighted if my staff were turning away customers so early and nicely!' Comment: lower naturalness of the output, from the real-world knowledge that turning away customers is bad. (3) Input: 'i had fish and my husband had the filet - both of which exceeded our expectations.' Query: fish - negative, filet - positive. Output: 'i had fish and my husband had the filet - both of which exceeded our expectations.' Comment: the attribute markers for fish and filet are shared, making transfer difficult; significant rewriting of the input is needed to produce an acceptably fluent output.]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": null,

"sec_num": null

},

{

"text": "To understand the errors in the outputs better, the outputs marked with low Att, Con and Gra scores were examined. Some of these outputs are shown and discussed in Table 6. Many of the failures in attribute match were found to be due to the complexity of the language involved, such as when the sentiment expressed towards a target is implicit from the content of the review. The absence of attribute markers also makes it harder to convert the sentiment. Most outputs with low Con and Gra scores were found to contain very long sentences; the models were trained on the Yelp dataset, which mostly contains shorter sentences. Failed examples with multiple different polarities at the output were often also due to the attribute markers towards different aspects being shared in the input sentence.",

"cite_spans": [],

"ref_spans": [

{

"start": 164,

"end": 171,

"text": "Table 6",

"ref_id": "TABREF9"

}

],

"eq_spans": [],

"section": "Error Analysis",

"sec_num": "5.2"

},
|
{ |
|
"text": "Such examples require significant rewriting and reordering to produce sentences of acceptable fluency, and our method seems most successful when making localized changes such as with word replacements.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comment", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In this paper, the task of aspect-level sentiment style transfer has been introduced, where stylistic attributes can be localized to different parts of a sentence. We have proposed a BERT-based encoder-decoder architecture with saliency-based polarity injection and show that it can be successful at the task when trained in an unsupervised setting. The experiments have been conducted on an aspect level polarity tagged benchmark dataset related to the restaurant domain. This work is hopefully an important initial step in developing a fine-grained controllable style transfer system. In the future, we would like to explore the ability to transfer such systems to data-sparse domains, and explore injecting attributes such as emotions to targets attributes in larger pieces of text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Inter-annotator agreement measured through the Krippendorff's alpha was found to be 0.92, 0.82, 0.87 for 'Att', 'Con', and 'Gra' respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Authors duly acknowledge the support from the Project titled Sevak-An Intelligent Indian Language Chatbot, Sponsored by SERB, Govt. of India (IMP/2018/002072).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Unsupervised neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Mikel", |
|
"middle": [], |
|
"last": "Artetxe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gorka", |
|
"middle": [], |
|
"last": "Labaka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1710.11041" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mikel Artetxe, Gorka Labaka, Eneko Agirre, and Kyunghyun Cho. 2017. Unsupervised neural ma- chine translation. arXiv preprint arXiv:1710.11041.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "On the properties of neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kyunghyun", |
|
"middle": [], |
|
"last": "Cho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bart", |
|
"middle": [], |
|
"last": "Van Merri\u00ebnboer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dzmitry", |
|
"middle": [], |
|
"last": "Bahdanau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1409.1259" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kyunghyun Cho, Bart Van Merri\u00ebnboer, Dzmitry Bah- danau, and Yoshua Bengio. 2014. On the properties of neural machine translation: Encoder-decoder ap- proaches. arXiv preprint arXiv:1409.1259.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "NAACL-HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understand- ing. In NAACL-HLT (1).", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Stylenet: Generating attractive visual captions with styles", |
|
"authors": [ |
|
{ |
|
"first": "Chuang", |
|
"middle": [], |
|
"last": "Gan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Gan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3137--3146", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chuang Gan, Zhe Gan, Xiaodong He, Jianfeng Gao, and Li Deng. 2017. Stylenet: Generating attractive visual captions with styles. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3137-3146.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Recurrent continuous translation models", |
|
"authors": [ |
|
{ |
|
"first": "Nal", |
|
"middle": [], |
|
"last": "Kalchbrenner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1700--1709", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nal Kalchbrenner and Phil Blunsom. 2013. Recurrent continuous translation models. In Proceedings of the 2013 Conference on Empirical Methods in Natu- ral Language Processing, pages 1700-1709.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A method for stochastic optimization", |
|
"authors": [ |
|
{

"first": "Diederik",

"middle": [

"P"

],

"last": "Kingma",

"suffix": ""

},

{

"first": "Jimmy",

"middle": [],

"last": "Ba",

"suffix": ""

}
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1412.6980" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P Kingma and J Adam Ba. 2019. A method for stochastic optimization. arxiv 2014. arXiv preprint arXiv:1412.6980, 434.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Multiple text style transfer by using word-level conditional generative adversarial network with two-phase training", |
|
"authors": [ |
|
{ |
|
"first": "Chih-Te", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi-Te", |
|
"middle": [], |
|
"last": "Hong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong-You", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chi-Jen", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shou-De", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3570--3575", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chih-Te Lai, Yi-Te Hong, Hong-You Chen, Chi-Jen Lu, and Shou-De Lin. 2019. Multiple text style transfer by using word-level conditional generative adversar- ial network with two-phase training. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th Interna- tional Joint Conference on Natural Language Pro- cessing (EMNLP-IJCNLP), pages 3570-3575.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "He He, and Percy Liang. 2018a. Delete, retrieve, generate: A simple approach to sentiment and style transfer", |
|
"authors": [ |
|
{ |
|
"first": "Juncen", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1804.06437" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Juncen Li, Robin Jia, He He, and Percy Liang. 2018a. Delete, retrieve, generate: A simple approach to sentiment and style transfer. arXiv preprint arXiv:1804.06437.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Delete, retrieve, generate: A simple approach to sentiment and style transfer", |
|
"authors": [ |
|
{ |
|
"first": "Juncen", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1804.06437" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Juncen Li, Robin Jia, He He, and Percy Liang. 2018b. Delete, retrieve, generate: A simple approach to sentiment and style transfer. arXiv preprint arXiv:1804.06437.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Automatic differentiation in PyTorch", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Paszke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soumith", |
|
"middle": [], |
|
"last": "Chintala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gregory", |
|
"middle": [], |
|
"last": "Chanan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [], |
|
"last": "Devito", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeming", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alban", |
|
"middle": [], |
|
"last": "Desmaison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luca", |
|
"middle": [], |
|
"last": "Antiga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lerer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "NeurIPS Autodiff Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. 2017. Automatic differentiation in PyTorch. In NeurIPS Autodiff Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "SemEval-2014 task 4: Aspect based sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Pontiki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitris", |
|
"middle": [], |
|
"last": "Galanis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Pavlopoulos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Harris", |
|
"middle": [], |
|
"last": "Papageorgiou", |
|
"suffix": "" |
|
}, |

{ |

"first": "Ion", |

"middle": [], |

"last": "Androutsopoulos", |

"suffix": "" |

}, |

{ |

"first": "Suresh", |

"middle": [], |

"last": "Manandhar", |

"suffix": "" |

} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 8th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "27--35", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/S14-2004" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maria Pontiki, Dimitris Galanis, John Pavlopoulos, Harris Papageorgiou, Ion Androutsopoulos, and Suresh Manandhar. 2014. SemEval-2014 task 4: As- pect based sentiment analysis. In Proceedings of the 8th International Workshop on Semantic Evaluation (SemEval 2014), pages 27-35, Dublin, Ireland. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Style transfer through back-translation", |
|
"authors": [ |
|
{ |

"first": "Shrimai", |

"middle": [], |

"last": "Prabhumoye", |

"suffix": "" |

}, |

{ |

"first": "Yulia", |

"middle": [], |

"last": "Tsvetkov", |

"suffix": "" |

}, |

{ |

"first": "Ruslan", |

"middle": [], |

"last": "Salakhutdinov", |

"suffix": "" |

}, |

{ |

"first": "Alan", |

"middle": [ |

"W" |

], |

"last": "Black", |

"suffix": "" |

} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1804.09000" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shrimai Prabhumoye, Yulia Tsvetkov, Ruslan Salakhut- dinov, and Alan W Black. 2018. Style trans- fer through back-translation. arXiv preprint arXiv:1804.09000.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Automatically neutralizing subjective bias in text", |
|
"authors": [ |
|
{ |
|
"first": "Reid", |
|
"middle": [], |
|
"last": "Pryzant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [ |
|
"Diehl" |
|
], |
|
"last": "Martinez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Dass", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sadao", |
|
"middle": [], |
|
"last": "Kurohashi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diyi", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "34", |
|
"issue": "", |
|
"pages": "480--489", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Reid Pryzant, Richard Diehl Martinez, Nathan Dass, Sadao Kurohashi, Dan Jurafsky, and Diyi Yang. 2020. Automatically neutralizing subjective bias in text. In Proceedings of the AAAI Conference on Ar- tificial Intelligence, volume 34, pages 480-489.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Representing text chunks", |
|
"authors": [ |
|
{ |
|
"first": "Sang", |
|
"middle": [], |
|
"last": "Eftk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "Proceedings of EACL'99", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "173--179", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "EFTK SANG. 1999. Representing text chunks. In Pro- ceedings of EACL'99, pages 173-179.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Introduction to the conll-2000 shared task chunking", |
|
"authors": [ |
|
{ |

"first": "Erik", |

"middle": [ |

"Tjong", |

"Kim" |

], |

"last": "Sang", |

"suffix": "" |

}, |
|
{ |
|
"first": "Sabine", |
|
"middle": [], |
|
"last": "Buchholz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Fourth Conference on Computational Natural Language Learning and the Second Learning Language in Logic Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik Tjong Kim Sang and Sabine Buchholz. 2000. In- troduction to the conll-2000 shared task chunking. In Fourth Conference on Computational Natural Language Learning and the Second Learning Lan- guage in Logic Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Style transfer from non-parallel text by cross-alignment", |
|
"authors": [ |
|
{ |
|
"first": "Tianxiao", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Lei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Regina", |
|
"middle": [], |
|
"last": "Barzilay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tommi", |
|
"middle": [], |
|
"last": "Jaakkola", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6830--6841", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianxiao Shen, Tao Lei, Regina Barzilay, and Tommi Jaakkola. 2017. Style transfer from non-parallel text by cross-alignment. In Advances in neural informa- tion processing systems, pages 6830-6841.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Deep inside convolutional networks: Visualising image classification models and saliency maps", |
|
"authors": [ |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Simonyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Vedaldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Zisserman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Workshop at International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karen Simonyan, Andrea Vedaldi, and Andrew Zisser- man. 2014. Deep inside convolutional networks: Vi- sualising image classification models and saliency maps. In Workshop at International Conference on Learning Representations. Iclr.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Sequence to sequence learning with neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3104--3112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to sequence learning with neural networks. In Advances in neural information processing sys- tems, pages 3104-3112.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Learning to attend via word-aspect associative fusion for aspect-based sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Tay", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Luu Anh", |

"middle": [], |

"last": "Tuan", |

"suffix": "" |

}, |

{ |

"first": "Siu Cheung", |

"middle": [], |

"last": "Hui", |

"suffix": "" |

} |
|
], |
|
"year": 2018, |
|
"venue": "Thirtysecond AAAI conference on artificial intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi Tay, Luu Anh Tuan, and Siu Cheung Hui. 2018. Learning to attend via word-aspect associative fu- sion for aspect-based sentiment analysis. In Thirty- second AAAI conference on artificial intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Structured content preservation for unsupervised text style transfer", |
|
"authors": [ |
|
{ |
|
"first": "Youzhi", |
|
"middle": [], |
|
"last": "Tian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiting", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhou", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1810.06526" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Youzhi Tian, Zhiting Hu, and Zhou Yu. 2018. Struc- tured content preservation for unsupervised text style transfer. arXiv preprint arXiv:1810.06526.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Controllable unsupervised text attribute transfer via editing entangled latent representation", |
|
"authors": [ |
|
{ |
|
"first": "Ke", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hang", |
|
"middle": [], |
|
"last": "Hua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaojun", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "11034--11044", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ke Wang, Hang Hua, and Xiaojun Wan. 2019. Control- lable unsupervised text attribute transfer via editing entangled latent representation. In Advances in Neu- ral Information Processing Systems, pages 11034- 11044.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Huggingface's transformers: Stateof-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R\u00e9mi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R\u00e9mi Louf, Morgan Funtow- icz, et al. 2019. Huggingface's transformers: State- of-the-art natural language processing. ArXiv, pages arXiv-1910.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Applying masked language model to sentiment transfer", |
|
"authors": [ |
|
{ |
|
"first": "Xing", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liangjun", |
|
"middle": [], |
|
"last": "Zang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jizhong", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Songlin", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1908.08039" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xing Wu, Tao Zhang, Liangjun Zang, Jizhong Han, and Songlin Hu. 2019. \" mask and infill\": Applying masked language model to sentiment transfer. arXiv preprint arXiv:1908.08039.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Bert post-training for review reading comprehension and aspect-based sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Hu", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Shu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip S", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1904.02232" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hu Xu, Bing Liu, Lei Shu, and Philip S Yu. 2019. Bert post-training for review reading comprehension and aspect-based sentiment analysis. arXiv preprint arXiv:1904.02232.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Unpaired sentiment-to-sentiment translation: A cycled reinforcement learning approach", |
|
"authors": [ |
|
{ |
|
"first": "Jingjing", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xuancheng", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houfeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenjie", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1805.05181" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jingjing Xu, Xu Sun, Qi Zeng, Xuancheng Ren, Xi- aodong Zhang, Houfeng Wang, and Wenjie Li. 2018. Unpaired sentiment-to-sentiment translation: A cycled reinforcement learning approach. arXiv preprint arXiv:1805.05181.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Unsupervised text style transfer using language models as discriminators", |
|
"authors": [ |
|
{ |
|
"first": "Zichao", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiting", |
|
"middle": [], |
|
"last": "Hu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Xing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Taylor", |
|
"middle": [], |
|
"last": "Berg-Kirkpatrick", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7287--7298", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zichao Yang, Zhiting Hu, Chris Dyer, Eric P Xing, and Taylor Berg-Kirkpatrick. 2018. Unsupervised text style transfer using language models as discrimina- tors. In Advances in Neural Information Processing Systems, pages 7287-7298.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Style transfer as unsupervised machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Zhirui", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuo", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shujie", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianyong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mu", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Enhong", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1808.07894" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhirui Zhang, Shuo Ren, Shujie Liu, Jianyong Wang, Peng Chen, Mu Li, Ming Zhou, and Enhong Chen. 2018. Style transfer as unsupervised machine trans- lation. arXiv preprint arXiv:1808.07894.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "The encoder-decoder network used, with the polarity injection." |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Inputi must say i am surprised by the bad reviews of the restaurant earlier in the year , though . Query restaurant -negative Output i must say i am surprised by the bad reviews of the restaurant earlier in the year , though . Comment No change. Sentiment here is implied and latent." |
|
}, |
|
"TABREF1": { |
|
"content": "<table><tr><td/><td/><td/><td>The</td><td>slow</td><td>food</td></tr><tr><td/><td/><td colspan=\"2\">BERT Encoder</td></tr><tr><td/><td/><td>[SEP]</td></tr><tr><td>[POS]</td><td>[NEG]</td><td/></tr><tr><td/><td/><td colspan=\"2\">BERT Encoder</td></tr><tr><td colspan=\"2\">service [MASK] food [MASK]</td><td>[SEP]</td><td>The service was slow but the food was delicious.</td></tr><tr><td colspan=\"3\">SEG A : Aspect-polarity sequence with</td><td>SEG B : Source sentence tokens (x) (expressing</td></tr><tr><td colspan=\"3\">Masked polarities, constructed from l src</td><td>aspect-level sentiments l src )</td></tr><tr><td colspan=\"4\">Figure 2: BERT Encoder pre-training</td></tr><tr><td/><td colspan=\"3\">+ + + + + + + +</td></tr><tr><td/><td/><td colspan=\"2\">BERT Encoder</td></tr><tr><td colspan=\"3\">Asp 1 [pol 1 ] [SEP ASP ] \u2026 Asp k [pol k ] [SEP ASP ]</td><td>[SEP]</td><td>x: input sentence</td></tr><tr><td colspan=\"3\">SEG A : Goal aspect-polarity sequence constructed from l tgt</td><td>SEG B : source sentence tokens (expressing aspect-level sentiments l src )</td></tr></table>", |
|
"html": null, |
|
"text": "[MASK] service was[MASK] but the [MASK] was delicious. service [NEG] food [POS] SEG A : Source aspect-polarity sequence constructed from l src SEG B : Randomly masked source sentence tokens (x) (expressing aspect-level sentiments l src )", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF2": { |
|
"content": "<table><tr><td>SemEval (Train and Validation)</td><td>2,242</td><td>4,016</td><td>1,437</td></tr><tr><td>SemEval (Test)</td><td>401</td><td>513</td><td>269</td></tr><tr><td>Yelp (Train and Validation)</td><td>361,968</td><td>471,820</td><td>47,750</td></tr></table>", |
|
"html": null, |
|
"text": "DatasetNo. of Sentences No. of Target Aspects No. of Unique Target Aspects", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Data distribution for the restaurant domain. The Yelp dataset does not contain target aspects and their polarities extracted, and these were extracted with a classifier trained on the SemEval training data", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"content": "<table><tr><td>Model</td><td>Att Con Gra</td></tr><tr><td>BERT-Baseline (BB)</td><td>2.48 3.99 3.96</td></tr><tr><td>BB + MLM pretraining (BB-MLM)</td><td>2.64 3.95 4.04</td></tr><tr><td>BB-MLM + one-zero polarity injection</td><td>2.80 4.00 4.05</td></tr><tr><td colspan=\"2\">BB-MLM + saliency-based polarity injection 2.98 4.08 4.05</td></tr></table>", |
|
"html": null, |
|
"text": "Results of automatic evaluation. The overall classifier score is calculated over all queries. The other columns show the score calculated only on queries with one aspect, two aspects or three or more aspects. The classifier scores are calculated on the full test set, while the BLEU scores are measured with reference-outputs for a subset of 100 queries.", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF7": { |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Example outputs from the full model with saliency-based polarity injection with different aspect-level polarity queries.", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF8": { |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Example outputs from the SemEval data showing aspect-level sentiment transfer from the ablated models. Aspects colored red (negative) or green (positive) indicate their sentiment.", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF9": { |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Example sentences that show difficulty in transferring sentiment.", |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |