|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:40:23.655180Z" |
|
}, |
|
"title": "Large Scale Author Obfuscation Using Siamese Variational Auto-Encoder: The SiamAO System", |
|
"authors": [ |
|
{ |
|
"first": "Chakaveh", |
|
"middle": [], |
|
"last": "Saedi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Macquarie University", |
|
"location": { |
|
"settlement": "Sydney", |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dras", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Macquarie University", |
|
"location": { |
|
"settlement": "Sydney", |
|
"country": "Australia" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Author obfuscation is the task of masking the author of a piece of text, with applications in privacy. Recent advances in deep neural networks have boosted author identification performance making author obfuscation more challenging. Existing approaches to author obfuscation are largely heuristic. Obfuscation can, however, be thought of as the construction of adversarial examples to attack author identification, suggesting that the deep learning architectures used for adversarial attacks could have application here. Current architectures are proposed to construct adversarial examples against classification-based models, which in author identification would exclude the highperforming similarity-based models employed when facing large number of authorial classes. In this paper, we propose the first deep learning architecture for constructing adversarial examples against similarity-based learners, and explore its application to author obfuscation. We analyse the output for both success in obfuscation and language acceptability, as well as comparing the performance with some common baselines, showing promising results in finding a balance between safety and soundness of the perturbed texts.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Author obfuscation is the task of masking the author of a piece of text, with applications in privacy. Recent advances in deep neural networks have boosted author identification performance making author obfuscation more challenging. Existing approaches to author obfuscation are largely heuristic. Obfuscation can, however, be thought of as the construction of adversarial examples to attack author identification, suggesting that the deep learning architectures used for adversarial attacks could have application here. Current architectures are proposed to construct adversarial examples against classification-based models, which in author identification would exclude the highperforming similarity-based models employed when facing large number of authorial classes. In this paper, we propose the first deep learning architecture for constructing adversarial examples against similarity-based learners, and explore its application to author obfuscation. We analyse the output for both success in obfuscation and language acceptability, as well as comparing the performance with some common baselines, showing promising results in finding a balance between safety and soundness of the perturbed texts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The ability of machine learning to infer information about the author of a piece of text raises issues about privacy in textual data. Blogs, reviews, even tweets can be significantly revealing when authors follow textual authorial patterns, which can lead to disclosure of sensitive information. This has led to real-world problems, such as with Amazon's machine learning-based recruitment system, This work is licensed under a Creative Commons Attribution 4.0 International License. License details: http: //creativecommons.org/licenses/by/4.0/. which was discontinued when it turned out to disadvantage female candidates. 1 Cases like this have generated interest in NLP in concealing authorial characteristics such as gender or age, for example by producing representations that make this information difficult to infer (Li et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 823, |
|
"end": 840, |
|
"text": "(Li et al., 2018)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Author identification is the task of inferring the actual identity of the author. The potential number of author candidates can be very large, making author identification different from author profiling where the possible values of an attribute (e.g. gender) are typically limited to a small closed set, as in standard classification tasks. Depending on the number of included authorial classes, approaches in author identification are either classification-based or similarity-based, in the framing of Stamatatos (2009) . Similarity-based approaches are proven to be better suited when facing large numbers of authors (Koppel et al., 2011) , and have also underpinned several successful methods in the annual PAN authorship shared tasks 2 such as Seidman (2013) and Khonji and Iraqi (2014) . Author obfuscation is the task of concealing the identity of an author. This task is fairly challenging even for humans (McDonald et al., 2012) , as authors are often not aware of hidden patterns in their writing; and the computational task is relatively underexplored. Some work has been carried out as part of a PAN authorship obfuscation task, since 2016, while other research has been independent of this. These approaches have included using backtranslation or heuristic application of paraphrase rules (Rosso et al., 2016; Hagen et al., 2017; Potthast et al., 2018) , and more recently applying heuristic solution methods to the task framed as an optimization problem (Bevendorff et al., 2019; Li et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 504, |
|
"end": 521, |
|
"text": "Stamatatos (2009)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 620, |
|
"end": 641, |
|
"text": "(Koppel et al., 2011)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 749, |
|
"end": 763, |
|
"text": "Seidman (2013)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 768, |
|
"end": 791, |
|
"text": "Khonji and Iraqi (2014)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 914, |
|
"end": 937, |
|
"text": "(McDonald et al., 2012)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 1302, |
|
"end": 1322, |
|
"text": "(Rosso et al., 2016;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 1323, |
|
"end": 1342, |
|
"text": "Hagen et al., 2017;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1343, |
|
"end": 1365, |
|
"text": "Potthast et al., 2018)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1468, |
|
"end": 1493, |
|
"text": "(Bevendorff et al., 2019;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1494, |
|
"end": 1510, |
|
"text": "Li et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Author obfuscation can be seen as the generation of adversarial examples to attack an author identification system. Work in other areas of adversarial example generation (Iyyer et al., 2018; Alzantot et al., 2018; Xiao et al., 2020; Bai et al., 2020) has seen rapid progress with the application of deep learning, and could potentially be adapted here. For example, Zhao et al. (2018b) define a GAN-style architecture to generate 'natural' adversarial examples that -unlike approaches searching the input space -works on the dense representation of each data point. Dense representations lie on the manifold that defines the data distribution and finding close points to them leads to natural adversarial examples. They apply this both to image classification tasks and a standard three-class natural language inference task, producing natural-looking adversarial examples.", |
|
"cite_spans": [ |
|
{ |
|
"start": 170, |
|
"end": 190, |
|
"text": "(Iyyer et al., 2018;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 191, |
|
"end": 213, |
|
"text": "Alzantot et al., 2018;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 214, |
|
"end": 232, |
|
"text": "Xiao et al., 2020;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 233, |
|
"end": 250, |
|
"text": "Bai et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 366, |
|
"end": 385, |
|
"text": "Zhao et al. (2018b)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Such architectures have so far only been defined for producing adversarial examples against classification-based learners (limited number of classes). In author identification, this would exclude the high-performing similarity-based approaches. In this paper we introduce SIAMAO, an architecture that can generate adversarial examples against a similarity-based learner (specifically a deep Siamese network (Saedi and Dras, 2019) ) and evaluate whether it can obfuscate against authorship identification. SIAMAO draws on ideas from Variational Autoencoders (VAEs), and the specific use of them by Bowman et al. (2016) for generating novel sentences close to some input, and from the Adversarially Regularized Autoencoders (ARAEs) of Zhao et al. (2018b) : the intuition here is for the autoencoder to regenerate close to the original text but with some perturbation to fool an authorship identification system.", |
|
"cite_spans": [ |
|
{ |
|
"start": 407, |
|
"end": 429, |
|
"text": "(Saedi and Dras, 2019)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 733, |
|
"end": 752, |
|
"text": "Zhao et al. (2018b)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our main contributions are: (i) A method for integrating Siamese networks into VAEs in order to generate adversaries against similarity based models, and testing it under author obfuscation. (ii) A performance comparison on properties of the obfuscated text between our model and baselines: our focus is on how well the obfuscated text can fool an author identification system, how much the obfuscator changes the text, and how acceptable the resulting text is. We find that SIAMAO provides a promising deep learning approach to this task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There has been longstanding interest in determining the identity of authors of pieces of texts. Early work has been surveyed by Stamatatos (2009) , and much of the activity on the problem has been carried out in the context of PAN authorship tasks (Kestemont et al., 2019, for example) . Other work has occurred outside that context, such as the high-performing CNN approach of Ruder et al. (2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 128, |
|
"end": 145, |
|
"text": "Stamatatos (2009)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 248, |
|
"end": 285, |
|
"text": "(Kestemont et al., 2019, for example)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 378, |
|
"end": 397, |
|
"text": "Ruder et al. (2016)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Author Identification", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "While most approaches tackle this as a classification task using standard machine learning classifiers, this is only suitable where the number of authors is small and known in advance, as argued by Koppel et al. (2011) . An alternative approach is similarity-based models, where a metric is used to measure similarity between texts; this is appropriate for large number of authors, which is the context of the work in the present paper. Similarity-based methods include the WritePrints method (Abbasi and Chen, 2008) and that of Koppel et al. (2011) . The latter, for example, represents documents as bags of character n-grams, and measures distances between documents over repeated samples by various fixed metrics (e.g. cosine similarity, Ruzicka).", |
|
"cite_spans": [ |
|
{ |
|
"start": 198, |
|
"end": 218, |
|
"text": "Koppel et al. (2011)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 493, |
|
"end": 516, |
|
"text": "(Abbasi and Chen, 2008)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 529, |
|
"end": 549, |
|
"text": "Koppel et al. (2011)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Author Identification", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "An end-to-end trainable deep learning author obfuscation architecture needs a deep learning component for author identification. A deep learning similarity-based approach to author identification has been proposed by Saedi and Dras (2019) , using a Siamese network. This approach outperforms alternatives on up to 5000 authors, and is suitable for our work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 217, |
|
"end": 238, |
|
"text": "Saedi and Dras (2019)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Author Identification", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Author obfuscation is a less explored area which shares interest with fields including style transfer (Prabhumoye et al., 2018) or attribute masking (Reddy and Knight, 2016) . The goal is to change or perturb a text, so that the accuracy of a specific authorship inference mechanism is worsened while the modified text conveys the original message.", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 127, |
|
"text": "(Prabhumoye et al., 2018)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 149, |
|
"end": 173, |
|
"text": "(Reddy and Knight, 2016)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Author Obfuscation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Early research like that of Kacmarcik and Gamon (2006) worked at the level of machine learning features, proposing to eliminate those that are more effective in classification; this, however, resulted in mostly unreadable texts. At the level of working directly with text, one approach uses backtranslation: input text is translated to a pivot language and translated back to the original one, producing a more or less similar text. The result is greatly affected by the availability of a successful bidirectional machine translator (Rao et al., 2000; Prabhumoye et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 54, |
|
"text": "Kacmarcik and Gamon (2006)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 533, |
|
"end": 551, |
|
"text": "(Rao et al., 2000;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 552, |
|
"end": 576, |
|
"text": "Prabhumoye et al., 2018)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Author Obfuscation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Other approaches have been largely rule-based or heuristic in nature. Most rule-based obfuscators are designed against specific techniques. The PAN organization has included author obfuscation among the authorial tasks. The 7 participants of PAN2018 author obfuscation were also mostly rulebased, but with different levels of aggressiveness (Potthast et al., 2018) , and they varied in how well they defeated inference attackers and preserved the essence of the original text. In a recent comprehensive model, Bevendorff et al. (2019) also approached obfuscation from a verification perspective. This heuristic model calculates Jensen-Shannon distance over 3-gram frequency representations, iteratively applies perturbation operators (e.g. char-flip, deletion, context-free synonymy), picks the best nodes in the search space, and continues until the original classification result changes. They proposed \"operator cost\" to keep the text modification minimum and as minimally disruptive as possible. This was evaluated on the relatively small datasets of the PAN tasks. Outside of the PAN context (and of NLP research in general), Li et al. (2019) proposed TextBugger, a different heuristic model that first extracts a list of most important words based on the effect they have on the classification, and then modifies the selected words.", |
|
"cite_spans": [ |
|
{ |
|
"start": 341, |
|
"end": 364, |
|
"text": "(Potthast et al., 2018)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 510, |
|
"end": 534, |
|
"text": "Bevendorff et al. (2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1131, |
|
"end": 1147, |
|
"text": "Li et al. (2019)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Author Obfuscation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Author obfuscation can be viewed as constructing adversarial examples against an authorship identification inference attacker: this is precisely the viewpoint of TextBugger. However, as noted above, TextBugger takes a heuristic approach to this, while state of the art approaches to constructing adversarial examples in many tasks involve deep learning architectures (Iyyer et al., 2018; Alzantot et al., 2018; Xiao et al., 2020; Bai et al., 2020) . And even though these are well explored in the context of continuous representations that occur in image processing, with operators like affine transformations or lighting changes, it is less straightforward for the discrete nature of text. While there is some existing work, we note that all aim to construct adversarial examples against a classification model that typically handles only a small number of classes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 367, |
|
"end": 387, |
|
"text": "(Iyyer et al., 2018;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 388, |
|
"end": 410, |
|
"text": "Alzantot et al., 2018;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 411, |
|
"end": 429, |
|
"text": "Xiao et al., 2020;", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 430, |
|
"end": 447, |
|
"text": "Bai et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adversarial Examples", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "One possibility is to use auto-encoders: Minor data distortions can be formalized as an optimization problem to minimize the classification accuracy. Such optimization has been proven successful in image processing (Biggio et al., 2013; Goodfellow et al., 2014) . In the context of textual adversarial examples, approaches take ideas from a range of sources, including encoder-decoder architectures, variational auto-encoders and GANs (Kusner et al., 2017; Pu et al., 2016; Pol et al., 2019, for exam-ple) . A key work that we draw on in this paper is that of Zhao et al. (2018b) . Rather than working directly in the text space, they search for adversaries that lie on the data manifold: in their text application, this attacks a (three-class) textual entailment classifier. First, projections of data points are learnt, then the distance between each adversary and the closest real data point is measured in the vector space to choose the best fake sample. Finally, the selected adversary is mapped back to the input space. Their system combines ideas from encoder-decoder architecture, VAEs and GANs, and has two main training objectives: (1) bringing the encoder and generator output close to each other; and (2) making the sampled noise (i.e. generator's input) less random by using a module they call the 'Inverter'. The inverter is a network that learns to sample close-to-input points in the data manifold. Their search algorithm identifies the best adversary by incrementally increasing the search space till the classification result of the sampled point(s) is different from that of the original input data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 215, |
|
"end": 236, |
|
"text": "(Biggio et al., 2013;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 237, |
|
"end": 261, |
|
"text": "Goodfellow et al., 2014)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 435, |
|
"end": 456, |
|
"text": "(Kusner et al., 2017;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 457, |
|
"end": 473, |
|
"text": "Pu et al., 2016;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 474, |
|
"end": 505, |
|
"text": "Pol et al., 2019, for exam-ple)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 560, |
|
"end": 579, |
|
"text": "Zhao et al. (2018b)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adversarial Examples", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "While not explicitly cast as adversarial example generation, the process of paraphrase generation can be seen in this light. Gupta et al. (2018) proposed a VAE-LSTM containing 2 LSTM-encoders which encode both the original sentence and the paraphrase. Encoded vectors are used in the sampling process of the VAE. On the decoder side, there is an encoder for original sentences and a decoder for paraphrase generation that is fed the embedding vector and the encoder output. In our approach, our encoder is a CNN but we also use two encoded vectors for sampling, and the modified embeddings are used by the decoder.", |
|
"cite_spans": [ |
|
{ |
|
"start": 125, |
|
"end": 144, |
|
"text": "Gupta et al. (2018)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adversarial Examples", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "An optimization-based alternative to these deep learning approaches was proposed by Alzantot et al. (2018) , using population-based optimization. They encode the sentences and perturb them in the vector space. Unlike the above work, they propose a gradient-free optimization by employing genetic algorithms. Perturbation is at the word level based on semantic similarity of candidates and original vectors going through cross-over and mutation instead of expanding the search space iteratively. We use a similar notion of perturbation operators, including cross-over.", |
|
"cite_spans": [ |
|
{ |
|
"start": 84, |
|
"end": 106, |
|
"text": "Alzantot et al. (2018)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adversarial Examples", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "Here we present SIAMAO, an author obfuscation neural network that integrates a large scale Siamese author identifier in a VAE architecture to generate ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SiamAO", |
|
"sec_num": "3" |
|
}, |
|
|
{ |
|
"text": ". Perturbation operator (continues red arrow) modifies one of the inputs. This changes the verification result from Y to N . S 2 is the perturbation output when mapped back to the text space. (b) Schematic view of SiamAO; the network is composed of a Siamese author identifier and a VAE which share an encoder. adversarial text. This system takes a pair of texts as input and generates an adversary with the aim of changing the author identification results. Figure 1 shows (a) a high-level schema of the process and (b) the components of SIAMAO respectively. A key innovation is incorporation of a similaritybased author identification approach, in contrast to other work described in \u00a72 that only constructs adversarial examples against classification-based inference.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 459, |
|
"end": 467, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "SiamAO", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Our similarity-based author identification component is taken from Saedi and Dras (2019) . The model consists of (1) a dual encoding sub-network and (2) a decision sub-network. The encoding subnetwork (a deep CNN model) receives an input pair of texts (S 1 , S 2 ) and maps each S i into the vector space (V i ). The decision sub-network compares V 1 and V 2 and generates a similarity score (more information available in supplementary material). We adopt the version of the model that proved best overall in the source work for large numbers of authors: the encoding sub-networks are characterlevel rather than word-level, and L 1 distance is employed in the decision network. This similaritybased model produces a score between a pair of texts that can be interpreted as an answer to the author verification problem: are these two texts by the same author? These two components of the author identification system are marked as Encoder and Siamese Decision in Figure 1 -(b) respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 88, |
|
"text": "Saedi and Dras (2019)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 963, |
|
"end": 971, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Author Identification", |
|
"sec_num": "3.1" |
|
}, |
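A minimal sketch of the similarity-based verification step just described, written in PyTorch (an assumption; the paper does not name its framework): a shared character-level encoder maps each text to a vector $V_i$, and a decision sub-network scores the pair from the $L_1$ (elementwise absolute) difference. The toy layer sizes and class names are illustrative, not the exact configuration of Saedi and Dras (2019).

```python
import torch
import torch.nn as nn

class CharEncoder(nn.Module):
    """Toy stand-in for the deep character-level CNN encoder."""
    def __init__(self, vocab_size=256, emb_dim=64, hidden=256):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, emb_dim)
        self.conv = nn.Conv1d(emb_dim, hidden, kernel_size=5, padding=2)
        self.pool = nn.AdaptiveMaxPool1d(1)

    def forward(self, char_ids):                   # char_ids: (batch, seq_len)
        x = self.emb(char_ids).transpose(1, 2)     # (batch, emb_dim, seq_len)
        x = torch.relu(self.conv(x))               # (batch, hidden, seq_len)
        return self.pool(x).squeeze(-1)            # V_i: (batch, hidden)

class SiameseVerifier(nn.Module):
    """Shared encoder plus a decision sub-network over the L1 difference."""
    def __init__(self, hidden=256):
        super().__init__()
        self.encoder = CharEncoder(hidden=hidden)   # shared by both inputs
        self.decision = nn.Sequential(nn.Linear(hidden, 1), nn.Sigmoid())

    def forward(self, s1_ids, s2_ids):
        v1, v2 = self.encoder(s1_ids), self.encoder(s2_ids)
        return self.decision(torch.abs(v1 - v2))    # same-author score in (0, 1)
```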
|
{ |
|
"text": "Our overall approach to generating adversarial examples draws on the VAE architecture of Gupta et al. (2018) for paraphrase generation, and the idea of Zhao et al. (2018a) to generate perturbations in the encoded space. Implementation details can be found in supplementary material. Encoder-Decoder A successful VAE for text perturbation requires a strong encoder as well as a decoder capable of perturbation. In our proposed architecture, shown in Figure 1 -(b), the author identifier network and the VAE share the encoder. This results in an authorial feature aware decoder since the encoder is trained on author verification. SIA-MAO's decoder is trained for a) normal decoding (i.e. as a decoder: loss is 0, when input=output= target) and for b) obfuscation (i.e. as a perturber: loss is 0, when input =output=target). In both cases, the input to the decoder is sampled from V i . However, when trained for obfuscation, perturbation operators modify the sampler's input and output. Sampler In finding adversarial examples, we have two aims: (1) like Zhao et al. (2018b), we look for points that lie close to the original in terms of the manifold that defines the data distribution; and (2) we look for adversaries that can change the author verification results while preserving the original message. In other words, we need to generate a piece of text that is very close to the original one but different enough to change the verification result. In SiamAO, when training the decoder for normal decoding, V i is directly used for sampling (i.e. to generate V from the normal distribution). However, when training the decoder for obfuscation, unlike non-Siamese models, we have access to two sample inputs (V 1 , V 2 ) which can help to remain within the acceptable area 3 in the vector space. We start by interpolating between these two inputs. Specifically, if", |
|
"cite_spans": [ |
|
{ |
|
"start": 89, |
|
"end": 108, |
|
"text": "Gupta et al. (2018)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 152, |
|
"end": 171, |
|
"text": "Zhao et al. (2018a)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 449, |
|
"end": 457, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Author Obfuscation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "V 1 = [v 11 , . . . , v 1n ] and V 2 = [v 21 , . . . , v 2n ], and V 1 > V 2 and the distance between them is is d, either of V 1 = [v 11 \u2212 d , . . . , v 1n \u2212 d ] and V 2 = [v 21 + d , . . . , v 2n + d ] (where d = d/3)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Author Obfuscation", |
|
"sec_num": "3.2" |
|
}, |
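A small sketch of the shift interpolation above, under the assumption that the shift is applied elementwise and that $d' = d/3$ as stated; function and argument names are ours. Either shifted vector can then be handed to the sampler in place of the original $V_i$.

```python
import torch

def shift_towards(v1: torch.Tensor, v2: torch.Tensor, fraction: float = 1.0 / 3.0):
    """Return V1' and V2', each moved towards the other by `fraction` of their gap."""
    d_prime = (v1 - v2) * fraction   # elementwise signed distance scaled by 1/3
    v1_shifted = v1 - d_prime        # V1' = V1 - d'
    v2_shifted = v2 + d_prime        # V2' = V2 + d'
    return v1_shifted, v2_shifted
```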
|
{ |
|
"text": "can be used by the sampler. 4 Perturbation Operators To combine embedding and sampled vectors in the decoding step, we could concatenate them as in most VAE models. There is a risk, however, that the network focuses on the embedding part and mostly ignores the sampled vector which results in very few changes in the text such that it is unable to mislead the classifier. To add perturbations to the vectors, we adopt some of the techniques of Alzantot et al. (2018) . In SiamAO, when training the decoder for obfuscation, after moving the original vector and sampling as explained above, we use cross-over as the final perturbation step. Cross-over, taken from genetic algorithms, keeps vector elements mostly the same, only making changes at specific indices. The inputs to the cross-over operator are the V vector and the character-level embedding. 5 After crossover, we sum the two vectors. Alternative methods are compared in \u00a75.", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 29, |
|
"text": "4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 444, |
|
"end": 466, |
|
"text": "Alzantot et al. (2018)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Author Obfuscation", |
|
"sec_num": "3.2" |
|
}, |
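An illustrative sketch of the cross-over step: the sampled vector $V'$ and the character-level embedding exchange values at a few random indices, and the two resulting vectors are summed to form the decoder input. The single-index swap and the assumption that the two vectors share a dimensionality are ours; only the use of five cross-over points and the final summation follow the text.

```python
import torch

def crossover_and_sum(v_prime: torch.Tensor, emb: torch.Tensor, n_points: int = 5):
    """Cross over V' and the embedding at `n_points` random positions, then sum.

    Assumes the two vectors have the same last dimension.
    """
    v_new, e_new = v_prime.clone(), emb.clone()
    idx = torch.randperm(v_prime.shape[-1])[:n_points]     # random cross-over indices
    v_new[..., idx], e_new[..., idx] = emb[..., idx], v_prime[..., idx]
    return v_new + e_new                                    # combined decoder input
```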
|
{ |
|
"text": "Objectives The first part of the objective is a standard one for VAEs, the reconstruction loss, in Eqn (1). In terms of generating adversarial examples, training our generative model consists of (1) training for normal decoding and (2) training for perturbation. In the latter, the decoder learns to make changes to the input and the sampler learns to pick a vector that flips the Siamese author verification original (binary) decision (y OD ) to the perturbed decision (y PD ), in Eqn (2). Eqn (3) combines these two component losses.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Author Obfuscation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "lcons = (E q \u03c6 (V |S) [log p \u03b8 (S|V )] \u2212 KL(q \u03c6 (V |S) p(V )) (1) l sampler = MSE(y PD , |1 \u2212 y OD |) (2) l pert = \u03b1 \u00d7 lcons + (1 \u2212 \u03b1) \u00d7 l sampler", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Author Obfuscation", |
|
"sec_num": "3.2" |
|
}, |
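A minimal sketch of how the losses in Eqns (1)-(3) could be combined in code: a VAE reconstruction term with a KL penalty, plus a sampler loss pushing the perturbed Siamese decision $y_{PD}$ towards the flipped original decision $|1 - y_{OD}|$. Tensor shapes, names and the character-level cross-entropy reconstruction are our assumptions, not the paper's implementation.

```python
import torch
import torch.nn.functional as F

def perturbation_loss(recon_logits, targets, mu, logvar, y_pd, y_od, alpha=0.5):
    # recon_logits: (batch, seq_len, vocab); targets: (batch, seq_len) character ids
    # Eqn (1): reconstruction negative log-likelihood plus KL(q_phi(V|S) || p(V))
    l_cons = F.cross_entropy(recon_logits.transpose(1, 2), targets) \
             - 0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp())
    # Eqn (2): sampler loss, MSE between perturbed decision and flipped decision
    l_sampler = F.mse_loss(y_pd, torch.abs(1.0 - y_od))
    # Eqn (3): weighted combination; alpha = 0.5 in all experiments
    return alpha * l_cons + (1.0 - alpha) * l_sampler
```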
|
{ |
|
"text": "Eqn (1) provides a lower bound on the model evidence p(S|\u03b8, \u03c6), KL stands for Kullback-Leibler divergence. \u03b1 is set to 0.5 in all our experiments, making the backpropagation uniform on the sampler and the decoder. Obfuscation Training For the perturbation objective, we generate training data by applying widely used text modification operators very similar to rule-based systems such as Bevendorff et al. (2019) and Li et al. (2019) . We emphasise that unlike common rule-based or heuristic techniques, these operators are merely to generate the data entries as the target while training the decoder for obfuscation. Our selected modification rules can be categorized into four classes: shape similarity (e.g.\u00e4 \u2192a, O \u2192 0), sound similarity (e.g. ee \u2192 ea), swap (e.g. ie \u2192 ei) and punctuation modification (e.g. . \u2192 .. or :\" \u2192 :). As Bevendorff et al. 2019, we only apply these changes to a subset of instances in each text piece, which we select uniformly randomly with probability 1/3.", |
|
"cite_spans": [ |
|
{ |
|
"start": 388, |
|
"end": 412, |
|
"text": "Bevendorff et al. (2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 417, |
|
"end": 433, |
|
"text": "Li et al. (2019)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Author Obfuscation", |
|
"sec_num": "3.2" |
|
}, |
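A sketch of this training-data generation step: rules from the four classes (shape similarity, sound similarity, swap, punctuation) are applied to matching positions, each with probability 1/3. The rule dictionary below is a small illustrative subset, not the paper's full rule set.

```python
import random
import re

RULES = {
    "\u00e4": "a", "O": "0",   # shape similarity
    "ee": "ea",                # sound similarity
    "ie": "ei",                # swap
    ".": "..",                 # punctuation modification
}

def perturb_for_training(text, p=1.0 / 3.0, seed=None):
    """Apply each rule to a random subset of its occurrences with probability p."""
    rng = random.Random(seed)
    for pattern, replacement in RULES.items():
        text = re.sub(re.escape(pattern),
                      lambda m: replacement if rng.random() < p else m.group(0),
                      text)
    return text
```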
|
{ |
|
"text": "There is not yet a standard evaluation framework for this kind of work. Hence we observe various different evaluation techniques in the literature. This has also resulted in project specific definitions. For instance, in both PAN2018 and the Text-Bugger system, mis-spelled words are considered as valid \"paraphrasing\" due to the little impact they cause on human understanding. They argue characterlevel perturbation (i.e. mis-spelled words) are visually and semantically similar to the original ones (e.g. their and thier, some and s0me) and can deliver the original message (Potthast et al., 2018; Li et al., 2019; Rawlinson, 2007) . Work on adversarial example attacks has two broad types of evaluation. Misclassification or attack success (how well the adversarial examples fool the inference mechanism); and utility or imperceptibility (how well the adversarial examples preserve important aspects of the original). Work on author obfuscation generally fits with this, although in disparate ways; the PAN tasks, 6 for example, consider safety (broadly misclassification), soundness (textual entailment between original and adversarial texts) and sensibleness (inconspicuousness, or looking like regular text); the latter two are related to the typical utility criteria. Working on large authorial classes, we could not employ the exact set-up in PAN evaluation, however, our evaluation metrics also assess misclassification and utility.", |
|
"cite_spans": [ |
|
{ |
|
"start": 577, |
|
"end": 600, |
|
"text": "(Potthast et al., 2018;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 601, |
|
"end": 617, |
|
"text": "Li et al., 2019;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 618, |
|
"end": 634, |
|
"text": "Rawlinson, 2007)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Framework", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We calculate \"Perturbation Wins\" (P W ): the average proportion of times where a perturbed vector or text misleads an authorship identification inference model (Alzantot et al., 2018; Potthast et al., 2018) . Robust Vector Representation We first look at a system-internal evaluation. As noted above, the system objective is to generate a vector representation which is similar to the original message while eliminating clues to authorship. Having (S 1 , S 2 ) as an input pair with (V 1 , V 2 ) as their corresponding representations in vector space, V i is perturbed to V i which is then sent back to Siamese decision by replacing V i . It shows whether the Siamese author verification's original decision on", |
|
"cite_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 183, |
|
"text": "(Alzantot et al., 2018;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 184, |
|
"end": 206, |
|
"text": "Potthast et al., 2018)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Misclassification", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "(V 1 , V 2 ) is dif- ferent from the decision on (V 1 , V 2 ) and (V 1 , V 2 ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Misclassification", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "This gives a preliminary result: if the system cannot produce vectors that can fool the decider, it will not produce successfully perturbed texts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Misclassification", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "Perturbation Win in Text Space The author identification work of Saedi and Dras (2019) had as its primary evaluation, following the first work on deep Siamese networks (Koch et al., 2015) , N -way one-shot classification: a 'query' text is compared against texts by N authors, one of whom is also the author of the query text. The N -way task is tackled by assigning pairwise similarities to the query text and each author, in effect carrying out N author verification attempts. N -way inference performance is evaluated by the average accuracy over 150 N -way classifications. We consider N \u2208 {3, 5, 10, 50}.", |
|
"cite_spans": [ |
|
{ |
|
"start": 168, |
|
"end": 187, |
|
"text": "(Koch et al., 2015)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Misclassification", |
|
"sec_num": "4.1.1" |
|
}, |
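A sketch of the N-way one-shot evaluation just described, with a hypothetical `verify(text_a, text_b)` standing in for the Siamese same-author scorer: the (possibly perturbed) query is compared against one text per candidate author, and the highest-scoring author is predicted.

```python
import random

def n_way_accuracy(verify, texts_by_author, n=10, runs=150, seed=0):
    """Average accuracy over `runs` N-way episodes.

    `texts_by_author` maps author ids to lists of texts (>= 2 for the true author).
    """
    rng = random.Random(seed)
    authors, correct = list(texts_by_author), 0
    for _ in range(runs):
        candidates = rng.sample(authors, n)
        true_author = candidates[0]
        query, reference = rng.sample(texts_by_author[true_author], 2)
        scores = {a: verify(query,
                            reference if a == true_author
                            else rng.choice(texts_by_author[a]))
                  for a in candidates}
        correct += max(scores, key=scores.get) == true_author
    return correct / runs
```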
|
{ |
|
"text": "Our misclassification evaluation in text space involves calculating perturbation wins on both author verification and N -way classification. In the Nway evaluation, a perturbed query text is presented. We use two authorship inference models for this: the standalone Siamese authorship identification system of Saedi and Dras (2019), and the system of Koppel et al. (2011) . This latter is a key inference attacker in PAN tasks, and also the only similaritybased system with available code. Koppel works on iterative representation of pieces of text using a subset of all extracted character 4-grams and similarity measurements (Ruzicka metric) to identify the author of a piece of text (Koppel et al., 2011) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 351, |
|
"end": 371, |
|
"text": "Koppel et al. (2011)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 686, |
|
"end": 707, |
|
"text": "(Koppel et al., 2011)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Misclassification", |
|
"sec_num": "4.1.1" |
|
}, |
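A minimal sketch of the representation and similarity underlying the Koppel et al. (2011) attacker: character 4-gram frequency profiles compared with the Ruzicka (min/max) similarity. The repeated random feature subsampling of the full method is omitted; this fragment only illustrates the core measurement.

```python
from collections import Counter

def char_ngrams(text, n=4):
    """Character n-gram frequency profile of a text."""
    return Counter(text[i:i + n] for i in range(len(text) - n + 1))

def ruzicka(p, q):
    """Ruzicka (min/max) similarity between two frequency profiles."""
    keys = set(p) | set(q)
    num = sum(min(p[k], q[k]) for k in keys)
    den = sum(max(p[k], q[k]) for k in keys)
    return num / den if den else 0.0
```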
|
{ |
|
"text": "In addition to the N -way evaluation above, we evaluated misclassification under Koppel with 1000 authors, randomly selected from SIAMAO's testset. (Koppel does not require training, apart from counting character n-grams, and so is fast to use for many authors.) In the results we call this setup K-LG.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Misclassification", |
|
"sec_num": "4.1.1" |
|
}, |
|
{ |
|
"text": "We use the following measures to quantify the similarity between original and perturbed texts. (1) Bleu score (BL) (Papineni et al., 2002) , measuring n-gram overlap between original and generated texts, previously used to assess difference in style transfer (Shen et al., 2017) . (2) Edit distance (ED), considering the texts as strings and counting the minimum number of operations required to transform the original texts into their perturbed counterparts (Przybocki et al., 2006; Li et al., 2019) . This metric is believed to be used in commercial translation memory models (Bloodgood and Strauss, 2014) . 3Euclidean Distance (EC) between the vector representations: closeness in vector space typically corresponds to greater semantic similarity (Li et al., 2019; Alzantot et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 115, |
|
"end": 138, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 259, |
|
"end": 278, |
|
"text": "(Shen et al., 2017)", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 459, |
|
"end": 483, |
|
"text": "(Przybocki et al., 2006;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 484, |
|
"end": 500, |
|
"text": "Li et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 578, |
|
"end": 607, |
|
"text": "(Bloodgood and Strauss, 2014)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 750, |
|
"end": 767, |
|
"text": "(Li et al., 2019;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 768, |
|
"end": 790, |
|
"text": "Alzantot et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Utility: Text similarity", |
|
"sec_num": "4.1.2" |
|
}, |
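A sketch of the three similarity measures (BL, ED, EC), assuming NLTK and NumPy are available; the smoothing choice and the character-level edit distance are our assumptions about otherwise standard definitions.

```python
import numpy as np
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

def bleu(original, perturbed):
    """BL: word n-gram overlap between original and perturbed text."""
    return sentence_bleu([original.split()], perturbed.split(),
                         smoothing_function=SmoothingFunction().method1)

def edit_distance(a, b):
    """ED: character-level Levenshtein distance via dynamic programming."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        curr = [i]
        for j, cb in enumerate(b, 1):
            curr.append(min(prev[j] + 1, curr[j - 1] + 1, prev[j - 1] + (ca != cb)))
        prev = curr
    return prev[-1]

def euclidean(v1, v2):
    """EC: Euclidean distance between encoder vector representations."""
    return float(np.linalg.norm(np.asarray(v1) - np.asarray(v2)))
```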
|
{ |
|
"text": "The perturbed text should be natural-looking, in terms of grammaticality / acceptability. Prediction of language acceptability is now a standard NLP task, e.g. the CoLA task that is part of the GLUE benchmark . However, that is a binary task: sentences are judged acceptable or not. There is, instead, a notion of gradient grammaticality, where sentence grammaticality is measured on a scale of 0 to 1 (Lau et al., 2014) ; this could be more suited to capturing the changes we might see in our adversarial examples.", |
|
"cite_spans": [ |
|
{ |
|
"start": 402, |
|
"end": 420, |
|
"text": "(Lau et al., 2014)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Utility: Language acceptability", |
|
"sec_num": "4.1.3" |
|
}, |
|
{ |
|
"text": "BERT has previously been fine-tuned to produce a high-performing model for the CoLA task (Devlin et al., 2019) . For gradient grammaticality, a variety of models predating BERT have been trained on the Statistical Models of Grammaticality (SMOG) dataset, 7 and have been shown to correlate fairly well with human judgements (Lau et al., 2014 (Lau et al., , 2017 . Given the improvements over earlier models shown by BERT on the CoLA task, we built our model of language naturalness by fine-tuning BERT-large on the SMOG dataset. We refer to this model as BERT-SMOG. To validate our BERT-SMOG, we compare with models proposed in Lau et al. (2017) on the original dataset: its Pearson's r correlation with human judgements is around 0.8, much higher than their best scoring model (which predates contextual LMs).", |
|
"cite_spans": [ |
|
{ |
|
"start": 89, |
|
"end": 110, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 324, |
|
"end": 341, |
|
"text": "(Lau et al., 2014", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 361, |
|
"text": "(Lau et al., , 2017", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 639, |
|
"end": 645, |
|
"text": "(2017)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Utility: Language acceptability", |
|
"sec_num": "4.1.3" |
|
}, |
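A sketch of the BERT-SMOG recipe: BERT-large with a single-output regression head, fine-tuned with MSE against the 0-1 gradient grammaticality ratings. The fine-tuning loop and hyperparameters are omitted, and the checkpoint name and API usage are assumptions based on the Hugging Face transformers library rather than the paper's exact setup.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("bert-large-cased")
model = AutoModelForSequenceClassification.from_pretrained(
    "bert-large-cased", num_labels=1, problem_type="regression")

def predict_acceptability(sentences):
    """Predicted gradient grammaticality (roughly in [0, 1] after fine-tuning)."""
    batch = tokenizer(sentences, padding=True, truncation=True, return_tensors="pt")
    with torch.no_grad():
        scores = model(**batch).logits.squeeze(-1)
    return scores.tolist()
```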
|
{ |
|
"text": "In this evaluation category, we also provide the scores for the more common binary acceptability. For this, we fine-tuned BERT only on the CoLA dataset (BERT-CoLA). Evaluating BERT-CoLA on CoLA testset, our results are in line with the published benchmarks (Devlin et al., 2019) . Final evaluations are done on a subset of 700 randomly selected sentences from the Fanfiction database going through backtranslation, RAND modification and SIAMAO.", |
|
"cite_spans": [ |
|
{ |
|
"start": 257, |
|
"end": 278, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Utility: Language acceptability", |
|
"sec_num": "4.1.3" |
|
}, |
|
{ |
|
"text": "Several datasets have been used for author identification, including various PAN datasets. We use the dataset from Saedi and Dras (2019) consisting of 10000 authors from the domain of fanfiction, 8 as one that is large enough to train a deep learning system. We followed the FF-5K (5000 author) dataset setup under the one-shot evaluation (i.e. disjoint authors between train and test sets). This test set consists of over 10000 pairs covering 1665 authors not seen in training (more information in the supplementary material).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Core Models As in a VAE, our SIAMAO system starts with text that looks somewhat random, and as training proceeds comes to look more like the original text, encouraged by the reconstruction loss. At each epoch, then, there will be varying effects on misclassification and utility. Training the model for 6 epochs, we present results for both epoch 3 (SIAMAO 3 ) and epoch 5 (SIAMAO 5 ) to show the effect training has on different aspects of text modification with opposing objectives.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Baselines The author obfuscation approaches of the PAN competition are typically tailored to the PAN setup (classification-based, over a relatively small number of authors). Heuristic-based approaches are potentially applicable, but could not be applied here. 9 We therefore used backtranslation as our key baseline, as one that has recently produced decent results in related tasks (Prabhumoye et al., 2018) . Our experiments are done on two sets of languages with different accuracy in Google machine translation, English-French (BT-FR: good quality MT) and English-Persian (BT-PR: average-high MT). Random character modification (RAND), following the same rules explained in \u00a73.2, is another baseline. Variant Models To examine the effect of choices in the architecture (in particular, in \u00a73.2 under Perturbation Operators), we explored various ways of transferring the encoder's outputs to the sampler and generating the input to the decoder. The encoder generates two vectors, V 1 and V 2 . These vectors can be directly sent to the sampler (e.g. JUST-SUM method below), or go through some changes in the vector space before being fed to the sampler (e.g. SHIFT and AVE below). The sampler uses its input vector to sample a similar point (V ) from the normal distribution, which is then sent to the decoder. The decoder needs both V and embedding to generate an output sentence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 260, |
|
"end": 261, |
|
"text": "9", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 383, |
|
"end": 408, |
|
"text": "(Prabhumoye et al., 2018)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "The five methods we report are 1) SHIFT (the core method we define in \u00a73): V 1 and V 2 are shifted towards each other by 1/3 of their distance; the resulting vectors are sent to the sampler. 2) JUST-SUM: V i is the input to the sampler. 3) AVE: the element-wise average of V 1 and V 2 is the input to the sampler. In all these three methods the sum over cross-over between embedding vector and V is the input to the decoder. For both 4) CATEMB and 5) NOCROSS, the first step is the same as the SHIFT method. Then, in the former, the concatenation of embedding and V is the input to the decoder; in the latter sum of embedding and V is the input to the decoder.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Robust Vector Representation Replacing vectors with their perturbed version as explained in \u00a74.1.1 changes the inputs to the Siamese Decision sub-network (e.g", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Misclassification", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "(V 1 , V 2 ) \u2192 (V 1 , V 2 ))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Misclassification", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": ". This modification results in PW of over 90%, indicating authorial information can be hidden in vector space using SIAMAO.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Misclassification", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Perturbation Win in Text Space In terms of the classification across a large number of authors, K-LG in Table 1 shows that Koppel's accuracy of 0.644 over 1000 authors drops dramatically under all modifications. SIAMAO 3 causes the maximum fall in accuracy, RAND ranks second, followed by BT-PR. For SIAMAO, as expected, at epoch 5, where the VAE-style architecture has reconstructed the perturbed text to be closer to the original, the drop in classification accuracy is smaller. The two middle columns in Table 1 show the accuracy on original and perturbed data for N -way classification (N \u2208 {3, 5, 10, 50}). We see different behaviour across the two author identifiers and under different N s. Koppel classification accuracy decreases with all methods, with one of the SIA-MAO methods generally best. None of the methods -SIAMAO, backtranslation, or random changes -seem to be effective against the Siamese author identifier, which is rather surprising. However, in one way these results are in line with what Zhao et al. (2018b) lower when the classifier (i.e. Siamese author identifier in our case) is stronger. For the binary classification task of author verification that underpins the classification across all authors and N -way classification, we give some results under PW in Table 2 . It is interesting that while the proportion of perturbation wins in the verification context is relatively low, it still results in noticeable drops in the overall classification scores for Koppel as noted above. This is likely to be because the similarity scores are changed enough to affect the selection among N authors while not changing the pairwise binary prediction. Table 2 provides the Bleu scores, edit and Euclidean distances in the verification task, under random, back-translation and SIAMAO modifications. For our two variants of SIAMAO, SIAMAO 3 results in more modifications than SIAMAO 5 , reflecting the nature of VAEs. However, due to the other objective of the network, training must improve perturbations too. We observe higher perturbation win as well as higher Blue score for SIAMAO 5 . Given the fact that Blue score is calculated on word n-grams, this suggests the model may have learnt to modify texts mostly at spaces that do not break words (e.g. punctuation modification).", |
|
"cite_spans": [ |
|
{ |
|
"start": 1014, |
|
"end": 1033, |
|
"text": "Zhao et al. (2018b)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 104, |
|
"end": 111, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 507, |
|
"end": 514, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 1289, |
|
"end": 1296, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 1673, |
|
"end": 1680, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Misclassification", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "In terms of the baselines, BT-PR and BT-FR result in more modifications than RAND (higher edit and Euclidean distances). However, they achieve the highest Bleu score as well as perturbation win. SIAMAO ranks in the middle, with SIAMAO 5 showing the least text modification, being significantly more successful than RAND in all the four metrics but less successful in perturbation win and Bleu compared to the back-translation models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Similarity", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "In Figure 2 we give two sample extracts of perturbed texts from SIAMAO that fooled classifiers, to illustrate how the system changes text. It can be seen that the perturbation operators described in \u00a73.2 are applied only at some places: for example, the replacement of s by 5 does not occur at all possible locations, and similarly l by 1.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Text Similarity", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Training and finding a balance An obfuscation model has several objectives that contradict each other. So, the network learning process involves finding a balance between them; specifically, finding important positions in the input text to minimally modify, as well as improving obfuscation success. Using SIAMAO's test set after each training epoch, we evaluated the 4 aforementioned parameters. Figure 3 displays the trends for edit distance, Euclidean distance, Bleu and perturbation win follow during SIAMAO's 6 training epochs.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 397, |
|
"end": 405, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Text Similarity", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Epochs 1 to 3 present rather sharp upward trends for edit distance, Euclidean distance and perturbation win, coinciding with an expected major drop in Bleu. Epochs 3 to 5, on the other hand, show Bleu score increasing to its maximum in epoch 4 while edit and Euclidean distance experience a noticeable fall. Epoch 5 reaches an favorable balance in the parameters plus the most successful modification from a privacy point of view. However, this doesn't continue in epoch 6 which is an indicator of over-training. Table 3 presents the results for language acceptability as measured by BERT-SMOG and BERT-CoLA on a subset of perturbed Fanfiction database. One issue with applying these models to the obfuscated text is that SIAMAO is more likely to generate outof-vocabulary (OOV) words (e.g. cheaks) than the backtranslation models, and this affects the acceptability score, even if the OOV words themselves might be considered reasonable. The table thus also contains average number of OOV tokens in generated texts. The scores on the original are relatively high; the scores on the backtranslation models are close. This is not surprising given that number of OOV tokens is similar (in fact, it is surprising that number of OOV tokens is actually lower than in the original texts, even more for BT-PR than for BT-FR-perhaps the original OOVs are lost in translation). The average number of OOVs is much larger for the SIAMAO models and RAND. To understand the effect of number of OOVs, we took the original CoLA dataset and systematically replaced words with OOV tokens, and carried out some curve fitting of number of OOVs against BERT-CoLA score; this would allow us to estimate how a score might drop with an increasing number of OOVs. An exponential decay function appears to be a good fit. However, because CoLA sentences are much shorter than the generated texts, it is not possible to use such a curve for direct extrapolation. 10 Nevertheless, it does illustrate that it is not surprising for the language acceptability scores to be lower for the SIAMAO models, and that this is not necessarily indicative of substantially worse quality.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1936, |
|
"end": 1938, |
|
"text": "10", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 513, |
|
"end": 520, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Text Similarity", |
|
"sec_num": "5.2" |
|
}, |
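A sketch of the curve fitting mentioned above: the BERT-CoLA score is modelled as an exponential decay in the number of injected OOV tokens, with the asymptote corresponding to the roughly 0.3 floor noted in footnote 10. The functional form and initial parameter guesses are illustrative; the observed (count, score) pairs would come from the systematic OOV-replacement experiment.

```python
import numpy as np
from scipy.optimize import curve_fit

def exp_decay(n_oov, a, b, c):
    """score ~ c + a * exp(-b * n_oov): decays towards the asymptote c."""
    return c + a * np.exp(-b * n_oov)

def fit_decay(n_oov_counts, cola_scores):
    """Fit the decay curve to observed OOV counts and BERT-CoLA scores."""
    params, _ = curve_fit(exp_decay,
                          np.asarray(n_oov_counts, dtype=float),
                          np.asarray(cola_scores, dtype=float),
                          p0=(0.5, 0.5, 0.3), maxfev=10000)
    return params  # (a, b, c)
```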
|
{ |
|
"text": "As noted in \u00a74.3, we studied the effect of different ways of transferring the encoder's outputs to the sampler, beyond just a standard concatenation as in regular VAEs. SHIFT approach outperforms the other variants in most respects (misclassification, etc) while being similar in the text similarity measures (edit and Euclidean distance). This supports the intuition that regular VAE concatenation is not sufficient for this task, and perturbation operators of the sort we have proposed are necessary (scores are included in the supplementary material).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Variant Models", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "This work is the first to propose a deep learning architecture for generating textual adversarial examples that incorporates a similarity-based inference model rather than a standard classifier-based one. We explored this in the context of authorship obfuscation, where the goal is to hide the author from a similarity-based authorship identifier. Results indicate that our SIAMAO model can degrade the performance of a key standard authorship identification system, compared to baseline systems, with modifications that are of similar magnitude or lower. All approaches had difficulty against a Siamese authorship identification system, however.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Further Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "As this is the first work in this direction, many improvements are possible, particularly in the area of language acceptability. These improvements would be both to SIAMAO, in encouraging the adversarial examples towards greater acceptability, also in terms of the automatic evaluation metrics. Employing other deep learning adversarial architectures as a base would also be interesting.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Further Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "https://bit.ly/2ycdnVV 2 https://pan.webis.de/: shared tasks that are run annually on various aspects of authorship related tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "There are infinite data points in the vector space, not all of them can be mapped back to a meaningful piece of text; an acceptable area in the vector space has similar distribution to the input space.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We conducted experiments with the average vector and the 1/3 distance shift as explained here. We leave finding the best interpolation for further study.5 Specifically, we apply five crossovers between the V and the embedding vectors at random positions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
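The crossover mentioned in this footnote can be read as swapping segments between V and an embedding vector at a handful of random cut points. The sketch below is one plausible reading of that operator under our own assumptions, not the authors' exact code.

import numpy as np

def crossover(v, emb, n_points=5, rng=None):
    # swap alternating segments of v with the corresponding segments of emb,
    # with segment boundaries at n_points random positions
    if rng is None:
        rng = np.random.default_rng()
    out = v.copy()
    cuts = np.sort(rng.choice(np.arange(1, len(v)), size=n_points, replace=False))
    take_emb, start = False, 0
    for cut in list(cuts) + [len(v)]:
        if take_emb:
            out[start:cut] = emb[start:cut]
        take_emb = not take_emb
        start = cut
    return out

v = np.random.randn(300)    # illustrative vector V
emb = np.random.randn(300)  # illustrative embedding vector
perturbed = crossover(v, emb)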
|
{ |
|
"text": "https://pan.webis.de/clef18/ pan18-web/author-obfuscation.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Project website: https://clasp.gu.se/about/ people/shalom-lappin/smog.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/ChakavehSaedi/ Siamese-Author-Identification.9 TextBugger(Li et al., 2019) does not have an available associated code.Bevendorff et al. (2019) do helpfully provide code, but we could not get it to work for our setup.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Adjusting the scores on the original texts to match the number of OOVs in the SIAMAO and RAND models leads to values close to the curve asymptote, of around 0.3.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Writeprints: A Stylometric Approach to Identity-Level Identification and Similarity Detection in Cyberspace", |
|
"authors": [ |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Abbasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hsinchun", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "ACM Transactions on Information Systems", |
|
"volume": "26", |
|
"issue": "2", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ahmed Abbasi and Hsinchun Chen. 2008. Writeprints: A Stylometric Approach to Identity-Level Identifica- tion and Similarity Detection in Cyberspace. ACM Transactions on Information Systems, 26(2).", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Generating natural language adversarial examples", |
|
"authors": [ |
|
{ |
|
"first": "Moustafa", |
|
"middle": [], |
|
"last": "Alzantot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yash", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Elgohary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo-Jhang", |
|
"middle": [], |
|
"last": "Ho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mani", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2890--2896", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1316" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Moustafa Alzantot, Yash Sharma, Ahmed Elgohary, Bo-Jhang Ho, Mani Srivastava, and Kai-Wei Chang. 2018. Generating natural language adversarial ex- amples. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2890-2896, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Ai-gan: Attackinspired generation of adversarial examples", |
|
"authors": [ |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Bai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinlin", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shoudong", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tao Bai, Jun Zhao, Jinlin Zhu, Shoudong Han, J. Chen, and Bo Li. 2020. Ai-gan: Attack- inspired generation of adversarial examples. ArXiv, abs/2002.02196.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Heuristic authorship obfuscation", |
|
"authors": [ |
|
{ |
|
"first": "Janek", |
|
"middle": [], |
|
"last": "Bevendorff", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Potthast", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Hagen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benno", |
|
"middle": [], |
|
"last": "Stein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1098--1108", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Janek Bevendorff, Martin Potthast, Matthias Hagen, and Benno Stein. 2019. Heuristic authorship ob- fuscation. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguistics, pages 1098-1108.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Evasion attacks against machine learning at test time", |
|
"authors": [ |
|
{ |
|
"first": "Battista", |
|
"middle": [], |
|
"last": "Biggio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Igino", |
|
"middle": [], |
|
"last": "Corona", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Davide", |
|
"middle": [], |
|
"last": "Maiorca", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Blaine", |
|
"middle": [], |
|
"last": "Nelson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pavel", |
|
"middle": [], |
|
"last": "Nedim\u0161rndi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giorgio", |
|
"middle": [], |
|
"last": "Laskov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabio", |
|
"middle": [], |
|
"last": "Giacinto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Roli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Joint European conference on machine learning and knowledge discovery in databases", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "387--402", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Battista Biggio, Igino Corona, Davide Maiorca, Blaine Nelson, Nedim\u0160rndi\u0107, Pavel Laskov, Giorgio Giac- into, and Fabio Roli. 2013. Evasion attacks against machine learning at test time. In Joint European conference on machine learning and knowledge dis- covery in databases, pages 387-402. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Translation memory retrieval methods", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Bloodgood", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Strauss", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "202--210", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/E14-1022" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Bloodgood and Benjamin Strauss. 2014. Translation memory retrieval methods. In Proceed- ings of the 14th Conference of the European Chap- ter of the Association for Computational Linguistics, pages 202-210, Gothenburg, Sweden. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Generating sentences from a continuous space", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Samuel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vilnis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rafal", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samy", |
|
"middle": [], |
|
"last": "Jozefowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "10--21", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/K16-1002" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samuel R. Bowman, Luke Vilnis, Oriol Vinyals, An- drew Dai, Rafal Jozefowicz, and Samy Bengio. 2016. Generating sentences from a continuous space. In Proceedings of The 20th SIGNLL Con- ference on Computational Natural Language Learn- ing, pages 10-21, Berlin, Germany. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 NAACL-HLT. Association for Computing Machinery", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understand- ing. In Proceedings of the 2019 NAACL-HLT. Asso- ciation for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Explaining and harnessing adversarial examples", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Ian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathon", |
|
"middle": [], |
|
"last": "Goodfellow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Shlens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Szegedy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1412.6572" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ian J Goodfellow, Jonathon Shlens, and Christian Szegedy. 2014. Explaining and harnessing adversar- ial examples. arXiv preprint arXiv:1412.6572.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "A deep generative framework for paraphrase generation", |
|
"authors": [ |
|
{ |
|
"first": "Ankush", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arvind", |
|
"middle": [], |
|
"last": "Agarwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prawaan", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piyush", |
|
"middle": [], |
|
"last": "Rai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Thirty-Second AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ankush Gupta, Arvind Agarwal, Prawaan Singh, and Piyush Rai. 2018. A deep generative framework for paraphrase generation. In Thirty-Second AAAI Con- ference on Artificial Intelligence.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Overview of the author obfuscation task at pan 2017: Safety evaluation revisited", |
|
"authors": [ |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Hagen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Potthast", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benno", |
|
"middle": [], |
|
"last": "Stein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "CLEF", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthias Hagen, Martin Potthast, and Benno Stein. 2017. Overview of the author obfuscation task at pan 2017: Safety evaluation revisited. In CLEF.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Adversarial example generation with syntactically controlled paraphrase networks", |
|
"authors": [ |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Iyyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Wieting", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1875--1885", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-1170" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohit Iyyer, John Wieting, Kevin Gimpel, and Luke Zettlemoyer. 2018. Adversarial example generation with syntactically controlled paraphrase networks. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1875-1885, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Obfuscating document stylometry to preserve author anonymity", |
|
"authors": [ |
|
{ |
|
"first": "Gary", |
|
"middle": [], |
|
"last": "Kacmarcik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Gamon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the COLING/ACL on Main conference poster sessions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "444--451", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gary Kacmarcik and Michael Gamon. 2006. Ob- fuscating document stylometry to preserve author anonymity. In Proceedings of the COLING/ACL on Main conference poster sessions, pages 444-451. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Overview of the cross-domain authorship attribution task at {PAN} 2019", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Kestemont", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Efstathios", |
|
"middle": [], |
|
"last": "Stamatatos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Enrique", |
|
"middle": [], |
|
"last": "Manjavacas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Walter", |
|
"middle": [], |
|
"last": "Daelemans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Potthast", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benno", |
|
"middle": [], |
|
"last": "Stein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Working Notes of CLEF 2019-Conference and Labs of the Evaluation Forum", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--15", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Kestemont, Efstathios Stamatatos, Enrique Man- javacas, Walter Daelemans, Martin Potthast, and Benno Stein. 2019. Overview of the cross-domain authorship attribution task at {PAN} 2019. In Work- ing Notes of CLEF 2019-Conference and Labs of the Evaluation Forum, Lugano, Switzerland, September 9-12, 2019, pages 1-15.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "A Slightlymodified GI-based Author-verifier with Lots of Features (ASGALF)", |
|
"authors": [ |
|
{ |
|
"first": "Mahmoud", |
|
"middle": [], |
|
"last": "Khonji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Youssef", |
|
"middle": [], |
|
"last": "Iraqi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Working Notes for CLEF 2014 Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mahmoud Khonji and Youssef Iraqi. 2014. A Slightly- modified GI-based Author-verifier with Lots of Fea- tures (ASGALF). In Working Notes for CLEF 2014 Conference.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Siamese neural networks for one-shot image recognition", |
|
"authors": [ |
|
{ |
|
"first": "Gregory", |
|
"middle": [], |
|
"last": "Koch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zemel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "ICML Deep Learning Workshop", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gregory Koch, Richard Zemel, and Ruslan Salakhut- dinov. 2015. Siamese neural networks for one-shot image recognition. In ICML Deep Learning Work- shop, volume 2.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Authorship attribution in the wild", |
|
"authors": [ |
|
{ |
|
"first": "Moshe", |
|
"middle": [], |
|
"last": "Koppel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Schler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shlomo", |
|
"middle": [], |
|
"last": "Argamon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Language Resources and Evaluation", |
|
"volume": "45", |
|
"issue": "1", |
|
"pages": "83--94", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Moshe Koppel, Jonathan Schler, and Shlomo Argamon. 2011. Authorship attribution in the wild. Language Resources and Evaluation, 45(1):83-94.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Grammar variational autoencoder", |
|
"authors": [ |
|
{ |
|
"first": "Matt", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Kusner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brooks", |
|
"middle": [], |
|
"last": "Paige", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jos\u00e9 Miguel Hern\u00e1ndez-Lobato", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 34th International Conference on Machine Learning", |
|
"volume": "70", |
|
"issue": "", |
|
"pages": "1945--1954", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matt J. Kusner, Brooks Paige, and Jos\u00e9 Miguel Hern\u00e1ndez-Lobato. 2017. Grammar variational au- toencoder. In Proceedings of the 34th Interna- tional Conference on Machine Learning -Volume 70, ICML'17, page 1945-1954. JMLR.org.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Measuring gradience in speakers' grammaticality judgements", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Jey Han Lau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shalom", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lappin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jey Han Lau, Alexander Clark, and Shalom Lappin. 2014. Measuring gradience in speakers' grammat- icality judgements. In CogSci.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "A probabilistic view of linguistic knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Acceptability", |
|
"middle": [], |
|
"last": "Grammaticality", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Probability", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Cognitive science", |
|
"volume": "41", |
|
"issue": "", |
|
"pages": "1202--1241", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Grammaticality, acceptability, and probabil- ity: A probabilistic view of linguistic knowledge. Cognitive science, 41 5:1202-1241.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "TextBugger: Generating Adversarial Text Against Real-world Applications", |
|
"authors": [ |
|
{ |
|
"first": "Jinfeng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shouling", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianyu", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 26th Annual Network and Distributed System Security Symposium (NDSS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jinfeng Li, Shouling Ji, Tianyu Du, Bo Li, and Ting Wang. 2019. TextBugger: Generating Adversarial Text Against Real-world Applications. In Proceed- ings of the 26th Annual Network and Distributed Sys- tem Security Symposium (NDSS).", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Towards robust and privacy-preserving text representations", |
|
"authors": [ |
|
{ |
|
"first": "Yitong", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timothy", |
|
"middle": [], |
|
"last": "Baldwin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Cohn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "25--30", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-2005" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yitong Li, Timothy Baldwin, and Trevor Cohn. 2018. Towards robust and privacy-preserving text represen- tations. In Proceedings of the 56th Annual Meet- ing of the Association for Computational Linguistics (Volume 2: Short Papers), pages 25-30, Melbourne, Australia. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Use fewer instances of the letter \"i\": Toward writing style anonymization", |
|
"authors": [ |
|
{ |
|
"first": "W", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Andrew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sadia", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aylin", |
|
"middle": [], |
|
"last": "Afroz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ariel", |
|
"middle": [], |
|
"last": "Caliskan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [], |
|
"last": "Stolerman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Greenstadt", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "International Symposium on Privacy Enhancing Technologies Symposium", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "299--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrew WE McDonald, Sadia Afroz, Aylin Caliskan, Ariel Stolerman, and Rachel Greenstadt. 2012. Use fewer instances of the letter \"i\": Toward writ- ing style anonymization. In International Sympo- sium on Privacy Enhancing Technologies Sympo- sium, pages 299-318. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th annual meeting on association for computational linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th annual meeting on association for compu- tational linguistics, pages 311-318. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Anomaly detection with conditional variational autoencoders", |
|
"authors": [ |
|
{ |
|
"first": "Adrian", |
|
"middle": [], |
|
"last": "Pol", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Berger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gianluca", |
|
"middle": [], |
|
"last": "Cerminara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C\u00e9cile", |
|
"middle": [], |
|
"last": "Germain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maurizio", |
|
"middle": [], |
|
"last": "Pierini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "18th IEEE International Conference on Machine Learning and Applications. ICMLA", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adrian Pol, Victor Berger, Gianluca Cerminara, C\u00e9cile Germain, and Maurizio Pierini. 2019. Anomaly detection with conditional variational autoencoders. In 18th IEEE International Conference on Machine Learning and Applications. ICMLA.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Overview of the author obfuscation task at pan 2018: A new approach to measuring safety", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Potthast", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Schremmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Hagen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benno", |
|
"middle": [], |
|
"last": "Stein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "CLEF (Working Notes)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Potthast, Felix Schremmer, Matthias Hagen, and Benno Stein. 2018. Overview of the author ob- fuscation task at pan 2018: A new approach to mea- suring safety. In CLEF (Working Notes).", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Edit distance: A metric for machine translation evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Przybocki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gregory", |
|
"middle": [], |
|
"last": "Sanders", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Audrey", |
|
"middle": [ |
|
"Le" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the Fifth International Conference on Language Resources and Evaluation (LREC'06)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Przybocki, Gregory Sanders, and Audrey Le. 2006. Edit distance: A metric for machine trans- lation evaluation. In Proceedings of the Fifth In- ternational Conference on Language Resources and Evaluation (LREC'06), Genoa, Italy. European Lan- guage Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Variational autoencoder for deep learning of images, labels and captions", |
|
"authors": [ |
|
{ |
|
"first": "Yunchen", |
|
"middle": [], |
|
"last": "Pu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Gan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ricardo", |
|
"middle": [], |
|
"last": "Henao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chunyuan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Stevens", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lawrence", |
|
"middle": [], |
|
"last": "Carin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "29", |
|
"issue": "", |
|
"pages": "2352--2360", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yunchen Pu, Zhe Gan, Ricardo Henao, Xin Yuan, Chunyuan Li, Andrew Stevens, and Lawrence Carin. 2016. Variational autoencoder for deep learning of images, labels and captions. In D. D. Lee, M. Sugiyama, U. V. Luxburg, I. Guyon, and R. Gar- nett, editors, Advances in Neural Information Pro- cessing Systems 29, pages 2352-2360. Curran Asso- ciates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Can pseudonymity really guarantee privacy?", |
|
"authors": [ |
|
{ |
|
"first": "Pankaj", |
|
"middle": [], |
|
"last": "Josyula R Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rohatgi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "USENIX Security Symposium", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "85--96", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Josyula R Rao, Pankaj Rohatgi, et al. 2000. Can pseudonymity really guarantee privacy? In USENIX Security Symposium, pages 85-96.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "The significance of letter position in word recognition", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Rawlinson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "IEEE Aerospace and Electronic Systems Magazine", |
|
"volume": "22", |
|
"issue": "", |
|
"pages": "26--27", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Rawlinson. 2007. The significance of letter posi- tion in word recognition. IEEE Aerospace and Elec- tronic Systems Magazine, 22(1):26-27.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Obfuscating gender in social media writing", |
|
"authors": [ |
|
{ |
|
"first": "Sravana", |
|
"middle": [], |
|
"last": "Reddy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the First Workshop on NLP and Computational Social Science", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "17--26", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sravana Reddy and Kevin Knight. 2016. Obfuscating gender in social media writing. In Proceedings of the First Workshop on NLP and Computational So- cial Science, pages 17-26.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Overview of pan'16 -new challenges for authorship analysis: Cross-genre profiling, clustering, diarization, and obfuscation", |
|
"authors": [ |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Rosso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [ |
|
"Rangel" |
|
], |
|
"last": "Francisco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Pardo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Efstathios", |
|
"middle": [], |
|
"last": "Potthast", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Stamatatos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benno", |
|
"middle": [], |
|
"last": "Tschuggnall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Stein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "CLEF", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paolo Rosso, Francisco M. Rangel Pardo, Martin Pot- thast, Efstathios Stamatatos, Michael Tschuggnall, and Benno Stein. 2016. Overview of pan'16 -new challenges for authorship analysis: Cross-genre pro- filing, clustering, diarization, and obfuscation. In CLEF.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Character-level and multi-channel convolutional neural networks for large-scale authorship attribution", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Ruder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Parsa", |
|
"middle": [], |
|
"last": "Ghaffari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John G", |
|
"middle": [], |
|
"last": "Breslin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1609.06686" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Ruder, Parsa Ghaffari, and John G Breslin. 2016. Character-level and multi-channel convolu- tional neural networks for large-scale authorship at- tribution. arXiv preprint arXiv:1609.06686.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Siamese networks for large-scale author identification", |
|
"authors": [ |
|
{ |
|
"first": "Chakaveh", |
|
"middle": [], |
|
"last": "Saedi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dras", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1912.10616" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chakaveh Saedi and Mark Dras. 2019. Siamese net- works for large-scale author identification. arXiv preprint arXiv:1912.10616.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Authorship Verification Using the Imposters Method", |
|
"authors": [ |
|
{ |
|
"first": "Shachar", |
|
"middle": [], |
|
"last": "Seidman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Working Notes for CLEF 2013 Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shachar Seidman. 2013. Authorship Verification Using the Imposters Method. In Working Notes for CLEF 2013 Conference.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Style transfer from non-parallel text by cross-alignment", |
|
"authors": [ |
|
{ |
|
"first": "Tianxiao", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Lei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Regina", |
|
"middle": [], |
|
"last": "Barzilay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tommi", |
|
"middle": [], |
|
"last": "Jaakkola", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6830--6841", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianxiao Shen, Tao Lei, Regina Barzilay, and Tommi Jaakkola. 2017. Style transfer from non-parallel text by cross-alignment. In Advances in neural informa- tion processing systems, pages 6830-6841.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "A survey of modern authorship attribution methods", |
|
"authors": [ |
|
{ |
|
"first": "Efstathios", |
|
"middle": [], |
|
"last": "Stamatatos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Journal of the American Society for information Science and Technology", |
|
"volume": "60", |
|
"issue": "3", |
|
"pages": "538--556", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Efstathios Stamatatos. 2009. A survey of modern au- thorship attribution methods. Journal of the Ameri- can Society for information Science and Technology, 60(3):538-556.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "GLUE: A multi-task benchmark and analysis platform for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "7th International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. 2019. GLUE: A multi-task benchmark and analysis plat- form for natural language understanding. In 7th International Conference on Learning Representa- tions, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Adversarial example generation with adaptive gradient search for single and ensemble deep neural network. Information Sciences", |
|
"authors": [ |
|
{ |
|
"first": "Yatie", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chi-Man", |
|
"middle": [], |
|
"last": "Pun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "528", |
|
"issue": "", |
|
"pages": "147--167", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yatie Xiao, Chi-Man Pun, and Bo Liu. 2020. Adversar- ial example generation with adaptive gradient search for single and ensemble deep neural network. Infor- mation Sciences, 528:147-167.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Adversarially regularized autoencoders", |
|
"authors": [ |
|
{ |
|
"first": "Junbo", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kelly", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann", |
|
"middle": [], |
|
"last": "Lecun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 35th International Conference on Machine Learning", |
|
"volume": "80", |
|
"issue": "", |
|
"pages": "5902--5911", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Junbo Zhao, Yoon Kim, Kelly Zhang, Alexander Rush, and Yann LeCun. 2018a. Adversarially regularized autoencoders. In Proceedings of the 35th International Conference on Machine Learn- ing, volume 80 of Proceedings of Machine Learn- ing Research, pages 5902-5911, Stockholmsm\u00e4ssan, Stockholm Sweden. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Generating natural adversarial examples", |
|
"authors": [ |
|
{ |
|
"first": "Zhengli", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dheeru", |
|
"middle": [], |
|
"last": "Dua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Conference on Learning Representations (ICLR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhengli Zhao, Dheeru Dua, and Sameer Singh. 2018b. Generating natural adversarial examples. In Inter- national Conference on Learning Representations (ICLR).", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"text": "(a) A pair of texts (S 1 , S 2 ) are mapped into the vector-space (", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"text": "Rosie laughs as my cheeks cheaks burn, and I watch as she stands elegantly, blocking the sun from my eyes. 'I want to go swimming,' she tells tel1s me. I felt my cheeks cheaks flush flu5h slight1y. \"Sh...well, uh...\" Sam and Embry chuckled at my response resp0n5e .", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"text": "Samples of SIAMAO's perturbation that successfully fooled classification.", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"text": "Effect of 6 epochs of training on edit distance, Euclidean distance, Bleu and perturbation win.", |
|
"uris": null, |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td colspan=\"4\">Koppel Author Identification</td><td colspan=\"4\">Siamese Author Identification</td><td>K-LG</td></tr><tr><td>Model</td><td colspan=\"8\">3-way 5-way 10-way 50-way 3-way 5-way 10-way 50-way</td><td/></tr><tr><td>Original</td><td>0.640</td><td>0.567</td><td>0.427</td><td>0.327</td><td>0.933</td><td>0.853</td><td>0.707</td><td>0.400</td><td>0.644</td></tr><tr><td>SIAMAO3</td><td>0.513</td><td>0.493</td><td>0.353</td><td>0.260</td><td>0.913</td><td>0.867</td><td>0.773</td><td>0.433</td><td>0.407</td></tr><tr><td>SIAMAO5</td><td>0.540</td><td>0.487</td><td>0.360</td><td>0.220</td><td>0.940</td><td>0.873</td><td>0.760</td><td>0.433</td><td>0.446</td></tr><tr><td>RAND</td><td>0.593</td><td>0.513</td><td>0.400</td><td>0.240</td><td>0.933</td><td>0.900</td><td>0.793</td><td>0.507</td><td>0.414</td></tr><tr><td>BT-FR</td><td>0.613</td><td>0.526</td><td>0.433</td><td>0.273</td><td>0.827</td><td>0.740</td><td>0.573</td><td>0.353</td><td>0.558</td></tr><tr><td>BT-PR</td><td>0.607</td><td>0.500</td><td>0.340</td><td>0.293</td><td>0.913</td><td>0.847</td><td>0.733</td><td>0.413</td><td>0.429</td></tr></table>", |
|
"html": null, |
|
"text": "reported: success rate is noticeably", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"content": "<table><tr><td>Model</td><td>PW</td><td>EC</td><td>ED</td><td>BL</td></tr><tr><td colspan=\"5\">SIAMAO3 0.238 2597 289 0.098</td></tr><tr><td colspan=\"5\">SIAMAO5 0.375 1927 215 0.168</td></tr><tr><td>RAND</td><td colspan=\"4\">0.340 3442 222 0.070</td></tr><tr><td>BT-FR</td><td colspan=\"4\">0.399 4649 235 0.486</td></tr><tr><td>BT-PR</td><td colspan=\"4\">0.512 4891 427 0.256</td></tr></table>", |
|
"html": null, |
|
"text": "First two columns, Koppel and Siamese author identification accuracy on N -way classification. K-LG shows Koppel accuracy on 1000 authors.", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"content": "<table><tr><td>: Perturbation win (PW), Euclidean distance</td></tr><tr><td>(EC), edit distance (ED), and Bleu score (BL), compar-</td></tr><tr><td>ing perturbed text against the original.</td></tr></table>", |
|
"html": null, |
|
"text": "", |
|
"num": null |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"html": null, |
|
"text": "Language acceptability scores on a subset of original and perturbed Fanfiction data. Also included are average number of OOV tokens in texts.", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |