|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T11:08:39.560030Z" |
|
}, |
|
"title": "TextAttack: A Framework for Adversarial Attacks, Data Augmentation, and Adversarial Training in NLP", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"X" |
|
], |
|
"last": "Morris", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Virginia", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Eli", |
|
"middle": [], |
|
"last": "Lifland", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Virginia", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jin", |
|
"middle": [ |
|
"Yong" |
|
], |
|
"last": "Yoo", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Virginia", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jake", |
|
"middle": [], |
|
"last": "Grigsby", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Virginia", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Computer Science and Artificial Intelligence Laboratory", |
|
"institution": "", |
|
"location": { |
|
"region": "MIT" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yanjun", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Virginia", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "While there has been substantial research using adversarial attacks to analyze NLP models, each attack is implemented in its own code repository. It remains challenging to develop NLP attacks and utilize them to improve model performance. This paper introduces TextAttack, a Python framework for adversarial attacks, data augmentation, and adversarial training in NLP. TextAttack builds attacks from four components: a goal function, a set of constraints, a transformation, and a search method. TextAttack's modular design enables researchers to easily construct attacks from combinations of novel and existing components. TextAttack provides implementations of 16 adversarial attacks from the literature and supports a variety of models and datasets, including BERT and other transformers, and all GLUE tasks. TextAttack also includes data augmentation and adversarial training modules for using components of adversarial attacks to improve model accuracy and robustness. TextAttack is democratizing NLP: anyone can try data augmentation and adversarial training on any model or dataset, with just a few lines of code. Code and tutorials are available at https://github.com/QData/TextAttack.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "While there has been substantial research using adversarial attacks to analyze NLP models, each attack is implemented in its own code repository. It remains challenging to develop NLP attacks and utilize them to improve model performance. This paper introduces TextAttack, a Python framework for adversarial attacks, data augmentation, and adversarial training in NLP. TextAttack builds attacks from four components: a goal function, a set of constraints, a transformation, and a search method. TextAttack's modular design enables researchers to easily construct attacks from combinations of novel and existing components. TextAttack provides implementations of 16 adversarial attacks from the literature and supports a variety of models and datasets, including BERT and other transformers, and all GLUE tasks. TextAttack also includes data augmentation and adversarial training modules for using components of adversarial attacks to improve model accuracy and robustness. TextAttack is democratizing NLP: anyone can try data augmentation and adversarial training on any model or dataset, with just a few lines of code. Code and tutorials are available at https://github.com/QData/TextAttack.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Over the last few years, there has been growing interest in investigating the adversarial robustness of NLP models, including new methods for generating adversarial examples and better approaches to defending against these adversaries (Alzantot et al., 2018; Kuleshov et al., 2018; Gao et al., 2018; Ebrahimi et al., 2017; Zang et al., 2020; Pruthi et al., 2019) . It is difficult to compare these attacks directly and fairly, since they are often evaluated on different data samples and victim models. Re- (2019)'s TextFooler for a BERT-based sentiment classifier. Swapping out \"perfect\" with synonym \"spotless\" completely changes the model's prediction, even though the underlying meaning of the text has not changed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 235, |
|
"end": 258, |
|
"text": "(Alzantot et al., 2018;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 259, |
|
"end": 281, |
|
"text": "Kuleshov et al., 2018;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 282, |
|
"end": 299, |
|
"text": "Gao et al., 2018;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 300, |
|
"end": 322, |
|
"text": "Ebrahimi et al., 2017;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 323, |
|
"end": 341, |
|
"text": "Zang et al., 2020;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 362, |
|
"text": "Pruthi et al., 2019)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "implementing previous work as a baseline is often time-consuming and error-prone due to a lack of source code, and precisely replicating results is complicated by small details left out of the publication. These barriers make benchmark comparisons hard to trust and severely hinder the development of this field.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To encourage the development of the adversarial robustness field, we introduce TextAttack, a Python framework for adversarial attacks, data augmentation, and adversarial training in NLP.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To unify adversarial attack methods into one system, we decompose NLP attacks into four components: a goal function, a set of constraints, a transformation, and a search method. The attack attempts to perturb an input text such that the model output fulfills the goal function (i.e., indicating whether the attack is successful) and the perturbation adheres to the set of constraints (e.g., grammar constraint, semantic similarity constraint). A search method is used to find a sequence of transformations that produce a successful adversarial example.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "This modular design enables us to easily assemble attacks from the literature while reusing components that are shared across attacks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "TextAttack provides clean, readable implementations of 16 adversarial attacks from the literature. For the first time, these attacks can be benchmarked, compared, and analyzed in a standardized setting. TextAttack's design also allows researchers to easily construct new attacks from combinations of novel and existing components. In just a few lines of code, the same search method, transformation and constraints used in 's", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "TextFooler can be modified to attack a translation model with the goal of changing every word in the output.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "TextAttack is directly integrated with Hug-gingFace's transformers and nlp libraries. This allows users to test attacks on models and datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "TextAttack provides dozens of pre-trained models (LSTM, CNN, and various transformerbased models) on a variety of popular datasets. Currently TextAttack supports a multitude of tasks including summarization, machine translation, and all nine tasks from the GLUE benchmark.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "TextAttack also allows users to provide their own models and datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Ultimately, the goal of studying adversarial attacks is to improve model performance and robustness. To that end, TextAttack provides easyto-use tools for data augmentation and adversarial training. TextAttack's Augmenter class uses a transformation and a set of constraints to produce new samples for data augmentation. Attack recipes are re-used in a training loop that allows models to train on adversarial examples. These tools make it easier to train accurate and robust models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Uses for TextAttack include 1 : 1 All can be done in < 5 lines of code. See A.1. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "TextAttack aims to implement attacks which, given an NLP model, find a perturbation of an input sequence that satisfies the attack's goal and adheres to certain linguistic constraints. In this way, attacking an NLP model can be framed as a combinatorial search problem. The attacker must search within all potential transformations to find a sequence of transformations that generate a successful adversarial example.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The TextAttack Framework", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Each attack can be constructed from four components:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The TextAttack Framework", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "1. A task-specific goal function that determines whether the attack is successful in terms of the model outputs. Examples: untargeted classification, targeted classification, non-overlapping output, minimum BLEU score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The TextAttack Framework", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "2. A set of constraints that determine if a perturbation is valid with respect to the original input. Examples: maximum word embedding distance, part-of-speech consistency, grammar checker, minimum sentence encoding cosine similarity. 3. A transformation that, given an input, generates a set of potential perturbations. Examples: word embedding word swap, thesaurus word swap, homoglyph character substitution. 4. A search method that successively queries the model and selects promising perturbations from a set of transformations. Examples: greedy with word importance ranking, beam search, genetic algorithm. See A.2 for a full explanation of each goal function, constraint, transformation, and search method that's built-in to TextAttack.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The TextAttack Framework", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "TextAttack is available as a Python package installed from PyPI, or via direct download from GitHub. TextAttack is also available for use through our demo web app, displayed in Figure 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 177, |
|
"end": 185, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Developing NLP Attacks with TextAttack", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Python users can test attacks by creating and manipulating Attack objects. The command-line API offers textattack attack, which allows users to specify attacks from their four components or from a single attack recipe and test them on different models and datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Developing NLP Attacks with TextAttack", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "TextAttack supports several different output formats for attack results:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Developing NLP Attacks with TextAttack", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Printing results to stdout.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Developing NLP Attacks with TextAttack", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Printing to a text file or CSV.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Developing NLP Attacks with TextAttack", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Printing attack results to an HTML table.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Developing NLP Attacks with TextAttack", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Writing a table of attack results to a visualization server, like Visdom or Weights & Biases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Developing NLP Attacks with TextAttack", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "TextAttack's modular design allows us to implement many different attacks from past work in a shared framework, often by adding only one or two new components. Table 1 categorizes 16 attacks based on their goal functions, constraints, transformations and search methods.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 160, |
|
"end": 182, |
|
"text": "Table 1 categorizes 16", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Benchmarking Existing Attacks with Attack Recipes", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "All of these attacks are implemented as \"attack recipes\" in TextAttack and can be benchmarked with just a single command. See A.3 for a comparison between papers' reported attack results and the results achieved by running TextAttack.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Benchmarking Existing Attacks with Attack Recipes", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "As is clear from Table 1, many components are shared between NLP attacks. New attacks often reuse components from past work, adding one or two novel pieces. TextAttack allows researchers to focus on the generation of new components rather than replicating past results. For example, introduced TextFooler as a method for attacking classification and entailment models. If a researcher wished to experiment with applying TextFooler's search method, transformations, and constraints to attack translation models, all they need is to implement a translation goal function in TextAttack. They would then be able to plug in this goal function to create a novel attack that could be used to analyze translation models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Creating New Attacks by Combining Novel and Existing Components", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "As of the date of this submission, TextAttack provides users with 82 pre-trained models, including word-level LSTM, word-level CNN, BERT, and other transformer based models pre-trained on various datasets provided by HuggingFace nlp. Since", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluating Attacks on TextAttack's Pre-Trained Models", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "TextAttack is integrated with the nlp library, it can automatically load the test or validation data set for the corresponding pre-trained model. While the literature has mainly focused on classification and entailment, TextAttack's pretrained models enable research on the robustness of models across all GLUE tasks. TextAttack is model-agnostic -meaning it can run attacks on models implemented in any deep learning framework. Model objects must be able to take a string (or list of strings) and return an output that can be processed by the goal function. For example, machine translation models take a list of strings as input and produce a list of strings as output. Classification and entailment models return an array of scores. As long as the user's model meets this specification, the model is fit to use with TextAttack.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluating Attacks on TextAttack's Pre-Trained Models", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "TextAttack users can train standard LSTM, CNN, and transformer based models, or a usercustomized model on any dataset from the nlp library using the textattack train command. Just like pre-trained models, user-trained models are compatible with commands like textattack attack and textattack eval.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Training", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "While searching for adversarial examples, TextAttack's transformations generate perturbations of the input text, and apply constraints to verify their validity. These tools can be reused to dramatically expand the training dataset by introducing perturbed versions of existing samples. The textattack augment command gives users access to a number of pre-packaged recipes for augmenting their dataset. This is a stand-alone feature that can be used with any model or training framework. When using TextAttack's models and training pipeline, textattack train --augment automatically expands the dataset before training begins. Users can specify the fraction of each input that should be modified and how many additional versions of each example to create. This makes it easy to use existing augmentation recipes on different models and datasets, and is a great way to benchmark new techniques. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Augmentation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "With textattack train --attack, attack recipes can be used to create new training sets of adversarial examples. After training for a number of epochs on the clean training set, the attack generates an adversarial version of each input. This perturbed version of the dataset is substituted for the original, and is periodically regenerated according to the model's current weaknesses. The resulting model can be significantly more robust against the attack used during training. Table 2 shows the accuracy of a standard LSTM classifier with and without adversarial training against different attack recipes implemented in TextAttack.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 478, |
|
"end": 485, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Adversarial Training", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "TextAttack is optimized under-the-hood to make implementing and running adversarial attacks simple and fast.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TextAttack Under the Hood", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "AttackedText. A common problem with implementations of NLP attacks is that the original text is discarded after tokenization; thus, the transformation is performed on the tokenized version of the text. This causes issues with capitalization and word segmentation. Sometimes attacks swap a piece of a word for a complete word (for example, transforming ''aren't\" into ''aren'too\").", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TextAttack Under the Hood", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "To solve this problem, TextAttack stores each input as a AttackedText object which contains the original text and helper methods for transforming the text while retaining tokenization.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TextAttack Under the Hood", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Instead of strings or tensors, Table 2 : The default LSTM model trained on 3k samples from the sst2 dataset. The baseline uses early stopping on a clean training set. deepwordbug and textfooler attacks are used for adversarial training. 'Accuracy Under Attack' on the eval set is reported for several different attack types.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 31, |
|
"end": 38, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "TextAttack Under the Hood", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "classes in TextAttack operate primarily on AttackedText objects. When words are added, swapped, or deleted, an AttackedText can maintain proper punctuation and capitalization. The AttackedText also contains implementations for common linguistic functions like splitting text into words, splitting text into sentences, and part-of-speech tagging.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TextAttack Under the Hood", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Caching. Search methods frequently encounter the same input at different points in the search. In these cases, it is wise to pre-store values to avoid unnecessary computation. For each input examined during the attack, TextAttack caches its model output, as well as the whether or not it passed all of the constraints. For some search methods, this memoization can save a significant amount of time. 2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TextAttack Under the Hood", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We draw inspiration from the Transformers library (Wolf et al., 2019) as an example of a well-designed Natural Language Processing library. Some of TextAttack's models and tokenizers are implemented using Transformers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 50, |
|
"end": 69, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "cleverhans (Papernot et al., 2018 ) is a library for constructing adversarial examples for computer vision models. Like cleverhans, we aim to provide methods that generate adversarial examples across a variety of models and datasets. In some sense, TextAttack strives to be a solution like cleverhans for the NLP community. Like cleverhans, attacks in TextAttack all implement a base Attack class. However, while cleverhans implements many disparate attacks in separate modules, TextAttack builds attacks from a library of shared components.", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 33, |
|
"text": "(Papernot et al., 2018", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "There are some existing open-source libraries related to adversarial examples in NLP. Trickster proposes a method for attacking NLP models based on graph search, but lacks the ability to ensure that generated examples satisfy a given constraint (Kulynych et al., 2018) . TEAPOT is a library for evaluating adversarial perturbations on text, but only supports the application of ngram-based comparisons for evaluating attacks on machine translation models (Michel et al., 2019) . Most recently, AllenNLP Interpret includes functionality for running adversarial attacks on NLP models, but is intended only for the purpose of interpretability, and only supports attacks via input-reduction or greedy gradient-based word swap (Wallace et al., 2019) . TextAttack has a broader scope than any of these libraries: it is designed to be extendable to any NLP attack.", |
|
"cite_spans": [ |
|
{ |
|
"start": 245, |
|
"end": 268, |
|
"text": "(Kulynych et al., 2018)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 455, |
|
"end": 476, |
|
"text": "(Michel et al., 2019)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 722, |
|
"end": 744, |
|
"text": "(Wallace et al., 2019)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We presented TextAttack, an open-source framework for testing the robustness of NLP models. TextAttack defines an attack in four modules: a goal function, a list of constraints, a transformation, and a search method. This allows us to compose attacks from previous work from these modules and compare them in a shared environment. These attacks can be reused for data augmentation and adversarial training. As new attacks are developed, we will add their components to TextAttack. We hope TextAttack helps lower the barrier to entry for research into robustness and data augmentation in NLP. 3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "The authors would like to thank everyone who has contributed to make TextAttack a reality: Hanyu Liu, Kevin Ivey, Bill Zhang, and Alan Zheng, to name a few. Thanks to the IGA creators for contributing an implementation of their algorithm to our framework. Thanks to the folks at HuggingFace for creating such easy-touse software; without them, TextAttack would not be what it is today.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Caching alone speeds up the genetic algorithm of Alzantot et al. (2018) by a factor of 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For more information, an appendix is available online here.6", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Meteor, m-bleu and m-ter: Evaluation metrics for highcorrelation with human rankings of machine translation output", |
|
"authors": [ |
|
{ |
|
"first": "Abhaya", |
|
"middle": [], |
|
"last": "Agarwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "WMT@ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhaya Agarwal and Alon Lavie. 2008. Meteor, m-bleu and m-ter: Evaluation metrics for high- correlation with human rankings of machine trans- lation output. In WMT@ACL.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Generating natural language adversarial examples", |
|
"authors": [ |
|
{ |
|
"first": "Moustafa", |
|
"middle": [], |
|
"last": "Alzantot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yash", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Elgohary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo-Jhang", |
|
"middle": [], |
|
"last": "Ho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mani", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Moustafa Alzantot, Yash Sharma, Ahmed Elgohary, Bo-Jhang Ho, Mani B. Srivastava, and Kai-Wei Chang. 2018. Generating natural language adversar- ial examples. ArXiv, abs/1804.07998.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Chris Tar, Yun-Hsuan Sung, Brian Strope, and Ray Kurzweil", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Matthew Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinfei", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Sheng Yi Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicole", |
|
"middle": [], |
|
"last": "Hua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rhomni", |
|
"middle": [], |
|
"last": "Limtiaco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "St", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "John", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mario", |
|
"middle": [], |
|
"last": "Constant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Guajardo-Cespedes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Matthew Cer, Yinfei Yang, Sheng yi Kong, Nan Hua, Nicole Limtiaco, Rhomni St. John, Noah Constant, Mario Guajardo-Cespedes, Steve Yuan, Chris Tar, Yun-Hsuan Sung, Brian Strope, and Ray Kurzweil. 2018. Universal sentence encoder. ArXiv, abs/1803.11175.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Seq2sick: Evaluating the robustness of sequence-to-sequence models with adversarial examples", |
|
"authors": [ |
|
{ |
|
"first": "Minhao", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinfeng", |
|
"middle": [], |
|
"last": "Yi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pin-Yu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cho-Jui", |
|
"middle": [], |
|
"last": "Hsieh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Minhao Cheng, Jinfeng Yi, Pin-Yu Chen, Huan Zhang, and Cho-Jui Hsieh. 2018. Seq2sick: Evaluating the robustness of sequence-to-sequence models with ad- versarial examples.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Supervised learning of universal sentence representations from natural language inference data", |
|
"authors": [ |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lo\u00efc", |
|
"middle": [], |
|
"last": "Barrault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bordes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "EMNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexis Conneau, Douwe Kiela, Holger Schwenk, Lo\u00efc Barrault, and Antoine Bordes. 2017. Supervised learning of universal sentence representations from natural language inference data. In EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Hownet and the computation of meaning", |
|
"authors": [ |
|
{ |
|
"first": "Zhendong", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiang", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changling", |
|
"middle": [], |
|
"last": "Hao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhendong Dong, Qiang Dong, and Changling Hao. 2006. Hownet and the computation of meaning.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Hotflip: White-box adversarial examples for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Javid", |
|
"middle": [], |
|
"last": "Ebrahimi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anyi", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Lowd", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dejing", |
|
"middle": [], |
|
"last": "Dou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Javid Ebrahimi, Anyi Rao, Daniel Lowd, and Dejing Dou. 2017. Hotflip: White-box adversarial exam- ples for text classification. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Pathologies of neural models make interpretations difficult", |
|
"authors": [ |
|
{

"first": "Shi",

"middle": [],

"last": "Feng",

"suffix": ""

},

{

"first": "Eric",

"middle": [],

"last": "Wallace",

"suffix": ""

},

{

"first": "Alvin",

"middle": [],

"last": "Grissom",

"suffix": "II"

},

{

"first": "Mohit",

"middle": [],

"last": "Iyyer",

"suffix": ""

},

{

"first": "Pedro",

"middle": [],

"last": "Rodriguez",

"suffix": ""

},

{

"first": "Jordan",

"middle": [],

"last": "Boyd-Graber",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shi Feng, Eric Wallace, Alvin Grissom II, Mohit Iyyer, Pedro Rodriguez, and Jordan Boyd-Graber. 2018. Pathologies of neural models make interpretations difficult.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Black-box generation of adversarial text sequences to evade deep learning classifiers", |
|
"authors": [ |
|
{ |
|
"first": "Ji", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jack", |
|
"middle": [], |
|
"last": "Lanchantin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mary", |
|
"middle": [ |
|
"Lou" |
|
], |
|
"last": "Soffa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanjun", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "IEEE Security and Privacy Workshops", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "50--56", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ji Gao, Jack Lanchantin, Mary Lou Soffa, and Yanjun Qi. 2018. Black-box generation of adversarial text sequences to evade deep learning classifiers. 2018 IEEE Security and Privacy Workshops (SPW), pages 50-56.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Bae: Bert-based adversarial examples for text classification", |
|
"authors": [ |
|
{ |
|
"first": "Siddhant", |
|
"middle": [], |
|
"last": "Garg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Goutham", |
|
"middle": [], |
|
"last": "Ramakrishnan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Siddhant Garg and Goutham Ramakrishnan. 2020. Bae: Bert-based adversarial examples for text clas- sification.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Learning to write with cooperative discriminators", |
|
"authors": [ |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Holtzman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Buys", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxwell", |
|
"middle": [], |
|
"last": "Forbes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bosselut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Golub", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1638--1649", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1152" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ari Holtzman, Jan Buys, Maxwell Forbes, Antoine Bosselut, David Golub, and Yejin Choi. 2018. Learning to write with cooperative discriminators. In Proceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 1638-1649, Melbourne, Aus- tralia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Adversarial examples for evaluating reading comprehension systems", |
|
"authors": [ |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2021--2031", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1215" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robin Jia and Percy Liang. 2017. Adversarial exam- ples for evaluating reading comprehension systems. In Proceedings of the 2017 Conference on Empiri- cal Methods in Natural Language Processing, pages 2021-2031, Copenhagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Certified robustness to adversarial word substitutions", |
|
"authors": [ |
|
{ |
|
"first": "Robin", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aditi", |
|
"middle": [], |
|
"last": "Raghunathan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kerem", |
|
"middle": [], |
|
"last": "G\u00f6ksel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "EMNLP/IJCNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robin Jia, Aditi Raghunathan, Kerem G\u00f6ksel, and Percy Liang. 2019. Certified robustness to adversar- ial word substitutions. In EMNLP/IJCNLP.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Is bert really robust? natural language attack on text classification and entailment", |
|
"authors": [ |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhijing", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joey", |
|
"middle": [ |
|
"Tianyi" |
|
], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Szolovits", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Di Jin, Zhijing Jin, Joey Tianyi Zhou, and Peter Szolovits. 2019. Is bert really robust? natural lan- guage attack on text classification and entailment. ArXiv, abs/1907.11932.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Exploring the limits of language modeling", |
|
"authors": [ |
|
{ |
|
"first": "Rafal", |
|
"middle": [], |
|
"last": "J\u00f3zefowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Schuster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonghui", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rafal J\u00f3zefowicz, Oriol Vinyals, Mike Schuster, Noam Shazeer, and Yonghui Wu. 2016. Exploring the lim- its of language modeling. ArXiv, abs/1602.02410.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Particle swarm optimization", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Kennedy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Russell", |
|
"middle": [], |
|
"last": "Eberhart", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Proceedings of ICNN'95-International Conference on Neural Networks", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "1942--1948", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James Kennedy and Russell Eberhart. 1995. Particle swarm optimization. In Proceedings of ICNN'95- International Conference on Neural Networks, vol- ume 4, pages 1942-1948. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Skip-thought vectors",
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Kiros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yukun", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruslan", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Zemel", |
|
"suffix": "" |
|
}, |
|
{

"first": "Raquel",

"middle": [],

"last": "Urtasun",

"suffix": ""

},

{

"first": "Antonio",

"middle": [],

"last": "Torralba",

"suffix": ""

},

{

"first": "Sanja",

"middle": [],

"last": "Fidler",

"suffix": ""

}
|
], |
|
"year": 2015,
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan Kiros, Yukun Zhu, Ruslan Salakhutdinov, Richard S. Zemel, Raquel Urtasun, Antonio Tor- ralba, and Sanja Fidler. 2015. Skip-thought vectors. ArXiv, abs/1506.06726.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Adversarial examples for natural language classification problems", |
|
"authors": [ |
|
{ |
|
"first": "Volodymyr", |
|
"middle": [], |
|
"last": "Kuleshov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shantanu", |
|
"middle": [], |
|
"last": "Thakoor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tingfung", |
|
"middle": [], |
|
"last": "Lau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefano", |
|
"middle": [], |
|
"last": "Ermon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Volodymyr Kuleshov, Shantanu Thakoor, Tingfung Lau, and Stefano Ermon. 2018. Adversarial exam- ples for natural language classification problems.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Evading classifiers in discrete domains with provable optimality guarantees", |
|
"authors": [ |
|
{ |
|
"first": "Bogdan", |
|
"middle": [], |
|
"last": "Kulynych", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Hayes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Samarin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carmela", |
|
"middle": [], |
|
"last": "Troncoso", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bogdan Kulynych, Jamie Hayes, Nikita Samarin, and Carmela Troncoso. 2018. Evading classifiers in dis- crete domains with provable optimality guarantees. CoRR, abs/1810.10939.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Textbugger: Generating adversarial text against real-world applications", |
|
"authors": [ |
|
{ |
|
"first": "Jinfeng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shouling", |
|
"middle": [], |
|
"last": "Ji", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianyu", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ting", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jinfeng Li, Shouling Ji, Tianyu Du, Bo Li, and Ting Wang. 2019. Textbugger: Generating adversar- ial text against real-world applications. ArXiv, abs/1812.05271.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Bert-attack: Adversarial attack against bert using bert",
|
"authors": [ |
|
{ |
|
"first": "Linyang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruotian", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{

"first": "Qipeng",

"middle": [],

"last": "Guo",

"suffix": ""

},

{

"first": "Xiangyang",

"middle": [],

"last": "Xue",

"suffix": ""

},

{

"first": "Xipeng",

"middle": [],

"last": "Qiu",

"suffix": ""

}
|
], |
|
"year": 2020,
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Linyang Li, Ruotian Ma, Qipeng Guo, Xiangyang Xue, and Xipeng Qiu. 2020. Bert-attack: Adversarial at- tack against bert using bert.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "On evaluation of adversarial perturbations for sequence-to-sequence models", |
|
"authors": [ |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Michel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xian", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [ |
|
"Miguel" |
|
], |
|
"last": "Pino", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul Michel, Xian Li, Graham Neubig, and Juan Miguel Pino. 2019. On evaluation of ad- versarial perturbations for sequence-to-sequence models. CoRR, abs/1903.06620.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Introduction to wordnet: An on-line lexical database", |
|
"authors": [ |
|
{ |
|
"first": "George Armitage", |
|
"middle": [], |
|
"last": "Miller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Beckwith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christiane", |
|
"middle": [], |
|
"last": "Fellbaum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Derek", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Miller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "International Journal of Lexicography", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "235--244", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "George Armitage Miller, Richard Beckwith, Christiane Fellbaum, Derek Gross, and Katherine J. Miller. 1990. Introduction to wordnet: An on-line lexical database. International Journal of Lexicography, 3:235-244.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Counter-fitting word vectors to linguistic constraints", |
|
"authors": [ |
|
{

"first": "Nikola",

"middle": [],

"last": "Mrk\u0161i\u0107",

"suffix": ""

},

{

"first": "Diarmuid",

"middle": [
"O"
],

"last": "S\u00e9aghdha",

"suffix": ""

},

{

"first": "Blaise",

"middle": [],

"last": "Thomson",

"suffix": ""

},

{

"first": "Milica",

"middle": [],

"last": "Ga\u0161i\u0107",

"suffix": ""

},

{

"first": "Lina",

"middle": [],

"last": "Rojas-Barahona",

"suffix": ""

},

{

"first": "Pei-Hao",

"middle": [],

"last": "Su",

"suffix": ""

},

{

"first": "David",

"middle": [],

"last": "Vandyke",

"suffix": ""

},

{

"first": "Tsung-Hsien",

"middle": [],

"last": "Wen",

"suffix": ""

},

{

"first": "Steve",

"middle": [],

"last": "Young",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1603.00892" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikola Mrk\u0161i\u0107, Diarmuid O S\u00e9aghdha, Blaise Thom- son, Milica Ga\u0161i\u0107, Lina Rojas-Barahona, Pei- Hao Su, David Vandyke, Tsung-Hsien Wen, and Steve Young. 2016. Counter-fitting word vec- tors to linguistic constraints. arXiv preprint arXiv:1603.00892.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "A rule-based style and grammar checker", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Naber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Naber et al. 2003. A rule-based style and gram- mar checker. Citeseer.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Technical report on the cleverhans v2.1.0 adversarial examples library", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Matyasko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vahid", |
|
"middle": [], |
|
"last": "Behzadan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Hambardzumyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhishuai", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi-Lin", |
|
"middle": [], |
|
"last": "Juang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Sheatsley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhibhav", |
|
"middle": [], |
|
"last": "Garg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Uesato", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Willi", |
|
"middle": [], |
|
"last": "Gierke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinpeng", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Berthelot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Hendricks", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonas", |
|
"middle": [], |
|
"last": "Rauber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rujun", |
|
"middle": [], |
|
"last": "Long", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1610.00768" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander Matyasko, Vahid Behzadan, Karen Ham- bardzumyan, Zhishuai Zhang, Yi-Lin Juang, Zhi Li, Ryan Sheatsley, Abhibhav Garg, Jonathan Uesato, Willi Gierke, Yinpeng Dong, David Berthelot, Paul Hendricks, Jonas Rauber, and Rujun Long. 2018. Technical report on the cleverhans v2.1.0 adversarial examples library. arXiv preprint arXiv:1610.00768.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2001. Bleu: a method for automatic eval- uation of machine translation. In ACL.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "chrf: character n-gram f-score for automatic mt evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Maja", |
|
"middle": [], |
|
"last": "Popovic", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maja Popovic. 2015. chrf: character n-gram f-score for automatic mt evaluation. In WMT@EMNLP.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Combating adversarial misspellings with robust word recognition", |
|
"authors": [ |
|
{ |
|
"first": "Danish", |
|
"middle": [], |
|
"last": "Pruthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bhuwan", |
|
"middle": [], |
|
"last": "Dhingra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Lipton",
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Danish Pruthi, Bhuwan Dhingra, and Zachary C. Lip- ton. 2019. Combating adversarial misspellings with robust word recognition.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Language models are unsupervised multitask learners", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rewon", |
|
"middle": [], |
|
"last": "Child", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dario", |
|
"middle": [], |
|
"last": "Amodei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Jeff Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Sentencebert: Sentence embeddings using siamese bertnetworks", |
|
"authors": [ |
|
{ |
|
"first": "Nils", |
|
"middle": [], |
|
"last": "Reimers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence- bert: Sentence embeddings using siamese bert- networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Generating natural language adversarial examples through probability weighted word saliency", |
|
"authors": [ |
|
{

"first": "Shuhuai",

"middle": [],

"last": "Ren",

"suffix": ""

},

{

"first": "Yihe",

"middle": [],

"last": "Deng",

"suffix": ""

},

{

"first": "Kun",

"middle": [],

"last": "He",

"suffix": ""

},

{

"first": "Wanxiang",

"middle": [],

"last": "Che",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1085--1097", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1103" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shuhuai Ren, Yihe Deng, Kun He, and Wanxiang Che. 2019. Generating natural language adversarial ex- amples through probability weighted word saliency. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1085-1097, Florence, Italy. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "It's morphin' time! Combating linguistic discrimination with inflectional perturbations", |
|
"authors": [ |
|
{ |
|
"first": "Samson", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shafiq", |
|
"middle": [], |
|
"last": "Joty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min-Yen", |
|
"middle": [], |
|
"last": "Kan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2920--2935", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samson Tan, Shafiq Joty, Min-Yen Kan, and Richard Socher. 2020. It's morphin' time! Combating linguistic discrimination with inflectional perturba- tions. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 2920-2935, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Allennlp interpret: A framework for explaining predictions of nlp models", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Wallace", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Tuyls", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junlin", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjay", |
|
"middle": [], |
|
"last": "Subramanian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameer", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Wallace, Jens Tuyls, Junlin Wang, Sanjay Subra- manian, Matthew Gardner, and Sameer Singh. 2019. Allennlp interpret: A framework for explaining pre- dictions of nlp models. ArXiv, abs/1909.09251.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Natural language adversarial attacks and defenses in word level", |
|
"authors": [ |
|
{ |
|
"first": "Xiaosen", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao",

"middle": [],

"last": "Jin",
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kun", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaosen Wang, Hao Jin, and Kun He. 2019. Natural language adversarial attacks and defenses in word level.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "EDA: easy data augmentation techniques for boosting performance on text classification tasks", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason W. Wei and Kai Zou. 2019. EDA: easy data aug- mentation techniques for boosting performance on text classification tasks. CoRR, abs/1901.11196.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Transformers: State-of-the-art natural language processing",
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{

"first": "R\u00e9mi",

"middle": [],

"last": "Louf",

"suffix": ""

},

{

"first": "Morgan",

"middle": [],

"last": "Funtowicz",

"suffix": ""

},

{

"first": "Jamie",

"middle": [],

"last": "Brew",

"suffix": ""

}
|
], |
|
"year": 2019,
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R'emi Louf, Morgan Funtow- icz, and Jamie Brew. 2019. Transformers: State-of- the-art natural language processing.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Word-level textual adversarial attacking as combinatorial optimization", |
|
"authors": [ |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Zang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fanchao", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chenghao", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyuan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maosong", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6066--6080", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yuan Zang, Fanchao Qi, Chenghao Yang, Zhiyuan Liu, Meng Zhang, Qun Liu, and Maosong Sun. 2020. Word-level textual adversarial attacking as combina- torial optimization. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 6066-6080, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Bertscore: Evaluating text generation with bert", |
|
"authors": [ |
|
{ |
|
"first": "Tianyi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "*", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Varsha", |
|
"middle": [], |
|
"last": "Kishore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "*", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "*", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kilian", |
|
"middle": [ |
|
"Q" |
|
], |
|
"last": "Weinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianyi Zhang*, Varsha Kishore*, Felix Wu*, Kilian Q. Weinberger, and Yoav Artzi. 2020. Bertscore: Evaluating text generation with bert. In International Conference on Learning Representations.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Adversarial example generated using Jin et al." |
|
}, |
|
"FIGREF1": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Main features of TextAttack." |
|
}, |
|
"FIGREF2": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Screenshot of TextAttack's web interface running the TextBugger black-box attack." |
|
}, |
|
"FIGREF3": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "shows empirical results we obtained using TextAttack's augmentation. Augmentation with TextAttack immediately improves the performance of a WordCNN model on small datasets." |
|
}, |
|
"FIGREF4": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Performance of the built-in WordCNN model on the rotten tomatoes dataset with increasing training set size. Data augmentation recipes like EasyDataAugmenter (EDA,(Wei and Zou, 2019)) and Embedding are most helpful when working with very few samples. Shaded regions represent 95% confidence intervals over N = 5 runs." |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"content": "<table><tr><td>123</td></tr></table>", |
|
"html": null, |
|
"text": "", |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |