# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run masked LM/next sentence masked_lm pre-training for BERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import modeling
import optimization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string(
"input_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.")
flags.DEFINE_integer(
"max_predictions_per_seq", 20,
"Maximum number of masked LM predictions per sequence. "
"Must match data generation.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.")
flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
masked_lm_positions = features["masked_lm_positions"]
masked_lm_ids = features["masked_lm_ids"]
masked_lm_weights = features["masked_lm_weights"]
next_sentence_labels = features["next_sentence_labels"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
(masked_lm_loss,
masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(
bert_config, model.get_sequence_output(), model.get_embedding_table(),
masked_lm_positions, masked_lm_ids, masked_lm_weights)
(next_sentence_loss, next_sentence_example_loss,
next_sentence_log_probs) = get_next_sentence_output(
bert_config, model.get_pooled_output(), next_sentence_labels)
total_loss = masked_lm_loss + next_sentence_loss
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels):
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
[-1, masked_lm_log_probs.shape[-1]])
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
next_sentence_log_probs = tf.reshape(
next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
next_sentence_predictions = tf.argmax(
next_sentence_log_probs, axis=-1, output_type=tf.int32)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = tf.metrics.accuracy(
labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.metrics.mean(
values=next_sentence_example_loss)
return {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
"next_sentence_accuracy": next_sentence_accuracy,
"next_sentence_loss": next_sentence_mean_loss,
}
eval_metrics = (metric_fn, [
masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels
])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
return output_spec
return model_fn
def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
label_ids, label_weights):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
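# Shape sketch for get_masked_lm_output (illustrative, assuming batch size B,
# max_predictions_per_seq P and vocab size V): `positions` is [B, P], the
# gathered `input_tensor` becomes [B * P, hidden], `log_probs` is [B * P, V],
# and `per_example_loss` is [B * P]. Padded prediction slots carry a
# `label_weights` value of 0.0, so they contribute nothing to the numerator
# and the loss is a weighted mean over real predictions only.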
def get_next_sentence_output(bert_config, input_tensor, labels):
"""Get loss and log probs for the next sentence prediction."""
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
with tf.variable_scope("cls/seq_relationship"):
output_weights = tf.get_variable(
"output_weights",
shape=[2, bert_config.hidden_size],
initializer=modeling.create_initializer(bert_config.initializer_range))
output_bias = tf.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
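# Worked example for gather_indexes (illustrative values): with batch_size=2,
# seq_length=4 and positions=[[1, 3], [0, 2]], flat_offsets is [[0], [4]], so
# flat_positions becomes [1, 3, 4, 6]. These index into the sequence tensor
# reshaped to [8, width], i.e. rows 1 and 3 of the first example and rows 0
# and 2 of the second.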
def input_fn_builder(input_files,
max_seq_length,
max_predictions_per_seq,
is_training,
num_cpu_threads=4):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
name_to_features = {
"input_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"input_mask":
tf.FixedLenFeature([max_seq_length], tf.int64),
"segment_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"masked_lm_positions":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_ids":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights":
tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
"next_sentence_labels":
tf.FixedLenFeature([1], tf.int64),
}
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
d = d.repeat()
d = d.shuffle(buffer_size=len(input_files))
# `cycle_length` is the number of parallel files that get read.
cycle_length = min(num_cpu_threads, len(input_files))
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
d = d.apply(
tf.contrib.data.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=is_training,
cycle_length=cycle_length))
d = d.shuffle(buffer_size=100)
else:
d = tf.data.TFRecordDataset(input_files)
# Since we evaluate for a fixed number of steps we don't want to encounter
# out-of-range exceptions.
d = d.repeat()
# We must `drop_remainder` on training because the TPU requires fixed
# size dimensions. For eval, we assume we are evaluating on the CPU or GPU
    # and we *don't* want to drop the remainder, otherwise we won't cover
# every sample.
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
num_parallel_batches=num_cpu_threads,
drop_remainder=True))
return d
return input_fn
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
tf.gfile.MakeDirs(FLAGS.output_dir)
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Input Files ***")
for input_file in input_files:
tf.logging.info(" %s" % input_file)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=FLAGS.num_train_steps,
num_warmup_steps=FLAGS.num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size)
if FLAGS.do_train:
tf.logging.info("***** Running training *****")
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
train_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=True)
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
if FLAGS.do_eval:
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
eval_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=False)
result = estimator.evaluate(
input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
  batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
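# A minimal sketch of adding a processor for a new single-sentence task
# (hypothetical class and column layout; adapt the column indices and label
# set to the actual .tsv format, and register the class in the `processors`
# dict in main()):
#
#   class MyTaskProcessor(DataProcessor):
#     def get_train_examples(self, data_dir):
#       return self._create_examples(
#           self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
#
#     def get_dev_examples(self, data_dir):
#       return self._create_examples(
#           self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
#
#     def get_labels(self):
#       return ["0", "1"]
#
#     def _create_examples(self, lines, set_type):
#       examples = []
#       for (i, line) in enumerate(lines):
#         guid = "%s-%s" % (set_type, i)
#         text_a = tokenization.convert_to_unicode(line[0])
#         label = tokenization.convert_to_unicode(line[1])
#         examples.append(
#             InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
#       return examples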
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
  # This is a simple heuristic which always truncates the longer sequence
  # one token at a time. This makes more sense than truncating an equal
  # percent of tokens from each, since if one sequence is very short then
  # each truncated token likely carries more information than a token from a
  # longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
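# Worked example for _truncate_seq_pair (illustrative lengths): with
# len(tokens_a)=10, len(tokens_b)=3 and max_length=8, tokens are popped from
# the end of the longer sequence until the total is 8, leaving tokens_a with
# 5 tokens and tokens_b untouched.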
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
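# Shape sketch for create_model (illustrative, assuming batch size B and
# hidden size H): the pooled [CLS] output is [B, H], `output_weights` is
# [num_labels, H], so `logits` and `probabilities` are [B, num_labels] and
# `per_example_loss` is [B]. Dropout (keep_prob=0.9) is applied to the pooled
# output only during training.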
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
probabilities = prediction["probabilities"]
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
writer.write(output_line)
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create masked LM/next sentence masked_lm TF examples for BERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import tokenization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("input_file", None,
"Input raw text file (or comma-separated list of files).")
flags.DEFINE_string(
"output_file", None,
"Output TF example file (or comma-separated list of files).")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_bool(
"do_whole_word_mask", False,
"Whether to use whole word masking rather than per-WordPiece masking.")
flags.DEFINE_integer("max_seq_length", 128, "Maximum sequence length.")
flags.DEFINE_integer("max_predictions_per_seq", 20,
"Maximum number of masked LM predictions per sequence.")
flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.")
flags.DEFINE_integer(
"dupe_factor", 10,
"Number of times to duplicate the input data (with different masks).")
flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.")
flags.DEFINE_float(
"short_seq_prob", 0.1,
"Probability of creating sequences which are shorter than the "
"maximum length.")
class TrainingInstance(object):
"""A single training instance (sentence pair)."""
def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels,
is_random_next):
self.tokens = tokens
self.segment_ids = segment_ids
self.is_random_next = is_random_next
self.masked_lm_positions = masked_lm_positions
self.masked_lm_labels = masked_lm_labels
def __str__(self):
s = ""
s += "tokens: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.tokens]))
s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids]))
s += "is_random_next: %s\n" % self.is_random_next
s += "masked_lm_positions: %s\n" % (" ".join(
[str(x) for x in self.masked_lm_positions]))
s += "masked_lm_labels: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.masked_lm_labels]))
s += "\n"
return s
def __repr__(self):
return self.__str__()
def write_instance_to_example_files(instances, tokenizer, max_seq_length,
max_predictions_per_seq, output_files):
"""Create TF example files from `TrainingInstance`s."""
writers = []
for output_file in output_files:
writers.append(tf.python_io.TFRecordWriter(output_file))
writer_index = 0
total_written = 0
for (inst_index, instance) in enumerate(instances):
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
assert len(input_ids) <= max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
while len(masked_lm_positions) < max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = 1 if instance.is_random_next else 0
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(input_ids)
features["input_mask"] = create_int_feature(input_mask)
features["segment_ids"] = create_int_feature(segment_ids)
features["masked_lm_positions"] = create_int_feature(masked_lm_positions)
features["masked_lm_ids"] = create_int_feature(masked_lm_ids)
features["masked_lm_weights"] = create_float_feature(masked_lm_weights)
features["next_sentence_labels"] = create_int_feature([next_sentence_label])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writers[writer_index].write(tf_example.SerializeToString())
writer_index = (writer_index + 1) % len(writers)
total_written += 1
if inst_index < 20:
tf.logging.info("*** Example ***")
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in instance.tokens]))
for feature_name in features.keys():
feature = features[feature_name]
values = []
if feature.int64_list.value:
values = feature.int64_list.value
elif feature.float_list.value:
values = feature.float_list.value
tf.logging.info(
"%s: %s" % (feature_name, " ".join([str(x) for x in values])))
for writer in writers:
writer.close()
tf.logging.info("Wrote %d total instances", total_written)
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
def create_float_feature(values):
feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return feature
def create_training_instances(input_files, tokenizer, max_seq_length,
dupe_factor, short_seq_prob, masked_lm_prob,
max_predictions_per_seq, rng):
"""Create `TrainingInstance`s from raw text."""
all_documents = [[]]
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
for input_file in input_files:
with tf.gfile.GFile(input_file, "r") as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
# Remove empty documents
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
vocab_words = list(tokenizer.vocab.keys())
instances = []
for _ in range(dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(
create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng))
rng.shuffle(instances)
return instances
def create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[document_index]
# Account for [CLS], [SEP], [SEP]
max_num_tokens = max_seq_length - 3
# We *usually* want to fill up the entire sequence since we are padding
# to `max_seq_length` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pre-training and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `max_seq_length` is a hard limit.
target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:
target_seq_length = rng.randint(2, max_num_tokens)
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = rng.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
# Random next
is_random_next = False
if len(current_chunk) == 1 or rng.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
# This should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document
# we're processing.
for _ in range(10):
random_document_index = rng.randint(0, len(all_documents) - 1)
if random_document_index != document_index:
break
random_document = all_documents[random_document_index]
random_start = rng.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we "put them back" so
# they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
# Actual next
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
(tokens, masked_lm_positions,
masked_lm_labels) = create_masked_lm_predictions(
tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)
instance = TrainingInstance(
tokens=tokens,
segment_ids=segment_ids,
is_random_next=is_random_next,
masked_lm_positions=masked_lm_positions,
masked_lm_labels=masked_lm_labels)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
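# Illustrative sketch (not part of the original script): a toy call to
# create_instances_from_document above, using a tiny, already-tokenized
# corpus and vocabulary invented for demonstration. It assumes the module
# has been fully loaded and flags parsed, since the masking code reads
# FLAGS.do_whole_word_mask at call time.
def _example_instances_from_toy_document():
  toy_documents = [
      [["the", "cat", "sat"], ["on", "the", "mat"]],
      [["dogs", "bark"], ["at", "night"]],
  ]
  toy_vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]", "the", "cat",
               "sat", "on", "mat", "dogs", "bark", "at", "night"]
  return create_instances_from_document(
      toy_documents, 0, max_seq_length=16, short_seq_prob=0.1,
      masked_lm_prob=0.15, max_predictions_per_seq=3,
      vocab_words=toy_vocab, rng=random.Random(12345))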
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
["index", "label"])
def create_masked_lm_predictions(tokens, masked_lm_prob,
max_predictions_per_seq, vocab_words, rng):
"""Creates the predictions for the masked LM objective."""
cand_indexes = []
for (i, token) in enumerate(tokens):
if token == "[CLS]" or token == "[SEP]":
continue
# Whole Word Masking means that we mask all of the wordpieces
# corresponding to an original word. When a word has been split into
# WordPieces, the first token does not have any marker and any subsequent
# tokens are prefixed with ##. So whenever we see the ## token, we
# append it to the previous set of word indexes.
#
# Note that Whole Word Masking does *not* change the training code
# at all -- we still predict each WordPiece independently, softmaxed
# over the entire vocabulary.
if (FLAGS.do_whole_word_mask and len(cand_indexes) >= 1 and
token.startswith("##")):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
rng.shuffle(cand_indexes)
output_tokens = list(tokens)
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
masked_lms = []
covered_indexes = set()
for index_set in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_token = None
# 80% of the time, replace with [MASK]
if rng.random() < 0.8:
masked_token = "[MASK]"
else:
# 10% of the time, keep original
if rng.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
assert len(masked_lms) <= num_to_predict
masked_lms = sorted(masked_lms, key=lambda x: x.index)
masked_lm_positions = []
masked_lm_labels = []
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels)
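# Illustrative sketch (not part of the original script): empirically checks
# the 80%/10%/10% replacement scheme implemented above on a toy sequence.
# The tokens and vocabulary are invented; call only after flags are parsed,
# since the function above reads FLAGS.do_whole_word_mask.
def _example_masking_distribution(num_trials=1000):
  toy_tokens = ["[CLS]", "the", "cat", "sat", "on", "the", "mat", "[SEP]"]
  toy_vocab = ["the", "cat", "sat", "on", "mat", "dog", "ran"]
  toy_rng = random.Random(0)
  counts = collections.Counter()
  for _ in range(num_trials):
    (output_tokens, positions, labels) = create_masked_lm_predictions(
        toy_tokens, masked_lm_prob=0.15, max_predictions_per_seq=2,
        vocab_words=toy_vocab, rng=toy_rng)
    for (pos, label) in zip(positions, labels):
      if output_tokens[pos] == "[MASK]":
        counts["mask"] += 1
      elif output_tokens[pos] == label:
        counts["keep"] += 1
      else:
        counts["random"] += 1
  return counts  # roughly 80% "mask", 10% "keep", 10% "random"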
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if rng.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
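# Illustrative sketch (not part of the original script): truncate_seq_pair
# above trims the pair in place, removing tokens from a randomly chosen end
# of the longer list until the combined length fits.
def _example_truncate_pair():
  tokens_a = ["a%d" % i for i in range(8)]
  tokens_b = ["b%d" % i for i in range(6)]
  truncate_seq_pair(tokens_a, tokens_b, max_num_tokens=10,
                    rng=random.Random(1))
  # Now len(tokens_a) + len(tokens_b) <= 10.
  return tokens_a, tokens_b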
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Reading from input files ***")
for input_file in input_files:
tf.logging.info(" %s", input_file)
rng = random.Random(FLAGS.random_seed)
instances = create_training_instances(
input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor,
FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq,
rng)
output_files = FLAGS.output_file.split(",")
tf.logging.info("*** Writing to output files ***")
for output_file in output_files:
tf.logging.info(" %s", output_file)
write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length,
FLAGS.max_predictions_per_seq, output_files)
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("output_file")
flags.mark_flag_as_required("vocab_file")
tf.app.run()
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main BERT model and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class BertModel(object):
"""BERT model ("Bidirectional Encoder Representations from Transformers").
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.BertModel(config=config, is_training=True,
input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
label_embeddings = tf.get_variable(...)
pooled_output = model.get_pooled_output()
logits = tf.matmul(pooled_output, label_embeddings)
...
```
"""
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=False,
scope=None):
"""Constructor for BertModel.
Args:
config: `BertConfig` instance.
is_training: bool. true for training model, false for eval model. Controls
whether dropout will be applied.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings.
scope: (optional) variable scope. Defaults to "bert".
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
"""
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope(scope, default_name="bert"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = embedding_postprocessor(
input_tensor=self.embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob)
with tf.variable_scope("encoder"):
# This converts a 2D mask of shape [batch_size, seq_length] to a 3D
# mask of shape [batch_size, seq_length, seq_length] which is used
# for the attention scores.
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True)
self.sequence_output = self.all_encoder_layers[-1]
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the final hidden layer of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def get_embedding_table(self):
return self.embedding_table
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the ReLU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
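# Illustrative sketch (not part of the original file): the tanh formula above
# approximates the exact GELU, x * Phi(x). The helper below compares the two
# on a few sample points; the name and values are for demonstration only.
def _example_gelu_vs_exact():
  x = tf.constant([-2.0, -1.0, 0.0, 1.0, 2.0])
  exact = x * 0.5 * (1.0 + tf.erf(x / tf.sqrt(2.0)))
  # The two tensors agree to within about 1e-3.
  return gelu(x), exact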
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
# We assume that anything that's not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
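# Illustrative sketch (not part of the original file): the typical way the
# helper above is used to warm-start from a pre-trained checkpoint.
# `init_checkpoint` is a caller-supplied path.
def _example_init_from_checkpoint(init_checkpoint):
  tvars = tf.trainable_variables()
  (assignment_map, initialized_names) = get_assignment_map_from_checkpoint(
      tvars, init_checkpoint)
  tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
  return initialized_names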
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
return output
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return tf.contrib.layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
"""Looks up words embeddings for id tensor.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
ids.
vocab_size: int. Size of the embedding vocabulary.
embedding_size: int. Width of the word embeddings.
initializer_range: float. Embedding initialization range.
word_embedding_name: string. Name of the embedding table.
use_one_hot_embeddings: bool. If True, use one-hot method for word
embeddings. If False, use `tf.gather()`.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size].
"""
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
flat_input_ids = tf.reshape(input_ids, [-1])
if use_one_hot_embeddings:
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.gather(embedding_table, flat_input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return (output, embedding_table)
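# Illustrative sketch (not part of the original file): embedding_lookup above
# returns both the looked-up embeddings and the table itself; the table is
# what lets the masked-LM output layer tie its weights to the input
# embeddings. The ids below are toy values.
def _example_embedding_lookup():
  toy_ids = tf.constant([[1, 2, 3], [4, 5, 0]])  # [batch_size=2, seq_length=3]
  (embeddings, table) = embedding_lookup(
      input_ids=toy_ids, vocab_size=10, embedding_size=8)
  # embeddings has shape [2, 3, 8]; table has shape [10, 8].
  return embeddings, table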
def embedding_postprocessor(input_tensor,
use_token_type=False,
token_type_ids=None,
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
raise ValueError("`token_type_ids` must be specified if"
"`use_token_type` is True.")
token_type_table = tf.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range))
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary.
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range))
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, -1])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob)
return output
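# Illustrative sketch (not part of the original file): the call pattern for
# embedding_postprocessor, mirroring how BertModel wires it up above. The
# shapes are toy values.
def _example_embedding_postprocessor():
  word_embeddings = tf.zeros([2, 4, 8])  # [batch_size, seq_length, width]
  token_type_ids = tf.zeros([2, 4], dtype=tf.int32)
  # Returns a [2, 4, 8] tensor: word + token-type + position embeddings,
  # layer-normalized (dropout disabled here).
  return embedding_postprocessor(
      input_tensor=word_embeddings,
      use_token_type=True,
      token_type_ids=token_type_ids,
      token_type_vocab_size=2,
      use_position_embeddings=True,
      max_position_embeddings=16,
      dropout_prob=0.0)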
def create_attention_mask_from_input_mask(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
# We don't assume that `from_tensor` is a mask (although it could be). We
# don't actually care if we attend *from* padding tokens (only *to* padding
# tokens) so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(
shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
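# Illustrative sketch (not part of the original file): with a toy two-sequence
# batch, the broadcast above turns a per-token padding mask into a
# per-(from, to) pair mask.
def _example_attention_mask():
  input_ids = tf.zeros([2, 3], dtype=tf.int32)
  input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])  # second sequence is padded
  mask = create_attention_mask_from_input_mask(input_ids, input_mask)
  # mask has shape [2, 3, 3]; mask[1, :, 2] is all zeros, so nothing attends
  # *to* the padded position, while attention *from* it is unrestricted.
  return mask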
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=None,
from_seq_length=None,
to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
In practice, the multi-headed attention is done with transposes and
reshapes rather than actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range))
# `key_layer` = [B*T, N*H]
key_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range))
# `value_layer` = [B*T, N*H]
value_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
# `context_layer` = [B*F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
# `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
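# Illustrative sketch (not part of the original file): a self-attention call
# matching the shape conventions documented above, with B=2, F=T=4, N=2, H=8.
# All values are toy numbers.
def _example_self_attention():
  layer_input = tf.zeros([2, 4, 16])  # [B, F, N*H]
  context = attention_layer(
      from_tensor=layer_input,
      to_tensor=layer_input,
      num_attention_heads=2,
      size_per_head=8)
  return context  # shape [2, 4, 16]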
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
# We keep the representation as a 2D tensor to avoid re-shaping it back and
# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
# the GPU/CPU but may not be free on the TPU, so we want to minimize them to
# help the optimizer.
prev_output = reshape_to_matrix(input_tensor)
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
with tf.variable_scope("attention"):
attention_heads = []
with tf.variable_scope("self"):
attention_head = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length)
attention_heads.append(attention_head)
attention_output = None
if len(attention_heads) == 1:
attention_output = attention_heads[0]
else:
# In the case where we have other sequences, we just concatenate
# them to the self-attention head before the projection.
attention_output = tf.concat(attention_heads, axis=-1)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
attention_output = tf.layers.dense(
attention_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + layer_input)
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = tf.layers.dense(
attention_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
layer_output = tf.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + attention_output)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
final_outputs = []
for layer_output in all_layer_outputs:
final_output = reshape_from_matrix(layer_output, input_shape)
final_outputs.append(final_output)
return final_outputs
else:
final_output = reshape_from_matrix(prev_output, input_shape)
return final_output
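# Illustrative sketch (not part of the original file): a tiny two-layer
# encoder built directly with transformer_model. The sizes are toy values
# chosen so that hidden_size matches the input width and is divisible by
# num_attention_heads.
def _example_tiny_transformer():
  embeddings = tf.zeros([2, 4, 32])  # [batch_size, seq_length, hidden_size]
  return transformer_model(
      input_tensor=embeddings,
      hidden_size=32,
      num_hidden_layers=2,
      num_attention_heads=4,
      intermediate_size=64)  # returns a [2, 4, 32] tensor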
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
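# Illustrative sketch (not part of the original file): get_shape_list mixes
# static Python ints with dynamic tf.Tensor scalars, which is why callers
# above can feed its result straight into tf.reshape.
def _example_get_shape_list():
  static = tf.zeros([2, 3])
  dynamic = tf.placeholder(tf.float32, shape=[None, 3])
  # First call returns [2, 3]; second returns [<dynamic scalar Tensor>, 3].
  return get_shape_list(static), get_shape_list(dynamic)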
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import random
import re
import modeling
import six
import tensorflow as tf
class BertModelTest(tf.test.TestCase):
class BertModelTester(object):
def __init__(self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02,
scope=None):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.scope = scope
def create_model(self):
input_ids = BertModelTest.ids_tensor([self.batch_size, self.seq_length],
self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = BertModelTest.ids_tensor(
[self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = BertModelTest.ids_tensor(
[self.batch_size, self.seq_length], self.type_vocab_size)
config = modeling.BertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range)
model = modeling.BertModel(
config=config,
is_training=self.is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=token_type_ids,
scope=self.scope)
outputs = {
"embedding_output": model.get_embedding_output(),
"sequence_output": model.get_sequence_output(),
"pooled_output": model.get_pooled_output(),
"all_encoder_layers": model.get_all_encoder_layers(),
}
return outputs
def check_output(self, result):
self.parent.assertAllEqual(
result["embedding_output"].shape,
[self.batch_size, self.seq_length, self.hidden_size])
self.parent.assertAllEqual(
result["sequence_output"].shape,
[self.batch_size, self.seq_length, self.hidden_size])
self.parent.assertAllEqual(result["pooled_output"].shape,
[self.batch_size, self.hidden_size])
def test_default(self):
self.run_tester(BertModelTest.BertModelTester(self))
def test_config_to_json_string(self):
config = modeling.BertConfig(vocab_size=99, hidden_size=37)
obj = json.loads(config.to_json_string())
self.assertEqual(obj["vocab_size"], 99)
self.assertEqual(obj["hidden_size"], 37)
def run_tester(self, tester):
with self.test_session() as sess:
ops = tester.create_model()
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init_op)
output_result = sess.run(ops)
tester.check_output(output_result)
self.assert_all_tensors_reachable(sess, [init_op, ops])
@classmethod
def ids_tensor(cls, shape, vocab_size, rng=None, name=None):
"""Creates a random int32 tensor of the shape within the vocab size."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
return tf.constant(value=values, dtype=tf.int32, shape=shape, name=name)
def assert_all_tensors_reachable(self, sess, outputs):
"""Checks that all the tensors in the graph are reachable from outputs."""
graph = sess.graph
ignore_strings = [
"^.*/assert_less_equal/.*$",
"^.*/dilation_rate$",
"^.*/Tensordot/concat$",
"^.*/Tensordot/concat/axis$",
"^testing/.*$",
]
ignore_regexes = [re.compile(x) for x in ignore_strings]
unreachable = self.get_unreachable_ops(graph, outputs)
filtered_unreachable = []
for x in unreachable:
do_ignore = False
for r in ignore_regexes:
m = r.match(x.name)
if m is not None:
do_ignore = True
if do_ignore:
continue
filtered_unreachable.append(x)
unreachable = filtered_unreachable
self.assertEqual(
len(unreachable), 0, "The following ops are unreachable: %s" %
(" ".join([x.name for x in unreachable])))
@classmethod
def get_unreachable_ops(cls, graph, outputs):
"""Finds all of the tensors in graph that are unreachable from outputs."""
outputs = cls.flatten_recursive(outputs)
output_to_op = collections.defaultdict(list)
op_to_all = collections.defaultdict(list)
assign_out_to_in = collections.defaultdict(list)
for op in graph.get_operations():
for x in op.inputs:
op_to_all[op.name].append(x.name)
for y in op.outputs:
output_to_op[y.name].append(op.name)
op_to_all[op.name].append(y.name)
if str(op.type) == "Assign":
for y in op.outputs:
for x in op.inputs:
assign_out_to_in[y.name].append(x.name)
assign_groups = collections.defaultdict(list)
for out_name in assign_out_to_in.keys():
name_group = assign_out_to_in[out_name]
for n1 in name_group:
assign_groups[n1].append(out_name)
for n2 in name_group:
if n1 != n2:
assign_groups[n1].append(n2)
seen_tensors = {}
stack = [x.name for x in outputs]
while stack:
name = stack.pop()
if name in seen_tensors:
continue
seen_tensors[name] = True
if name in output_to_op:
for op_name in output_to_op[name]:
if op_name in op_to_all:
for input_name in op_to_all[op_name]:
if input_name not in stack:
stack.append(input_name)
expanded_names = []
if name in assign_groups:
for assign_name in assign_groups[name]:
expanded_names.append(assign_name)
for expanded_name in expanded_names:
if expanded_name not in stack:
stack.append(expanded_name)
unreachable_ops = []
for op in graph.get_operations():
is_unreachable = False
all_names = [x.name for x in op.inputs] + [x.name for x in op.outputs]
for name in all_names:
if name not in seen_tensors:
is_unreachable = True
if is_unreachable:
unreachable_ops.append(op)
return unreachable_ops
@classmethod
def flatten_recursive(cls, item):
"""Flattens (potentially nested) a tuple/dictionary/list to a list."""
output = []
if isinstance(item, list):
output.extend(item)
elif isinstance(item, tuple):
output.extend(list(item))
elif isinstance(item, dict):
for (_, v) in six.iteritems(item):
output.append(v)
else:
return [item]
flat_output = []
for x in output:
flat_output.extend(cls.flatten_recursive(x))
return flat_output
if __name__ == "__main__":
tf.test.main()
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import optimization
import tensorflow as tf
class OptimizationTest(tf.test.TestCase):
def test_adam(self):
with self.test_session() as sess:
w = tf.get_variable(
"w",
shape=[3],
initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
x = tf.constant([0.4, 0.2, -0.5])
loss = tf.reduce_mean(tf.square(x - w))
tvars = tf.trainable_variables()
grads = tf.gradients(loss, tvars)
global_step = tf.train.get_or_create_global_step()
optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
train_op = optimizer.apply_gradients(zip(grads, tvars), global_step)
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init_op)
for _ in range(100):
sess.run(train_op)
w_np = sess.run(w)
self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2)
if __name__ == "__main__":
tf.test.main()
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
"""Creates an optimizer training op."""
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
# Implements linear decay of the learning rate.
learning_rate = tf.train.polynomial_decay(
learning_rate,
global_step,
num_train_steps,
end_learning_rate=0.0,
power=1.0,
cycle=False)
# Implements linear warmup. I.e., if global_step < num_warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
if num_warmup_steps:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = (
(1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
# It is recommended that you use this optimizer for fine tuning, since this
# is how the model was trained (note that the Adam m/v variables are NOT
# loaded from init_checkpoint.)
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
adapter_weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
if use_tpu:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
tvars = []
for collection in ["adapters", "layer_norm", "head"]:
tvars += tf.get_collection(collection)
grads = tf.gradients(loss, tvars)
# This is how the model was pre-trained.
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
train_op = optimizer.apply_gradients(
zip(grads, tvars), global_step=global_step)
# Normally the global step update is done inside of `apply_gradients`.
# However, `AdamWeightDecayOptimizer` doesn't do this. But if you use
# a different optimizer, you should probably take this line out.
new_global_step = global_step + 1
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
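# Illustrative sketch (not part of the original file): the warmup/decay
# schedule built above, written out in plain Python for a single step. The
# default constants are examples only.
def _example_learning_rate_at_step(step, init_lr=5e-5, num_train_steps=10000,
                                   num_warmup_steps=1000):
  if step < num_warmup_steps:
    # Linear warmup: global_step / num_warmup_steps * init_lr.
    return init_lr * float(step) / float(num_warmup_steps)
  # Linear (power=1.0 polynomial) decay from init_lr down to 0.0 at
  # num_train_steps.
  return init_lr * (1.0 - float(min(step, num_train_steps)) /
                    float(num_train_steps))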
class AdamWeightDecayOptimizer(tf.train.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
adapter_weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="AdamWeightDecayOptimizer"):
"""Constructs a AdamWeightDecayOptimizer."""
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.adapter_weight_decay_rate = adapter_weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
self._adapter_variable_names = {
self._get_variable_name(v.name) for v in tf.get_collection("adapters")
}
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""See base class."""
assignments = []
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
m = tf.get_variable(
name=param_name + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.get_variable(
name=param_name + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
# Standard Adam update.
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(param_name):
if param_name in self._adapter_variable_names:
update += self.adapter_weight_decay_rate * param
else:
update += self.weight_decay_rate * param
update_with_lr = self.learning_rate * update
next_param = param - update_with_lr
assignments.extend(
[param.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if param_name in self._adapter_variable_names:
if not self.adapter_weight_decay_rate:
return False
else:
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
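# Illustrative sketch (not part of the original file): one decoupled
# weight-decay Adam step on a single scalar parameter, mirroring the update
# in apply_gradients above (the exclude list and adapter handling are
# omitted for brevity). All values are plain Python floats.
def _example_decoupled_weight_decay_step(param, grad, m, v, lr=1e-3,
                                         weight_decay_rate=0.01, beta_1=0.9,
                                         beta_2=0.999, epsilon=1e-6):
  next_m = beta_1 * m + (1.0 - beta_1) * grad
  next_v = beta_2 * v + (1.0 - beta_2) * grad * grad
  update = next_m / (next_v ** 0.5 + epsilon)
  # The decay term is added to the update, not to the loss, so it does not
  # flow through the Adam moment estimates.
  update += weight_decay_rate * param
  return param - lr * update, next_m, next_v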
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import tokenization
import six
import tensorflow as tf
class TokenizationTest(tf.test.TestCase):
def test_full_tokenizer(self):
vocab_tokens = [
"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
"##ing", ","
]
with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
if six.PY2:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
else:
vocab_writer.write("".join(
[x + "\n" for x in vocab_tokens]).encode("utf-8"))
vocab_file = vocab_writer.name
tokenizer = tokenization.FullTokenizer(vocab_file)
os.unlink(vocab_file)
tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertAllEqual(
tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def test_chinese(self):
tokenizer = tokenization.BasicTokenizer()
self.assertAllEqual(
tokenizer.tokenize(u"ah\u535A\u63A8zz"),
[u"ah", u"\u535A", u"\u63A8", u"zz"])
def test_basic_tokenizer_lower(self):
tokenizer = tokenization.BasicTokenizer(do_lower_case=True)
self.assertAllEqual(
tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
["hello", "!", "how", "are", "you", "?"])
self.assertAllEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = tokenization.BasicTokenizer(do_lower_case=False)
self.assertAllEqual(
tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
["HeLLo", "!", "how", "Are", "yoU", "?"])
def test_wordpiece_tokenizer(self):
vocab_tokens = [
"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
"##ing"
]
vocab = {}
for (i, token) in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = tokenization.WordpieceTokenizer(vocab=vocab)
self.assertAllEqual(tokenizer.tokenize(""), [])
self.assertAllEqual(
tokenizer.tokenize("unwanted running"),
["un", "##want", "##ed", "runn", "##ing"])
self.assertAllEqual(
tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def test_convert_tokens_to_ids(self):
vocab_tokens = [
"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
"##ing"
]
vocab = {}
for (i, token) in enumerate(vocab_tokens):
vocab[token] = i
self.assertAllEqual(
tokenization.convert_tokens_to_ids(
vocab, ["un", "##want", "##ed", "runn", "##ing"]), [7, 4, 5, 8, 9])
def test_is_whitespace(self):
self.assertTrue(tokenization._is_whitespace(u" "))
self.assertTrue(tokenization._is_whitespace(u"\t"))
self.assertTrue(tokenization._is_whitespace(u"\r"))
self.assertTrue(tokenization._is_whitespace(u"\n"))
self.assertTrue(tokenization._is_whitespace(u"\u00A0"))
self.assertFalse(tokenization._is_whitespace(u"A"))
self.assertFalse(tokenization._is_whitespace(u"-"))
def test_is_control(self):
self.assertTrue(tokenization._is_control(u"\u0005"))
self.assertFalse(tokenization._is_control(u"A"))
self.assertFalse(tokenization._is_control(u" "))
self.assertFalse(tokenization._is_control(u"\t"))
self.assertFalse(tokenization._is_control(u"\r"))
self.assertFalse(tokenization._is_control(u"\U0001F4A9"))
def test_is_punctuation(self):
self.assertTrue(tokenization._is_punctuation(u"-"))
self.assertTrue(tokenization._is_punctuation(u"$"))
self.assertTrue(tokenization._is_punctuation(u"`"))
self.assertTrue(tokenization._is_punctuation(u"."))
self.assertFalse(tokenization._is_punctuation(u"A"))
self.assertFalse(tokenization._is_punctuation(u" "))
if __name__ == "__main__":
tf.test.main()
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
import tensorflow as tf
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
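# Worked example for the check above (hypothetical checkpoint path, for
# illustration only): passing
#   --init_checkpoint=gs://some-bucket/uncased_L-12_H-768_A-12/bert_model.ckpt
# together with --do_lower_case=False raises the ValueError, because the
# directory name marks the checkpoint as a lowercased model.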
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python 2 and Python 3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
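# Sketch of the expected vocab file format (hypothetical contents): one token
# per line, mapped to its zero-based line index, e.g. a file containing
#   [PAD]
#   [UNK]
#   the
# yields OrderedDict([("[PAD]", 0), ("[UNK]", 1), ("the", 2)]).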
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because the English Wikipedia does contain
# some Chinese words).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
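# Illustrative end-to-end behavior with do_lower_case=True:
#   u" \tHeLLo!how  \n Are  yoU?  " -> ["hello", "!", "how", "are", "you", "?"]
# i.e. whitespace is collapsed, text is lowercased and accent-stripped, and
# punctuation is split into standalone tokens.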
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and are handled
# like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
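# Illustrative greedy longest-match-first trace (assuming "un", "##want" and
# "##ed" are in `vocab`): for the token "unwanted" the loop first matches the
# longest prefix "un", then "##want", then "##ed", producing
# ["un", "##want", "##ed"]; if any remainder cannot be matched, the whole
# token falls back to the unk_token ("[UNK]" by default).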
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
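# Illustrative edge case: _is_punctuation("$") returns True even though "$"
# has Unicode category "Sc" (currency symbol) rather than a "P*" category,
# because all non-letter/number ASCII is treated as punctuation here.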
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Must only be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
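# For instance (hypothetical inputs): tokens_a = ["hello", "world"] and
# tokens_b = ["hi"] are assembled below as
#   tokens:      [CLS] hello world [SEP] hi [SEP]
#   segment_ids:   0     0     0     0   1    1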
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
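# Illustrative trace (hypothetical tokens): with tokens_a = ["a", "b", "c", "d"],
# tokens_b = ["x", "y"] and max_length=4, the longer list is popped twice,
# leaving tokens_a = ["a", "b"] and tokens_b = ["x", "y"].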
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02),
collections=["head", tf.GraphKeys.GLOBAL_VARIABLES])
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer(),
collections=["head", tf.GraphKeys.GLOBAL_VARIABLES])
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
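# Worked example (hypothetical numbers): with 8,000 training examples,
# train_batch_size=32 and num_train_epochs=3.0, num_train_steps is
# int(8000 / 32 * 3.0) = 750, and with warmup_proportion=0.1 the first
# 75 steps use the linear learning-rate warmup.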
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
probabilities = prediction["probabilities"]
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
writer.write(output_line)
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main BERT model and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class BertModel(object):
"""BERT model ("Bidirectional Encoder Representations from Transformers").
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.BertModel(config=config, is_training=True,
input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
label_embeddings = tf.get_variable(...)
pooled_output = model.get_pooled_output()
logits = tf.matmul(pooled_output, label_embeddings)
...
```
"""
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=False,
scope=None,
adapter_fn="feedforward_adapter"):
"""Constructor for BertModel.
Args:
config: `BertConfig` instance.
is_training: bool. true for training model, false for eval model. Controls
whether dropout will be applied.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings.
scope: (optional) variable scope. Defaults to "bert".
adapter_fn: (optional) string identifying a trainable adapter function that
takes a Tensor as input and returns a Tensor of the same shape.
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
"""
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope(scope, default_name="bert"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = embedding_postprocessor(
input_tensor=self.embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob)
with tf.variable_scope("encoder"):
# This converts a 2D mask of shape [batch_size, seq_length] to a 3D
# mask of shape [batch_size, seq_length, seq_length] which is used
# for the attention scores.
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True,
adapter_fn=get_adapter(adapter_fn))
self.sequence_output = self.all_encoder_layers[-1]
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the final hidden of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def get_embedding_table(self):
return self.embedding_table
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
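# For reference, the tanh approximation computed above is
#   gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3))),
# so gelu(0.0) == 0.0 and gelu(x) approaches x for large positive x.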
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
# We assume that anything that"s not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def feedforward_adapter(input_tensor, hidden_size=64, init_scale=1e-3):
"""A feedforward adapter layer with a bottleneck.
Implements a bottleneck layer with a user-specified nonlinearity and an
identity residual connection. All variables created are added to the
"adapters" collection.
Args:
input_tensor: input Tensor of shape [batch size, hidden dimension]
hidden_size: dimension of the bottleneck layer.
init_scale: Scale of the initialization distribution used for weights.
Returns:
Tensor of the same shape as `input_tensor`.
"""
with tf.variable_scope("adapters"):
in_size = input_tensor.get_shape().as_list()[1]
w1 = tf.get_variable(
"weights1", [in_size, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=init_scale),
collections=["adapters", tf.GraphKeys.GLOBAL_VARIABLES])
b1 = tf.get_variable(
"biases1", [1, hidden_size],
initializer=tf.zeros_initializer(),
collections=["adapters", tf.GraphKeys.GLOBAL_VARIABLES])
net = tf.tensordot(input_tensor, w1, [[1], [0]]) + b1
net = gelu(net)
w2 = tf.get_variable(
"weights2", [hidden_size, in_size],
initializer=tf.truncated_normal_initializer(stddev=init_scale),
collections=["adapters", tf.GraphKeys.GLOBAL_VARIABLES])
b2 = tf.get_variable(
"biases2", [1, in_size],
initializer=tf.zeros_initializer(),
collections=["adapters", tf.GraphKeys.GLOBAL_VARIABLES])
net = tf.tensordot(net, w2, [[1], [0]]) + b2
return net + input_tensor
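# Shape sketch for the adapter above (hypothetical sizes): for an input of
# shape [batch_size, 768] and hidden_size=64, w1 is [768, 64] and w2 is
# [64, 768], so the bottleneck output is projected back to [batch_size, 768]
# before the identity residual connection is added.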
def get_adapter(function_string):
"""Maps a string to a Python function.
Args:
function_string: String name of the adapter function.
Returns:
A Python function corresponding to the adapter function. If
`function_string` is None or empty, this will return None.
If `function_string` is not a string, it will return `function_string`.
Raises:
ValueError: The `function_string` does not correspond to a known
adapter.
"""
# We assume that anything that"s not a string is already an adapter
# function, so we just return it.
if not isinstance(function_string, six.string_types):
return function_string
if not function_string:
return None
fn = function_string.lower()
if fn == "feedforward_adapter":
return feedforward_adapter
else:
raise ValueError("Unsupported adapters: %s" % fn)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
return output
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return tf.contrib.layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name,
variables_collections=["layer_norm", tf.GraphKeys.GLOBAL_VARIABLES])
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
"""Looks up words embeddings for id tensor.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
ids.
vocab_size: int. Size of the embedding vocabulary.
embedding_size: int. Width of the word embeddings.
initializer_range: float. Embedding initialization range.
word_embedding_name: string. Name of the embedding table.
use_one_hot_embeddings: bool. If True, use one-hot method for word
embeddings. If False, use `tf.gather()`.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size].
"""
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
flat_input_ids = tf.reshape(input_ids, [-1])
if use_one_hot_embeddings:
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.gather(embedding_table, flat_input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return (output, embedding_table)
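# Shape sketch (hypothetical sizes): input_ids of shape [batch_size, seq_length]
# is expanded to [batch_size, seq_length, 1]; after the gather (or one-hot
# matmul) and the final reshape, the returned embeddings have shape
# [batch_size, seq_length, embedding_size].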
def embedding_postprocessor(input_tensor,
use_token_type=False,
token_type_ids=None,
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
Returns:
    float tensor with the same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
      raise ValueError("`token_type_ids` must be specified if "
                       "`use_token_type` is True.")
token_type_table = tf.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range))
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary.
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range))
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, -1])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob)
return output
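# Illustrative sketch (editor's addition; names and shapes are assumptions):
# `embedding_postprocessor` adds segment and position embeddings on top of the
# word embeddings and leaves the [batch_size, seq_length, width] shape
# unchanged.
def _embedding_postprocessor_sketch():
  word_emb = tf.zeros([2, 5, 8])
  segment_ids = tf.zeros([2, 5], dtype=tf.int32)
  with tf.variable_scope("embedding_postprocessor_sketch"):
    output = embedding_postprocessor(
        word_emb,
        use_token_type=True,
        token_type_ids=segment_ids,
        token_type_vocab_size=2,
        use_position_embeddings=True,
        max_position_embeddings=16,
        dropout_prob=0.0)
  return output  # Still [2, 5, 8].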
def create_attention_mask_from_input_mask(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
  # We don't assume that `from_tensor` is a mask (although it could be). We
  # don't actually care if we attend *from* padding tokens (only *to* padding
  # tokens), so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(
shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
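# Illustrative sketch (editor's addition; the toy values are assumptions): the
# 2D padding mask is broadcast to [batch_size, from_seq_length, to_seq_length];
# a 0 in `to_mask` blocks attention *to* that position from every query.
def _attention_mask_sketch():
  from_tensor = tf.zeros([1, 3, 8])              # batch=1, from_seq_length=3
  to_mask = tf.constant([[1, 1, 0]], tf.int32)   # last to-position is padding
  # Result is [1, 3, 3]; every row equals [1.0, 1.0, 0.0].
  return create_attention_mask_from_input_mask(from_tensor, to_mask)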
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=None,
from_seq_length=None,
to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
  This is an implementation of multi-headed attention based on "Attention
  Is All You Need". If `from_tensor` and `to_tensor` are the same, then
  this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
  In practice, the multi-headed attention is done with transposes and
  reshapes rather than with actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range))
# `key_layer` = [B*T, N*H]
key_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range))
# `value_layer` = [B*T, N*H]
value_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
# `context_layer` = [B*F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
# `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
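# Illustrative sketch (editor's addition; dimensions and scope name are
# assumptions): self-attention in the B/F/T/N/H notation used in the comments
# above. With a 3D input, batch size and sequence lengths are inferred from
# the tensor shape.
def _attention_layer_sketch():
  x = tf.zeros([2, 7, 16])  # B=2, F=T=7, input width=16
  with tf.variable_scope("attention_sketch"):
    context = attention_layer(
        from_tensor=x,
        to_tensor=x,
        num_attention_heads=4,  # N=4
        size_per_head=4)        # H=4, so the output width is N*H=16
  return context  # [2, 7, 16] since `do_return_2d_tensor` defaults to False.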
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False,
adapter_fn=None):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
adapter_fn: (optional) trainable adapter function that takes as input a
Tensor and returns a Tensor of the same shape.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
# We keep the representation as a 2D tensor to avoid re-shaping it back and
# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
# the GPU/CPU but may not be free on the TPU, so we want to minimize them to
# help the optimizer.
prev_output = reshape_to_matrix(input_tensor)
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
with tf.variable_scope("attention"):
attention_heads = []
with tf.variable_scope("self"):
attention_head = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length)
attention_heads.append(attention_head)
attention_output = None
if len(attention_heads) == 1:
attention_output = attention_heads[0]
else:
# In the case where we have other sequences, we just concatenate
# them to the self-attention head before the projection.
attention_output = tf.concat(attention_heads, axis=-1)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
attention_output = tf.layers.dense(
attention_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
attention_output = dropout(attention_output, hidden_dropout_prob)
if adapter_fn:
attention_output = adapter_fn(attention_output)
attention_output = layer_norm(attention_output + layer_input)
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = tf.layers.dense(
attention_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
layer_output = tf.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
layer_output = dropout(layer_output, hidden_dropout_prob)
if adapter_fn:
layer_output = adapter_fn(layer_output)
layer_output = layer_norm(layer_output + attention_output)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
final_outputs = []
for layer_output in all_layer_outputs:
final_output = reshape_from_matrix(layer_output, input_shape)
final_outputs.append(final_output)
return final_outputs
else:
final_output = reshape_from_matrix(prev_output, input_shape)
return final_output
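# Illustrative sketch (editor's addition; the toy configuration is an
# assumption): a two-layer encoder. `hidden_size` must equal the input width
# (because of the residual connections) and must be divisible by
# `num_attention_heads`.
def _transformer_model_sketch():
  x = tf.zeros([2, 7, 16])
  with tf.variable_scope("transformer_sketch"):
    final_layer = transformer_model(
        x,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=32)
  return final_layer  # [2, 7, 16]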
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
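# Illustrative sketch (editor's addition; the placeholder shape is an
# assumption): static dimensions come back as Python ints, unknown ones as
# scalar Tensors produced by `tf.shape`.
def _get_shape_list_sketch():
  x = tf.placeholder(tf.float32, shape=[None, 7, 16])
  dims = get_shape_list(x, expected_rank=3)
  # dims[0] is a scalar tf.Tensor (dynamic batch size), while dims[1] == 7 and
  # dims[2] == 16 are plain Python ints.
  return dims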
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
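# Illustrative sketch (editor's addition; shapes are assumptions):
# `reshape_to_matrix` and `reshape_from_matrix` round-trip a 3D tensor through
# a 2D matrix, which is how `transformer_model` avoids repeated 2D<->3D
# reshapes inside every layer.
def _reshape_round_trip_sketch():
  x = tf.zeros([2, 7, 16])
  orig_shape = get_shape_list(x)
  flat = reshape_to_matrix(x)                       # [14, 16]
  restored = reshape_from_matrix(flat, orig_shape)  # back to [2, 7, 16]
  return restored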
def assert_rank(tensor, expected_rank, name=None):
  """Raises an exception if the tensor rank is not the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import random
import re
from albert import modeling
import numpy as np
import six
from six.moves import range
import tensorflow.compat.v1 as tf
class AlbertModelTest(tf.test.TestCase):
class AlbertModelTester(object):
def __init__(self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
vocab_size=99,
embedding_size=32,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02,
scope=None):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.scope = scope
def create_model(self):
input_ids = AlbertModelTest.ids_tensor([self.batch_size, self.seq_length],
self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = AlbertModelTest.ids_tensor(
[self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = AlbertModelTest.ids_tensor(
[self.batch_size, self.seq_length], self.type_vocab_size)
config = modeling.AlbertConfig(
vocab_size=self.vocab_size,
embedding_size=self.embedding_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range)
model = modeling.AlbertModel(
config=config,
is_training=self.is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=token_type_ids,
scope=self.scope)
outputs = {
"embedding_output": model.get_embedding_output(),
"sequence_output": model.get_sequence_output(),
"pooled_output": model.get_pooled_output(),
"all_encoder_layers": model.get_all_encoder_layers(),
}
return outputs
def check_output(self, result):
self.parent.assertAllEqual(
result["embedding_output"].shape,
[self.batch_size, self.seq_length, self.embedding_size])
self.parent.assertAllEqual(
result["sequence_output"].shape,
[self.batch_size, self.seq_length, self.hidden_size])
self.parent.assertAllEqual(result["pooled_output"].shape,
[self.batch_size, self.hidden_size])
def test_default(self):
self.run_tester(AlbertModelTest.AlbertModelTester(self))
def test_config_to_json_string(self):
config = modeling.AlbertConfig(vocab_size=99, hidden_size=37)
obj = json.loads(config.to_json_string())
self.assertEqual(obj["vocab_size"], 99)
self.assertEqual(obj["hidden_size"], 37)
def test_einsum_via_matmul(self):
batch_size = 8
seq_length = 12
num_attention_heads = 3
head_size = 6
hidden_size = 10
input_tensor = np.random.uniform(0, 1,
[batch_size, seq_length, hidden_size])
input_tensor = tf.constant(input_tensor, dtype=tf.float32)
w = np.random.uniform(0, 1, [hidden_size, num_attention_heads, head_size])
w = tf.constant(w, dtype=tf.float32)
ret1 = tf.einsum("BFH,HND->BFND", input_tensor, w)
ret2 = modeling.einsum_via_matmul(input_tensor, w, 1)
self.assertAllClose(ret1, ret2)
input_tensor = np.random.uniform(0, 1,
[batch_size, seq_length,
num_attention_heads, head_size])
input_tensor = tf.constant(input_tensor, dtype=tf.float32)
w = np.random.uniform(0, 1, [num_attention_heads, head_size, hidden_size])
w = tf.constant(w, dtype=tf.float32)
ret1 = tf.einsum("BFND,NDH->BFH", input_tensor, w)
ret2 = modeling.einsum_via_matmul(input_tensor, w, 2)
self.assertAllClose(ret1, ret2)
def run_tester(self, tester):
with self.test_session() as sess:
ops = tester.create_model()
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init_op)
output_result = sess.run(ops)
tester.check_output(output_result)
self.assert_all_tensors_reachable(sess, [init_op, ops])
@classmethod
  def ids_tensor(cls, shape, vocab_size, rng=None, name=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
return tf.constant(value=values, dtype=tf.int32, shape=shape, name=name)
def assert_all_tensors_reachable(self, sess, outputs):
"""Checks that all the tensors in the graph are reachable from outputs."""
graph = sess.graph
ignore_strings = [
"^.*/assert_less_equal/.*$",
"^.*/dilation_rate$",
"^.*/Tensordot/concat$",
"^.*/Tensordot/concat/axis$",
"^testing/.*$",
]
ignore_regexes = [re.compile(x) for x in ignore_strings]
unreachable = self.get_unreachable_ops(graph, outputs)
filtered_unreachable = []
for x in unreachable:
do_ignore = False
for r in ignore_regexes:
m = r.match(six.ensure_str(x.name))
if m is not None:
do_ignore = True
if do_ignore:
continue
filtered_unreachable.append(x)
unreachable = filtered_unreachable
self.assertEqual(
len(unreachable), 0, "The following ops are unreachable: %s" %
(" ".join([x.name for x in unreachable])))
@classmethod
def get_unreachable_ops(cls, graph, outputs):
"""Finds all of the tensors in graph that are unreachable from outputs."""
outputs = cls.flatten_recursive(outputs)
output_to_op = collections.defaultdict(list)
op_to_all = collections.defaultdict(list)
assign_out_to_in = collections.defaultdict(list)
for op in graph.get_operations():
for x in op.inputs:
op_to_all[op.name].append(x.name)
for y in op.outputs:
output_to_op[y.name].append(op.name)
op_to_all[op.name].append(y.name)
if str(op.type) == "Assign":
for y in op.outputs:
for x in op.inputs:
assign_out_to_in[y.name].append(x.name)
assign_groups = collections.defaultdict(list)
for out_name in assign_out_to_in.keys():
name_group = assign_out_to_in[out_name]
for n1 in name_group:
assign_groups[n1].append(out_name)
for n2 in name_group:
if n1 != n2:
assign_groups[n1].append(n2)
seen_tensors = {}
stack = [x.name for x in outputs]
while stack:
name = stack.pop()
if name in seen_tensors:
continue
seen_tensors[name] = True
if name in output_to_op:
for op_name in output_to_op[name]:
if op_name in op_to_all:
for input_name in op_to_all[op_name]:
if input_name not in stack:
stack.append(input_name)
expanded_names = []
if name in assign_groups:
for assign_name in assign_groups[name]:
expanded_names.append(assign_name)
for expanded_name in expanded_names:
if expanded_name not in stack:
stack.append(expanded_name)
unreachable_ops = []
for op in graph.get_operations():
is_unreachable = False
all_names = [x.name for x in op.inputs] + [x.name for x in op.outputs]
for name in all_names:
if name not in seen_tensors:
is_unreachable = True
if is_unreachable:
unreachable_ops.append(op)
return unreachable_ops
@classmethod
  def flatten_recursive(cls, item):
    """Flattens a (potentially nested) tuple/dictionary/list to a list."""
output = []
if isinstance(item, list):
output.extend(item)
elif isinstance(item, tuple):
output.extend(list(item))
elif isinstance(item, dict):
for (_, v) in six.iteritems(item):
output.append(v)
else:
return [item]
flat_output = []
for x in output:
flat_output.extend(cls.flatten_recursive(x))
return flat_output
if __name__ == "__main__":
tf.test.main()
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for run_pretraining."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import tempfile
from absl.testing import flagsaver
from albert import modeling
from albert import run_pretraining
import tensorflow.compat.v1 as tf
FLAGS = tf.app.flags.FLAGS
def _create_config_file(filename, max_seq_length, vocab_size):
"""Creates an AlbertConfig and saves it to file."""
albert_config = modeling.AlbertConfig(
vocab_size,
embedding_size=5,
hidden_size=14,
num_hidden_layers=3,
num_hidden_groups=1,
num_attention_heads=2,
intermediate_size=19,
inner_group_num=1,
down_scale_factor=1,
hidden_act="gelu",
hidden_dropout_prob=0,
attention_probs_dropout_prob=0,
max_position_embeddings=max_seq_length,
type_vocab_size=2,
initializer_range=0.02)
with tf.gfile.Open(filename, "w") as outfile:
outfile.write(albert_config.to_json_string())
def _create_record(max_predictions_per_seq, max_seq_length, vocab_size):
"""Returns a tf.train.Example containing random data."""
example = tf.train.Example()
example.features.feature["input_ids"].int64_list.value.extend(
[random.randint(0, vocab_size - 1) for _ in range(max_seq_length)])
example.features.feature["input_mask"].int64_list.value.extend(
[random.randint(0, 1) for _ in range(max_seq_length)])
example.features.feature["masked_lm_positions"].int64_list.value.extend([
random.randint(0, max_seq_length - 1)
for _ in range(max_predictions_per_seq)
])
example.features.feature["masked_lm_ids"].int64_list.value.extend([
random.randint(0, vocab_size - 1) for _ in range(max_predictions_per_seq)
])
example.features.feature["masked_lm_weights"].float_list.value.extend(
[1. for _ in range(max_predictions_per_seq)])
example.features.feature["segment_ids"].int64_list.value.extend(
[0 for _ in range(max_seq_length)])
example.features.feature["next_sentence_labels"].int64_list.value.append(
random.randint(0, 1))
return example
def _create_input_file(filename,
max_predictions_per_seq,
max_seq_length,
vocab_size,
size=1000):
"""Creates an input TFRecord file of specified size."""
with tf.io.TFRecordWriter(filename) as writer:
for _ in range(size):
ex = _create_record(max_predictions_per_seq, max_seq_length, vocab_size)
writer.write(ex.SerializeToString())
class RunPretrainingTest(tf.test.TestCase):
def _verify_output_file(self, basename):
self.assertTrue(tf.gfile.Exists(os.path.join(FLAGS.output_dir, basename)))
def _verify_checkpoint_files(self, name):
self._verify_output_file(name + ".meta")
self._verify_output_file(name + ".index")
self._verify_output_file(name + ".data-00000-of-00001")
@flagsaver.flagsaver
def test_pretraining(self):
# Set up required flags.
vocab_size = 97
FLAGS.max_predictions_per_seq = 7
FLAGS.max_seq_length = 13
FLAGS.output_dir = tempfile.mkdtemp("output_dir")
FLAGS.albert_config_file = os.path.join(
tempfile.mkdtemp("config_dir"), "albert_config.json")
FLAGS.input_file = os.path.join(
tempfile.mkdtemp("input_dir"), "input_data.tfrecord")
FLAGS.do_train = True
FLAGS.do_eval = True
FLAGS.num_train_steps = 1
FLAGS.save_checkpoints_steps = 1
# Construct requisite input files.
_create_config_file(FLAGS.albert_config_file, FLAGS.max_seq_length,
vocab_size)
_create_input_file(FLAGS.input_file, FLAGS.max_predictions_per_seq,
FLAGS.max_seq_length, vocab_size)
# Run the pretraining.
run_pretraining.main(None)
# Verify output.
self._verify_checkpoint_files("model.ckpt-best")
self._verify_checkpoint_files("model.ckpt-1")
self._verify_output_file("eval_results.txt")
self._verify_output_file("checkpoint")
if __name__ == "__main__":
tf.test.main()
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from albert import optimization
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
class OptimizationTest(tf.test.TestCase):
def test_adam(self):
with self.test_session() as sess:
w = tf.get_variable(
"w",
shape=[3],
initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
x = tf.constant([0.4, 0.2, -0.5])
loss = tf.reduce_mean(tf.square(x - w))
tvars = tf.trainable_variables()
grads = tf.gradients(loss, tvars)
global_step = tf.train.get_or_create_global_step()
optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
train_op = optimizer.apply_gradients(list(zip(grads, tvars)), global_step)
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init_op)
for _ in range(100):
sess.run(train_op)
w_np = sess.run(w)
self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2)
if __name__ == "__main__":
tf.test.main()
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run ALBERT on SQuAD v1.1 using sentence piece tokenization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import random
import time
from albert import fine_tuning_utils
from albert import modeling
from albert import squad_utils
import six
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
# pylint: disable=g-import-not-at-top
if six.PY2:
import six.moves.cPickle as pickle
else:
import pickle
# pylint: enable=g-import-not-at-top
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
    "albert_config_file", None,
    "The config json file corresponding to the pre-trained ALBERT model. "
    "This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the ALBERT model was trained on.")
flags.DEFINE_string("spm_model_file", None,
"The model file for sentence piece tokenization.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string("train_file", None,
"SQuAD json for training. E.g., train-v1.1.json")
flags.DEFINE_string(
"predict_file", None,
"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
flags.DEFINE_string("train_feature_file", None,
                    "Training feature file.")
flags.DEFINE_string(
"predict_feature_file", None,
"Location of predict features. If it doesn't exist, it will be written. "
"If it does exist, it will be read.")
flags.DEFINE_string(
"predict_feature_left_file", None,
"Location of predict features not passed to TPU. If it doesn't exist, it "
"will be written. If it does exist, it will be read.")
flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained ALBERT model).")
flags.DEFINE_string(
"albert_hub_module_handle", None,
"If set, the ALBERT hub module to use.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 384,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"doc_stride", 128,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer(
"max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", 8,
"Total batch size for predictions.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer(
"n_best_size", 20,
"The total number of n-best predictions to generate in the "
"nbest_predictions.json output file.")
flags.DEFINE_integer(
"max_answer_length", 30,
"The maximum length of an answer that can be generated. This is needed "
"because the start and end predictions are not conditioned on one another.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_bool(
"use_einsum", True,
"Whether to use tf.einsum or tf.reshape+tf.matmul for dense layers. Must "
"be set to False for TFLite compatibility.")
flags.DEFINE_string(
"export_dir",
default=None,
help=("The directory where the exported SavedModel will be stored."))
def validate_flags_or_throw(albert_config):
"""Validate the input FLAGS or throw an exception."""
if not FLAGS.do_train and not FLAGS.do_predict and not FLAGS.export_dir:
    err_msg = ("At least one of `do_train`, `do_predict` or `export_dir` "
               "must be specified.")
raise ValueError(err_msg)
if FLAGS.do_train:
if not FLAGS.train_file:
raise ValueError(
"If `do_train` is True, then `train_file` must be specified.")
if FLAGS.do_predict:
if not FLAGS.predict_file:
raise ValueError(
"If `do_predict` is True, then `predict_file` must be specified.")
if not FLAGS.predict_feature_file:
raise ValueError(
"If `do_predict` is True, then `predict_feature_file` must be "
"specified.")
if not FLAGS.predict_feature_left_file:
raise ValueError(
"If `do_predict` is True, then `predict_feature_left_file` must be "
"specified.")
if FLAGS.max_seq_length > albert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the ALBERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, albert_config.max_position_embeddings))
if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
raise ValueError(
"The max_seq_length (%d) must be greater than max_query_length "
"(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
def build_squad_serving_input_fn(seq_length):
"""Builds a serving input fn for raw input."""
  def _seq_serving_input_fn():
    """Serving input fn for raw SQuAD text inputs."""
input_ids = tf.placeholder(
shape=[1, seq_length], name="input_ids", dtype=tf.int32)
input_mask = tf.placeholder(
shape=[1, seq_length], name="input_mask", dtype=tf.int32)
segment_ids = tf.placeholder(
shape=[1, seq_length], name="segment_ids", dtype=tf.int32)
inputs = {
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids
}
return tf_estimator.export.ServingInputReceiver(features=inputs,
receiver_tensors=inputs)
return _seq_serving_input_fn
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
albert_config = modeling.AlbertConfig.from_json_file(FLAGS.albert_config_file)
validate_flags_or_throw(albert_config)
tf.gfile.MakeDirs(FLAGS.output_dir)
tokenizer = fine_tuning_utils.create_vocab(
vocab_file=FLAGS.vocab_file,
do_lower_case=FLAGS.do_lower_case,
spm_model_file=FLAGS.spm_model_file,
hub_module=FLAGS.albert_hub_module_handle)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2
if FLAGS.do_train:
iterations_per_loop = int(min(FLAGS.iterations_per_loop,
FLAGS.save_checkpoints_steps))
else:
iterations_per_loop = FLAGS.iterations_per_loop
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
keep_checkpoint_max=0,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=contrib_tpu.TPUConfig(
iterations_per_loop=iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = squad_utils.read_squad_examples(
input_file=FLAGS.train_file, is_training=True)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
# Pre-shuffle the input to avoid having to make a very large shuffle
    # buffer in the `input_fn`.
rng = random.Random(12345)
rng.shuffle(train_examples)
model_fn = squad_utils.v1_model_fn_builder(
albert_config=albert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu,
use_einsum=FLAGS.use_einsum,
hub_module=FLAGS.albert_hub_module_handle)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = contrib_tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
    # We write the training features to a file on disk to avoid storing very
    # large constant tensors in memory.
if not tf.gfile.Exists(FLAGS.train_feature_file):
train_writer = squad_utils.FeatureWriter(
filename=os.path.join(FLAGS.train_feature_file), is_training=True)
squad_utils.convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=True,
output_fn=train_writer.process_feature,
do_lower_case=FLAGS.do_lower_case)
train_writer.close()
tf.logging.info("***** Running training *****")
tf.logging.info(" Num orig examples = %d", len(train_examples))
# tf.logging.info(" Num split examples = %d", train_writer.num_features)
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
del train_examples
train_input_fn = squad_utils.input_fn_builder(
input_file=FLAGS.train_feature_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True,
use_tpu=FLAGS.use_tpu,
bsz=FLAGS.train_batch_size,
is_v2=False)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_predict:
with tf.gfile.Open(FLAGS.predict_file) as predict_file:
prediction_json = json.load(predict_file)["data"]
eval_examples = squad_utils.read_squad_examples(
input_file=FLAGS.predict_file, is_training=False)
if (tf.gfile.Exists(FLAGS.predict_feature_file) and tf.gfile.Exists(
FLAGS.predict_feature_left_file)):
tf.logging.info("Loading eval features from {}".format(
FLAGS.predict_feature_left_file))
with tf.gfile.Open(FLAGS.predict_feature_left_file, "rb") as fin:
eval_features = pickle.load(fin)
else:
eval_writer = squad_utils.FeatureWriter(
filename=FLAGS.predict_feature_file, is_training=False)
eval_features = []
def append_feature(feature):
eval_features.append(feature)
eval_writer.process_feature(feature)
squad_utils.convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=False,
output_fn=append_feature,
do_lower_case=FLAGS.do_lower_case)
eval_writer.close()
with tf.gfile.Open(FLAGS.predict_feature_left_file, "wb") as fout:
pickle.dump(eval_features, fout)
tf.logging.info("***** Running predictions *****")
tf.logging.info(" Num orig examples = %d", len(eval_examples))
tf.logging.info(" Num split examples = %d", len(eval_features))
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_input_fn = squad_utils.input_fn_builder(
input_file=FLAGS.predict_feature_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=False,
use_tpu=FLAGS.use_tpu,
bsz=FLAGS.predict_batch_size,
is_v2=False)
    def get_result(checkpoint):
      """Evaluate the checkpoint on SQuAD v1.1."""
# If running eval on the TPU, you will need to specify the number of
# steps.
reader = tf.train.NewCheckpointReader(checkpoint)
global_step = reader.get_tensor(tf.GraphKeys.GLOBAL_STEP)
all_results = []
for result in estimator.predict(
predict_input_fn, yield_single_examples=True,
checkpoint_path=checkpoint):
if len(all_results) % 1000 == 0:
tf.logging.info("Processing example: %d" % (len(all_results)))
unique_id = int(result["unique_ids"])
start_log_prob = [float(x) for x in result["start_log_prob"].flat]
end_log_prob = [float(x) for x in result["end_log_prob"].flat]
all_results.append(
squad_utils.RawResult(
unique_id=unique_id,
start_log_prob=start_log_prob,
end_log_prob=end_log_prob))
output_prediction_file = os.path.join(
FLAGS.output_dir, "predictions.json")
output_nbest_file = os.path.join(
FLAGS.output_dir, "nbest_predictions.json")
result_dict = {}
squad_utils.accumulate_predictions_v1(
result_dict, eval_examples, eval_features,
all_results, FLAGS.n_best_size, FLAGS.max_answer_length)
predictions = squad_utils.write_predictions_v1(
result_dict, eval_examples, eval_features, all_results,
FLAGS.n_best_size, FLAGS.max_answer_length,
output_prediction_file, output_nbest_file)
return squad_utils.evaluate_v1(
prediction_json, predictions), int(global_step)
def _find_valid_cands(curr_step):
filenames = tf.gfile.ListDirectory(FLAGS.output_dir)
candidates = []
for filename in filenames:
if filename.endswith(".index"):
ckpt_name = filename[:-6]
idx = ckpt_name.split("-")[-1]
if idx != "best" and int(idx) > curr_step:
candidates.append(filename)
return candidates
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
checkpoint_path = os.path.join(FLAGS.output_dir, "model.ckpt-best")
key_name = "f1"
writer = tf.gfile.GFile(output_eval_file, "w")
if tf.gfile.Exists(checkpoint_path + ".index"):
result = get_result(checkpoint_path)
best_perf = result[0][key_name]
global_step = result[1]
else:
global_step = -1
best_perf = -1
checkpoint_path = None
while global_step < num_train_steps:
steps_and_files = {}
filenames = tf.gfile.ListDirectory(FLAGS.output_dir)
for filename in filenames:
if filename.endswith(".index"):
ckpt_name = filename[:-6]
cur_filename = os.path.join(FLAGS.output_dir, ckpt_name)
if cur_filename.split("-")[-1] == "best":
continue
gstep = int(cur_filename.split("-")[-1])
if gstep not in steps_and_files:
tf.logging.info("Add {} to eval list.".format(cur_filename))
steps_and_files[gstep] = cur_filename
tf.logging.info("found {} files.".format(len(steps_and_files)))
if not steps_and_files:
tf.logging.info("found 0 file, global step: {}. Sleeping."
.format(global_step))
time.sleep(60)
else:
for ele in sorted(steps_and_files.items()):
step, checkpoint_path = ele
if global_step >= step:
if len(_find_valid_cands(step)) > 1:
for ext in ["meta", "data-00000-of-00001", "index"]:
src_ckpt = checkpoint_path + ".{}".format(ext)
tf.logging.info("removing {}".format(src_ckpt))
tf.gfile.Remove(src_ckpt)
continue
result, global_step = get_result(checkpoint_path)
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if result[key_name] > best_perf:
best_perf = result[key_name]
for ext in ["meta", "data-00000-of-00001", "index"]:
src_ckpt = checkpoint_path + ".{}".format(ext)
tgt_ckpt = checkpoint_path.rsplit(
"-", 1)[0] + "-best.{}".format(ext)
tf.logging.info("saving {} to {}".format(src_ckpt, tgt_ckpt))
tf.gfile.Copy(src_ckpt, tgt_ckpt, overwrite=True)
writer.write("saved {} to {}\n".format(src_ckpt, tgt_ckpt))
writer.write("best {} = {}\n".format(key_name, best_perf))
tf.logging.info(" best {} = {}\n".format(key_name, best_perf))
if len(_find_valid_cands(global_step)) > 2:
for ext in ["meta", "data-00000-of-00001", "index"]:
src_ckpt = checkpoint_path + ".{}".format(ext)
tf.logging.info("removing {}".format(src_ckpt))
tf.gfile.Remove(src_ckpt)
writer.write("=" * 50 + "\n")
checkpoint_path = os.path.join(FLAGS.output_dir, "model.ckpt-best")
result, global_step = get_result(checkpoint_path)
tf.logging.info("***** Final Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
writer.write("best perf happened at step: {}".format(global_step))
if FLAGS.export_dir:
tf.gfile.MakeDirs(FLAGS.export_dir)
squad_serving_input_fn = (
build_squad_serving_input_fn(FLAGS.max_seq_length))
tf.logging.info("Starting to export model.")
subfolder = estimator.export_saved_model(
export_dir_base=os.path.join(FLAGS.export_dir, "saved_model"),
serving_input_receiver_fn=squad_serving_input_fn)
tf.logging.info("Starting to export TFLite.")
converter = tf.lite.TFLiteConverter.from_saved_model(
subfolder,
input_arrays=["input_ids", "input_mask", "segment_ids"],
output_arrays=["start_logits", "end_logits"])
float_model = converter.convert()
tflite_file = os.path.join(FLAGS.export_dir, "albert_model.tflite")
with tf.gfile.GFile(tflite_file, "wb") as f:
f.write(float_model)
if __name__ == "__main__":
flags.mark_flag_as_required("spm_model_file")
flags.mark_flag_as_required("albert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Exports a minimal TF-Hub module for ALBERT models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from albert import modeling
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
flags.DEFINE_string(
    "albert_directory", None,
    "The directory containing the pre-trained ALBERT model checkpoint and "
    "its albert_config.json file. This specifies the model architecture.")
flags.DEFINE_string(
"checkpoint_name", "model.ckpt-best",
"Name of the checkpoint under albert_directory to be exported.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_bool(
"use_einsum", True,
"Whether to use tf.einsum or tf.reshape+tf.matmul for dense layers. Must "
"be set to False for TFLite compatibility.")
flags.DEFINE_string("export_path", None, "Path to the output TF-Hub module.")
FLAGS = flags.FLAGS
def gather_indexes(sequence_tensor, positions):
  """Gathers the vectors at the specified positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
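# Illustrative sketch (editor's addition; the toy values are assumptions):
# `gather_indexes` flattens the batch so that per-example positions can be
# gathered in a single `tf.gather` call.
def _gather_indexes_sketch():
  seq = tf.reshape(tf.range(2 * 3 * 4, dtype=tf.float32), [2, 3, 4])
  positions = tf.constant([[0, 2], [1, 1]], dtype=tf.int32)
  # Returns a [4, 4] tensor: rows 0 and 2 of example 0, then row 1 of
  # example 1 twice.
  return gather_indexes(seq, positions)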
def get_mlm_logits(model, albert_config, mlm_positions):
"""From run_pretraining.py."""
input_tensor = gather_indexes(model.get_sequence_output(), mlm_positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=albert_config.embedding_size,
activation=modeling.get_activation(albert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
albert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[albert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(
input_tensor, model.get_embedding_table(), transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
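    # Editor's note on shapes (not in the original code): `logits` is
    # [batch_size * num_mlm_positions, vocab_size]. Because the embedding
    # table is reused as the output projection, no separate softmax weight
    # matrix is learned here (only `output_bias`).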
return logits
def get_sop_log_probs(model, albert_config):
  """Gets log probs for the sentence-order prediction."""
input_tensor = model.get_pooled_output()
  # Simple binary classification for sentence-order prediction. Note that 0
  # means the two segments are in their original order and 1 means they are
  # swapped. This weight matrix is not used after pre-training.
with tf.variable_scope("cls/seq_relationship"):
output_weights = tf.get_variable(
"output_weights",
shape=[2, albert_config.hidden_size],
initializer=modeling.create_initializer(
albert_config.initializer_range))
output_bias = tf.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
return log_probs
def module_fn(is_training):
"""Module function."""
input_ids = tf.placeholder(tf.int32, [None, None], "input_ids")
input_mask = tf.placeholder(tf.int32, [None, None], "input_mask")
segment_ids = tf.placeholder(tf.int32, [None, None], "segment_ids")
mlm_positions = tf.placeholder(tf.int32, [None, None], "mlm_positions")
albert_config_path = os.path.join(
FLAGS.albert_directory, "albert_config.json")
albert_config = modeling.AlbertConfig.from_json_file(albert_config_path)
model = modeling.AlbertModel(
config=albert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=False,
use_einsum=FLAGS.use_einsum)
mlm_logits = get_mlm_logits(model, albert_config, mlm_positions)
sop_log_probs = get_sop_log_probs(model, albert_config)
vocab_model_path = os.path.join(FLAGS.albert_directory, "30k-clean.model")
vocab_file_path = os.path.join(FLAGS.albert_directory, "30k-clean.vocab")
config_file = tf.constant(
value=albert_config_path, dtype=tf.string, name="config_file")
vocab_model = tf.constant(
value=vocab_model_path, dtype=tf.string, name="vocab_model")
  # This is only for visualization purposes.
vocab_file = tf.constant(
value=vocab_file_path, dtype=tf.string, name="vocab_file")
  # By adding `config_file`, `vocab_model` and `vocab_file` to the
  # ASSET_FILEPATHS collection, TF-Hub will rewrite these tensors so that
  # the assets are portable.
tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, config_file)
tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, vocab_model)
tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, vocab_file)
hub.add_signature(
name="tokens",
inputs=dict(
input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids),
outputs=dict(
sequence_output=model.get_sequence_output(),
pooled_output=model.get_pooled_output()))
hub.add_signature(
name="sop",
inputs=dict(
input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids),
outputs=dict(
sequence_output=model.get_sequence_output(),
pooled_output=model.get_pooled_output(),
sop_log_probs=sop_log_probs))
hub.add_signature(
name="mlm",
inputs=dict(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
mlm_positions=mlm_positions),
outputs=dict(
sequence_output=model.get_sequence_output(),
pooled_output=model.get_pooled_output(),
mlm_logits=mlm_logits))
hub.add_signature(
name="tokenization_info",
inputs={},
outputs=dict(
vocab_file=vocab_model,
do_lower_case=tf.constant(FLAGS.do_lower_case)))
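# Hedged usage sketch (never called by this script): once `spec.export` below
# has written the module, a consumer could load it with the TF1 hub API and
# run the "tokens" signature registered above. The module path here is a
# placeholder, not a real export location.
def _example_consume_exported_module(module_path="/tmp/albert_hub_module"):
  """Builds a graph that maps feed-dict inputs through the "tokens" signature."""
  with tf.Graph().as_default():
    albert_module = hub.Module(module_path, trainable=False)
    inputs = dict(
        input_ids=tf.placeholder(tf.int32, [None, None]),
        input_mask=tf.placeholder(tf.int32, [None, None]),
        segment_ids=tf.placeholder(tf.int32, [None, None]))
    outputs = albert_module(inputs, signature="tokens", as_dict=True)
    # outputs["sequence_output"]: [batch, seq_len, hidden_size]
    # outputs["pooled_output"]:   [batch, hidden_size]
    return outputs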
def main(_):
tags_and_args = []
for is_training in (True, False):
tags = set()
if is_training:
tags.add("train")
tags_and_args.append((tags, dict(is_training=is_training)))
spec = hub.create_module_spec(module_fn, tags_and_args=tags_and_args)
checkpoint_path = os.path.join(FLAGS.albert_directory, FLAGS.checkpoint_name)
tf.logging.info("Using checkpoint {}".format(checkpoint_path))
spec.export(FLAGS.export_path, checkpoint_path=checkpoint_path)
if __name__ == "__main__":
flags.mark_flag_as_required("albert_directory")
flags.mark_flag_as_required("export_path")
app.run(main)
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from albert import lamb_optimizer
import six
from six.moves import zip
import tensorflow.compat.v1 as tf
from tensorflow.contrib import tpu as contrib_tpu
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu,
optimizer="adamw", poly_power=1.0, start_warmup_step=0,
colocate_gradients_with_ops=False, excluded_tvars=None):
"""Creates an optimizer training op."""
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
# Implements linear decay of the learning rate.
learning_rate = tf.train.polynomial_decay(
learning_rate,
global_step,
num_train_steps,
end_learning_rate=0.0,
power=poly_power,
cycle=False)
# Implements linear warmup. I.e., if global_step - start_warmup_step <
# num_warmup_steps, the learning rate will be
# `(global_step - start_warmup_step)/num_warmup_steps * init_lr`.
if num_warmup_steps:
tf.logging.info("++++++ warmup starts at step " + str(start_warmup_step)
+ ", for " + str(num_warmup_steps) + " steps ++++++")
global_steps_int = tf.cast(global_step, tf.int32)
start_warm_int = tf.constant(start_warmup_step, dtype=tf.int32)
global_steps_int = global_steps_int - start_warm_int
warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = (
(1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
  # It is OK to use this optimizer for fine-tuning, since this is how the
  # model was trained (note that the Adam m/v variables are NOT loaded from
  # init_checkpoint).
  # It is also fine to use AdamW for fine-tuning even if the model was
  # pre-trained with LAMB. As reported in the public BERT GitHub repository,
  # the learning rate for SQuAD 1.1 fine-tuning is 3e-5, 4e-5 or 5e-5. For
  # LAMB, users can use 3e-4, 4e-4, or 5e-4 for a batch size of 64.
if optimizer == "adamw":
tf.logging.info("using adamw")
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
elif optimizer == "lamb":
tf.logging.info("using lamb")
optimizer = lamb_optimizer.LAMBOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
else:
raise ValueError("Not supported optimizer: ", optimizer)
if use_tpu:
optimizer = contrib_tpu.CrossShardOptimizer(optimizer)
tvars = tf.trainable_variables()
  # Filter via a new list rather than removing elements while iterating,
  # which would silently skip variables.
  if excluded_tvars:
    tvars = [tvar for tvar in tvars if tvar.name not in excluded_tvars]
grads = tf.gradients(
loss, tvars, colocate_gradients_with_ops=colocate_gradients_with_ops)
# This is how the model was pre-trained.
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
train_op = optimizer.apply_gradients(
list(zip(grads, tvars)), global_step=global_step)
# Normally the global step update is done inside of `apply_gradients`.
  # However, neither `AdamWeightDecayOptimizer` nor `LAMBOptimizer` does this.
  # If you use a different optimizer, you should probably take this line out.
new_global_step = global_step + 1
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
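# Host-side sketch (illustrative only) of the schedule constructed above:
# linear warmup for `num_warmup_steps` steps after `start_warmup_step`,
# followed by polynomial decay of `init_lr` to 0 over `num_train_steps`.
def _learning_rate_at_step_sketch(step, init_lr, num_train_steps,
                                  num_warmup_steps, poly_power=1.0,
                                  start_warmup_step=0):
  decayed = init_lr * (
      1.0 - min(step, num_train_steps) / float(num_train_steps))**poly_power
  warmup_step = step - start_warmup_step
  if num_warmup_steps and warmup_step < num_warmup_steps:
    # The graph version does not clamp at zero; max() is only for readability.
    return init_lr * max(warmup_step, 0) / float(num_warmup_steps)
  return decayed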
class AdamWeightDecayOptimizer(tf.train.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="AdamWeightDecayOptimizer"):
"""Constructs a AdamWeightDecayOptimizer."""
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""See base class."""
assignments = []
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
m = tf.get_variable(
name=six.ensure_str(param_name) + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.get_variable(
name=six.ensure_str(param_name) + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
# Standard Adam update.
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
      # Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param
update_with_lr = self.learning_rate * update
next_param = param - update_with_lr
assignments.extend(
[param.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", six.ensure_str(param_name))
if m is not None:
param_name = m.group(1)
return param_name
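# Single-parameter, plain-Python sketch of the decoupled ("correct") weight
# decay implemented in AdamWeightDecayOptimizer.apply_gradients above: the
# decay term is added to the Adam update after the moment estimates, so it
# never flows through m or v. Scalar values, no bias correction; for
# intuition only.
def _adamw_scalar_step_sketch(param, grad, m, v, lr, weight_decay_rate=0.01,
                              beta_1=0.9, beta_2=0.999, epsilon=1e-6):
  next_m = beta_1 * m + (1.0 - beta_1) * grad
  next_v = beta_2 * v + (1.0 - beta_2) * grad * grad
  update = next_m / (next_v**0.5 + epsilon)
  update += weight_decay_rate * param  # decoupled decay, not part of the grad
  next_param = param - lr * update
  return next_param, next_m, next_v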
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from albert import tokenization
import six
import tensorflow.compat.v1 as tf
class TokenizationTest(tf.test.TestCase):
def test_full_tokenizer(self):
vocab_tokens = [
"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
"##ing", ","
]
with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
if six.PY2:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
else:
contents = "".join([six.ensure_str(x) + "\n" for x in vocab_tokens])
vocab_writer.write(six.ensure_binary(contents, "utf-8"))
vocab_file = vocab_writer.name
tokenizer = tokenization.FullTokenizer(vocab_file)
os.unlink(vocab_file)
tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertAllEqual(
tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def test_chinese(self):
tokenizer = tokenization.BasicTokenizer()
self.assertAllEqual(
tokenizer.tokenize(u"ah\u535A\u63A8zz"),
[u"ah", u"\u535A", u"\u63A8", u"zz"])
def test_basic_tokenizer_lower(self):
tokenizer = tokenization.BasicTokenizer(do_lower_case=True)
self.assertAllEqual(
tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
["hello", "!", "how", "are", "you", "?"])
self.assertAllEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = tokenization.BasicTokenizer(do_lower_case=False)
self.assertAllEqual(
tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
["HeLLo", "!", "how", "Are", "yoU", "?"])
def test_wordpiece_tokenizer(self):
vocab_tokens = [
"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
"##ing"
]
vocab = {}
for (i, token) in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = tokenization.WordpieceTokenizer(vocab=vocab)
self.assertAllEqual(tokenizer.tokenize(""), [])
self.assertAllEqual(
tokenizer.tokenize("unwanted running"),
["un", "##want", "##ed", "runn", "##ing"])
self.assertAllEqual(
tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def test_convert_tokens_to_ids(self):
vocab_tokens = [
"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
"##ing"
]
vocab = {}
for (i, token) in enumerate(vocab_tokens):
vocab[token] = i
self.assertAllEqual(
tokenization.convert_tokens_to_ids(
vocab, ["un", "##want", "##ed", "runn", "##ing"]), [7, 4, 5, 8, 9])
def test_is_whitespace(self):
self.assertTrue(tokenization._is_whitespace(u" "))
self.assertTrue(tokenization._is_whitespace(u"\t"))
self.assertTrue(tokenization._is_whitespace(u"\r"))
self.assertTrue(tokenization._is_whitespace(u"\n"))
self.assertTrue(tokenization._is_whitespace(u"\u00A0"))
self.assertFalse(tokenization._is_whitespace(u"A"))
self.assertFalse(tokenization._is_whitespace(u"-"))
def test_is_control(self):
self.assertTrue(tokenization._is_control(u"\u0005"))
self.assertFalse(tokenization._is_control(u"A"))
self.assertFalse(tokenization._is_control(u" "))
self.assertFalse(tokenization._is_control(u"\t"))
self.assertFalse(tokenization._is_control(u"\r"))
self.assertFalse(tokenization._is_control(u"\U0001F4A9"))
def test_is_punctuation(self):
self.assertTrue(tokenization._is_punctuation(u"-"))
self.assertTrue(tokenization._is_punctuation(u"$"))
self.assertTrue(tokenization._is_punctuation(u"`"))
self.assertTrue(tokenization._is_punctuation(u"."))
self.assertFalse(tokenization._is_punctuation(u"A"))
self.assertFalse(tokenization._is_punctuation(u" "))
if __name__ == "__main__":
tf.test.main()
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
from six.moves import range
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
import sentencepiece as spm
SPIECE_UNDERLINE = u"▁".encode("utf-8")
def preprocess_text(inputs, remove_space=True, lower=False):
"""preprocess data by removing extra space and normalize data."""
outputs = inputs
if remove_space:
outputs = " ".join(inputs.strip().split())
if six.PY2 and isinstance(outputs, str):
try:
outputs = six.ensure_text(outputs, "utf-8")
except UnicodeDecodeError:
outputs = six.ensure_text(outputs, "latin-1")
outputs = unicodedata.normalize("NFKD", outputs)
outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
if lower:
outputs = outputs.lower()
return outputs
def encode_pieces(sp_model, text, return_unicode=True, sample=False):
"""turn sentences into word pieces."""
if six.PY2 and isinstance(text, six.text_type):
text = six.ensure_binary(text, "utf-8")
if not sample:
pieces = sp_model.EncodeAsPieces(text)
else:
pieces = sp_model.SampleEncodeAsPieces(text, 64, 0.1)
new_pieces = []
for piece in pieces:
piece = printable_text(piece)
if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
cur_pieces = sp_model.EncodeAsPieces(
six.ensure_binary(piece[:-1]).replace(SPIECE_UNDERLINE, b""))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
cur_pieces = cur_pieces[1:]
else:
cur_pieces[0] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(cur_pieces)
else:
new_pieces.append(piece)
# note(zhiliny): convert back to unicode for py2
if six.PY2 and return_unicode:
ret_pieces = []
for piece in new_pieces:
if isinstance(piece, str):
piece = six.ensure_text(piece, "utf-8")
ret_pieces.append(piece)
new_pieces = ret_pieces
return new_pieces
def encode_ids(sp_model, text, sample=False):
pieces = encode_pieces(sp_model, text, return_unicode=False, sample=sample)
ids = [sp_model.PieceToId(piece) for piece in pieces]
return ids
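# Hedged usage sketch (not called in this module): loading a SentencePiece
# model from disk and running the helpers above. The model path is a
# placeholder; any trained *.model file would do.
def _example_sentencepiece_encoding(spm_model_path="30k-clean.model"):
  sp_model = spm.SentencePieceProcessor()
  sp_model.Load(spm_model_path)
  text = preprocess_text("Hello, World!", lower=True)
  pieces = encode_pieces(sp_model, text, return_unicode=False)
  ids = encode_ids(sp_model, text)
  return pieces, ids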
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return six.ensure_text(text, "utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return six.ensure_text(text, "utf-8", "ignore")
elif isinstance(text, six.text_type):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return six.ensure_text(text, "utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, six.text_type):
return six.ensure_binary(text, "utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip().split()[0] if token.strip() else " "
if token not in vocab:
vocab[token] = len(vocab)
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True, spm_model_file=None):
self.vocab = None
self.sp_model = None
if spm_model_file:
self.sp_model = spm.SentencePieceProcessor()
tf.logging.info("loading sentence piece model")
# Handle cases where SP can't load the file, but gfile can.
sp_model_ = tf.gfile.GFile(spm_model_file, "rb").read()
self.sp_model.LoadFromSerializedProto(sp_model_)
      # Note(mingdachen): For the purpose of a consistent API, we are
# generating a vocabulary for the sentence piece tokenizer.
self.vocab = {self.sp_model.IdToPiece(i): i for i
in range(self.sp_model.GetPieceSize())}
else:
self.vocab = load_vocab(vocab_file)
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
@classmethod
def from_scratch(cls, vocab_file, do_lower_case, spm_model_file):
return FullTokenizer(vocab_file, do_lower_case, spm_model_file)
@classmethod
def from_hub_module(cls, hub_module, use_spm=True):
"""Get the vocab file and casing info from the Hub module."""
with tf.Graph().as_default():
albert_module = hub.Module(hub_module)
tokenization_info = albert_module(signature="tokenization_info",
as_dict=True)
with tf.Session() as sess:
vocab_file, do_lower_case = sess.run(
[tokenization_info["vocab_file"],
tokenization_info["do_lower_case"]])
if use_spm:
spm_model_file = vocab_file
vocab_file = None
return FullTokenizer(
vocab_file=vocab_file, do_lower_case=do_lower_case,
spm_model_file=spm_model_file)
def tokenize(self, text):
if self.sp_model:
split_tokens = encode_pieces(self.sp_model, text, return_unicode=False)
else:
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
if self.sp_model:
tf.logging.info("using sentence piece tokenzier.")
return [self.sp_model.PieceToId(
printable_text(token)) for token in tokens]
else:
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
if self.sp_model:
tf.logging.info("using sentence piece tokenzier.")
return [self.sp_model.IdToPiece(id_) for id_ in ids]
else:
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
    # words in the English Wikipedia).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
        already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + six.ensure_str(substr)
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
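# Hedged end-to-end sketch (not called in this module): FullTokenizer can be
# backed either by a WordPiece vocab file or by a SentencePiece model, as
# handled in FullTokenizer.__init__ above. The model path is a placeholder.
def _example_full_tokenizer(spm_model_file="30k-clean.model"):
  tokenizer = FullTokenizer(
      vocab_file=None, do_lower_case=True, spm_model_file=spm_model_file)
  tokens = tokenizer.tokenize("unaffable weather")
  ids = tokenizer.convert_tokens_to_ids(tokens)
  return tokens, ids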
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run masked LM/next sentence masked_lm pre-training for ALBERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from albert import modeling
from albert import optimization
from six.moves import range
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"albert_config_file", None,
"The config json file corresponding to the pre-trained ALBERT model. "
"This specifies the model architecture.")
flags.DEFINE_string(
"input_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained ALBERT model).")
flags.DEFINE_integer(
"max_seq_length", 512,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.")
flags.DEFINE_integer(
"max_predictions_per_seq", 20,
"Maximum number of masked LM predictions per sequence. "
"Must match data generation.")
flags.DEFINE_bool("do_train", True, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 4096, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 64, "Total batch size for eval.")
flags.DEFINE_enum("optimizer", "lamb", ["adamw", "lamb"],
"The optimizer for training.")
flags.DEFINE_float("learning_rate", 0.00176, "The initial learning rate.")
flags.DEFINE_float("poly_power", 1.0, "The power of poly decay.")
flags.DEFINE_integer("num_train_steps", 125000, "Number of training steps.")
flags.DEFINE_integer("num_warmup_steps", 3125, "Number of warmup steps.")
flags.DEFINE_integer("start_warmup_step", 0, "The starting step of warmup.")
flags.DEFINE_integer("save_checkpoints_steps", 5000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("keep_checkpoint_max", 5,
"How many checkpoints to keep.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_bool("init_from_group0", False, "Whether to initialize"
"parameters of other groups from group 0")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_float(
"masked_lm_budget", 0,
"If >0, the ratio of masked ngrams to unmasked ngrams. Default 0,"
"for offline masking")
def model_fn_builder(albert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings, optimizer, poly_power,
start_warmup_step):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
masked_lm_positions = features["masked_lm_positions"]
masked_lm_ids = features["masked_lm_ids"]
masked_lm_weights = features["masked_lm_weights"]
# Note: We keep this feature name `next_sentence_labels` to be compatible
# with the original data created by lanzhzh@. However, in the ALBERT case
# it does represent sentence_order_labels.
sentence_order_labels = features["next_sentence_labels"]
is_training = (mode == tf_estimator.ModeKeys.TRAIN)
model = modeling.AlbertModel(
config=albert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
(masked_lm_loss, masked_lm_example_loss,
masked_lm_log_probs) = get_masked_lm_output(albert_config,
model.get_sequence_output(),
model.get_embedding_table(),
masked_lm_positions,
masked_lm_ids,
masked_lm_weights)
(sentence_order_loss, sentence_order_example_loss,
sentence_order_log_probs) = get_sentence_order_output(
albert_config, model.get_pooled_output(), sentence_order_labels)
total_loss = masked_lm_loss + sentence_order_loss
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
tf.logging.info("number of hidden group %d to initialize",
albert_config.num_hidden_groups)
num_of_initialize_group = 1
if FLAGS.init_from_group0:
num_of_initialize_group = albert_config.num_hidden_groups
if albert_config.net_structure_type > 0:
num_of_initialize_group = albert_config.num_hidden_layers
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(
tvars, init_checkpoint, num_of_initialize_group)
if use_tpu:
def tpu_scaffold():
for gid in range(num_of_initialize_group):
tf.logging.info("initialize the %dth layer", gid)
tf.logging.info(assignment_map[gid])
tf.train.init_from_checkpoint(init_checkpoint, assignment_map[gid])
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
for gid in range(num_of_initialize_group):
tf.logging.info("initialize the %dth layer", gid)
tf.logging.info(assignment_map[gid])
tf.train.init_from_checkpoint(init_checkpoint, assignment_map[gid])
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf_estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps,
use_tpu, optimizer, poly_power, start_warmup_step)
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf_estimator.ModeKeys.EVAL:
def metric_fn(*args):
"""Computes the loss and accuracy of the model."""
(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, sentence_order_example_loss,
sentence_order_log_probs, sentence_order_labels) = args[:7]
masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
[-1, masked_lm_log_probs.shape[-1]])
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
metrics = {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
}
sentence_order_log_probs = tf.reshape(
sentence_order_log_probs, [-1, sentence_order_log_probs.shape[-1]])
sentence_order_predictions = tf.argmax(
sentence_order_log_probs, axis=-1, output_type=tf.int32)
sentence_order_labels = tf.reshape(sentence_order_labels, [-1])
sentence_order_accuracy = tf.metrics.accuracy(
labels=sentence_order_labels,
predictions=sentence_order_predictions)
sentence_order_mean_loss = tf.metrics.mean(
values=sentence_order_example_loss)
metrics.update({
"sentence_order_accuracy": sentence_order_accuracy,
"sentence_order_loss": sentence_order_mean_loss
})
return metrics
metric_values = [
masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, sentence_order_example_loss,
sentence_order_log_probs, sentence_order_labels
]
eval_metrics = (metric_fn, metric_values)
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
return output_spec
return model_fn
def get_masked_lm_output(albert_config, input_tensor, output_weights, positions,
label_ids, label_weights):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=albert_config.embedding_size,
activation=modeling.get_activation(albert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
albert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[albert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=albert_config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
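# Plain-Python sketch of the weighted averaging above: padded prediction
# slots carry a label weight of 0.0 and therefore contribute nothing to the
# numerator, while the 1e-5 keeps the denominator non-zero even if every
# slot is padding. Inputs are flat per-prediction lists; illustrative only.
def _masked_lm_mean_loss_sketch(per_example_loss, label_weights):
  numerator = sum(w * l for w, l in zip(label_weights, per_example_loss))
  denominator = sum(label_weights) + 1e-5
  return numerator / denominator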
def get_sentence_order_output(albert_config, input_tensor, labels):
"""Get loss and log probs for the next sentence prediction."""
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
with tf.variable_scope("cls/seq_relationship"):
output_weights = tf.get_variable(
"output_weights",
shape=[2, albert_config.hidden_size],
initializer=modeling.create_initializer(
albert_config.initializer_range))
output_bias = tf.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
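# Plain-Python sketch of the flattening trick in gather_indexes above:
# `positions` holds per-example indices, so adding example_index * seq_length
# turns them into indices into the [batch * seq_length, width] flattened
# tensor. Nested lists stand in for tensors; illustrative only.
def _gather_indexes_sketch(sequence, positions):
  """sequence: [batch][seq_length][width] lists; positions: [batch][k] lists."""
  seq_length = len(sequence[0])
  flat = [row for example in sequence for row in example]
  flat_positions = [
      b * seq_length + p for b, pos in enumerate(positions) for p in pos]
  return [flat[i] for i in flat_positions]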
def input_fn_builder(input_files,
max_seq_length,
max_predictions_per_seq,
is_training,
num_cpu_threads=4):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
name_to_features = {
"input_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([max_seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
# Note: We keep this feature name `next_sentence_labels` to be
# compatible with the original data created by lanzhzh@. However, in
# the ALBERT case it does represent sentence_order_labels.
"next_sentence_labels": tf.FixedLenFeature([1], tf.int64),
}
if FLAGS.masked_lm_budget:
name_to_features.update({
"token_boundary":
tf.FixedLenFeature([max_seq_length], tf.int64)})
else:
name_to_features.update({
"masked_lm_positions":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_ids":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights":
tf.FixedLenFeature([max_predictions_per_seq], tf.float32)})
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
d = d.repeat()
d = d.shuffle(buffer_size=len(input_files))
# `cycle_length` is the number of parallel files that get read.
cycle_length = min(num_cpu_threads, len(input_files))
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
d = d.apply(
tf.data.experimental.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=is_training,
cycle_length=cycle_length))
d = d.shuffle(buffer_size=100)
else:
d = tf.data.TFRecordDataset(input_files)
# Since we evaluate for a fixed number of steps we don't want to encounter
# out-of-range exceptions.
d = d.repeat()
# We must `drop_remainder` on training because the TPU requires fixed
# size dimensions. For eval, we assume we are evaluating on the CPU or GPU
    # and we *don't* want to drop the remainder, otherwise we won't cover
# every sample.
d = d.apply(
tf.data.experimental.map_and_batch_with_legacy_function(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
num_parallel_batches=num_cpu_threads,
drop_remainder=True))
tf.logging.info(d)
return d
return input_fn
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
albert_config = modeling.AlbertConfig.from_json_file(FLAGS.albert_config_file)
tf.gfile.MakeDirs(FLAGS.output_dir)
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Input Files ***")
for input_file in input_files:
tf.logging.info(" %s" % input_file)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
keep_checkpoint_max=FLAGS.keep_checkpoint_max,
tpu_config=contrib_tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
model_fn = model_fn_builder(
albert_config=albert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=FLAGS.num_train_steps,
num_warmup_steps=FLAGS.num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu,
optimizer=FLAGS.optimizer,
poly_power=FLAGS.poly_power,
start_warmup_step=FLAGS.start_warmup_step)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = contrib_tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size)
if FLAGS.do_train:
tf.logging.info("***** Running training *****")
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
train_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=True)
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
if FLAGS.do_eval:
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
global_step = -1
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
writer = tf.gfile.GFile(output_eval_file, "w")
eval_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=False)
best_perf = 0
key_name = "masked_lm_accuracy"
while global_step < FLAGS.num_train_steps:
if estimator.latest_checkpoint() is None:
tf.logging.info("No checkpoint found yet. Sleeping.")
time.sleep(1)
else:
result = estimator.evaluate(
input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
global_step = result["global_step"]
tf.logging.info("***** Eval results *****")
checkpoint_path = estimator.latest_checkpoint()
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if result[key_name] > best_perf:
best_perf = result[key_name]
for ext in ["meta", "data-00000-of-00001", "index"]:
src_ckpt = checkpoint_path + ".{}".format(ext)
tgt_ckpt = checkpoint_path.rsplit(
"-", 1)[0] + "-best.{}".format(ext)
tf.logging.info("saving {} to {}".format(src_ckpt, tgt_ckpt))
tf.gfile.Copy(src_ckpt, tgt_ckpt, overwrite=True)
writer.write("saved {} to {}\n".format(src_ckpt, tgt_ckpt))
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("albert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning on classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import time
from albert import classifier_utils
from albert import fine_tuning_utils
from albert import modeling
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"albert_config_file", None,
"The config json file corresponding to the pre-trained ALBERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string(
"vocab_file", None,
"The vocabulary file that the ALBERT model was trained on.")
flags.DEFINE_string("spm_model_file", None,
"The model file for sentence piece tokenization.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
flags.DEFINE_string("cached_dir", None,
"Path to cached training and dev tfrecord file. "
"The file will be generated if not exist.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string(
"albert_hub_module_handle", None,
"If set, the ALBERT hub module to use.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 512,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_integer("train_step", 1000,
"Total number of training steps to perform.")
flags.DEFINE_integer(
"warmup_step", 0,
"number of steps to perform linear learning rate warmup for.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("keep_checkpoint_max", 5,
"How many checkpoints to keep.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_string("optimizer", "adamw", "Optimizer to use")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_string(
"export_dir", None,
"The directory where the exported SavedModel will be stored.")
flags.DEFINE_float(
"threshold_to_export", float("nan"),
"The threshold value that should be used with the exported classifier. "
"When specified, the threshold will be attached to the exported "
"SavedModel, and served along with the predictions. Please use the "
"saved model cli ("
"https://www.tensorflow.org/guide/saved_model#details_of_the_savedmodel_command_line_interface"
") to view the output signature of the threshold.")
def _serving_input_receiver_fn():
"""Creates an input function for serving."""
seq_len = FLAGS.max_seq_length
serialized_example = tf.placeholder(
dtype=tf.string, shape=[None], name="serialized_example")
features = {
"input_ids": tf.FixedLenFeature([seq_len], dtype=tf.int64),
"input_mask": tf.FixedLenFeature([seq_len], dtype=tf.int64),
"segment_ids": tf.FixedLenFeature([seq_len], dtype=tf.int64),
}
feature_map = tf.parse_example(serialized_example, features=features)
feature_map["is_real_example"] = tf.constant(1, dtype=tf.int32)
feature_map["label_ids"] = tf.constant(0, dtype=tf.int32)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in feature_map.keys():
t = feature_map[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
feature_map[name] = t
return tf_estimator.export.ServingInputReceiver(
features=feature_map, receiver_tensors=serialized_example)
def _add_threshold_to_model_fn(model_fn, threshold):
"""Adds the classifier threshold to the given model_fn."""
def new_model_fn(features, labels, mode, params):
spec = model_fn(features, labels, mode, params)
threshold_tensor = tf.constant(threshold, dtype=tf.float32)
default_serving_export = spec.export_outputs[
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
default_serving_export.outputs["threshold"] = threshold_tensor
return spec
return new_model_fn
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"cola": classifier_utils.ColaProcessor,
"mnli": classifier_utils.MnliProcessor,
"mismnli": classifier_utils.MisMnliProcessor,
"mrpc": classifier_utils.MrpcProcessor,
"rte": classifier_utils.RteProcessor,
"sst-2": classifier_utils.Sst2Processor,
"sts-b": classifier_utils.StsbProcessor,
"qqp": classifier_utils.QqpProcessor,
"qnli": classifier_utils.QnliProcessor,
"wnli": classifier_utils.WnliProcessor,
}
if not (FLAGS.do_train or FLAGS.do_eval or FLAGS.do_predict or
FLAGS.export_dir):
raise ValueError(
"At least one of `do_train`, `do_eval`, `do_predict' or `export_dir` "
"must be True.")
if not FLAGS.albert_config_file and not FLAGS.albert_hub_module_handle:
raise ValueError("At least one of `--albert_config_file` and "
"`--albert_hub_module_handle` must be set")
if FLAGS.albert_config_file:
albert_config = modeling.AlbertConfig.from_json_file(
FLAGS.albert_config_file)
if FLAGS.max_seq_length > albert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the ALBERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, albert_config.max_position_embeddings))
else:
albert_config = None # Get the config from TF-Hub.
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name](
use_spm=True if FLAGS.spm_model_file else False,
do_lower_case=FLAGS.do_lower_case)
label_list = processor.get_labels()
tokenizer = fine_tuning_utils.create_vocab(
vocab_file=FLAGS.vocab_file,
do_lower_case=FLAGS.do_lower_case,
spm_model_file=FLAGS.spm_model_file,
hub_module=FLAGS.albert_hub_module_handle)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2
if FLAGS.do_train:
iterations_per_loop = int(min(FLAGS.iterations_per_loop,
FLAGS.save_checkpoints_steps))
else:
iterations_per_loop = FLAGS.iterations_per_loop
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=int(FLAGS.save_checkpoints_steps),
keep_checkpoint_max=0,
tpu_config=contrib_tpu.TPUConfig(
iterations_per_loop=iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
model_fn = classifier_utils.model_fn_builder(
albert_config=albert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=FLAGS.train_step,
num_warmup_steps=FLAGS.warmup_step,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu,
task_name=task_name,
hub_module=FLAGS.albert_hub_module_handle,
optimizer=FLAGS.optimizer)
if not math.isnan(FLAGS.threshold_to_export):
model_fn = _add_threshold_to_model_fn(model_fn, FLAGS.threshold_to_export)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = contrib_tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size,
export_to_tpu=False) # http://yaqs/4707241341091840
if FLAGS.do_train:
cached_dir = FLAGS.cached_dir
if not cached_dir:
cached_dir = FLAGS.output_dir
train_file = os.path.join(cached_dir, task_name + "_train.tf_record")
if not tf.gfile.Exists(train_file):
classifier_utils.file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer,
train_file, task_name)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", FLAGS.train_step)
train_input_fn = classifier_utils.file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True,
task_name=task_name,
use_tpu=FLAGS.use_tpu,
bsz=FLAGS.train_batch_size)
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.train_step)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(classifier_utils.PaddingInputExample())
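      # Worked example (illustrative numbers): with 1043 dev examples and
      # eval_batch_size=8, 1043 % 8 == 3, so five PaddingInputExamples are
      # appended to reach 1048 == 131 * 8; the padded rows get weight 0.0
      # in the metrics, as noted above.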
cached_dir = FLAGS.cached_dir
if not cached_dir:
cached_dir = FLAGS.output_dir
eval_file = os.path.join(cached_dir, task_name + "_eval.tf_record")
if not tf.gfile.Exists(eval_file):
classifier_utils.file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer,
eval_file, task_name)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = classifier_utils.file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder,
task_name=task_name,
use_tpu=FLAGS.use_tpu,
bsz=FLAGS.eval_batch_size)
best_trial_info_file = os.path.join(FLAGS.output_dir, "best_trial.txt")
def _best_trial_info():
"""Returns information about which checkpoints have been evaled so far."""
if tf.gfile.Exists(best_trial_info_file):
with tf.gfile.GFile(best_trial_info_file, "r") as best_info:
global_step, best_metric_global_step, metric_value = (
best_info.read().split(":"))
global_step = int(global_step)
best_metric_global_step = int(best_metric_global_step)
metric_value = float(metric_value)
else:
metric_value = -1
best_metric_global_step = -1
global_step = -1
tf.logging.info(
"Best trial info: Step: %s, Best Value Step: %s, "
"Best Value: %s", global_step, best_metric_global_step, metric_value)
return global_step, best_metric_global_step, metric_value
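    # For example (hypothetical values), a best_trial.txt containing
    # "2000:1500:0.8834" means that checkpoints up to global step 2000 have
    # been evaluated and the best eval metric so far, 0.8834, was observed at
    # step 1500.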
def _remove_checkpoint(checkpoint_path):
for ext in ["meta", "data-00000-of-00001", "index"]:
src_ckpt = checkpoint_path + ".{}".format(ext)
tf.logging.info("removing {}".format(src_ckpt))
tf.gfile.Remove(src_ckpt)
def _find_valid_cands(curr_step):
filenames = tf.gfile.ListDirectory(FLAGS.output_dir)
candidates = []
for filename in filenames:
if filename.endswith(".index"):
ckpt_name = filename[:-6]
idx = ckpt_name.split("-")[-1]
          if idx != "best" and int(idx) > curr_step:
candidates.append(filename)
return candidates
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
if task_name == "sts-b":
key_name = "pearson"
elif task_name == "cola":
key_name = "matthew_corr"
else:
key_name = "eval_accuracy"
global_step, best_perf_global_step, best_perf = _best_trial_info()
writer = tf.gfile.GFile(output_eval_file, "w")
while global_step < FLAGS.train_step:
steps_and_files = {}
filenames = tf.gfile.ListDirectory(FLAGS.output_dir)
for filename in filenames:
if filename.endswith(".index"):
ckpt_name = filename[:-6]
cur_filename = os.path.join(FLAGS.output_dir, ckpt_name)
if cur_filename.split("-")[-1] == "best":
continue
gstep = int(cur_filename.split("-")[-1])
if gstep not in steps_and_files:
tf.logging.info("Add {} to eval list.".format(cur_filename))
steps_and_files[gstep] = cur_filename
tf.logging.info("found {} files.".format(len(steps_and_files)))
if not steps_and_files:
tf.logging.info("found 0 file, global step: {}. Sleeping."
.format(global_step))
time.sleep(60)
else:
for checkpoint in sorted(steps_and_files.items()):
step, checkpoint_path = checkpoint
if global_step >= step:
if (best_perf_global_step != step and
len(_find_valid_cands(step)) > 1):
_remove_checkpoint(checkpoint_path)
continue
result = estimator.evaluate(
input_fn=eval_input_fn,
steps=eval_steps,
checkpoint_path=checkpoint_path)
global_step = result["global_step"]
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
writer.write("best = {}\n".format(best_perf))
if result[key_name] > best_perf:
best_perf = result[key_name]
best_perf_global_step = global_step
elif len(_find_valid_cands(global_step)) > 1:
_remove_checkpoint(checkpoint_path)
writer.write("=" * 50 + "\n")
writer.flush()
with tf.gfile.GFile(best_trial_info_file, "w") as best_info:
best_info.write("{}:{}:{}".format(
global_step, best_perf_global_step, best_perf))
writer.close()
for ext in ["meta", "data-00000-of-00001", "index"]:
src_ckpt = "model.ckpt-{}.{}".format(best_perf_global_step, ext)
tgt_ckpt = "model.ckpt-best.{}".format(ext)
tf.logging.info("saving {} to {}".format(src_ckpt, tgt_ckpt))
tf.io.gfile.rename(
os.path.join(FLAGS.output_dir, src_ckpt),
os.path.join(FLAGS.output_dir, tgt_ckpt),
overwrite=True)
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(classifier_utils.PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
classifier_utils.file_based_convert_examples_to_features(
predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file, task_name)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = classifier_utils.file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder,
task_name=task_name,
use_tpu=FLAGS.use_tpu,
bsz=FLAGS.predict_batch_size)
checkpoint_path = os.path.join(FLAGS.output_dir, "model.ckpt-best")
result = estimator.predict(
input_fn=predict_input_fn,
checkpoint_path=checkpoint_path)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
output_submit_file = os.path.join(FLAGS.output_dir, "submit_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as pred_writer,\
tf.gfile.GFile(output_submit_file, "w") as sub_writer:
sub_writer.write("index" + "\t" + "prediction\n")
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, (example, prediction)) in\
enumerate(zip(predict_examples, result)):
probabilities = prediction["probabilities"]
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
pred_writer.write(output_line)
if task_name != "sts-b":
actual_label = label_list[int(prediction["predictions"])]
else:
actual_label = str(prediction["predictions"])
sub_writer.write(example.guid + "\t" + actual_label + "\n")
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if FLAGS.export_dir:
tf.gfile.MakeDirs(FLAGS.export_dir)
checkpoint_path = os.path.join(FLAGS.output_dir, "model.ckpt-best")
tf.logging.info("Starting to export model.")
subfolder = estimator.export_saved_model(
export_dir_base=FLAGS.export_dir,
serving_input_receiver_fn=_serving_input_receiver_fn,
checkpoint_path=checkpoint_path)
tf.logging.info("Model exported to %s.", subfolder)
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("spm_model_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper library for ALBERT fine-tuning.
This library can be used to construct ALBERT models for fine-tuning, either from
json config files or from TF-Hub modules.
"""
from albert import modeling
from albert import tokenization
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
def _create_model_from_hub(hub_module, is_training, input_ids, input_mask,
segment_ids):
"""Creates an ALBERT model from TF-Hub."""
tags = set()
if is_training:
tags.add("train")
albert_module = hub.Module(hub_module, tags=tags, trainable=True)
albert_inputs = dict(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids)
albert_outputs = albert_module(
inputs=albert_inputs,
signature="tokens",
as_dict=True)
return (albert_outputs["pooled_output"], albert_outputs["sequence_output"])
def _create_model_from_scratch(albert_config, is_training, input_ids,
input_mask, segment_ids, use_one_hot_embeddings,
use_einsum):
"""Creates an ALBERT model from scratch/config."""
model = modeling.AlbertModel(
config=albert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
use_einsum=use_einsum)
return (model.get_pooled_output(), model.get_sequence_output())
def create_albert(albert_config, is_training, input_ids, input_mask,
segment_ids, use_one_hot_embeddings, use_einsum, hub_module):
"""Creates an ALBERT, either from TF-Hub or from scratch."""
if hub_module:
tf.logging.info("creating model from hub_module: %s", hub_module)
return _create_model_from_hub(hub_module, is_training, input_ids,
input_mask, segment_ids)
else:
tf.logging.info("creating model from albert_config")
return _create_model_from_scratch(albert_config, is_training, input_ids,
input_mask, segment_ids,
use_one_hot_embeddings, use_einsum)
def create_vocab(vocab_file, do_lower_case, spm_model_file, hub_module):
"""Creates a vocab, either from vocab file or from a TF-Hub module."""
if hub_module:
use_spm = True if spm_model_file else False
return tokenization.FullTokenizer.from_hub_module(
hub_module=hub_module, use_spm=use_spm)
else:
return tokenization.FullTokenizer.from_scratch(
vocab_file=vocab_file, do_lower_case=do_lower_case,
spm_model_file=spm_model_file)
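# A minimal usage sketch (hypothetical file names and placeholder tensors, not
# part of this library). Building a tokenizer and an ALBERT encoder from a
# json config would look roughly like:
#
#   albert_config = modeling.AlbertConfig.from_json_file("albert_config.json")
#   tokenizer = create_vocab(
#       vocab_file=None, do_lower_case=True,
#       spm_model_file="30k-clean.model", hub_module=None)
#   pooled_output, sequence_output = create_albert(
#       albert_config, is_training=False, input_ids=input_ids,
#       input_mask=input_mask, segment_ids=segment_ids,
#       use_one_hot_embeddings=False, use_einsum=True, hub_module=None)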
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ALBERT finetuning runner with sentence piece tokenization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from albert import classifier_utils
from albert import fine_tuning_utils
from albert import modeling
from albert import race_utils
import tensorflow.compat.v1 as tf
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"albert_config_file", None,
"The config json file corresponding to the pre-trained ALBERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", "race", "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the ALBERT model was trained on.")
flags.DEFINE_string("train_file", None,
"path to preprocessed tfrecord file. "
"The file will be generated if not exst.")
flags.DEFINE_string("eval_file", None,
"path to preprocessed tfrecord file. "
"The file will be generated if not exst.")
flags.DEFINE_string("predict_file", None,
"path to preprocessed tfrecord file. "
"The file will be generated if not exst.")
flags.DEFINE_string("spm_model_file", None,
"The model file for sentence piece tokenization.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained ALBERT model).")
flags.DEFINE_string(
"albert_hub_module_handle", None,
"If set, the ALBERT hub module to use.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_float("dropout_prob", 0.1, "dropout probability.")
flags.DEFINE_integer(
"max_seq_length", 512,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"max_qa_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"num_keep_checkpoint", 5,
"maximum number of keep checkpoints")
flags.DEFINE_bool(
"high_only", False,
"Whether to only run the model on the high school set.")
flags.DEFINE_bool(
"middle_only", False,
"Whether to only run the model on the middle school set.")
flags.DEFINE_bool("do_train", True, "Whether to run training.")
flags.DEFINE_bool("do_eval", True, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 1e-5, "The initial learning rate for Adam.")
flags.DEFINE_integer("train_step", 12000,
"Total number of training epochs to perform.")
flags.DEFINE_integer(
"warmup_step", 1000,
"number of steps to perform linear learning rate warmup for.")
flags.DEFINE_integer("save_checkpoints_steps", 100,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"race": race_utils.RaceProcessor
}
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
albert_config = modeling.AlbertConfig.from_json_file(FLAGS.albert_config_file)
if FLAGS.max_seq_length > albert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the ALBERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, albert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name](
use_spm=True if FLAGS.spm_model_file else False,
do_lower_case=FLAGS.do_lower_case,
high_only=FLAGS.high_only,
middle_only=FLAGS.middle_only)
label_list = processor.get_labels()
tokenizer = fine_tuning_utils.create_vocab(
vocab_file=FLAGS.vocab_file,
do_lower_case=FLAGS.do_lower_case,
spm_model_file=FLAGS.spm_model_file,
hub_module=FLAGS.albert_hub_module_handle)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2
if FLAGS.do_train:
iterations_per_loop = int(min(FLAGS.iterations_per_loop,
FLAGS.save_checkpoints_steps))
else:
iterations_per_loop = FLAGS.iterations_per_loop
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=int(FLAGS.save_checkpoints_steps),
keep_checkpoint_max=0,
tpu_config=contrib_tpu.TPUConfig(
iterations_per_loop=iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
model_fn = race_utils.model_fn_builder(
albert_config=albert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=FLAGS.train_step,
num_warmup_steps=FLAGS.warmup_step,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu,
max_seq_length=FLAGS.max_seq_length,
dropout_prob=FLAGS.dropout_prob,
hub_module=FLAGS.albert_hub_module_handle)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = contrib_tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
if not tf.gfile.Exists(FLAGS.train_file):
race_utils.file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer,
FLAGS.train_file, FLAGS.max_qa_length)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", FLAGS.train_step)
train_input_fn = classifier_utils.file_based_input_fn_builder(
input_file=FLAGS.train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True,
task_name=task_name,
use_tpu=FLAGS.use_tpu,
bsz=FLAGS.train_batch_size,
multiple=len(label_list))
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.train_step)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(classifier_utils.PaddingInputExample())
if not tf.gfile.Exists(FLAGS.eval_file):
race_utils.file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer,
FLAGS.eval_file, FLAGS.max_qa_length)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = classifier_utils.file_based_input_fn_builder(
input_file=FLAGS.eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder,
task_name=task_name,
use_tpu=FLAGS.use_tpu,
bsz=FLAGS.eval_batch_size,
multiple=len(label_list))
def _find_valid_cands(curr_step):
filenames = tf.gfile.ListDirectory(FLAGS.output_dir)
candidates = []
for filename in filenames:
if filename.endswith(".index"):
ckpt_name = filename[:-6]
idx = ckpt_name.split("-")[-1]
if idx != "best" and int(idx) > curr_step:
candidates.append(filename)
return candidates
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
checkpoint_path = os.path.join(FLAGS.output_dir, "model.ckpt-best")
key_name = "eval_accuracy"
if tf.gfile.Exists(checkpoint_path + ".index"):
result = estimator.evaluate(
input_fn=eval_input_fn,
steps=eval_steps,
checkpoint_path=checkpoint_path)
best_perf = result[key_name]
global_step = result["global_step"]
else:
global_step = -1
best_perf = -1
checkpoint_path = None
writer = tf.gfile.GFile(output_eval_file, "w")
while global_step < FLAGS.train_step:
steps_and_files = {}
filenames = tf.gfile.ListDirectory(FLAGS.output_dir)
for filename in filenames:
if filename.endswith(".index"):
ckpt_name = filename[:-6]
cur_filename = os.path.join(FLAGS.output_dir, ckpt_name)
if cur_filename.split("-")[-1] == "best":
continue
gstep = int(cur_filename.split("-")[-1])
if gstep not in steps_and_files:
tf.logging.info("Add {} to eval list.".format(cur_filename))
steps_and_files[gstep] = cur_filename
tf.logging.info("found {} files.".format(len(steps_and_files)))
# steps_and_files = sorted(steps_and_files, key=lambda x: x[0])
if not steps_and_files:
tf.logging.info("found 0 file, global step: {}. Sleeping."
.format(global_step))
time.sleep(1)
else:
for ele in sorted(steps_and_files.items()):
step, checkpoint_path = ele
if global_step >= step:
if len(_find_valid_cands(step)) > 1:
for ext in ["meta", "data-00000-of-00001", "index"]:
src_ckpt = checkpoint_path + ".{}".format(ext)
tf.logging.info("removing {}".format(src_ckpt))
tf.gfile.Remove(src_ckpt)
continue
result = estimator.evaluate(
input_fn=eval_input_fn,
steps=eval_steps,
checkpoint_path=checkpoint_path)
global_step = result["global_step"]
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
writer.write("best = {}\n".format(best_perf))
if result[key_name] > best_perf:
best_perf = result[key_name]
for ext in ["meta", "data-00000-of-00001", "index"]:
src_ckpt = checkpoint_path + ".{}".format(ext)
tgt_ckpt = checkpoint_path.rsplit("-", 1)[0] + "-best.{}".format(ext)
tf.logging.info("saving {} to {}".format(src_ckpt, tgt_ckpt))
tf.gfile.Copy(src_ckpt, tgt_ckpt, overwrite=True)
writer.write("saved {} to {}\n".format(src_ckpt, tgt_ckpt))
if len(_find_valid_cands(global_step)) > 1:
for ext in ["meta", "data-00000-of-00001", "index"]:
src_ckpt = checkpoint_path + ".{}".format(ext)
tf.logging.info("removing {}".format(src_ckpt))
tf.gfile.Remove(src_ckpt)
writer.write("=" * 50 + "\n")
writer.close()
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(classifier_utils.PaddingInputExample())
assert len(predict_examples) % FLAGS.predict_batch_size == 0
predict_steps = int(len(predict_examples) // FLAGS.predict_batch_size)
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
race_utils.file_based_convert_examples_to_features(
predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file, FLAGS.max_qa_length)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = classifier_utils.file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder,
task_name=task_name,
use_tpu=FLAGS.use_tpu,
bsz=FLAGS.predict_batch_size,
multiple=len(label_list))
checkpoint_path = os.path.join(FLAGS.output_dir, "model.ckpt-best")
result = estimator.evaluate(
input_fn=predict_input_fn,
steps=predict_steps,
checkpoint_path=checkpoint_path)
output_predict_file = os.path.join(FLAGS.output_dir, "predict_results.txt")
with tf.gfile.GFile(output_predict_file, "w") as pred_writer:
# num_written_lines = 0
tf.logging.info("***** Predict results *****")
pred_writer.write("***** Predict results *****\n")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
pred_writer.write("%s = %s\n" % (key, str(result[key])))
pred_writer.write("best = {}\n".format(best_perf))
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("spm_model_file")
flags.mark_flag_as_required("albert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import six
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
# pylint: enable=g-direct-tensorflow-import
class LAMBOptimizer(tf.train.Optimizer):
"""LAMB (Layer-wise Adaptive Moments optimizer for Batch training)."""
  # A new optimizer that includes correct L2 weight decay, adaptive
  # element-wise updating, and layer-wise adaptation. The LAMB optimizer
  # was proposed by Yang You, Jing Li, Jonathan Hseu, Xiaodan Song,
  # James Demmel, and Cho-Jui Hsieh in the paper "Reducing BERT
  # Pre-Training Time from 3 Days to 76 Minutes" (arxiv.org/abs/1904.00962).
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
exclude_from_layer_adaptation=None,
name="LAMBOptimizer"):
"""Constructs a LAMBOptimizer."""
super(LAMBOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
# exclude_from_layer_adaptation is set to exclude_from_weight_decay if the
# arg is None.
# TODO(jingli): validate if exclude_from_layer_adaptation is necessary.
if exclude_from_layer_adaptation:
self.exclude_from_layer_adaptation = exclude_from_layer_adaptation
else:
self.exclude_from_layer_adaptation = exclude_from_weight_decay
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""See base class."""
assignments = []
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
m = tf.get_variable(
name=six.ensure_str(param_name) + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.get_variable(
name=six.ensure_str(param_name) + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
# Standard Adam update.
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
      # Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param
ratio = 1.0
if self._do_layer_adaptation(param_name):
w_norm = linalg_ops.norm(param, ord=2)
g_norm = linalg_ops.norm(update, ord=2)
ratio = array_ops.where(math_ops.greater(w_norm, 0), array_ops.where(
math_ops.greater(g_norm, 0), (w_norm / g_norm), 1.0), 1.0)
update_with_lr = ratio * self.learning_rate * update
next_param = param - update_with_lr
assignments.extend(
[param.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _do_layer_adaptation(self, param_name):
"""Whether to do layer-wise learning rate adaptation for `param_name`."""
if self.exclude_from_layer_adaptation:
for r in self.exclude_from_layer_adaptation:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", six.ensure_str(param_name))
if m is not None:
param_name = m.group(1)
return param_name
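# The following is an illustrative, hedged sketch (not part of the original
# optimizer and not used by any training code): it replays the update rule
# implemented in `apply_gradients` above for a single scalar parameter in
# plain Python, so the moment updates, the decoupled weight decay, and the
# layer-wise trust ratio can be checked by hand. All names and default values
# here are hypothetical.
def _lamb_reference_step(param, grad, m, v, learning_rate,
                         beta_1=0.9, beta_2=0.999, epsilon=1e-6,
                         weight_decay_rate=0.01):
  """Returns (next_param, next_m, next_v) for one scalar parameter."""
  import math
  # Standard Adam moment updates.
  next_m = beta_1 * m + (1.0 - beta_1) * grad
  next_v = beta_2 * v + (1.0 - beta_2) * grad * grad
  update = next_m / (math.sqrt(next_v) + epsilon)
  # Decoupled weight decay: added to the update, not folded into the loss.
  update += weight_decay_rate * param
  # Layer-wise trust ratio ||w|| / ||update||; for a scalar this reduces to
  # |w| / |update|, with the same zero-norm guards as the graph version.
  w_norm = abs(param)
  g_norm = abs(update)
  ratio = w_norm / g_norm if (w_norm > 0 and g_norm > 0) else 1.0
  next_param = param - ratio * learning_rate * update
  return next_param, next_m, next_v
# For example, _lamb_reference_step(0.5, 0.1, 0.0, 0.0, 1e-3) returns a next
# parameter of 0.4995: when the trust ratio is active, the step magnitude is
# exactly learning_rate * |param| for a scalar parameter.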
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
"""Create masked LM/next sentence masked_lm TF examples for ALBERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
from albert import tokenization
import numpy as np
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("input_file", None,
"Input raw text file (or comma-separated list of files).")
flags.DEFINE_string(
"output_file", None,
"Output TF example file (or comma-separated list of files).")
flags.DEFINE_string(
"vocab_file", None,
"The vocabulary file that the ALBERT model was trained on.")
flags.DEFINE_string("spm_model_file", None,
"The model file for sentence piece tokenization.")
flags.DEFINE_string("input_file_mode", "r",
"The data format of the input file.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_bool(
"do_whole_word_mask", True,
"Whether to use whole word masking rather than per-WordPiece masking.")
flags.DEFINE_bool(
"do_permutation", False,
"Whether to do the permutation training.")
flags.DEFINE_bool(
"favor_shorter_ngram", True,
"Whether to set higher probabilities for sampling shorter ngrams.")
flags.DEFINE_bool(
"random_next_sentence", False,
"Whether to use the sentence that's right before the current sentence "
"as the negative sample for next sentence prection, rather than using "
"sentences from other random documents.")
flags.DEFINE_integer("max_seq_length", 512, "Maximum sequence length.")
flags.DEFINE_integer("ngram", 3, "Maximum number of ngrams to mask.")
flags.DEFINE_integer("max_predictions_per_seq", 20,
"Maximum number of masked LM predictions per sequence.")
flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.")
flags.DEFINE_integer(
"dupe_factor", 40,
"Number of times to duplicate the input data (with different masks).")
flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.")
flags.DEFINE_float(
"short_seq_prob", 0.1,
"Probability of creating sequences which are shorter than the "
"maximum length.")
class TrainingInstance(object):
"""A single training instance (sentence pair)."""
def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels,
is_random_next, token_boundary):
self.tokens = tokens
self.segment_ids = segment_ids
self.is_random_next = is_random_next
self.token_boundary = token_boundary
self.masked_lm_positions = masked_lm_positions
self.masked_lm_labels = masked_lm_labels
def __str__(self):
s = ""
s += "tokens: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.tokens]))
s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids]))
s += "token_boundary: %s\n" % (" ".join(
[str(x) for x in self.token_boundary]))
s += "is_random_next: %s\n" % self.is_random_next
s += "masked_lm_positions: %s\n" % (" ".join(
[str(x) for x in self.masked_lm_positions]))
s += "masked_lm_labels: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.masked_lm_labels]))
s += "\n"
return s
def __repr__(self):
return self.__str__()
def write_instance_to_example_files(instances, tokenizer, max_seq_length,
max_predictions_per_seq, output_files):
"""Create TF example files from `TrainingInstance`s."""
writers = []
for output_file in output_files:
writers.append(tf.python_io.TFRecordWriter(output_file))
writer_index = 0
total_written = 0
for (inst_index, instance) in enumerate(instances):
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
token_boundary = list(instance.token_boundary)
assert len(input_ids) <= max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
token_boundary.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
multiplier = 1 + int(FLAGS.do_permutation)
while len(masked_lm_positions) < max_predictions_per_seq * multiplier:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
sentence_order_label = 1 if instance.is_random_next else 0
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(input_ids)
features["input_mask"] = create_int_feature(input_mask)
features["segment_ids"] = create_int_feature(segment_ids)
features["token_boundary"] = create_int_feature(token_boundary)
features["masked_lm_positions"] = create_int_feature(masked_lm_positions)
features["masked_lm_ids"] = create_int_feature(masked_lm_ids)
features["masked_lm_weights"] = create_float_feature(masked_lm_weights)
# Note: We keep this feature name `next_sentence_labels` to be compatible
# with the original data created by lanzhzh@. However, in the ALBERT case
    # it actually contains the sentence_order_label.
features["next_sentence_labels"] = create_int_feature(
[sentence_order_label])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writers[writer_index].write(tf_example.SerializeToString())
writer_index = (writer_index + 1) % len(writers)
total_written += 1
if inst_index < 20:
tf.logging.info("*** Example ***")
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in instance.tokens]))
for feature_name in features.keys():
feature = features[feature_name]
values = []
if feature.int64_list.value:
values = feature.int64_list.value
elif feature.float_list.value:
values = feature.float_list.value
tf.logging.info(
"%s: %s" % (feature_name, " ".join([str(x) for x in values])))
for writer in writers:
writer.close()
tf.logging.info("Wrote %d total instances", total_written)
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
def create_float_feature(values):
feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return feature
def create_training_instances(input_files, tokenizer, max_seq_length,
dupe_factor, short_seq_prob, masked_lm_prob,
max_predictions_per_seq, rng):
"""Create `TrainingInstance`s from raw text."""
all_documents = [[]]
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
for input_file in input_files:
with tf.gfile.GFile(input_file, FLAGS.input_file_mode) as reader:
while True:
line = reader.readline()
if not FLAGS.spm_model_file:
line = tokenization.convert_to_unicode(line)
if not line:
break
if FLAGS.spm_model_file:
line = tokenization.preprocess_text(line, lower=FLAGS.do_lower_case)
else:
line = line.strip()
# Empty lines are used as document delimiters
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
# Remove empty documents
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
vocab_words = list(tokenizer.vocab.keys())
instances = []
for _ in range(dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(
create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng))
rng.shuffle(instances)
return instances
def create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[document_index]
# Account for [CLS], [SEP], [SEP]
max_num_tokens = max_seq_length - 3
# We *usually* want to fill up the entire sequence since we are padding
# to `max_seq_length` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pre-training and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `max_seq_length` is a hard limit.
target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:
target_seq_length = rng.randint(2, max_num_tokens)
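  # For example, with the default max_seq_length of 512, max_num_tokens is
  # 509; about 10% of the time (short_seq_prob = 0.1) the target length is
  # instead drawn uniformly from [2, 509].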
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = rng.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
# Random next
is_random_next = False
if len(current_chunk) == 1 or \
(FLAGS.random_next_sentence and rng.random() < 0.5):
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
# This should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document
# we're processing.
for _ in range(10):
random_document_index = rng.randint(0, len(all_documents) - 1)
if random_document_index != document_index:
break
random_document = all_documents[random_document_index]
random_start = rng.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we "put them back" so
# they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
elif not FLAGS.random_next_sentence and rng.random() < 0.5:
is_random_next = True
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
# Note(mingdachen): in this case, we just swap tokens_a and tokens_b
tokens_a, tokens_b = tokens_b, tokens_a
# Actual next
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
(tokens, masked_lm_positions,
masked_lm_labels, token_boundary) = create_masked_lm_predictions(
tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)
instance = TrainingInstance(
tokens=tokens,
segment_ids=segment_ids,
is_random_next=is_random_next,
token_boundary=token_boundary,
masked_lm_positions=masked_lm_positions,
masked_lm_labels=masked_lm_labels)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
["index", "label"])
def _is_start_piece_sp(piece):
"""Check if the current word piece is the starting piece (sentence piece)."""
special_pieces = set(list('!"#$%&\"()*+,-./:;?@[\\]^_`{|}~'))
special_pieces.add(u"€".encode("utf-8"))
special_pieces.add(u"£".encode("utf-8"))
# Note(mingdachen):
# For foreign characters, we always treat them as a whole piece.
english_chars = set(list("abcdefghijklmnopqrstuvwxyz"))
if (six.ensure_str(piece).startswith("▁") or
six.ensure_str(piece).startswith("<") or piece in special_pieces or
not all([i.lower() in english_chars.union(special_pieces)
for i in piece])):
return True
else:
return False
def _is_start_piece_bert(piece):
"""Check if the current word piece is the starting piece (BERT)."""
# When a word has been split into
# WordPieces, the first token does not have any marker and any subsequence
# tokens are prefixed with ##. So whenever we see the ## token, we
# append it to the previous set of word indexes.
return not six.ensure_str(piece).startswith("##")
def is_start_piece(piece):
if FLAGS.spm_model_file:
return _is_start_piece_sp(piece)
else:
return _is_start_piece_bert(piece)
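# For example (assuming a sentence piece vocabulary), "▁quick" is a starting
# piece while "ly" continues the previous word; with a BERT WordPiece
# vocabulary, "quick" is a starting piece while "##ly" is not.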
def create_masked_lm_predictions(tokens, masked_lm_prob,
max_predictions_per_seq, vocab_words, rng):
"""Creates the predictions for the masked LM objective."""
cand_indexes = []
# Note(mingdachen): We create a list for recording if the piece is
# the starting piece of current token, where 1 means true, so that
# on-the-fly whole word masking is possible.
token_boundary = [0] * len(tokens)
for (i, token) in enumerate(tokens):
if token == "[CLS]" or token == "[SEP]":
token_boundary[i] = 1
continue
    # Whole Word Masking means that we mask all of the wordpieces
    # corresponding to an original word.
#
# Note that Whole Word Masking does *not* change the training code
# at all -- we still predict each WordPiece independently, softmaxed
# over the entire vocabulary.
if (FLAGS.do_whole_word_mask and len(cand_indexes) >= 1 and
not is_start_piece(token)):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
if is_start_piece(token):
token_boundary[i] = 1
output_tokens = list(tokens)
masked_lm_positions = []
masked_lm_labels = []
if masked_lm_prob == 0:
return (output_tokens, masked_lm_positions,
masked_lm_labels, token_boundary)
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
# Note(mingdachen):
  # By default, we set the probabilities to favor shorter ngram sequences.
ngrams = np.arange(1, FLAGS.ngram + 1, dtype=np.int64)
pvals = 1. / np.arange(1, FLAGS.ngram + 1)
pvals /= pvals.sum(keepdims=True)
if not FLAGS.favor_shorter_ngram:
pvals = pvals[::-1]
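  # For example, with the default ngram = 3 the unnormalized weights are
  # [1, 1/2, 1/3], which normalize to roughly [0.545, 0.273, 0.182] for 1-,
  # 2- and 3-grams. Likewise, a 512-token sequence with masked_lm_prob = 0.15
  # targets round(512 * 0.15) = 77 predictions, capped at
  # max_predictions_per_seq (20 by default).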
ngram_indexes = []
for idx in range(len(cand_indexes)):
ngram_index = []
for n in ngrams:
ngram_index.append(cand_indexes[idx:idx+n])
ngram_indexes.append(ngram_index)
rng.shuffle(ngram_indexes)
masked_lms = []
covered_indexes = set()
for cand_index_set in ngram_indexes:
if len(masked_lms) >= num_to_predict:
break
if not cand_index_set:
continue
# Note(mingdachen):
# Skip current piece if they are covered in lm masking or previous ngrams.
for index_set in cand_index_set[0]:
for index in index_set:
if index in covered_indexes:
continue
n = np.random.choice(ngrams[:len(cand_index_set)],
p=pvals[:len(cand_index_set)] /
pvals[:len(cand_index_set)].sum(keepdims=True))
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# Note(mingdachen):
# Repeatedly looking for a candidate that does not exceed the
# maximum number of predictions by trying shorter ngrams.
while len(masked_lms) + len(index_set) > num_to_predict:
if n == 0:
break
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_token = None
# 80% of the time, replace with [MASK]
if rng.random() < 0.8:
masked_token = "[MASK]"
else:
# 10% of the time, keep original
if rng.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
assert len(masked_lms) <= num_to_predict
rng.shuffle(ngram_indexes)
select_indexes = set()
if FLAGS.do_permutation:
for cand_index_set in ngram_indexes:
if len(select_indexes) >= num_to_predict:
break
if not cand_index_set:
continue
# Note(mingdachen):
# Skip current piece if they are covered in lm masking or previous ngrams.
for index_set in cand_index_set[0]:
for index in index_set:
if index in covered_indexes or index in select_indexes:
continue
n = np.random.choice(ngrams[:len(cand_index_set)],
p=pvals[:len(cand_index_set)] /
pvals[:len(cand_index_set)].sum(keepdims=True))
index_set = sum(cand_index_set[n - 1], [])
n -= 1
while len(select_indexes) + len(index_set) > num_to_predict:
if n == 0:
break
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(select_indexes) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes or index in select_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
select_indexes.add(index)
assert len(select_indexes) <= num_to_predict
select_indexes = sorted(select_indexes)
permute_indexes = list(select_indexes)
rng.shuffle(permute_indexes)
orig_token = list(output_tokens)
for src_i, tgt_i in zip(select_indexes, permute_indexes):
output_tokens[src_i] = orig_token[tgt_i]
masked_lms.append(MaskedLmInstance(index=src_i, label=orig_token[src_i]))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels, token_boundary)
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if rng.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
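# For example (hypothetical lengths), if tokens_a has 300 tokens, tokens_b has
# 250 and max_num_tokens is 509, the loop removes 41 tokens, always trimming
# the currently longer sequence and dropping from the front or the back at
# random.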
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case,
spm_model_file=FLAGS.spm_model_file)
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Reading from input files ***")
for input_file in input_files:
tf.logging.info(" %s", input_file)
rng = random.Random(FLAGS.random_seed)
instances = create_training_instances(
input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor,
FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq,
rng)
tf.logging.info("number of instances: %i", len(instances))
output_files = FLAGS.output_file.split(",")
tf.logging.info("*** Writing to output files ***")
for output_file in output_files:
tf.logging.info(" %s", output_file)
write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length,
FLAGS.max_predictions_per_seq, output_files)
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("output_file")
flags.mark_flag_as_required("vocab_file")
tf.app.run()
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run ALBERT on SQuAD v2.0 using sentence piece tokenization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import random
import time
from albert import fine_tuning_utils
from albert import modeling
from albert import squad_utils
import six
import tensorflow.compat.v1 as tf
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
# pylint: disable=g-import-not-at-top
if six.PY2:
import six.moves.cPickle as pickle
else:
import pickle
# pylint: enable=g-import-not-at-top
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"albert_config_file", None,
"The config json file corresponding to the pre-trained ALBERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the ALBERT model was trained on.")
flags.DEFINE_string("spm_model_file", None,
"The model file for sentence piece tokenization.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string("train_file", None,
"SQuAD json for training. E.g., train-v1.1.json")
flags.DEFINE_string(
"predict_file", None,
"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
flags.DEFINE_string("train_feature_file", None,
"training feature file.")
flags.DEFINE_string(
"predict_feature_file", None,
"Location of predict features. If it doesn't exist, it will be written. "
"If it does exist, it will be read.")
flags.DEFINE_string(
"predict_feature_left_file", None,
"Location of predict features not passed to TPU. If it doesn't exist, it "
"will be written. If it does exist, it will be read.")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string(
"albert_hub_module_handle", None,
"If set, the ALBERT hub module to use.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 384,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"doc_stride", 128,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer(
"max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", 8,
"Total batch size for predictions.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer(
"n_best_size", 20,
"The total number of n-best predictions to generate in the "
"nbest_predictions.json output file.")
flags.DEFINE_integer(
"max_answer_length", 30,
"The maximum length of an answer that can be generated. This is needed "
"because the start and end predictions are not conditioned on one another.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_integer("start_n_top", 5, "beam size for the start positions.")
flags.DEFINE_integer("end_n_top", 5, "beam size for the end positions.")
flags.DEFINE_float("dropout_prob", 0.1, "dropout probability.")
def validate_flags_or_throw(albert_config):
"""Validate the input FLAGS or throw an exception."""
if not FLAGS.do_train and not FLAGS.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if FLAGS.do_train:
if not FLAGS.train_file:
raise ValueError(
"If `do_train` is True, then `train_file` must be specified.")
if FLAGS.do_predict:
if not FLAGS.predict_file:
raise ValueError(
"If `do_predict` is True, then `predict_file` must be specified.")
if not FLAGS.predict_feature_file:
raise ValueError(
"If `do_predict` is True, then `predict_feature_file` must be "
"specified.")
if not FLAGS.predict_feature_left_file:
raise ValueError(
"If `do_predict` is True, then `predict_feature_left_file` must be "
"specified.")
if FLAGS.max_seq_length > albert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the ALBERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, albert_config.max_position_embeddings))
if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
raise ValueError(
"The max_seq_length (%d) must be greater than max_query_length "
"(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
albert_config = modeling.AlbertConfig.from_json_file(FLAGS.albert_config_file)
validate_flags_or_throw(albert_config)
tf.gfile.MakeDirs(FLAGS.output_dir)
tokenizer = fine_tuning_utils.create_vocab(
vocab_file=FLAGS.vocab_file,
do_lower_case=FLAGS.do_lower_case,
spm_model_file=FLAGS.spm_model_file,
hub_module=FLAGS.albert_hub_module_handle)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2
if FLAGS.do_train:
iterations_per_loop = int(min(FLAGS.iterations_per_loop,
FLAGS.save_checkpoints_steps))
else:
iterations_per_loop = FLAGS.iterations_per_loop
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
keep_checkpoint_max=0,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=contrib_tpu.TPUConfig(
iterations_per_loop=iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
train_examples = squad_utils.read_squad_examples(
input_file=FLAGS.train_file, is_training=True)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
if FLAGS.do_train:
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
    # Pre-shuffle the input to avoid having to make a very large shuffle
    # buffer in the `input_fn`.
rng = random.Random(12345)
rng.shuffle(train_examples)
model_fn = squad_utils.v2_model_fn_builder(
albert_config=albert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu,
max_seq_length=FLAGS.max_seq_length,
start_n_top=FLAGS.start_n_top,
end_n_top=FLAGS.end_n_top,
dropout_prob=FLAGS.dropout_prob,
hub_module=FLAGS.albert_hub_module_handle)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = contrib_tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
# We write to a temporary file to avoid storing very large constant tensors
# in memory.
if not tf.gfile.Exists(FLAGS.train_feature_file):
train_writer = squad_utils.FeatureWriter(
filename=os.path.join(FLAGS.train_feature_file), is_training=True)
squad_utils.convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=True,
output_fn=train_writer.process_feature,
do_lower_case=FLAGS.do_lower_case)
train_writer.close()
tf.logging.info("***** Running training *****")
tf.logging.info(" Num orig examples = %d", len(train_examples))
# tf.logging.info(" Num split examples = %d", train_writer.num_features)
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
del train_examples
train_input_fn = squad_utils.input_fn_builder(
input_file=FLAGS.train_feature_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True,
use_tpu=FLAGS.use_tpu,
bsz=FLAGS.train_batch_size,
is_v2=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_predict:
with tf.gfile.Open(FLAGS.predict_file) as predict_file:
prediction_json = json.load(predict_file)["data"]
eval_examples = squad_utils.read_squad_examples(
input_file=FLAGS.predict_file, is_training=False)
if (tf.gfile.Exists(FLAGS.predict_feature_file) and tf.gfile.Exists(
FLAGS.predict_feature_left_file)):
tf.logging.info("Loading eval features from {}".format(
FLAGS.predict_feature_left_file))
with tf.gfile.Open(FLAGS.predict_feature_left_file, "rb") as fin:
eval_features = pickle.load(fin)
else:
eval_writer = squad_utils.FeatureWriter(
filename=FLAGS.predict_feature_file, is_training=False)
eval_features = []
def append_feature(feature):
eval_features.append(feature)
eval_writer.process_feature(feature)
squad_utils.convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=False,
output_fn=append_feature,
do_lower_case=FLAGS.do_lower_case)
eval_writer.close()
with tf.gfile.Open(FLAGS.predict_feature_left_file, "wb") as fout:
pickle.dump(eval_features, fout)
tf.logging.info("***** Running predictions *****")
tf.logging.info(" Num orig examples = %d", len(eval_examples))
tf.logging.info(" Num split examples = %d", len(eval_features))
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_input_fn = squad_utils.input_fn_builder(
input_file=FLAGS.predict_feature_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=False,
use_tpu=FLAGS.use_tpu,
bsz=FLAGS.predict_batch_size,
is_v2=True)
def get_result(checkpoint):
"""Evaluate the checkpoint on SQuAD v2.0."""
# If running eval on the TPU, you will need to specify the number of
# steps.
reader = tf.train.NewCheckpointReader(checkpoint)
global_step = reader.get_tensor(tf.GraphKeys.GLOBAL_STEP)
all_results = []
for result in estimator.predict(
predict_input_fn, yield_single_examples=True,
checkpoint_path=checkpoint):
if len(all_results) % 1000 == 0:
tf.logging.info("Processing example: %d" % (len(all_results)))
unique_id = int(result["unique_ids"])
start_top_log_probs = (
[float(x) for x in result["start_top_log_probs"].flat])
start_top_index = [int(x) for x in result["start_top_index"].flat]
end_top_log_probs = (
[float(x) for x in result["end_top_log_probs"].flat])
end_top_index = [int(x) for x in result["end_top_index"].flat]
cls_logits = float(result["cls_logits"].flat[0])
all_results.append(
squad_utils.RawResultV2(
unique_id=unique_id,
start_top_log_probs=start_top_log_probs,
start_top_index=start_top_index,
end_top_log_probs=end_top_log_probs,
end_top_index=end_top_index,
cls_logits=cls_logits))
output_prediction_file = os.path.join(
FLAGS.output_dir, "predictions.json")
output_nbest_file = os.path.join(
FLAGS.output_dir, "nbest_predictions.json")
output_null_log_odds_file = os.path.join(
FLAGS.output_dir, "null_odds.json")
result_dict = {}
cls_dict = {}
squad_utils.accumulate_predictions_v2(
result_dict, cls_dict, eval_examples, eval_features,
all_results, FLAGS.n_best_size, FLAGS.max_answer_length,
FLAGS.start_n_top, FLAGS.end_n_top)
return squad_utils.evaluate_v2(
result_dict, cls_dict, prediction_json, eval_examples,
eval_features, all_results, FLAGS.n_best_size,
FLAGS.max_answer_length, output_prediction_file, output_nbest_file,
output_null_log_odds_file), int(global_step)
def _find_valid_cands(curr_step):
filenames = tf.gfile.ListDirectory(FLAGS.output_dir)
candidates = []
for filename in filenames:
if filename.endswith(".index"):
ckpt_name = filename[:-6]
idx = ckpt_name.split("-")[-1]
if idx != "best" and int(idx) > curr_step:
candidates.append(filename)
return candidates
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
checkpoint_path = os.path.join(FLAGS.output_dir, "model.ckpt-best")
key_name = "f1"
writer = tf.gfile.GFile(output_eval_file, "w")
if tf.gfile.Exists(checkpoint_path + ".index"):
result = get_result(checkpoint_path)
best_perf = result[0][key_name]
global_step = result[1]
else:
global_step = -1
best_perf = -1
checkpoint_path = None
while global_step < num_train_steps:
steps_and_files = {}
filenames = tf.gfile.ListDirectory(FLAGS.output_dir)
for filename in filenames:
if filename.endswith(".index"):
ckpt_name = filename[:-6]
cur_filename = os.path.join(FLAGS.output_dir, ckpt_name)
if cur_filename.split("-")[-1] == "best":
continue
gstep = int(cur_filename.split("-")[-1])
if gstep not in steps_and_files:
tf.logging.info("Add {} to eval list.".format(cur_filename))
steps_and_files[gstep] = cur_filename
tf.logging.info("found {} files.".format(len(steps_and_files)))
if not steps_and_files:
tf.logging.info("found 0 file, global step: {}. Sleeping."
.format(global_step))
time.sleep(60)
else:
for ele in sorted(steps_and_files.items()):
step, checkpoint_path = ele
if global_step >= step:
if len(_find_valid_cands(step)) > 1:
for ext in ["meta", "data-00000-of-00001", "index"]:
src_ckpt = checkpoint_path + ".{}".format(ext)
tf.logging.info("removing {}".format(src_ckpt))
tf.gfile.Remove(src_ckpt)
continue
result, global_step = get_result(checkpoint_path)
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if result[key_name] > best_perf:
best_perf = result[key_name]
for ext in ["meta", "data-00000-of-00001", "index"]:
src_ckpt = checkpoint_path + ".{}".format(ext)
tgt_ckpt = checkpoint_path.rsplit(
"-", 1)[0] + "-best.{}".format(ext)
tf.logging.info("saving {} to {}".format(src_ckpt, tgt_ckpt))
tf.gfile.Copy(src_ckpt, tgt_ckpt, overwrite=True)
writer.write("saved {} to {}\n".format(src_ckpt, tgt_ckpt))
writer.write("best {} = {}\n".format(key_name, best_perf))
tf.logging.info(" best {} = {}\n".format(key_name, best_perf))
if len(_find_valid_cands(global_step)) > 2:
for ext in ["meta", "data-00000-of-00001", "index"]:
src_ckpt = checkpoint_path + ".{}".format(ext)
tf.logging.info("removing {}".format(src_ckpt))
tf.gfile.Remove(src_ckpt)
writer.write("=" * 50 + "\n")
checkpoint_path = os.path.join(FLAGS.output_dir, "model.ckpt-best")
result, global_step = get_result(checkpoint_path)
tf.logging.info("***** Final Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
writer.write("best perf happened at step: {}".format(global_step))
if __name__ == "__main__":
flags.mark_flag_as_required("spm_model_file")
flags.mark_flag_as_required("albert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for GLUE classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
from albert import fine_tuning_utils
from albert import modeling
from albert import optimization
from albert import tokenization
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import metrics as contrib_metrics
from tensorflow.contrib import tpu as contrib_tpu
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
        Must only be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
  We use this class instead of `None` because treating `None` as padding
  batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
guid=None,
example_id=None,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.example_id = example_id
self.guid = guid
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def __init__(self, use_spm, do_lower_case):
super(DataProcessor, self).__init__()
self.use_spm = use_spm
self.do_lower_case = do_lower_case
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
def process_text(self, text):
if self.use_spm:
return tokenization.preprocess_text(text, lower=self.do_lower_case)
else:
return tokenization.convert_to_unicode(text)
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "MNLI", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "MNLI", "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "MNLI", "test_matched.tsv")),
"test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
# Note(mingdachen): We will rely on this guid for GLUE submission.
guid = self.process_text(line[0])
text_a = self.process_text(line[8])
text_b = self.process_text(line[9])
if set_type == "test":
label = "contradiction"
else:
label = self.process_text(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MisMnliProcessor(MnliProcessor):
"""Processor for the Mismatched MultiNLI data set (GLUE version)."""
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "MNLI", "dev_mismatched.tsv")),
"dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "MNLI", "test_mismatched.tsv")),
"test")
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "MRPC", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "MRPC", "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "MRPC", "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = self.process_text(line[3])
text_b = self.process_text(line[4])
if set_type == "test":
guid = line[0]
label = "0"
else:
label = self.process_text(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "CoLA", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "CoLA", "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "CoLA", "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
guid = line[0]
text_a = self.process_text(line[1])
label = "0"
else:
text_a = self.process_text(line[3])
label = self.process_text(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "SST-2", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "SST-2", "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "SST-2", "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
if set_type != "test":
guid = "%s-%s" % (set_type, i)
text_a = self.process_text(line[0])
label = self.process_text(line[1])
else:
guid = self.process_text(line[0])
# guid = "%s-%s" % (set_type, line[0])
text_a = self.process_text(line[1])
label = "0"
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class StsbProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "STS-B", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "STS-B", "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "STS-B", "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return [None]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = self.process_text(line[0])
# guid = "%s-%s" % (set_type, line[0])
text_a = self.process_text(line[7])
text_b = self.process_text(line[8])
if set_type != "test":
label = float(line[-1])
else:
label = 0
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QqpProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "QQP", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "QQP", "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "QQP", "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = line[0]
# guid = "%s-%s" % (set_type, line[0])
if set_type != "test":
try:
text_a = self.process_text(line[3])
text_b = self.process_text(line[4])
label = self.process_text(line[5])
except IndexError:
continue
else:
text_a = self.process_text(line[1])
text_b = self.process_text(line[2])
label = "0"
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QnliProcessor(DataProcessor):
"""Processor for the QNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "QNLI", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "QNLI", "dev.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "QNLI", "test.tsv")),
"test_matched")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = self.process_text(line[0])
# guid = "%s-%s" % (set_type, line[0])
text_a = self.process_text(line[1])
text_b = self.process_text(line[2])
if set_type == "test_matched":
label = "entailment"
else:
label = self.process_text(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class RteProcessor(DataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "RTE", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "RTE", "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "RTE", "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = self.process_text(line[0])
# guid = "%s-%s" % (set_type, line[0])
text_a = self.process_text(line[1])
text_b = self.process_text(line[2])
if set_type == "test":
label = "entailment"
else:
label = self.process_text(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WnliProcessor(DataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "WNLI", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "WNLI", "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "WNLI", "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = self.process_text(line[0])
# guid = "%s-%s" % (set_type, line[0])
text_a = self.process_text(line[1])
text_b = self.process_text(line[2])
if set_type != "test":
label = self.process_text(line[-1])
else:
label = "0"
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class AXProcessor(DataProcessor):
"""Processor for the AX data set (GLUE version)."""
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "diagnostic", "diagnostic.tsv")),
"test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
# Note(mingdachen): We will rely on this guid for GLUE submission.
guid = self.process_text(line[0])
text_a = self.process_text(line[1])
text_b = self.process_text(line[2])
if set_type == "test":
label = "contradiction"
else:
label = self.process_text(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
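# Illustrative sketch (not part of the original file): a hypothetical helper
# that maps lower-cased GLUE task names to the processors defined above, in the
# style of the fine-tuning scripts that consume these classes.
def _example_get_processor(task_name, use_spm, do_lower_case):
  """Instantiates the processor for `task_name` (hypothetical helper)."""
  processors = {
      "ax": AXProcessor,
      "cola": ColaProcessor,
      "mismnli": MisMnliProcessor,
      "mnli": MnliProcessor,
      "mrpc": MrpcProcessor,
      "qnli": QnliProcessor,
      "qqp": QqpProcessor,
      "rte": RteProcessor,
      "sst-2": Sst2Processor,
      "sts-b": StsbProcessor,
      "wnli": WnliProcessor,
  }
  return processors[task_name.lower()](
      use_spm=use_spm, do_lower_case=do_lower_case)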
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer, task_name):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
if task_name != "sts-b":
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in ALBERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if task_name != "sts-b":
label_id = label_map[example.label]
else:
label_id = example.label
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
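# Illustrative sketch (not part of the original file): runs the conversion
# above with a minimal whitespace tokenizer stub so the padding and masking
# invariants are easy to see. The stub class, its tiny vocabulary, and the
# example text are hypothetical; real callers pass the ALBERT tokenizer.
class _WhitespaceStubTokenizer(object):
  """Whitespace tokenizer stub with a tiny made-up vocabulary."""

  def tokenize(self, text):
    return text.split()

  def convert_tokens_to_ids(self, tokens):
    vocab = {"[CLS]": 1, "[SEP]": 2}
    return [vocab.get(token, 3) for token in tokens]


def _example_convert_single_example():
  """Converts one made-up sentence-pair example into features."""
  example = InputExample(guid="demo-1", text_a="hello world",
                         text_b="hi there", label="1")
  feature = convert_single_example(
      0, example, label_list=["0", "1"], max_seq_length=16,
      tokenizer=_WhitespaceStubTokenizer(), task_name="mrpc")
  # input_ids, input_mask and segment_ids are all padded to length 16,
  # and label_id is 1.
  return feature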
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file, task_name):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer, task_name)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
def create_float_feature(values):
f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_float_feature([feature.label_id])\
if task_name == "sts-b" else create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder, task_name, use_tpu, bsz,
multiple=1):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
labeltype = tf.float32 if task_name == "sts-b" else tf.int64
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length * multiple], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length * multiple], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length * multiple], tf.int64),
"label_ids": tf.FixedLenFeature([], labeltype),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
if use_tpu:
batch_size = params["batch_size"]
else:
batch_size = bsz
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
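# Illustrative sketch (not part of the original file): wires the two helpers
# above together for a hypothetical eval set. The record path, sequence length,
# and batch size are made-up values.
def _example_build_eval_input_fn(eval_examples, label_list, tokenizer):
  """Writes eval features to a TFRecord file and returns an eval `input_fn`."""
  eval_file = "/tmp/eval.tf_record"  # Hypothetical path.
  file_based_convert_examples_to_features(
      eval_examples, label_list, 128, tokenizer, eval_file, task_name="mnli")
  return file_based_input_fn_builder(
      input_file=eval_file,
      seq_length=128,
      is_training=False,
      drop_remainder=False,
      task_name="mnli",
      use_tpu=False,
      bsz=8)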
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
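# Illustrative sketch (not part of the original file): demonstrates the
# truncation heuristic above on two made-up token lists.
def _example_truncate_seq_pair():
  """Shows that the longer sequence is trimmed one token at a time."""
  tokens_a = ["the", "quick", "brown", "fox", "jumps"]
  tokens_b = ["over", "the", "dog"]
  _truncate_seq_pair(tokens_a, tokens_b, max_length=6)
  # tokens_a is now ["the", "quick", "brown"]; tokens_b is unchanged.
  return tokens_a, tokens_b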
def create_model(albert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings, task_name,
hub_module):
"""Creates a classification model."""
(output_layer, _) = fine_tuning_utils.create_albert(
albert_config=albert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
use_einsum=True,
hub_module=hub_module)
hidden_size = output_layer.shape[-1]
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
if task_name != "sts-b":
probabilities = tf.nn.softmax(logits, axis=-1)
predictions = tf.argmax(probabilities, axis=-1, output_type=tf.int32)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
else:
probabilities = logits
logits = tf.squeeze(logits, [-1])
predictions = logits
per_example_loss = tf.square(logits - labels)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, probabilities, logits, predictions)
def model_fn_builder(albert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings, task_name, hub_module=None,
optimizer="adamw"):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = (mode == tf_estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, probabilities, logits, predictions) = \
create_model(albert_config, is_training, input_ids, input_mask,
segment_ids, label_ids, num_labels, use_one_hot_embeddings,
task_name, hub_module)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf_estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps,
use_tpu, optimizer)
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf_estimator.ModeKeys.EVAL:
if task_name not in ["sts-b", "cola"]:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions,
weights=is_real_example)
loss = tf.metrics.mean(
values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
elif task_name == "sts-b":
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
"""Compute Pearson correlations for STS-B."""
# Display labels and predictions
concat1 = contrib_metrics.streaming_concat(logits)
concat2 = contrib_metrics.streaming_concat(label_ids)
# Compute Pearson correlation
pearson = contrib_metrics.streaming_pearson_correlation(
logits, label_ids, weights=is_real_example)
# Compute MSE
# mse = tf.metrics.mean(per_example_loss)
mse = tf.metrics.mean_squared_error(
label_ids, logits, weights=is_real_example)
loss = tf.metrics.mean(
values=per_example_loss,
weights=is_real_example)
return {"pred": concat1, "label_ids": concat2, "pearson": pearson,
"MSE": mse, "eval_loss": loss,}
elif task_name == "cola":
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
"""Compute Matthew's correlations for COLA."""
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
# https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
tp, tp_op = tf.metrics.true_positives(
labels=label_ids, predictions=predictions,
weights=is_real_example)
tn, tn_op = tf.metrics.true_negatives(
labels=label_ids, predictions=predictions,
weights=is_real_example)
fp, fp_op = tf.metrics.false_positives(
labels=label_ids, predictions=predictions,
weights=is_real_example)
fn, fn_op = tf.metrics.false_negatives(
labels=label_ids, predictions=predictions,
weights=is_real_example)
# Compute Matthew's correlation
mcc = tf.div_no_nan(
tp * tn - fp * fn,
tf.pow((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn), 0.5))
# Compute accuracy
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions,
weights=is_real_example)
loss = tf.metrics.mean(
values=per_example_loss,
weights=is_real_example)
return {"matthew_corr": (mcc, tf.group(tp_op, tn_op, fp_op, fn_op)),
"eval_accuracy": accuracy, "eval_loss": loss,}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
predictions={
"probabilities": probabilities,
"predictions": predictions
},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
# This function is not used by this file, but it is still used by the Colab
# notebook and by people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
# This function is not used by this file, but it is still used by the Colab
# notebook and by people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, task_name):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer, task_name)
features.append(feature)
return features
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Exports a minimal module for ALBERT models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from albert import modeling
import tensorflow.compat.v1 as tf
flags.DEFINE_string(
"albert_directory", None,
"The config json file corresponding to the pre-trained ALBERT model. "
"This specifies the model architecture.")
flags.DEFINE_string(
"checkpoint_name", "model.ckpt-best",
"Name of the checkpoint under albert_directory to be exported.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_string("export_path", None, "Path to the output module.")
FLAGS = flags.FLAGS
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
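# Illustrative sketch (not part of the original file): concrete shapes for the
# flattening arithmetic in `gather_indexes`. With a [2, 3, 4] sequence tensor
# and two positions per example, the gathered result has shape [4, 4]. The
# values are made up.
def _example_gather_indexes():
  """Gathers made-up positions from a toy sequence tensor."""
  sequence = tf.reshape(tf.range(24, dtype=tf.float32), [2, 3, 4])
  positions = tf.constant([[0, 2], [1, 2]], dtype=tf.int32)
  return gather_indexes(sequence, positions)  # Shape [4, 4].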
def get_mlm_logits(input_tensor, albert_config, mlm_positions, output_weights):
"""From run_pretraining.py."""
input_tensor = gather_indexes(input_tensor, mlm_positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=albert_config.embedding_size,
activation=modeling.get_activation(albert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
albert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[albert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(
input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
return logits
def get_sentence_order_logits(input_tensor, albert_config):
"""Get loss and log probs for the next sentence prediction."""
  # Simple binary classification over whether the two segments appear in their
  # original order. This weight matrix is not used after pre-training.
with tf.variable_scope("cls/seq_relationship"):
output_weights = tf.get_variable(
"output_weights",
shape=[2, albert_config.hidden_size],
initializer=modeling.create_initializer(
albert_config.initializer_range))
output_bias = tf.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
return logits
def build_model(sess):
"""Module function."""
input_ids = tf.placeholder(tf.int32, [None, None], "input_ids")
input_mask = tf.placeholder(tf.int32, [None, None], "input_mask")
segment_ids = tf.placeholder(tf.int32, [None, None], "segment_ids")
mlm_positions = tf.placeholder(tf.int32, [None, None], "mlm_positions")
albert_config_path = os.path.join(
FLAGS.albert_directory, "albert_config.json")
albert_config = modeling.AlbertConfig.from_json_file(albert_config_path)
model = modeling.AlbertModel(
config=albert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=False)
get_mlm_logits(model.get_sequence_output(), albert_config,
mlm_positions, model.get_embedding_table())
get_sentence_order_logits(model.get_pooled_output(), albert_config)
checkpoint_path = os.path.join(FLAGS.albert_directory, FLAGS.checkpoint_name)
tvars = tf.trainable_variables()
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, checkpoint_path)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
tf.train.init_from_checkpoint(checkpoint_path, assignment_map)
init = tf.global_variables_initializer()
sess.run(init)
return sess
def main(_):
sess = tf.Session()
tf.train.get_or_create_global_step()
sess = build_model(sess)
my_vars = []
for var in tf.global_variables():
if "lamb_v" not in var.name and "lamb_m" not in var.name:
my_vars.append(var)
saver = tf.train.Saver(my_vars)
saver.save(sess, FLAGS.export_path)
if __name__ == "__main__":
flags.mark_flag_as_required("albert_directory")
flags.mark_flag_as_required("export_path")
app.run(main)
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main ALBERT model and related functions.
For a description of the algorithm, see https://arxiv.org/abs/1909.11942.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
from six.moves import range
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
class AlbertConfig(object):
"""Configuration for `AlbertModel`.
The default settings match the configuration of model `albert_xxlarge`.
"""
def __init__(self,
vocab_size,
embedding_size=128,
hidden_size=4096,
num_hidden_layers=12,
num_hidden_groups=1,
num_attention_heads=64,
intermediate_size=16384,
inner_group_num=1,
down_scale_factor=1,
hidden_act="gelu",
hidden_dropout_prob=0,
attention_probs_dropout_prob=0,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs AlbertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `AlbertModel`.
      embedding_size: Size of the vocabulary embeddings.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
      num_hidden_groups: Number of groups for the hidden layers; parameters in
        the same group are shared.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
      inner_group_num: int, number of inner repetitions of attention and ffn.
      down_scale_factor: float, the scale factor to apply.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`AlbertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_hidden_groups = num_hidden_groups
self.num_attention_heads = num_attention_heads
self.inner_group_num = inner_group_num
self.down_scale_factor = down_scale_factor
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
"""Constructs a `AlbertConfig` from a Python dictionary of parameters."""
config = AlbertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `AlbertConfig` from a json file of parameters."""
with tf.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
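# Illustrative sketch (not part of the original file): round-trips a config
# through a Python dict using the helpers above. The hyperparameters are small
# made-up values, not a released ALBERT configuration.
def _example_albert_config_roundtrip():
  """Builds a toy config, serializes it, and restores it from a dict."""
  config = AlbertConfig(vocab_size=30000, hidden_size=768,
                        num_hidden_layers=4, num_attention_heads=12)
  restored = AlbertConfig.from_dict(config.to_dict())
  return restored.to_json_string()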
class AlbertModel(object):
"""BERT model ("Bidirectional Encoder Representations from Transformers").
Example usage:
```python
# Already been converted from strings into ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
  token_type_ids = tf.constant([[0, 0, 1], [0, 1, 1]])
  config = modeling.AlbertConfig(vocab_size=32000, hidden_size=512,
    num_hidden_layers=8, num_attention_heads=8, intermediate_size=1024)
model = modeling.AlbertModel(config=config, is_training=True,
input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
label_embeddings = tf.get_variable(...)
pooled_output = model.get_pooled_output()
logits = tf.matmul(pooled_output, label_embeddings)
...
```
"""
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=False,
use_einsum=True,
scope=None):
"""Constructor for AlbertModel.
Args:
config: `AlbertConfig` instance.
is_training: bool. true for training model, false for eval model. Controls
whether dropout will be applied.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings.
use_einsum: (optional) bool. Whether to use einsum or reshape+matmul for
dense layers
scope: (optional) variable scope. Defaults to "bert".
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
"""
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope(scope, default_name="bert"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.word_embedding_output,
self.output_embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.embedding_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = embedding_postprocessor(
input_tensor=self.word_embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob,
use_one_hot_embeddings=use_one_hot_embeddings)
with tf.variable_scope("encoder"):
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=input_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_hidden_groups=config.num_hidden_groups,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
inner_group_num=config.inner_group_num,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True,
use_einsum=use_einsum)
self.sequence_output = self.all_encoder_layers[-1]
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the final hidden of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_word_embedding_output(self):
"""Get output of the word(piece) embedding lookup.
This is BEFORE positional embeddings and token type embeddings have been
added.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size]
corresponding to the output of the word(piece) embedding layer.
"""
return self.word_embedding_output
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size]
corresponding to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def get_embedding_table(self):
return self.output_embedding_table
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
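# Illustrative sketch (not part of the original module): the same tanh-based
# GELU approximation written with plain NumPy so the formula above can be
# checked on a few scalar values. The name `_gelu_numpy_sketch` is
# hypothetical and exists only for demonstration.
def _gelu_numpy_sketch(x):
  """NumPy rendition of the tanh approximation of GELU used in `gelu`."""
  import numpy as _np
  x = _np.asarray(x, dtype=_np.float32)
  cdf = 0.5 * (1.0 + _np.tanh(_np.sqrt(2.0 / _np.pi) * (x + 0.044715 * x**3)))
  return x * cdf
# Example: _gelu_numpy_sketch([-1.0, 0.0, 1.0]) is approximately
# [-0.1588, 0.0, 0.8412].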
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
# We assume that anything that's not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint, num_of_group=0):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
init_vars_name = [name for (name, _) in init_vars]
if num_of_group > 0:
assignment_map = []
for gid in range(num_of_group):
assignment_map.append(collections.OrderedDict())
else:
assignment_map = collections.OrderedDict()
for name in name_to_variable:
if name in init_vars_name:
tvar_name = name
elif (re.sub(r"/group_\d+/", "/group_0/",
six.ensure_str(name)) in init_vars_name and
num_of_group > 1):
tvar_name = re.sub(r"/group_\d+/", "/group_0/", six.ensure_str(name))
elif (re.sub(r"/ffn_\d+/", "/ffn_1/", six.ensure_str(name))
in init_vars_name and num_of_group > 1):
tvar_name = re.sub(r"/ffn_\d+/", "/ffn_1/", six.ensure_str(name))
elif (re.sub(r"/attention_\d+/", "/attention_1/", six.ensure_str(name))
in init_vars_name and num_of_group > 1):
tvar_name = re.sub(r"/attention_\d+/", "/attention_1/",
six.ensure_str(name))
else:
tf.logging.info("name %s does not get matched", name)
continue
tf.logging.info("name %s match to %s", name, tvar_name)
if num_of_group > 0:
group_matched = False
for gid in range(1, num_of_group):
if (("/group_" + str(gid) + "/" in name) or
("/ffn_" + str(gid) + "/" in name) or
("/attention_" + str(gid) + "/" in name)):
group_matched = True
tf.logging.info("%s belongs to %dth", name, gid)
assignment_map[gid][tvar_name] = name
if not group_matched:
assignment_map[0][tvar_name] = name
else:
assignment_map[tvar_name] = name
initialized_variable_names[name] = 1
initialized_variable_names[six.ensure_str(name) + ":0"] = 1
return (assignment_map, initialized_variable_names)
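# Illustrative sketch (not part of the original module): the remapping above
# collapses per-group variable scopes onto the shared checkpoint scopes via a
# regex rewrite. A minimal, self-contained demonstration on plain strings
# (the helper name `_remap_group_name_sketch` is hypothetical):
def _remap_group_name_sketch(name):
  """Maps e.g. '.../group_3/...' onto the shared '.../group_0/...' scope."""
  import re as _re
  return _re.sub(r"/group_\d+/", "/group_0/", name)
# Example:
#   _remap_group_name_sketch("bert/encoder/transformer/group_3/inner_group_0/"
#                            "ffn_1/intermediate/dense/kernel")
#   -> "bert/encoder/transformer/group_0/inner_group_0/ffn_1/intermediate/dense/kernel"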
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, rate=dropout_prob)
return output
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return contrib_layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
def get_timing_signal_1d_given_position(channels,
position,
min_timescale=1.0,
max_timescale=1.0e4):
"""Get sinusoids of diff frequencies, with timing position given.
Adapted from add_timing_signal_1d_given_position in
//third_party/py/tensor2tensor/layers/common_attention.py
Args:
channels: scalar, size of timing embeddings to create. The number of
different timescales is equal to channels / 2.
position: a Tensor with shape [batch, seq_len]
min_timescale: a float
max_timescale: a float
Returns:
a Tensor of timing signals [batch, seq_len, channels]
"""
num_timescales = channels // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
(tf.to_float(num_timescales) - 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = (
tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims(
tf.expand_dims(inv_timescales, 0), 0))
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]])
return signal
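# Illustrative sketch (not part of the original module): the same sinusoidal
# timing signal computed with NumPy, to make the shape bookkeeping above
# ([batch, seq_len, channels]) concrete. The name
# `_timing_signal_numpy_sketch` is hypothetical.
def _timing_signal_numpy_sketch(channels, position,
                                min_timescale=1.0, max_timescale=1.0e4):
  """position: float array of shape [batch, seq_len]."""
  import math as _math
  import numpy as _np
  position = _np.asarray(position, dtype=_np.float32)
  num_timescales = channels // 2
  log_inc = (_math.log(float(max_timescale) / float(min_timescale)) /
             (num_timescales - 1))
  inv_timescales = min_timescale * _np.exp(
      _np.arange(num_timescales, dtype=_np.float32) * -log_inc)
  scaled_time = position[:, :, None] * inv_timescales[None, None, :]
  signal = _np.concatenate([_np.sin(scaled_time), _np.cos(scaled_time)],
                           axis=2)
  if channels % 2:  # pad the odd channel, mirroring the tf.pad call above
    signal = _np.pad(signal, [(0, 0), (0, 0), (0, 1)])
  return signal  # [batch, seq_len, channels]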
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
"""Looks up words embeddings for id tensor.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
ids.
vocab_size: int. Size of the embedding vocabulary.
embedding_size: int. Width of the word embeddings.
initializer_range: float. Embedding initialization range.
word_embedding_name: string. Name of the embedding table.
use_one_hot_embeddings: bool. If True, use one-hot method for word
embeddings. If False, use `tf.nn.embedding_lookup()`.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size].
"""
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
if use_one_hot_embeddings:
flat_input_ids = tf.reshape(input_ids, [-1])
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.nn.embedding_lookup(embedding_table, input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return (output, embedding_table)
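# Illustrative sketch (not part of the original module): the one-hot matmul
# path and the gather path above produce the same embeddings. The NumPy check
# below makes that equivalence explicit for a tiny vocabulary (the helper
# name is hypothetical).
def _embedding_lookup_equivalence_sketch():
  import numpy as _np
  table = _np.arange(12, dtype=_np.float32).reshape(4, 3)  # [vocab=4, emb=3]
  ids = _np.array([[1, 3], [0, 2]])                        # [batch=2, seq=2]
  one_hot = _np.eye(4, dtype=_np.float32)[ids.reshape(-1)]  # [batch*seq, vocab]
  via_matmul = one_hot.dot(table).reshape(2, 2, 3)
  via_gather = table[ids]                                   # fancy indexing
  assert _np.allclose(via_matmul, via_gather)
  return via_gather                                         # [2, 2, 3]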
def embedding_postprocessor(input_tensor,
use_token_type=False,
token_type_ids=None,
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1,
use_one_hot_embeddings=True):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
use_one_hot_embeddings: bool. If True, use one-hot method for word
embeddings. If False, use `tf.nn.embedding_lookup()`.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
raise ValueError("`token_type_ids` must be specified if"
"`use_token_type` is True.")
token_type_table = tf.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range))
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary, unless converting to a TFLite model.
if use_one_hot_embeddings:
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
else:
token_type_embeddings = tf.nn.embedding_lookup(token_type_table,
token_type_ids)
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range))
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, -1])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob)
return output
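# Illustrative sketch (not part of the original module): the position
# embeddings above are sliced to [seq_length, width] and reshaped to
# [1, seq_length, width] so that the addition broadcasts over the batch
# dimension. A minimal NumPy rendition of that broadcast (names hypothetical):
def _position_broadcast_sketch():
  import numpy as _np
  batch_size, seq_length, width = 2, 3, 4
  output = _np.zeros((batch_size, seq_length, width), dtype=_np.float32)
  full_position_embeddings = _np.random.randn(8, width).astype(_np.float32)
  position_embeddings = full_position_embeddings[:seq_length]     # slice
  output += position_embeddings.reshape(1, seq_length, width)     # broadcast
  return output  # same shape as the input: [batch_size, seq_length, width]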
def einsum_via_matmul(input_tensor, w, num_inner_dims):
"""Implements einsum via matmul and reshape ops.
Args:
input_tensor: float Tensor of shape [<batch_dims>, <inner_dims>].
w: float Tensor of shape [<inner_dims>, <outer_dims>].
num_inner_dims: int. number of dimensions to use for inner products.
Returns:
float Tensor of shape [<batch_dims>, <outer_dims>].
"""
input_shape = get_shape_list(input_tensor)
w_shape = get_shape_list(w)
batch_dims = input_shape[: -num_inner_dims]
inner_dims = input_shape[-num_inner_dims:]
outer_dims = w_shape[num_inner_dims:]
inner_dim = np.prod(inner_dims)
outer_dim = np.prod(outer_dims)
if num_inner_dims > 1:
input_tensor = tf.reshape(input_tensor, batch_dims + [inner_dim])
if len(w_shape) > 2:
w = tf.reshape(w, [inner_dim, outer_dim])
ret = tf.matmul(input_tensor, w)
if len(outer_dims) > 1:
ret = tf.reshape(ret, batch_dims + outer_dims)
return ret
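# Illustrative sketch (not part of the original module): `einsum_via_matmul`
# mirrors an einsum contraction by flattening the inner dimensions and using
# a plain matmul. The NumPy check below compares the two for the
# two-inner-dim case used by `dense_layer_3d_proj` (names hypothetical).
def _einsum_via_matmul_check_sketch():
  import numpy as _np
  x = _np.random.randn(2, 5, 3, 4).astype(_np.float32)  # [B, F, N, D]
  w = _np.random.randn(3, 4, 6).astype(_np.float32)     # [N, D, H]
  via_einsum = _np.einsum("bfnd,ndh->bfh", x, w)
  via_matmul = x.reshape(2, 5, 12).dot(w.reshape(12, 6))  # flatten N*D
  assert _np.allclose(via_einsum, via_matmul, atol=1e-4)
  return via_matmul                                       # [B, F, H]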
def dense_layer_3d(input_tensor,
num_attention_heads,
head_size,
initializer,
activation,
use_einsum,
name=None):
"""A dense layer with 3D kernel.
Args:
input_tensor: float Tensor of shape [batch, seq_length, hidden_size].
num_attention_heads: Number of attention heads.
head_size: The size per attention head.
initializer: Kernel initializer.
activation: Activation function.
use_einsum: bool. Whether to use einsum or reshape+matmul for dense layers.
name: The name scope of this layer.
Returns:
float logits Tensor.
"""
input_shape = get_shape_list(input_tensor)
hidden_size = input_shape[2]
with tf.variable_scope(name):
w = tf.get_variable(
name="kernel",
shape=[hidden_size, num_attention_heads * head_size],
initializer=initializer)
w = tf.reshape(w, [hidden_size, num_attention_heads, head_size])
b = tf.get_variable(
name="bias",
shape=[num_attention_heads * head_size],
initializer=tf.zeros_initializer)
b = tf.reshape(b, [num_attention_heads, head_size])
if use_einsum:
ret = tf.einsum("BFH,HND->BFND", input_tensor, w)
else:
ret = einsum_via_matmul(input_tensor, w, 1)
ret += b
if activation is not None:
return activation(ret)
else:
return ret
def dense_layer_3d_proj(input_tensor,
hidden_size,
head_size,
initializer,
activation,
use_einsum,
name=None):
"""A dense layer with 3D kernel for projection.
Args:
input_tensor: float Tensor of shape [batch,from_seq_length,
num_attention_heads, size_per_head].
hidden_size: The size of hidden layer.
head_size: The size of head.
initializer: Kernel initializer.
activation: Activation function.
use_einsum: bool. Whether to use einsum or reshape+matmul for dense layers.
name: The name scope of this layer.
Returns:
float logits Tensor.
"""
input_shape = get_shape_list(input_tensor)
num_attention_heads = input_shape[2]
with tf.variable_scope(name):
w = tf.get_variable(
name="kernel",
shape=[num_attention_heads * head_size, hidden_size],
initializer=initializer)
w = tf.reshape(w, [num_attention_heads, head_size, hidden_size])
b = tf.get_variable(
name="bias", shape=[hidden_size], initializer=tf.zeros_initializer)
if use_einsum:
ret = tf.einsum("BFND,NDH->BFH", input_tensor, w)
else:
ret = einsum_via_matmul(input_tensor, w, 2)
ret += b
if activation is not None:
return activation(ret)
else:
return ret
def dense_layer_2d(input_tensor,
output_size,
initializer,
activation,
use_einsum,
num_attention_heads=1,
name=None):
"""A dense layer with 2D kernel.
Args:
input_tensor: Float tensor with rank 3.
output_size: The size of output dimension.
initializer: Kernel initializer.
activation: Activation function.
use_einsum: bool. Whether to use einsum or reshape+matmul for dense layers.
num_attention_heads: number of attention head in attention layer.
name: The name scope of this layer.
Returns:
float logits Tensor.
"""
del num_attention_heads # unused
input_shape = get_shape_list(input_tensor)
hidden_size = input_shape[2]
with tf.variable_scope(name):
w = tf.get_variable(
name="kernel",
shape=[hidden_size, output_size],
initializer=initializer)
b = tf.get_variable(
name="bias", shape=[output_size], initializer=tf.zeros_initializer)
if use_einsum:
ret = tf.einsum("BFH,HO->BFO", input_tensor, w)
else:
ret = tf.matmul(input_tensor, w)
ret += b
if activation is not None:
return activation(ret)
else:
return ret
def dot_product_attention(q, k, v, bias, dropout_rate=0.0):
"""Dot-product attention.
Args:
q: Tensor with shape [..., length_q, depth_k].
k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
match with q.
v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must
match with q.
bias: bias Tensor (see attention_bias())
dropout_rate: a float.
Returns:
Tensor with shape [..., length_q, depth_v].
"""
logits = tf.matmul(q, k, transpose_b=True) # [..., length_q, length_kv]
logits = tf.multiply(logits, 1.0 / math.sqrt(float(get_shape_list(q)[-1])))
if bias is not None:
# `attention_mask` = [B, T]
from_shape = get_shape_list(q)
if len(from_shape) == 4:
broadcast_ones = tf.ones([from_shape[0], 1, from_shape[2], 1], tf.float32)
elif len(from_shape) == 5:
# from_shape = [B, N, block_num, block_size, depth]
broadcast_ones = tf.ones([from_shape[0], 1, from_shape[2], from_shape[3],
1], tf.float32)
bias = tf.matmul(broadcast_ones,
tf.cast(bias, tf.float32), transpose_b=True)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - bias) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
logits += adder
else:
adder = 0.0
attention_probs = tf.nn.softmax(logits, name="attention_probs")
attention_probs = dropout(attention_probs, dropout_rate)
return tf.matmul(attention_probs, v)
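# Illustrative sketch (not part of the original module): the "-10000.0 adder"
# above pushes masked logits far below the valid ones, so their softmax weight
# is effectively zero. A tiny NumPy demonstration (name hypothetical):
def _mask_adder_sketch():
  import numpy as _np
  logits = _np.array([2.0, 1.0, 3.0], dtype=_np.float32)
  mask = _np.array([1.0, 1.0, 0.0], dtype=_np.float32)  # last key is masked
  logits = logits + (1.0 - mask) * -10000.0
  exp = _np.exp(logits - logits.max())
  probs = exp / exp.sum()
  return probs  # ~[0.731, 0.269, 0.0]: the masked position gets no weight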
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
batch_size=None,
from_seq_length=None,
to_seq_length=None,
use_einsum=True):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
The values should be 1 or 0. The attention scores will effectively
be set to -infinity for any positions in the mask that are 0, and
will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
use_einsum: bool. Whether to use einsum or reshape+matmul for dense layers
Returns:
float Tensor of shape [batch_size, from_seq_length, num_attention_heads,
size_per_head].
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
size_per_head = int(from_shape[2]/num_attention_heads)
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
# `query_layer` = [B, F, N, H]
q = dense_layer_3d(from_tensor, num_attention_heads, size_per_head,
create_initializer(initializer_range), query_act,
use_einsum, "query")
# `key_layer` = [B, T, N, H]
k = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,
create_initializer(initializer_range), key_act,
use_einsum, "key")
# `value_layer` = [B, T, N, H]
v = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,
create_initializer(initializer_range), value_act,
use_einsum, "value")
q = tf.transpose(q, [0, 2, 1, 3])
k = tf.transpose(k, [0, 2, 1, 3])
v = tf.transpose(v, [0, 2, 1, 3])
if attention_mask is not None:
attention_mask = tf.reshape(
attention_mask, [batch_size, 1, to_seq_length, 1])
# 'new_embeddings = [B, N, F, H]'
new_embeddings = dot_product_attention(q, k, v, attention_mask,
attention_probs_dropout_prob)
return tf.transpose(new_embeddings, [0, 2, 1, 3])
def attention_ffn_block(layer_input,
hidden_size=768,
attention_mask=None,
num_attention_heads=1,
attention_head_size=64,
attention_probs_dropout_prob=0.0,
intermediate_size=3072,
intermediate_act_fn=None,
initializer_range=0.02,
hidden_dropout_prob=0.0,
use_einsum=True):
"""A network with attention-ffn as sub-block.
Args:
layer_input: float Tensor of shape [batch_size, from_seq_length,
from_width].
hidden_size: (optional) int, size of hidden layer.
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
The values should be 1 or 0. The attention scores will effectively be set
to -infinity for any positions in the mask that are 0, and will be
unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
attention_head_size: int. Size of attention head.
attention_probs_dropout_prob: float. dropout probability for attention_layer
intermediate_size: int. Size of intermediate hidden layer.
intermediate_act_fn: (optional) Activation function for the intermediate
layer.
initializer_range: float. Range of the weight initializer.
hidden_dropout_prob: (optional) float. Dropout probability of the hidden
layer.
use_einsum: bool. Whether to use einsum or reshape+matmul for dense layers
Returns:
layer output
"""
with tf.variable_scope("attention_1"):
with tf.variable_scope("self"):
attention_output = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
use_einsum=use_einsum)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
attention_output = dense_layer_3d_proj(
attention_output,
hidden_size,
attention_head_size,
create_initializer(initializer_range),
None,
use_einsum=use_einsum,
name="dense")
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + layer_input)
with tf.variable_scope("ffn_1"):
with tf.variable_scope("intermediate"):
intermediate_output = dense_layer_2d(
attention_output,
intermediate_size,
create_initializer(initializer_range),
intermediate_act_fn,
use_einsum=use_einsum,
num_attention_heads=num_attention_heads,
name="dense")
with tf.variable_scope("output"):
ffn_output = dense_layer_2d(
intermediate_output,
hidden_size,
create_initializer(initializer_range),
None,
use_einsum=use_einsum,
num_attention_heads=num_attention_heads,
name="dense")
ffn_output = dropout(ffn_output, hidden_dropout_prob)
ffn_output = layer_norm(ffn_output + attention_output)
return ffn_output
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_hidden_groups=12,
num_attention_heads=12,
intermediate_size=3072,
inner_group_num=1,
intermediate_act_fn="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False,
use_einsum=True):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length],
with 1 for positions that can be attended to and 0 in positions that
should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_hidden_groups: int. Number of groups for the hidden layers; parameters
in the same group are shared.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
inner_group_num: int. Number of inner repetitions of attention and ffn.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
use_einsum: bool. Whether to use einsum or reshape+matmul for dense layers
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = hidden_size // num_attention_heads
input_shape = get_shape_list(input_tensor, expected_rank=3)
input_width = input_shape[2]
all_layer_outputs = []
if input_width != hidden_size:
prev_output = dense_layer_2d(
input_tensor, hidden_size, create_initializer(initializer_range),
None, use_einsum=use_einsum, name="embedding_hidden_mapping_in")
else:
prev_output = input_tensor
with tf.variable_scope("transformer", reuse=tf.AUTO_REUSE):
for layer_idx in range(num_hidden_layers):
group_idx = int(layer_idx / num_hidden_layers * num_hidden_groups)
with tf.variable_scope("group_%d" % group_idx):
with tf.name_scope("layer_%d" % layer_idx):
layer_output = prev_output
for inner_group_idx in range(inner_group_num):
with tf.variable_scope("inner_group_%d" % inner_group_idx):
layer_output = attention_ffn_block(
layer_input=layer_output,
hidden_size=hidden_size,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
attention_head_size=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
intermediate_size=intermediate_size,
intermediate_act_fn=intermediate_act_fn,
initializer_range=initializer_range,
hidden_dropout_prob=hidden_dropout_prob,
use_einsum=use_einsum)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
return all_layer_outputs
else:
return all_layer_outputs[-1]
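# Illustrative sketch (not part of the original module): the layer-to-group
# mapping above, `int(layer_idx / num_hidden_layers * num_hidden_groups)`,
# assigns consecutive layers to shared parameter groups. With 12 layers and
# 1 group (the standard ALBERT setting) every layer reuses the same
# variables; with 3 groups the layers split 4/4/4 (name hypothetical):
def _layer_to_group_sketch(num_hidden_layers, num_hidden_groups):
  return [int(layer_idx / num_hidden_layers * num_hidden_groups)
          for layer_idx in range(num_hidden_layers)]
# _layer_to_group_sketch(12, 1) -> [0] * 12
# _layer_to_group_sketch(12, 3) -> [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]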
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
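# Illustrative sketch (not part of the original module), assuming `tf` here is
# the TF1-style API used throughout this file: with a placeholder whose batch
# dimension is unknown, `get_shape_list` mixes Python ints for the static
# dimensions with a scalar Tensor for the dynamic one.
def _get_shape_list_sketch():
  with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, shape=[None, 128, 768])
    shape = get_shape_list(x, expected_rank=3)
    # shape[0] is a scalar tf.Tensor (the dynamic batch size), while
    # shape[1] and shape[2] are the Python ints 128 and 768.
    return shape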
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for RACE dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
from albert import classifier_utils
from albert import fine_tuning_utils
from albert import modeling
from albert import optimization
from albert import tokenization
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import tpu as contrib_tpu
class InputExample(object):
"""A single training/test example for the RACE dataset."""
def __init__(self,
example_id,
context_sentence,
start_ending,
endings,
label=None):
self.example_id = example_id
self.context_sentence = context_sentence
self.start_ending = start_ending
self.endings = endings
self.label = label
def __str__(self):
return self.__repr__()
def __repr__(self):
l = [
"id: {}".format(self.example_id),
"context_sentence: {}".format(self.context_sentence),
"start_ending: {}".format(self.start_ending),
"ending_0: {}".format(self.endings[0]),
"ending_1: {}".format(self.endings[1]),
"ending_2: {}".format(self.endings[2]),
"ending_3: {}".format(self.endings[3]),
]
if self.label is not None:
l.append("label: {}".format(self.label))
return ", ".join(l)
class RaceProcessor(object):
"""Processor for the RACE data set."""
def __init__(self, use_spm, do_lower_case, high_only, middle_only):
super(RaceProcessor, self).__init__()
self.use_spm = use_spm
self.do_lower_case = do_lower_case
self.high_only = high_only
self.middle_only = middle_only
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
return self.read_examples(
os.path.join(data_dir, "RACE", "train"))
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
return self.read_examples(
os.path.join(data_dir, "RACE", "dev"))
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
return self.read_examples(
os.path.join(data_dir, "RACE", "test"))
def get_labels(self):
"""Gets the list of labels for this data set."""
return ["A", "B", "C", "D"]
def process_text(self, text):
if self.use_spm:
return tokenization.preprocess_text(text, lower=self.do_lower_case)
else:
return tokenization.convert_to_unicode(text)
def read_examples(self, data_dir):
"""Read examples from RACE json files."""
examples = []
for level in ["middle", "high"]:
if level == "middle" and self.high_only: continue
if level == "high" and self.middle_only: continue
cur_dir = os.path.join(data_dir, level)
cur_path = os.path.join(cur_dir, "all.txt")
with tf.gfile.Open(cur_path) as f:
for line in f:
cur_data = json.loads(line.strip())
answers = cur_data["answers"]
options = cur_data["options"]
questions = cur_data["questions"]
context = self.process_text(cur_data["article"])
for i in range(len(answers)):
label = ord(answers[i]) - ord("A")
qa_list = []
question = self.process_text(questions[i])
for j in range(4):
option = self.process_text(options[i][j])
if "_" in question:
qa_cat = question.replace("_", option)
else:
qa_cat = " ".join([question, option])
qa_list.append(qa_cat)
examples.append(
InputExample(
example_id=cur_data["id"],
context_sentence=context,
start_ending=None,
endings=[qa_list[0], qa_list[1], qa_list[2], qa_list[3]],
label=label
)
)
return examples
def convert_single_example(example_index, example, label_size, max_seq_length,
tokenizer, max_qa_length):
"""Loads a data file into a list of `InputBatch`s."""
# RACE is a multiple choice task. To perform this task using ALBERT,
# we will use the formatting proposed in "Improving Language
# Understanding by Generative Pre-Training" and suggested by
# @jacobdevlin-google in this issue
# https://github.com/google-research/bert/issues/38.
#
# Each choice will correspond to a sample on which we run the
# inference. For a given RACE example, we will create the 4
# following inputs:
# - [CLS] context [SEP] choice_1 [SEP]
# - [CLS] context [SEP] choice_2 [SEP]
# - [CLS] context [SEP] choice_3 [SEP]
# - [CLS] context [SEP] choice_4 [SEP]
# The model will output a single value for each input. To get the
# final decision of the model, we will run a softmax over these 4
# outputs.
if isinstance(example, classifier_utils.PaddingInputExample):
return classifier_utils.InputFeatures(
example_id=0,
input_ids=[[0] * max_seq_length] * label_size,
input_mask=[[0] * max_seq_length] * label_size,
segment_ids=[[0] * max_seq_length] * label_size,
label_id=0,
is_real_example=False)
else:
context_tokens = tokenizer.tokenize(example.context_sentence)
if example.start_ending is not None:
start_ending_tokens = tokenizer.tokenize(example.start_ending)
all_input_tokens = []
all_input_ids = []
all_input_mask = []
all_segment_ids = []
for ending in example.endings:
# We create a copy of the context tokens in order to be
# able to shrink it according to ending_tokens
context_tokens_choice = context_tokens[:]
if example.start_ending is not None:
ending_tokens = start_ending_tokens + tokenizer.tokenize(ending)
else:
ending_tokens = tokenizer.tokenize(ending)
# Truncate `ending_tokens` and `context_tokens_choice` so that the
# total length is at most `max_seq_length`, accounting for [CLS],
# [SEP], [SEP] with "- 3".
ending_tokens = ending_tokens[- max_qa_length:]
if len(context_tokens_choice) + len(ending_tokens) > max_seq_length - 3:
context_tokens_choice = context_tokens_choice[: (
max_seq_length - 3 - len(ending_tokens))]
tokens = ["[CLS]"] + context_tokens_choice + (
["[SEP]"] + ending_tokens + ["[SEP]"])
segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (
len(ending_tokens) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
all_input_tokens.append(tokens)
all_input_ids.append(input_ids)
all_input_mask.append(input_mask)
all_segment_ids.append(segment_ids)
label = example.label
if example_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("id: {}".format(example.example_id))
for choice_idx, (tokens, input_ids, input_mask, segment_ids) in \
enumerate(zip(all_input_tokens, all_input_ids, all_input_mask, all_segment_ids)):
tf.logging.info("choice: {}".format(choice_idx))
tf.logging.info("tokens: {}".format(" ".join(tokens)))
tf.logging.info(
"input_ids: {}".format(" ".join(map(str, input_ids))))
tf.logging.info(
"input_mask: {}".format(" ".join(map(str, input_mask))))
tf.logging.info(
"segment_ids: {}".format(" ".join(map(str, segment_ids))))
tf.logging.info("label: {}".format(label))
return classifier_utils.InputFeatures(
example_id=example.example_id,
input_ids=all_input_ids,
input_mask=all_input_mask,
segment_ids=all_segment_ids,
label_id=label
)
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer,
output_file, max_qa_length):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, len(label_list),
max_seq_length, tokenizer, max_qa_length)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(sum(feature.input_ids, []))
features["input_mask"] = create_int_feature(sum(feature.input_mask, []))
features["segment_ids"] = create_int_feature(sum(feature.segment_ids, []))
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def create_model(albert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings, max_seq_length,
dropout_prob, hub_module):
"""Creates a classification model."""
bsz_per_core = tf.shape(input_ids)[0]
input_ids = tf.reshape(input_ids, [bsz_per_core * num_labels, max_seq_length])
input_mask = tf.reshape(input_mask,
[bsz_per_core * num_labels, max_seq_length])
token_type_ids = tf.reshape(segment_ids,
[bsz_per_core * num_labels, max_seq_length])
(output_layer, _) = fine_tuning_utils.create_albert(
albert_config=albert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=token_type_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
use_einsum=True,
hub_module=hub_module)
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [1, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [1],
initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(
output_layer, keep_prob=1 - dropout_prob)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
logits = tf.reshape(logits, [bsz_per_core, num_labels])
probabilities = tf.nn.softmax(logits, axis=-1)
predictions = tf.argmax(probabilities, axis=-1, output_type=tf.int32)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(
labels, depth=tf.cast(num_labels, dtype=tf.int32), dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, probabilities, logits, predictions)
def model_fn_builder(albert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings, max_seq_length, dropout_prob,
hub_module):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = (mode == tf_estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, probabilities, logits, predictions) = \
create_model(albert_config, is_training, input_ids, input_mask,
segment_ids, label_ids, num_labels,
use_one_hot_embeddings, max_seq_length, dropout_prob,
hub_module)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf_estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf_estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions,
weights=is_real_example)
loss = tf.metrics.mean(
values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities,
"predictions": predictions},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for SQuAD v1.1/v2.0 datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import re
import string
import sys
from albert import fine_tuning_utils
from albert import modeling
from albert import optimization
from albert import tokenization
import numpy as np
import six
from six.moves import map
from six.moves import range
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import tpu as contrib_tpu
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index",
"start_log_prob", "end_log_prob"])
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_log_prob", "end_log_prob"])
RawResult = collections.namedtuple("RawResult",
["unique_id",
"start_log_prob",
"end_log_prob"])
RawResultV2 = collections.namedtuple(
"RawResultV2",
["unique_id", "start_top_log_probs", "start_top_index",
"end_top_log_probs", "end_top_index", "cls_logits"])
class SquadExample(object):
"""A single training/test example for simple sequence classification.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
paragraph_text,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.paragraph_text = paragraph_text
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", question_text: %s" % (
tokenization.printable_text(self.question_text))
s += ", paragraph_text: [%s]" % (" ".join(self.paragraph_text))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.start_position:
s += ", end_position: %d" % (self.end_position)
if self.start_position:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tok_start_to_orig_index,
tok_end_to_orig_index,
token_is_max_context,
tokens,
input_ids,
input_mask,
segment_ids,
paragraph_len,
p_mask=None,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tok_start_to_orig_index = tok_start_to_orig_index
self.tok_end_to_orig_index = tok_end_to_orig_index
self.token_is_max_context = token_is_max_context
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.paragraph_len = paragraph_len
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
self.p_mask = p_mask
def read_squad_examples(input_file, is_training):
"""Read a SQuAD json file into a list of SquadExample."""
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
orig_answer_text = None
is_impossible = False
if is_training:
is_impossible = qa.get("is_impossible", False)
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
start_position = answer["answer_start"]
else:
start_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
paragraph_text=paragraph_text,
orig_answer_text=orig_answer_text,
start_position=start_position,
is_impossible=is_impossible)
examples.append(example)
return examples
def _convert_index(index, pos, m=None, is_start=True):
"""Converts index."""
if index[pos] is not None:
return index[pos]
n = len(index)
rear = pos
while rear < n - 1 and index[rear] is None:
rear += 1
front = pos
while front > 0 and index[front] is None:
front -= 1
assert index[front] is not None or index[rear] is not None
if index[front] is None:
if index[rear] >= 1:
if is_start:
return 0
else:
return index[rear] - 1
return index[rear]
if index[rear] is None:
if m is not None and index[front] < m - 1:
if is_start:
return index[front] + 1
else:
return m - 1
return index[front]
if is_start:
if index[rear] > index[front] + 1:
return index[front] + 1
else:
return index[rear]
else:
if index[rear] > index[front] + 1:
return index[rear] - 1
else:
return index[front]
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
output_fn, do_lower_case):
"""Loads a data file into a list of `InputBatch`s."""
cnt_pos, cnt_neg = 0, 0
unique_id = 1000000000
max_n, max_m = 1024, 1024
f = np.zeros((max_n, max_m), dtype=np.float32)
for (example_index, example) in enumerate(examples):
if example_index % 100 == 0:
tf.logging.info("Converting {}/{} pos {} neg {}".format(
example_index, len(examples), cnt_pos, cnt_neg))
query_tokens = tokenization.encode_ids(
tokenizer.sp_model,
tokenization.preprocess_text(
example.question_text, lower=do_lower_case))
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
paragraph_text = example.paragraph_text
para_tokens = tokenization.encode_pieces(
tokenizer.sp_model,
tokenization.preprocess_text(
example.paragraph_text, lower=do_lower_case),
return_unicode=False)
chartok_to_tok_index = []
tok_start_to_chartok_index = []
tok_end_to_chartok_index = []
char_cnt = 0
para_tokens = [six.ensure_text(token, "utf-8") for token in para_tokens]
for i, token in enumerate(para_tokens):
new_token = six.ensure_text(token).replace(
tokenization.SPIECE_UNDERLINE.decode("utf-8"), " ")
chartok_to_tok_index.extend([i] * len(new_token))
tok_start_to_chartok_index.append(char_cnt)
char_cnt += len(new_token)
tok_end_to_chartok_index.append(char_cnt - 1)
tok_cat_text = "".join(para_tokens).replace(
tokenization.SPIECE_UNDERLINE.decode("utf-8"), " ")
n, m = len(paragraph_text), len(tok_cat_text)
if n > max_n or m > max_m:
max_n = max(n, max_n)
max_m = max(m, max_m)
f = np.zeros((max_n, max_m), dtype=np.float32)
g = {}
def _lcs_match(max_dist, n=n, m=m):
"""Longest-common-substring algorithm."""
f.fill(0)
g.clear()
### longest common subsequence
# f[i, j] = max(f[i - 1, j], f[i, j - 1], f[i - 1, j - 1] + match(i, j))
for i in range(n):
# note(zhiliny):
# unlike standard LCS, this is specifically optimized for the setting
# because the mismatch between sentence pieces and original text will
# be small
for j in range(i - max_dist, i + max_dist):
if j >= m or j < 0: continue
if i > 0:
g[(i, j)] = 0
f[i, j] = f[i - 1, j]
if j > 0 and f[i, j - 1] > f[i, j]:
g[(i, j)] = 1
f[i, j] = f[i, j - 1]
f_prev = f[i - 1, j - 1] if i > 0 and j > 0 else 0
if (tokenization.preprocess_text(
paragraph_text[i], lower=do_lower_case,
remove_space=False) == tok_cat_text[j]
and f_prev + 1 > f[i, j]):
g[(i, j)] = 2
f[i, j] = f_prev + 1
max_dist = abs(n - m) + 5
for _ in range(2):
_lcs_match(max_dist)
if f[n - 1, m - 1] > 0.8 * n: break
max_dist *= 2
orig_to_chartok_index = [None] * n
chartok_to_orig_index = [None] * m
i, j = n - 1, m - 1
while i >= 0 and j >= 0:
if (i, j) not in g: break
if g[(i, j)] == 2:
orig_to_chartok_index[i] = j
chartok_to_orig_index[j] = i
i, j = i - 1, j - 1
elif g[(i, j)] == 1:
j = j - 1
else:
i = i - 1
if (all(v is None for v in orig_to_chartok_index) or
f[n - 1, m - 1] < 0.8 * n):
tf.logging.info("MISMATCH DETECTED!")
continue
tok_start_to_orig_index = []
tok_end_to_orig_index = []
for i in range(len(para_tokens)):
start_chartok_pos = tok_start_to_chartok_index[i]
end_chartok_pos = tok_end_to_chartok_index[i]
start_orig_pos = _convert_index(chartok_to_orig_index, start_chartok_pos,
n, is_start=True)
end_orig_pos = _convert_index(chartok_to_orig_index, end_chartok_pos,
n, is_start=False)
tok_start_to_orig_index.append(start_orig_pos)
tok_end_to_orig_index.append(end_orig_pos)
if not is_training:
tok_start_position = tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = 0
tok_end_position = 0
if is_training and not example.is_impossible:
start_position = example.start_position
end_position = start_position + len(example.orig_answer_text) - 1
start_chartok_pos = _convert_index(orig_to_chartok_index, start_position,
is_start=True)
tok_start_position = chartok_to_tok_index[start_chartok_pos]
end_chartok_pos = _convert_index(orig_to_chartok_index, end_position,
is_start=False)
tok_end_position = chartok_to_tok_index[end_chartok_pos]
assert tok_start_position <= tok_end_position
def _piece_to_id(x):
if six.PY2 and isinstance(x, six.text_type):
x = six.ensure_binary(x, "utf-8")
return tokenizer.sp_model.PieceToId(x)
all_doc_tokens = list(map(_piece_to_id, para_tokens))
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_is_max_context = {}
segment_ids = []
p_mask = []
cur_tok_start_to_orig_index = []
cur_tok_end_to_orig_index = []
tokens.append(tokenizer.sp_model.PieceToId("[CLS]"))
segment_ids.append(0)
p_mask.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
p_mask.append(1)
tokens.append(tokenizer.sp_model.PieceToId("[SEP]"))
segment_ids.append(0)
p_mask.append(1)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
cur_tok_start_to_orig_index.append(
tok_start_to_orig_index[split_token_index])
cur_tok_end_to_orig_index.append(
tok_end_to_orig_index[split_token_index])
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
p_mask.append(0)
tokens.append(tokenizer.sp_model.PieceToId("[SEP]"))
segment_ids.append(1)
p_mask.append(1)
paragraph_len = len(tokens)
input_ids = tokens
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
p_mask.append(1)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
span_is_impossible = example.is_impossible
start_position = None
end_position = None
if is_training and not span_is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
# continue
start_position = 0
end_position = 0
span_is_impossible = True
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and span_is_impossible:
start_position = 0
end_position = 0
if example_index < 20:
tf.logging.info("*** Example ***")
tf.logging.info("unique_id: %s" % (unique_id))
tf.logging.info("example_index: %s" % (example_index))
tf.logging.info("doc_span_index: %s" % (doc_span_index))
tf.logging.info("tok_start_to_orig_index: %s" % " ".join(
[str(x) for x in cur_tok_start_to_orig_index]))
tf.logging.info("tok_end_to_orig_index: %s" % " ".join(
[str(x) for x in cur_tok_end_to_orig_index]))
tf.logging.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
]))
tf.logging.info("input_pieces: %s" % " ".join(
[tokenizer.sp_model.IdToPiece(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and span_is_impossible:
tf.logging.info("impossible example span")
if is_training and not span_is_impossible:
pieces = [tokenizer.sp_model.IdToPiece(token) for token in
tokens[start_position: (end_position + 1)]]
answer_text = tokenizer.sp_model.DecodePieces(pieces)
tf.logging.info("start_position: %d" % (start_position))
tf.logging.info("end_position: %d" % (end_position))
tf.logging.info(
"answer: %s" % (tokenization.printable_text(answer_text)))
# note(zhiliny): With multiprocessing, the example_index is actually the
# index within the current process; therefore we use example_index=None to
# avoid it being used downstream. The current code does not use the
# example_index of training data.
if is_training:
feat_example_index = None
else:
feat_example_index = example_index
feature = InputFeatures(
unique_id=unique_id,
example_index=feat_example_index,
doc_span_index=doc_span_index,
tok_start_to_orig_index=cur_tok_start_to_orig_index,
tok_end_to_orig_index=cur_tok_end_to_orig_index,
token_is_max_context=token_is_max_context,
tokens=[tokenizer.sp_model.IdToPiece(x) for x in tokens],
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
paragraph_len=paragraph_len,
start_position=start_position,
end_position=end_position,
is_impossible=span_is_impossible,
p_mask=p_mask)
# Run callback
output_fn(feature)
unique_id += 1
if span_is_impossible:
cnt_neg += 1
else:
cnt_pos += 1
tf.logging.info("Total number of instances: {} = pos {} neg {}".format(
cnt_pos + cnt_neg, cnt_pos, cnt_neg))
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple doc spans. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
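# The helper below is an illustrative sketch only (it is not called anywhere);
# it replays the 'bought' example from the comment above, assuming doc spans
# expose `.start` and `.length` attributes as they do elsewhere in this file.
def _check_is_max_context_demo():
  """Hypothetical demo: span C (start=6) beats span B (start=3) for position 7."""
  import collections as demo_collections  # local import keeps the sketch self-contained
  demo_span = demo_collections.namedtuple("DemoSpan", ["start", "length"])
  span_b = demo_span(start=3, length=5)  # "to the store and bought"
  span_c = demo_span(start=6, length=5)  # "and bought a gallon of"
  # 'bought' is token position 7: span B gives 4 left / 0 right context,
  # span C gives 1 left / 3 right, so span C is the max-context span.
  assert not _check_is_max_context([span_b, span_c], 0, 7)
  assert _check_is_max_context([span_b, span_c], 1, 7)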
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
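# Illustrative note (not used by the pipeline): subtracting the max score above
# only improves numerical stability and does not change the result. For example,
# _compute_softmax([1.0, 2.0, 3.0]) is approximately [0.090, 0.245, 0.665],
# and the returned probabilities always sum to 1.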
class FeatureWriter(object):
"""Writes InputFeature to TF example file."""
def __init__(self, filename, is_training):
self.filename = filename
self.is_training = is_training
self.num_features = 0
self._writer = tf.python_io.TFRecordWriter(filename)
def process_feature(self, feature):
"""Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
self.num_features += 1
def create_int_feature(values):
feature = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
return feature
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([feature.unique_id])
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["p_mask"] = create_int_feature(feature.p_mask)
if self.is_training:
features["start_positions"] = create_int_feature([feature.start_position])
features["end_positions"] = create_int_feature([feature.end_position])
impossible = 0
if feature.is_impossible:
impossible = 1
features["is_impossible"] = create_int_feature([impossible])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
self._writer.write(tf_example.SerializeToString())
def close(self):
self._writer.close()
def input_fn_builder(input_file, seq_length, is_training,
drop_remainder, use_tpu, bsz, is_v2):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"unique_ids": tf.FixedLenFeature([], tf.int64),
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
}
# p_mask is not required for SQuAD v1.1
if is_v2:
name_to_features["p_mask"] = tf.FixedLenFeature([seq_length], tf.int64)
if is_training:
name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64)
name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64)
name_to_features["is_impossible"] = tf.FixedLenFeature([], tf.int64)
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
if use_tpu:
batch_size = params["batch_size"]
else:
batch_size = bsz
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
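# Illustrative usage sketch (the file path below is hypothetical, and the
# non-TPU branch is assumed so `bsz` is used instead of params["batch_size"]):
#   predict_input_fn = input_fn_builder(
#       input_file="/tmp/eval.tf_record", seq_length=384, is_training=False,
#       drop_remainder=False, use_tpu=False, bsz=8, is_v2=True)
#   dataset = predict_input_fn({"batch_size": 8})  # a batched tf.data.Dataset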
def create_v1_model(albert_config, is_training, input_ids, input_mask,
segment_ids, use_one_hot_embeddings, use_einsum,
hub_module):
"""Creates a classification model."""
(_, final_hidden) = fine_tuning_utils.create_albert(
albert_config=albert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
use_einsum=use_einsum,
hub_module=hub_module)
final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
batch_size = final_hidden_shape[0]
seq_length = final_hidden_shape[1]
hidden_size = final_hidden_shape[2]
output_weights = tf.get_variable(
"cls/squad/output_weights", [2, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"cls/squad/output_bias", [2], initializer=tf.zeros_initializer())
final_hidden_matrix = tf.reshape(final_hidden,
[batch_size * seq_length, hidden_size])
logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
logits = tf.reshape(logits, [batch_size, seq_length, 2])
logits = tf.transpose(logits, [2, 0, 1])
unstacked_logits = tf.unstack(logits, axis=0)
(start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
return (start_logits, end_logits)
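# Shape walk-through for the span head above (illustrative; B = batch size,
# L = sequence length, H = hidden size):
#   final_hidden:          [B, L, H]
#   final_hidden_matrix:   [B * L, H]
#   logits after matmul:   [B * L, 2] -> reshape [B, L, 2] -> transpose [2, B, L]
#   start_logits/end_logits: each [B, L]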
def v1_model_fn_builder(albert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings, use_einsum, hub_module):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
if "unique_ids" in features:
unique_ids = features["unique_ids"]
else:
unique_ids = None
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
is_training = (mode == tf_estimator.ModeKeys.TRAIN)
(start_logits, end_logits) = create_v1_model(
albert_config=albert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
use_einsum=use_einsum,
hub_module=hub_module)
# Assign names to the logits so that we can refer to them as output tensors.
start_logits = tf.identity(start_logits, name="start_logits")
end_logits = tf.identity(end_logits, name="end_logits")
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf_estimator.ModeKeys.TRAIN:
seq_length = modeling.get_shape_list(input_ids)[1]
def compute_loss(logits, positions):
one_hot_positions = tf.one_hot(
positions, depth=seq_length, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits, axis=-1)
loss = -tf.reduce_mean(
tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
return loss
start_positions = features["start_positions"]
end_positions = features["end_positions"]
start_loss = compute_loss(start_logits, start_positions)
end_loss = compute_loss(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2.0
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf_estimator.ModeKeys.PREDICT:
predictions = {
"start_log_prob": start_logits,
"end_log_prob": end_logits,
}
if unique_ids is not None:
predictions["unique_ids"] = unique_ids
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
else:
raise ValueError(
"Only TRAIN and PREDICT modes are supported: %s" % (mode))
return output_spec
return model_fn
def accumulate_predictions_v1(result_dict, all_examples, all_features,
all_results, n_best_size, max_answer_length):
"""accumulate predictions for each positions in a dictionary."""
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
if example_index not in result_dict:
result_dict[example_index] = {}
features = example_index_to_features[example_index]
prelim_predictions = []
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
if feature.unique_id not in result_dict[example_index]:
result_dict[example_index][feature.unique_id] = {}
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_log_prob, n_best_size)
end_indexes = _get_best_indexes(result.end_log_prob, n_best_size)
for start_index in start_indexes:
for end_index in end_indexes:
doc_offset = feature.tokens.index("[SEP]") + 1
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index - doc_offset >= len(feature.tok_start_to_orig_index):
continue
if end_index - doc_offset >= len(feature.tok_end_to_orig_index):
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
start_log_prob = result.start_log_prob[start_index]
end_log_prob = result.end_log_prob[end_index]
start_idx = start_index - doc_offset
end_idx = end_index - doc_offset
if (start_idx, end_idx) not in result_dict[example_index][feature.unique_id]:
result_dict[example_index][feature.unique_id][(start_idx, end_idx)] = []
result_dict[example_index][feature.unique_id][(start_idx, end_idx)].append((start_log_prob, end_log_prob))
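# Resulting structure, for reference: result_dict maps
#   example_index -> feature.unique_id -> (start_idx, end_idx)
#     -> list of (start_log_prob, end_log_prob) pairs,
# which write_predictions_v1 below averages per candidate span before ranking.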
def write_predictions_v1(result_dict, all_examples, all_features,
all_results, n_best_size, max_answer_length,
output_prediction_file, output_nbest_file):
"""Write final predictions to the json file and log-odds of null if needed."""
tf.logging.info("Writing predictions to: %s" % (output_prediction_file))
tf.logging.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
for ((start_idx, end_idx), logprobs) in \
result_dict[example_index][feature.unique_id].items():
start_log_prob = 0
end_log_prob = 0
for logprob in logprobs:
start_log_prob += logprob[0]
end_log_prob += logprob[1]
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_idx,
end_index=end_idx,
start_log_prob=start_log_prob / len(logprobs),
end_log_prob=end_log_prob / len(logprobs)))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_log_prob + x.end_log_prob),
reverse=True)
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index >= 0: # this is a non-null prediction
tok_start_to_orig_index = feature.tok_start_to_orig_index
tok_end_to_orig_index = feature.tok_end_to_orig_index
start_orig_pos = tok_start_to_orig_index[pred.start_index]
end_orig_pos = tok_end_to_orig_index[pred.end_index]
paragraph_text = example.paragraph_text
final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_log_prob=pred.start_log_prob,
end_log_prob=pred.end_log_prob))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_log_prob=0.0, end_log_prob=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_log_prob + entry.end_log_prob)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_log_prob"] = entry.start_log_prob
output["end_log_prob"] = entry.end_log_prob
nbest_json.append(output)
assert len(nbest_json) >= 1
all_predictions[example.qas_id] = nbest_json[0]["text"]
all_nbest_json[example.qas_id] = nbest_json
with tf.gfile.GFile(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with tf.gfile.GFile(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
return all_predictions
####### following are from official SQuAD v1.1 evaluation scripts
def normalize_answer_v1(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer_v1(prediction).split()
ground_truth_tokens = normalize_answer_v1(ground_truth).split()
common = (
collections.Counter(prediction_tokens)
& collections.Counter(ground_truth_tokens))
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
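# Worked example (illustrative): f1_score("a cat sat", "the cat sat on the mat")
# normalizes the strings to ["cat", "sat"] and ["cat", "sat", "on", "mat"], so
# num_same = 2, precision = 1.0, recall = 0.5, and F1 = 2/3.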
def exact_match_score(prediction, ground_truth):
return (normalize_answer_v1(prediction) == normalize_answer_v1(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate_v1(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article["paragraphs"]:
for qa in paragraph["qas"]:
total += 1
if qa["id"] not in predictions:
message = ("Unanswered question " + six.ensure_str(qa["id"]) +
" will receive score 0.")
print(message, file=sys.stderr)
continue
ground_truths = [x["text"] for x in qa["answers"]]
# ground_truths = list(map(lambda x: x["text"], qa["answers"]))
prediction = predictions[qa["id"]]
exact_match += metric_max_over_ground_truths(exact_match_score,
prediction, ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {"exact_match": exact_match, "f1": f1}
####### above are from official SQuAD v1.1 evaluation scripts
####### following are from official SQuAD v2.0 evaluation scripts
def make_qid_to_has_ans(dataset):
qid_to_has_ans = {}
for article in dataset:
for p in article['paragraphs']:
for qa in p['qas']:
qid_to_has_ans[qa['id']] = bool(qa['answers'])
return qid_to_has_ans
def normalize_answer_v2(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
if not s: return []
return normalize_answer_v2(s).split()
def compute_exact(a_gold, a_pred):
return int(normalize_answer_v2(a_gold) == normalize_answer_v2(a_pred))
def compute_f1(a_gold, a_pred):
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def get_raw_scores(dataset, preds):
exact_scores = {}
f1_scores = {}
for article in dataset:
for p in article['paragraphs']:
for qa in p['qas']:
qid = qa['id']
gold_answers = [a['text'] for a in qa['answers']
if normalize_answer_v2(a['text'])]
if not gold_answers:
# For unanswerable questions, the only correct answer is the empty string
gold_answers = ['']
if qid not in preds:
print('Missing prediction for %s' % qid)
continue
a_pred = preds[qid]
# Take max over all gold answers
exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
new_scores = {}
for qid, s in scores.items():
pred_na = na_probs[qid] > na_prob_thresh
if pred_na:
new_scores[qid] = float(not qid_to_has_ans[qid])
else:
new_scores[qid] = s
return new_scores
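# Illustrative behavior: when na_probs[qid] exceeds na_prob_thresh the model is
# treated as predicting "no answer", so the score becomes 1.0 only if the
# question truly has no answer (and 0.0 otherwise); below the threshold the
# original span score `s` is kept unchanged.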
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
if not qid_list:
total = len(exact_scores)
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores.values()) / total),
('f1', 100.0 * sum(f1_scores.values()) / total),
('total', total),
])
else:
total = len(qid_list)
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
('total', total),
])
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
for i, qid in enumerate(qid_list):
if qid not in scores: continue
if qid_to_has_ans[qid]:
diff = scores[qid]
else:
if preds[qid]:
diff = -1
else:
diff = 0
cur_score += diff
if cur_score > best_score:
best_score = cur_score
best_thresh = na_probs[qid]
return 100.0 * best_score / len(scores), best_thresh
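# Sketch of the sweep above: questions are visited in order of increasing null
# probability. The starting score assumes every question is answered with
# "no answer" (correct only for the unanswerable ones); each step then switches
# one more question to its actual prediction, adding its span score if the
# question is answerable and subtracting 1 if a non-empty prediction was made
# on an unanswerable one. The threshold returned is the null probability at
# which this running score peaks.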
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
main_eval['best_exact'] = best_exact
main_eval['best_exact_thresh'] = exact_thresh
main_eval['best_f1'] = best_f1
main_eval['best_f1_thresh'] = f1_thresh
def merge_eval(main_eval, new_eval, prefix):
for k in new_eval:
main_eval['%s_%s' % (prefix, k)] = new_eval[k]
####### above are from official SQuAD v2.0 evaluation scripts
def accumulate_predictions_v2(result_dict, cls_dict, all_examples,
all_features, all_results, n_best_size,
max_answer_length, start_n_top, end_n_top):
"""accumulate predictions for each positions in a dictionary."""
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
if example_index not in result_dict:
result_dict[example_index] = {}
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
for (feature_index, feature) in enumerate(features):
if feature.unique_id not in result_dict[example_index]:
result_dict[example_index][feature.unique_id] = {}
result = unique_id_to_result[feature.unique_id]
cur_null_score = result.cls_logits
# if we could have irrelevant answers, get the min score of irrelevant
score_null = min(score_null, cur_null_score)
doc_offset = feature.tokens.index("[SEP]") + 1
for i in range(start_n_top):
for j in range(end_n_top):
start_log_prob = result.start_top_log_probs[i]
start_index = result.start_top_index[i]
j_index = i * end_n_top + j
end_log_prob = result.end_top_log_probs[j_index]
end_index = result.end_top_index[j_index]
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index - doc_offset >= len(feature.tok_start_to_orig_index):
continue
if start_index - doc_offset < 0:
continue
if end_index - doc_offset >= len(feature.tok_end_to_orig_index):
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
start_idx = start_index - doc_offset
end_idx = end_index - doc_offset
if (start_idx, end_idx) not in result_dict[example_index][feature.unique_id]:
result_dict[example_index][feature.unique_id][(start_idx, end_idx)] = []
result_dict[example_index][feature.unique_id][(start_idx, end_idx)].append((start_log_prob, end_log_prob))
if example_index not in cls_dict:
cls_dict[example_index] = []
cls_dict[example_index].append(score_null)
def write_predictions_v2(result_dict, cls_dict, all_examples, all_features,
all_results, n_best_size, max_answer_length,
output_prediction_file,
output_nbest_file, output_null_log_odds_file,
null_score_diff_threshold):
"""Write final predictions to the json file and log-odds of null if needed."""
tf.logging.info("Writing predictions to: %s" % (output_prediction_file))
tf.logging.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
# score_null = 1000000 # large and positive
for (feature_index, feature) in enumerate(features):
for ((start_idx, end_idx), logprobs) in \
result_dict[example_index][feature.unique_id].items():
start_log_prob = 0
end_log_prob = 0
for logprob in logprobs:
start_log_prob += logprob[0]
end_log_prob += logprob[1]
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_idx,
end_index=end_idx,
start_log_prob=start_log_prob / len(logprobs),
end_log_prob=end_log_prob / len(logprobs)))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_log_prob + x.end_log_prob),
reverse=True)
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
tok_start_to_orig_index = feature.tok_start_to_orig_index
tok_end_to_orig_index = feature.tok_end_to_orig_index
start_orig_pos = tok_start_to_orig_index[pred.start_index]
end_orig_pos = tok_end_to_orig_index[pred.end_index]
paragraph_text = example.paragraph_text
final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_log_prob=pred.start_log_prob,
end_log_prob=pred.end_log_prob))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(
text="",
start_log_prob=-1e6,
end_log_prob=-1e6))
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_log_prob + entry.end_log_prob)
if not best_non_null_entry:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_log_prob"] = entry.start_log_prob
output["end_log_prob"] = entry.end_log_prob
nbest_json.append(output)
assert len(nbest_json) >= 1
assert best_non_null_entry is not None
score_diff = sum(cls_dict[example_index]) / len(cls_dict[example_index])
scores_diff_json[example.qas_id] = score_diff
# predict the empty (null) answer only when a null threshold is provided and exceeded
if null_score_diff_threshold is None or score_diff < null_score_diff_threshold:
all_predictions[example.qas_id] = best_non_null_entry.text
else:
all_predictions[example.qas_id] = ""
all_nbest_json[example.qas_id] = nbest_json
assert len(nbest_json) >= 1
with tf.gfile.GFile(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with tf.gfile.GFile(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
with tf.gfile.GFile(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions, scores_diff_json
def create_v2_model(albert_config, is_training, input_ids, input_mask,
segment_ids, use_one_hot_embeddings, features,
max_seq_length, start_n_top, end_n_top, dropout_prob,
hub_module):
"""Creates a classification model."""
(_, output) = fine_tuning_utils.create_albert(
albert_config=albert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
use_einsum=True,
hub_module=hub_module)
bsz = tf.shape(output)[0]
return_dict = {}
output = tf.transpose(output, [1, 0, 2])
# invalid position mask such as query and special symbols (PAD, SEP, CLS)
p_mask = tf.cast(features["p_mask"], dtype=tf.float32)
# logit of the start position
with tf.variable_scope("start_logits"):
start_logits = tf.layers.dense(
output,
1,
kernel_initializer=modeling.create_initializer(
albert_config.initializer_range))
start_logits = tf.transpose(tf.squeeze(start_logits, -1), [1, 0])
start_logits_masked = start_logits * (1 - p_mask) - 1e30 * p_mask
start_log_probs = tf.nn.log_softmax(start_logits_masked, -1)
# logit of the end position
with tf.variable_scope("end_logits"):
if is_training:
# during training, compute the end logits based on the
# ground truth of the start position
start_positions = tf.reshape(features["start_positions"], [-1])
start_index = tf.one_hot(start_positions, depth=max_seq_length, axis=-1,
dtype=tf.float32)
start_features = tf.einsum("lbh,bl->bh", output, start_index)
start_features = tf.tile(start_features[None], [max_seq_length, 1, 1])
end_logits = tf.layers.dense(
tf.concat([output, start_features], axis=-1),
albert_config.hidden_size,
kernel_initializer=modeling.create_initializer(
albert_config.initializer_range),
activation=tf.tanh,
name="dense_0")
end_logits = contrib_layers.layer_norm(end_logits, begin_norm_axis=-1)
end_logits = tf.layers.dense(
end_logits,
1,
kernel_initializer=modeling.create_initializer(
albert_config.initializer_range),
name="dense_1")
end_logits = tf.transpose(tf.squeeze(end_logits, -1), [1, 0])
end_logits_masked = end_logits * (1 - p_mask) - 1e30 * p_mask
end_log_probs = tf.nn.log_softmax(end_logits_masked, -1)
else:
# during inference, compute the end logits based on beam search
start_top_log_probs, start_top_index = tf.nn.top_k(
start_log_probs, k=start_n_top)
start_index = tf.one_hot(start_top_index,
depth=max_seq_length, axis=-1, dtype=tf.float32)
start_features = tf.einsum("lbh,bkl->bkh", output, start_index)
end_input = tf.tile(output[:, :, None],
[1, 1, start_n_top, 1])
start_features = tf.tile(start_features[None],
[max_seq_length, 1, 1, 1])
end_input = tf.concat([end_input, start_features], axis=-1)
end_logits = tf.layers.dense(
end_input,
albert_config.hidden_size,
kernel_initializer=modeling.create_initializer(
albert_config.initializer_range),
activation=tf.tanh,
name="dense_0")
end_logits = contrib_layers.layer_norm(end_logits, begin_norm_axis=-1)
end_logits = tf.layers.dense(
end_logits,
1,
kernel_initializer=modeling.create_initializer(
albert_config.initializer_range),
name="dense_1")
end_logits = tf.reshape(end_logits, [max_seq_length, -1, start_n_top])
end_logits = tf.transpose(end_logits, [1, 2, 0])
end_logits_masked = end_logits * (
1 - p_mask[:, None]) - 1e30 * p_mask[:, None]
end_log_probs = tf.nn.log_softmax(end_logits_masked, -1)
end_top_log_probs, end_top_index = tf.nn.top_k(
end_log_probs, k=end_n_top)
end_top_log_probs = tf.reshape(
end_top_log_probs,
[-1, start_n_top * end_n_top])
end_top_index = tf.reshape(
end_top_index,
[-1, start_n_top * end_n_top])
if is_training:
return_dict["start_log_probs"] = start_log_probs
return_dict["end_log_probs"] = end_log_probs
else:
return_dict["start_top_log_probs"] = start_top_log_probs
return_dict["start_top_index"] = start_top_index
return_dict["end_top_log_probs"] = end_top_log_probs
return_dict["end_top_index"] = end_top_index
# an additional layer to predict answerability
with tf.variable_scope("answer_class"):
# get the representation of CLS
cls_index = tf.one_hot(tf.zeros([bsz], dtype=tf.int32),
max_seq_length,
axis=-1, dtype=tf.float32)
cls_feature = tf.einsum("lbh,bl->bh", output, cls_index)
# get the representation of START
start_p = tf.nn.softmax(start_logits_masked, axis=-1,
name="softmax_start")
start_feature = tf.einsum("lbh,bl->bh", output, start_p)
# note(zhiliny): no dependency on end_feature so that we can obtain
# one single `cls_logits` for each sample
ans_feature = tf.concat([start_feature, cls_feature], -1)
ans_feature = tf.layers.dense(
ans_feature,
albert_config.hidden_size,
activation=tf.tanh,
kernel_initializer=modeling.create_initializer(
albert_config.initializer_range),
name="dense_0")
ans_feature = tf.layers.dropout(ans_feature, dropout_prob,
training=is_training)
cls_logits = tf.layers.dense(
ans_feature,
1,
kernel_initializer=modeling.create_initializer(
albert_config.initializer_range),
name="dense_1",
use_bias=False)
cls_logits = tf.squeeze(cls_logits, -1)
return_dict["cls_logits"] = cls_logits
return return_dict
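# Shape notes for the beam-search branch above (illustrative; B = batch size):
#   start_top_log_probs, start_top_index: [B, start_n_top]
#   end_top_log_probs, end_top_index:     [B, start_n_top * end_n_top]
#   cls_logits:                           [B]
# accumulate_predictions_v2 therefore indexes the flattened end beams with
# j_index = i * end_n_top + j.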
def v2_model_fn_builder(albert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings, max_seq_length, start_n_top,
end_n_top, dropout_prob, hub_module):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
# unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
is_training = (mode == tf_estimator.ModeKeys.TRAIN)
outputs = create_v2_model(
albert_config=albert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
features=features,
max_seq_length=max_seq_length,
start_n_top=start_n_top,
end_n_top=end_n_top,
dropout_prob=dropout_prob,
hub_module=hub_module)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf_estimator.ModeKeys.TRAIN:
seq_length = modeling.get_shape_list(input_ids)[1]
def compute_loss(log_probs, positions):
one_hot_positions = tf.one_hot(
positions, depth=seq_length, dtype=tf.float32)
loss = - tf.reduce_sum(one_hot_positions * log_probs, axis=-1)
loss = tf.reduce_mean(loss)
return loss
start_loss = compute_loss(
outputs["start_log_probs"], features["start_positions"])
end_loss = compute_loss(
outputs["end_log_probs"], features["end_positions"])
total_loss = (start_loss + end_loss) * 0.5
cls_logits = outputs["cls_logits"]
is_impossible = tf.reshape(features["is_impossible"], [-1])
regression_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.cast(is_impossible, dtype=tf.float32), logits=cls_logits)
regression_loss = tf.reduce_mean(regression_loss)
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is
# comparable to start_loss and end_loss
total_loss += regression_loss * 0.5
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf_estimator.ModeKeys.PREDICT:
predictions = {
"unique_ids": features["unique_ids"],
"start_top_index": outputs["start_top_index"],
"start_top_log_probs": outputs["start_top_log_probs"],
"end_top_index": outputs["end_top_index"],
"end_top_log_probs": outputs["end_top_log_probs"],
"cls_logits": outputs["cls_logits"]
}
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
else:
raise ValueError(
"Only TRAIN and PREDICT modes are supported: %s" % (mode))
return output_spec
return model_fn
def evaluate_v2(result_dict, cls_dict, prediction_json, eval_examples,
eval_features, all_results, n_best_size, max_answer_length,
output_prediction_file, output_nbest_file,
output_null_log_odds_file):
null_score_diff_threshold = None
predictions, na_probs = write_predictions_v2(
result_dict, cls_dict, eval_examples, eval_features,
all_results, n_best_size, max_answer_length,
output_prediction_file, output_nbest_file,
output_null_log_odds_file, null_score_diff_threshold)
na_prob_thresh = 1.0 # default value taken from the eval script
qid_to_has_ans = make_qid_to_has_ans(prediction_json) # maps qid to True/False
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact_raw, f1_raw = get_raw_scores(prediction_json, predictions)
exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,
na_prob_thresh)
f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,
na_prob_thresh)
out_eval = make_eval_dict(exact_thresh, f1_thresh)
find_all_best_thresh(out_eval, predictions, exact_raw, f1_raw, na_probs, qid_to_has_ans)
null_score_diff_threshold = out_eval["best_f1_thresh"]
predictions, na_probs = write_predictions_v2(
result_dict, cls_dict,eval_examples, eval_features,
all_results, n_best_size, max_answer_length,
output_prediction_file, output_nbest_file,
output_null_log_odds_file, null_score_diff_threshold)
qid_to_has_ans = make_qid_to_has_ans(prediction_json) # maps qid to True/False
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact_raw, f1_raw = get_raw_scores(prediction_json, predictions)
exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,
na_prob_thresh)
f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,
na_prob_thresh)
out_eval = make_eval_dict(exact_thresh, f1_thresh)
out_eval["null_score_diff_threshold"] = null_score_diff_threshold
return out_eval
|
#!/usr/bin/python
"""
This is a GBDT implementation based on the original implementation by
Seong-Jin Kim with some modifications.
File name: tinygbt.py
Author: Seong-Jin Kim
EMail: [email protected]
Date created: 7/15/2018
Reference:
[1] T. Chen and C. Guestrin. XGBoost: A Scalable Tree Boosting System. 2016.
[2] G. Ke et al. LightGBM: A Highly Efficient Gradient Boosting Decision
Tree. 2017.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
import functional as F
NUM_QUANTILE = 100
np.random.seed(0)
class Dataset(object):
def __init__(self, x, y):
self.x = x
self.y = y
class TreeNode(object):
"""Tree node structure."""
def __init__(self):
self.is_leaf = False
self.left_child = None
self.right_child = None
self.split_feature_id = None
self.split_val = None
self.weight = None
def _calc_split_gain(self, gradient, h, gradient_l, h_l, gradient_r, h_r,
regularizer_const):
"""Loss reduction (Refer to Eq7 of Reference[1])."""
def calc_term(gradient, h):
return np.square(gradient) / (h + regularizer_const)
return calc_term(gradient_l, h_l) + calc_term(gradient_r, h_r) - calc_term(
gradient, h)
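# Illustrative formula for the gain above (lambda = regularizer_const):
#   gain = G_L^2 / (H_L + lambda) + G_R^2 / (H_R + lambda) - G^2 / (H + lambda)
# i.e. Eq. (7) of Reference [1] up to the constant 1/2 factor and the gamma
# complexity penalty, both of which are omitted here.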
def _calc_leaf_weight(self, grad, hessian, regularizer_const):
"""Calculate the optimal weight of this leaf node."""
return np.sum(grad) / (np.sum(hessian) + regularizer_const)
def find_instance_for_quantile_boundaries(self, sorted_values, num_quantile):
"""Returns the ids of instances at quantile boundary.
Args:
sorted_values: One feature's sorted values.
num_quantile: number of quantile.
Returns:
the ids of instances that fall below quantile boundary.
"""
n = len(sorted_values)
linear_space = np.linspace(-1, n - 1, num_quantile + 1, dtype=int)[1:]
quantile_id = np.zeros(num_quantile, dtype=int)
i, j = 0, 0
for j in range(num_quantile):
while i < len(sorted_values) and (sorted_values[i] <=
sorted_values[linear_space[j]]):
i = i + 1
quantile_id[j] = i - 1
quantile_id = np.unique(quantile_id)
quantile_id = np.append([-1], quantile_id)
return quantile_id
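# Worked example (mirrors the accompanying unit test): for
# sorted_values = [0, 0, 0, 1, 1, 2, 3, 3] and num_quantile = 4 the result is
# [-1, 2, 4, 5, 7]: the last index of each distinct value hit by a quantile
# cut, prefixed with -1 so that slices of the form
# (1 + quantile_id[j]) : (1 + quantile_id[j + 1]) partition the sorted data.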
def build(self, instances, grad, hessian, depth, params):
"""Exact greedy alogirithm for split finding."""
assert instances.shape[0] == len(grad) == len(hessian)
if depth > params.max_depth:
self.is_leaf = True
self.weight = (
self._calc_leaf_weight(grad, hessian, params.regularizer_const))
return
gradient = np.sum(grad)
h = np.sum(hessian)
best_gain = 0.
best_feature_id = None
best_val = 0.
best_left_instance_ids = None
best_right_instance_ids = None
for feature_id in range(instances.shape[1]):
gradient_l, h_l = 0., 0.
sorted_values = np.sort(instances[:, feature_id])
sorted_instance_ids = instances[:, feature_id].argsort()
quantile_id = self.find_instance_for_quantile_boundaries(
sorted_values, NUM_QUANTILE)
num_quantile_id = len(quantile_id)
for j in range(0, num_quantile_id - 1):
gradient_l += np.sum(
grad[sorted_instance_ids[(1 +
quantile_id[j]):(1 +
quantile_id[j + 1])]])
h_l += np.sum(
hessian[sorted_instance_ids[(1 +
quantile_id[j]):(1 +
quantile_id[j + 1])]])
gradient_r = gradient - gradient_l
h_r = h - h_l
current_gain = (
self._calc_split_gain(gradient, h, gradient_l, h_l, gradient_r, h_r,
params.regularizer_const))
if current_gain > best_gain:
best_gain = current_gain
best_feature_id = feature_id
best_val = instances[sorted_instance_ids[quantile_id[j +
1]]][feature_id]
best_left_instance_ids = sorted_instance_ids[:quantile_id[j + 1] + 1]
best_right_instance_ids = sorted_instance_ids[quantile_id[j + 1] + 1:]
if best_gain < params.min_split_gain:
self.is_leaf = True
self.weight = self._calc_leaf_weight(grad, hessian,
params.regularizer_const)
else:
self.split_feature_id = best_feature_id
self.split_val = best_val
self.left_child = TreeNode()
self.left_child.build(instances[best_left_instance_ids],
grad[best_left_instance_ids],
hessian[best_left_instance_ids], depth + 1, params)
self.right_child = TreeNode()
self.right_child.build(instances[best_right_instance_ids],
grad[best_right_instance_ids],
hessian[best_right_instance_ids], depth + 1,
params)
def predict(self, x):
if self.is_leaf:
return self.weight
else:
if x[self.split_feature_id] <= self.split_val:
return self.left_child.predict(x)
else:
return self.right_child.predict(x)
class Tree(object):
"""Classification and regression tree."""
def __init__(self):
self.root = None
def build(self, instances, grad, hessian, params):
assert len(instances) == len(grad) == len(hessian)
self.root = TreeNode()
current_depth = 0
self.root.build(instances, grad, hessian, current_depth, params)
def predict(self, x):
return self.root.predict(x)
class TreeEnsemble(object):
"""Ensemble of classification and regression tree."""
def __init__(self, models=None, coefficients=np.array([])):
if not models:
self.models = []
else:
self.models = models
self.coefficients = coefficients
def __rmul__(self, multiplier):
new_models = copy.copy(self.models)
new_coefficients = self.coefficients * multiplier
return TreeEnsemble(new_models, new_coefficients)
def __add__(self, other):
total_models = self.models + other.models
total_coefficients = np.append(self.coefficients, other.coefficients)
return TreeEnsemble(total_models, total_coefficients)
def __len__(self):
assert len(self.models) == len(self.coefficients)
return len(self.models)
def append(self, learner, multiplier=1):
self.models.append(learner)
self.coefficients = np.append(self.coefficients, multiplier)
def add(self, learner, multiplier):
new_models = copy.copy(self.models)
new_models.append(learner)
new_coefficients = np.append(self.coefficients, multiplier)
return TreeEnsemble(new_models, new_coefficients)
def predict(self, x, num_trees=None):
if not self.models:
return 0
else:
if num_trees is None:
num_trees = len(self.models)
return np.sum(self.coefficients[i] * self.models[i].predict(x)
for i in range(num_trees))
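# Illustrative note: an ensemble prediction is the coefficient-weighted sum of
# its trees, so for an ensemble e with trees t0, t1 and coefficients [c0, c1],
# e.predict(x) == c0 * t0.predict(x) + c1 * t1.predict(x); passing num_trees=1
# would use only the first tree.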
class BoostedTrees(BaseEstimator, ClassifierMixin):
"""Class of boosted trees.
This is a super-class used in GBT, AGBT, and AGBT_B.
"""
def __init__(self,
params,
max_depth=None,
learning_rate=None,
min_split_gain=None,
z_shrinkage_parameter=None,
num_trees=None):
self.tree_ensemble = TreeEnsemble()
self.params = params
self.best_iteration = 0
if params.loss == "L2Loss":
self.loss = F.L2Loss(params.use_hessian)
elif params.loss == "LogisticLoss":
self.loss = F.LogisticLoss(params.use_hessian)
def _calc_training_data_output(self, train_set, tree_ensemble):
if not tree_ensemble.models:
return np.zeros(len(train_set.y))
x = train_set.x
output = np.zeros(len(x))
for i in range(len(x)):
output[i] = tree_ensemble.predict(x[i])
return output
def _calc_gradient_and_hessian(self, train_set, output):
return (self.loss.negative_gradient(output, train_set.y),
self.loss.hessian(output, train_set.y))
def _calc_loss(self, tree_ensemble, data_set):
"""For now, only L2 loss and Logistic loss are supported."""
predict = []
for x in data_set.x:
predict.append(tree_ensemble.predict(x))
return np.mean(self.loss.loss_value(np.array(predict), data_set.y))
def _build_learner(self, train_set, grad, hessian):
learner = Tree()
learner.build(train_set.x, grad, hessian, self.params)
return learner
def _update_output(self, data_set, learner, coefficient, output):
x = data_set.x
new_output = np.copy(output)
for i in range(len(x)):
new_output[i] += coefficient * learner.predict(x[i])
return new_output
def _calc_loss_from_output(self, data_set, output):
return np.mean(self.loss.loss_value(output, data_set.y))
def _calc_training_data_scores(self, train_set, ensemble):
if not ensemble.models:
return np.zeros(len(train_set.y))
x = train_set.x
scores = np.zeros(len(x))
for i in range(len(x)):
scores[i] = ensemble.predict(x[i])
return scores
def fit(self, x, y=None):
train_set = Dataset(x, y)
self.train(train_set)
def score(self, x, y):
return -self._calc_loss(self.tree_ensemble, Dataset(x, y))
def get_params(self, deep=True):
return {
"params": self.params,
"max_depth": self.params.max_depth,
"learning_rate": self.params.learning_rate,
"min_split_gain": self.params.min_split_gain,
"z_shrinkage_parameter": self.params.z_shrinkage_parameter,
"num_trees": self.params.num_trees
}
def set_params(self, **parameters):
print(parameters)
for key, value in parameters.items():
if key == "max_depth":
self.params = self.params._replace(max_depth=value)
if key == "learning_rate":
self.params = self.params._replace(learning_rate=value)
if key == "min_split_gain":
self.params = self.params._replace(min_split_gain=value)
if key == "z_shrinkage_parameter":
self.params = self.params._replace(z_shrinkage_parameter=value)
if key == "num_trees":
self.params = self.params._replace(num_trees=value)
return self
|
# Copyright 2020 The Google Authors. All Rights Reserved.
#
# Licensed under the MIT License (the "License");
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""Tests for tinygbt.functional."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import functional as F
from absl.testing import absltest
class LogisticLoss(absltest.TestCase):
def setUp(self):
super(LogisticLoss, self).setUp()
self.loss = F.LogisticLoss(True)
def testLossValue(self):
prediction = np.array([1, -1])
labels = np.array([1, 1])
expected_output = np.array([np.log(1 + np.exp(-1)), np.log(1 + np.exp(1))])
self.assertSameElements(expected_output,
self.loss.loss_value(prediction, labels))
def testNegativeGradient(self):
prediction = np.array([1, -1])
labels = np.array([1, 1])
expected_output = np.array(
[np.exp(-1) / (1 + np.exp(-1)),
np.exp(1) / (1 + np.exp(1))])
self.assertSameElements(expected_output,
self.loss.negative_gradient(prediction, labels))
def testHessian(self):
prediction = np.array([1, -1])
labels = np.array([1, 1])
expected_output = np.array(
[np.exp(-1) / (1 + np.exp(-1))**2,
np.exp(1) / (1 + np.exp(1))**2])
self.assertSameElements(expected_output,
self.loss.hessian(prediction, labels))
if __name__ == '__main__':
absltest.main()
|
# Copyright 2020 The Google Authors. All Rights Reserved.
#
# Licensed under the MIT License (the "License");
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""This is a simple implementation of accelerated gradient boosted tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import sys
import time
import numpy as np
import tree as Tree
NUM_QUANTILE = 100
LARGE_NUMBER = sys.maxsize
np.random.seed(0)
np.set_printoptions(threshold=np.inf)
def combine_f_with_h(ensemble_f, ensemble_h):
assert len(ensemble_f) == 2 * len(ensemble_h) - 1
new_models = copy.copy(ensemble_f.models)
new_models.append(ensemble_h.models[-1])
new_coefficients = np.append(ensemble_f.coefficients, 0)
new_coefficients[1:len(new_coefficients):2] = (
new_coefficients[1:len(new_coefficients):2] + ensemble_h.coefficients)
return Tree.TreeEnsemble(new_models, new_coefficients)
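# Worked example for the merge above (illustrative): with
#   ensemble_f.coefficients = [c0, c1, c2]  (length 3) and
#   ensemble_h.coefficients = [d0, d1]      (length 2, so 3 == 2 * 2 - 1 holds),
# the newest h-learner is appended and the h coefficients are added into the
# odd-indexed slots, giving coefficients [c0, c1 + d0, c2, d1].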
class AGBT(Tree.BoostedTrees):
"""Accelerated Gradient Boosted Trees.
Typical usage example:
method = AGBT(params)
method.train(train_data, valid_set=test_data)
"""
def train(self, train_set, valid_set=None, early_stopping_rounds=5):
ensemble_f = Tree.TreeEnsemble()
ensemble_g = Tree.TreeEnsemble()
ensemble_h = Tree.TreeEnsemble()
learning_rate = self.params.learning_rate
z_shrinkage_parameter = self.params.z_shrinkage_parameter
best_iteration = 0
best_val_loss = LARGE_NUMBER
train_start_time = time.time()
n = len(train_set.y)
corrected_grad = np.zeros(n) # corresponds to c^m in the paper
learner_h_output = np.zeros(
n) # corresponds to b_{\tau^2}^m(X) in the paper
train_losses = np.array([])
val_losses = np.array([])
train_f_output = np.zeros(len(train_set.y))
train_g_output = np.zeros(len(train_set.y))
train_h_output = np.zeros(len(train_set.y))
if valid_set:
val_f_output = np.zeros(len(valid_set.y))
val_g_output = np.zeros(len(valid_set.y))
val_h_output = np.zeros(len(valid_set.y))
for iter_cnt in range(self.params.num_trees):
iter_start_time = time.time()
theta = 2/(iter_cnt+2)
if ensemble_f.models:
ensemble_g = combine_f_with_h((1-theta)*ensemble_f, theta*ensemble_h)
train_g_output = (1-theta) * train_f_output + theta * train_h_output
grad, hessian = self._calc_gradient_and_hessian(train_set, train_g_output)
learner_f = self._build_learner(train_set, grad, hessian)
ensemble_f = ensemble_g.add(learner_f, learning_rate)
corrected_grad = (
grad - (iter_cnt+1)/(iter_cnt+2)*(corrected_grad - learner_h_output))
learner_h = self._build_learner(train_set, corrected_grad, hessian)
learner_h_output = self._calc_training_data_scores(
train_set, Tree.TreeEnsemble([learner_h], np.array([1])))
train_f_output = self._update_output(
train_set, learner_f, learning_rate, train_g_output)
train_h_output = self._update_output(
train_set, learner_h,
z_shrinkage_parameter / theta * learning_rate, train_h_output)
ensemble_h.append(learner_h, z_shrinkage_parameter/theta*learning_rate)
train_loss = self._calc_loss_from_output(train_set, train_f_output)
train_losses = np.append(train_losses, train_loss)
if valid_set:
val_g_output = (1-theta) * val_f_output + theta * val_h_output
val_f_output = self._update_output(
valid_set, learner_f, learning_rate, val_g_output)
val_h_output = self._update_output(
valid_set, learner_h,
z_shrinkage_parameter / theta * learning_rate, val_h_output)
val_loss = self._calc_loss_from_output(valid_set, val_f_output)
val_losses = np.append(val_losses, val_loss)
val_loss_str = '{:.10f}'.format(val_loss) if val_loss else '-'
else:
val_loss_str = ''
print(
"Iter {:>3}, Train's loss: {:.10f}, Valid's loss: {}, Elapsed: {:.2f} secs"
.format(iter_cnt, train_loss, val_loss_str,
time.time() - iter_start_time))
if valid_set and val_loss is not None and val_loss < best_val_loss:
best_val_loss = val_loss
best_iteration = iter_cnt
if iter_cnt - best_iteration >= self.params.early_stopping_rounds:
print('Early stopping, best iteration is:')
print("Iter {:>3}, Train's loss: {:.10f}".format(
best_iteration, best_val_loss))
break
self.tree_ensemble = ensemble_f
self.best_iteration = best_iteration
print('Training finished. Elapsed: {:.2f} secs'.
format(time.time() - train_start_time))
if valid_set:
return train_losses, val_losses
else:
return train_losses
def predict(self, x, ensemble=None, num_iteration=None):
if ensemble is None:
ensemble = self.tree_ensemble
return ensemble.predict(x)
|
# Copyright 2020 The Google Authors. All Rights Reserved.
#
# Licensed under the MIT License (the "License");
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""The util functions used in the experiments."""
from __future__ import division
import numpy as np
class L2Loss(object):
"""L2 loss function.
use_hessian corresponds to whether we use the hessian or a constant upper
bound of the hessian.
For L2 loss, the hessian is always 1.
"""
def __init__(self, use_hessian):
self.use_hessian = use_hessian
def loss_value(self, prediction, labels):
return 1 / 2 * (prediction - labels)**2
def negative_gradient(self, prediction, labels):
return labels - prediction
def hessian(self, prediction, labels):
del prediction
return np.ones(len(labels))
class LogisticLoss(object):
"""Logistic loss function.
use_hessian corresponds to whether we use the hessian or a constant upper bound
of the hessian. The labels are either -1 or 1.
For logistic loss, the upper bound of hessian is 1/4.
"""
def __init__(self, use_hessian):
self.use_hessian = use_hessian
def loss_value(self, prediction, labels):
# labels are -1 and 1.
return np.log(1 + np.exp(np.nan_to_num(-prediction * labels)))
def negative_gradient(self, prediction, labels):
temp = np.nan_to_num(np.exp(-labels * prediction))
return labels * temp / (1 + temp)
def hessian(self, prediction, labels):
if self.use_hessian:
temp = np.nan_to_num(np.exp(-labels * prediction))
return temp / (1 + temp)**2
else:
      # Return a constant upper bound of the hessian (a vector of ones).
return np.ones(len(labels))
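# Illustrative sanity check (not part of the original experiment code): verify
# that the analytic negative gradients above agree with a central finite
# difference of loss_value. Run this module directly to execute it.
if __name__ == '__main__':
  rng = np.random.RandomState(0)
  preds = rng.randn(5)
  eps = 1e-6
  for loss_fn, labels in [(L2Loss(use_hessian=True), rng.randn(5)),
                          (LogisticLoss(use_hessian=True),
                           np.where(rng.randn(5) > 0, 1.0, -1.0))]:
    numeric = -(loss_fn.loss_value(preds + eps, labels) -
                loss_fn.loss_value(preds - eps, labels)) / (2 * eps)
    analytic = loss_fn.negative_gradient(preds, labels)
    print('{}: max |numeric - analytic| = {:.2e}'.format(
        type(loss_fn).__name__, np.max(np.abs(numeric - analytic))))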
|
# Copyright 2020 The Google Authors. All Rights Reserved.
#
# Licensed under the MIT License (the "License");
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""Tests for accelerated_gbm.tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tree
from absl.testing import absltest
class TreeNode(absltest.TestCase):
def setUp(self):
super(TreeNode, self).setUp()
self.tree_node = tree.TreeNode()
def testFindInstanceForQuantileBoundaries(self):
sorted_values = [0, 0, 0, 1, 1, 2, 3, 3]
num_quantile = 4
expected_output = [-1, 2, 4, 5, 7]
self.assertSameElements(
expected_output,
self.tree_node.find_instance_for_quantile_boundaries(
sorted_values, num_quantile)
)
def testFindInstanceForQuantileBoundariesWhenMoreQuantiles(self):
sorted_values = [0, 0, 0, 1, 1, 2, 3, 3]
num_quantile = 10
expected_output = [-1, 2, 4, 5, 7]
self.assertSameElements(
expected_output,
self.tree_node.find_instance_for_quantile_boundaries(
sorted_values, num_quantile)
)
if __name__ == '__main__':
absltest.main()
|
# Copyright 2020 The Google Authors. All Rights Reserved.
#
# Licensed under the MIT License (the "License");
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""Script to run the experiments and plot the results."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from absl import app
from absl import flags
import matplotlib.pyplot as plt
import numpy as np
import scipy.io
import sklearn.datasets
from sklearn.model_selection import train_test_split
from agbt import AGBT
from agbt_b import AGBTB
import functional as F
from gbt import GBT
from tree import Dataset
from tensorflow.python.platform import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string('data_folder', None, 'The directory of datasets.')
flags.DEFINE_enum('dataset_name', 'all_datasets', [
'all_datasets', 'a1a', 'w1a', 'housing', 'w8a', 'a9a', 'colon', 'Year',
'rcv1'
], ('The name of instances. '
'`all_datasets` means all of the instances in the folder.'))
flags.DEFINE_enum('loss', 'L2Loss', ['L2Loss', 'LogisticLoss'],
'The loss function.')
flags.DEFINE_integer(
'early_stopping_rounds', 100000,
    ('Stop the algorithm if the validation loss does not improve after this '
'number of iterations.'))
flags.DEFINE_float(
'z_shrinkage_parameter', 0.1,
'The shrinkage parameter in the z-update in accelerated method.')
flags.DEFINE_string('output_dir', None,
'The directory where output will be written.')
flags.DEFINE_integer('max_depth', 4, 'Maximal depth of a tree.')
flags.DEFINE_integer('num_trees', 20, 'Number of boosting iterations.')
flags.DEFINE_float('min_split_gain', 0.01, 'Minimal gain for splitting a leaf.')
flags.DEFINE_float('learning_rate', 0.3, 'Learning rate.')
flags.DEFINE_float('regularizer_const', 1, 'Regularizer constant.')
flags.DEFINE_boolean('use_hessian', False, 'Whether to use Hessian.')
TEST_SIZE = 0.2
RANDOM_STATE = 1
LOSS = {'L2Loss': F.L2Loss, 'LogisticLoss': F.LogisticLoss}
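# Example invocation (illustrative; the script file name and paths below are
# placeholders):
#   python run_experiments.py --data_folder=/path/to/libsvm_data \
#       --output_dir=/path/to/results --dataset_name=a1a --loss=LogisticLoss \
#       --num_trees=100 --learning_rate=0.3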
def set_up_data(data_folder, dataset_name):
path = os.path.join(data_folder, dataset_name + '.txt')
data = sklearn.datasets.load_svmlight_file(gfile.Open(path, mode='rb'))
x = np.asarray(data[0].todense())
y = np.array(data[1])
return train_test_split(x, y, test_size=TEST_SIZE, random_state=RANDOM_STATE)
def save_output(output_dict, name, params):
  out_dir = os.path.join(FLAGS.output_dir, 'output')
  if not gfile.Exists(out_dir):
    gfile.MakeDirs(out_dir)
  matfile_path = out_dir + '/{:s}_lr_{:s}_min_split_gain_{:s}_num_trees_{:s}_max_depth_{:s}.mat'.format(
name,
str(params.learning_rate).replace('.', ''),
str(params.min_split_gain).replace('.', ''),
str(params.num_trees).replace('.', ''),
str(params.max_depth).replace('.', ''),
)
scipy.io.savemat(gfile.Open(matfile_path, mode='wb'), mdict=output_dict)
return 0
def plot_figures(output_dict, name, params):
"""Plots the figure from the output."""
figure_dir = os.path.join(FLAGS.output_dir, 'figures')
if not gfile.Exists(figure_dir):
gfile.MakeDirs(figure_dir)
fig = plt.figure()
plt.plot(output_dict['gbt_train_losses'], label='gbt')
plt.plot(output_dict['agbt_b_train_losses'], label='agbt_b')
plt.plot(output_dict['agbt_train_losses_1'], label='agbt1')
plt.plot(output_dict['agbt_train_losses_2'], label='agbt01')
plt.plot(output_dict['agbt_train_losses_3'], label='agbt001')
plt.legend()
fig.savefig(
gfile.Open(
figure_dir +
'/train_{:s}_lr_{:s}_min_split_gain_{:s}_num_trees_{:s}'.format(
name,
str(params.learning_rate).replace('.', ''),
str(params.min_split_gain).replace('.', ''),
str(params.num_trees).replace('.', ''),
), 'wb'))
fig = plt.figure()
plt.plot(output_dict['gbt_test_losses'], label='gbt')
plt.plot(output_dict['agbt_b_test_losses'], label='agbt_b')
  plt.plot(output_dict['agbt_test_losses_1'], label='agbt1')
  plt.plot(output_dict['agbt_test_losses_2'], label='agbt01')
  plt.plot(output_dict['agbt_test_losses_3'], label='agbt001')
plt.legend()
fig.savefig(
gfile.Open(
figure_dir +
          '/test_{:s}_lr_{:s}_min_split_gain_{:s}_num_trees_{:s}'.format(
name,
str(params.learning_rate).replace('.', ''),
str(params.min_split_gain).replace('.', ''),
str(params.num_trees).replace('.', ''),
), 'wb'))
fig = plt.figure()
plt.plot(output_dict['gbt_train_losses'], label='gbt')
plt.plot(output_dict['agbt_b_train_losses'], label='agbt_b')
plt.plot(output_dict['agbt_train_losses_1'], label='agbt1')
plt.plot(output_dict['agbt_train_losses_2'], label='agbt01')
plt.plot(output_dict['agbt_train_losses_3'], label='agbt001')
plt.yscale('log')
plt.legend()
fig.savefig(
gfile.Open(
figure_dir +
          '/log_train_{:s}_lr_{:s}_min_split_gain_{:s}_num_trees_{:s}'.format(
name,
str(params.learning_rate).replace('.', ''),
str(params.min_split_gain).replace('.', ''),
str(params.num_trees).replace('.', ''),
), 'wb'))
fig = plt.figure()
plt.plot(output_dict['gbt_test_losses'], label='gbt')
plt.plot(output_dict['agbt_b_test_losses'], label='agbt_b')
  plt.plot(output_dict['agbt_test_losses_1'], label='agbt1')
  plt.plot(output_dict['agbt_test_losses_2'], label='agbt01')
  plt.plot(output_dict['agbt_test_losses_3'], label='agbt001')
plt.yscale('log')
plt.legend()
fig.savefig(
gfile.Open(
figure_dir +
          '/log_test_{:s}_lr_{:s}_min_split_gain_{:s}_num_trees_{:s}'.format(
name,
str(params.learning_rate).replace('.', ''),
str(params.min_split_gain).replace('.', ''),
str(params.num_trees).replace('.', ''),
), 'wb'))
def main(argv):
del argv
if FLAGS.data_folder is None:
raise ValueError('Directory with downloaded datasets must be provided.')
if FLAGS.dataset_name == 'all_datasets':
names = ['a1a', 'w1a', 'housing']
else:
names = [FLAGS.dataset_name]
if FLAGS.output_dir is None:
raise ValueError('Output directory must be provided.')
for name in names:
x_train, x_test, y_train, y_test = set_up_data(FLAGS.data_folder, name)
train_data = Dataset(x_train, y_train)
test_data = Dataset(x_test, y_test)
gbt_params = collections.namedtuple('gbt_params', [
'regularizer_const', 'min_split_gain', 'max_depth', 'learning_rate',
'num_trees', 'early_stopping_rounds', 'loss', 'use_hessian',
'z_shrinkage_parameter'
])
params = gbt_params(
regularizer_const=FLAGS.regularizer_const,
min_split_gain=FLAGS.min_split_gain,
max_depth=FLAGS.max_depth,
learning_rate=FLAGS.learning_rate,
num_trees=FLAGS.num_trees,
early_stopping_rounds=FLAGS.early_stopping_rounds,
loss=FLAGS.loss,
use_hessian=FLAGS.use_hessian,
z_shrinkage_parameter=FLAGS.z_shrinkage_parameter)
gbt_method = GBT(params)
gbt_train_losses, gbt_test_losses = (
gbt_method.train(train_data, valid_set=test_data))
agbt_b_method = AGBTB(params)
agbt_b_train_losses, agbt_b_test_losses = (
agbt_b_method.train(train_data, valid_set=test_data))
params = params._replace(z_shrinkage_parameter=0.5)
agbt_method_1 = AGBT(params)
agbt_train_losses_1, agbt_test_losses_1 = (
agbt_method_1.train(train_data, valid_set=test_data))
params = params._replace(z_shrinkage_parameter=0.3)
agbt_method_2 = AGBT(params)
agbt_train_losses_2, agbt_test_losses_2 = (
agbt_method_2.train(train_data, valid_set=test_data))
params = params._replace(z_shrinkage_parameter=0.1)
agbt_method_3 = AGBT(params)
agbt_train_losses_3, agbt_test_losses_3 = (
agbt_method_3.train(train_data, valid_set=test_data))
output_dict = {
'gbt_train_losses': gbt_train_losses,
'gbt_test_losses': gbt_test_losses,
'agbt_b_train_losses': agbt_b_train_losses,
'agbt_b_test_losses': agbt_b_test_losses,
'agbt_train_losses_1': agbt_train_losses_1,
'agbt_test_losses_1': agbt_test_losses_1,
'agbt_train_losses_2': agbt_train_losses_2,
'agbt_test_losses_2': agbt_test_losses_2,
'agbt_train_losses_3': agbt_train_losses_3,
'agbt_test_losses_3': agbt_test_losses_3
}
save_output(output_dict, name, params)
plot_figures(output_dict, name, params)
if __name__ == '__main__':
app.run(main)
|
# Copyright 2020 The Google Authors. All Rights Reserved.
#
# Licensed under the MIT License (the "License");
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""GBT is a simple implementation of gradient boosted tree."""
from __future__ import absolute_import
from __future__ import division
import sys
import time
import numpy as np
import tree as Tree
NUM_QUANTILE = 100
np.random.seed(0)
LARGE_NUMBER = sys.maxsize
class GBT(Tree.BoostedTrees):
"""Simple implementation of Gradient Boosted Trees.
Typical usage example:
method = GBT(params)
method.train(train_data, valid_set=test_data)
"""
def train(self, train_set, valid_set=None, early_stopping_rounds=5):
tree_ensemble = Tree.TreeEnsemble()
learning_rate = self.params.learning_rate
best_iteration = 0
best_val_loss = LARGE_NUMBER
train_start_time = time.time()
train_losses = np.array([])
train_output = np.zeros(len(train_set.y))
if valid_set:
val_losses = np.array([])
val_output = np.zeros(len(valid_set.y))
for iter_cnt in range(self.params.num_trees):
iter_start_time = time.time()
grad, hessian = self._calc_gradient_and_hessian(train_set, train_output)
learner = self._build_learner(train_set, grad, hessian)
tree_ensemble.append(learner, learning_rate)
train_output = self._update_output(train_set, learner, learning_rate,
train_output)
train_loss = self._calc_loss_from_output(train_set, train_output)
train_losses = np.append(train_losses, train_loss)
if valid_set:
val_output = self._update_output(valid_set, learner, learning_rate,
val_output)
val_loss = self._calc_loss_from_output(valid_set, val_output)
val_losses = np.append(val_losses, val_loss)
val_loss_str = '{:.10f}'.format(val_loss) if val_loss else '-'
else:
val_loss_str = ''
print(
"Iter {:>3}, Train's loss: {:.10f}, Valid's loss: {}, Elapsed: {:.2f} secs"
.format(iter_cnt, train_loss, val_loss_str,
time.time() - iter_start_time))
if valid_set and val_loss is not None and val_loss < best_val_loss:
best_val_loss = val_loss
best_iteration = iter_cnt
self.tree_ensemble = tree_ensemble
self.best_iteration = best_iteration
print('Training finished. Elapsed: {:.2f} secs'.format(time.time() -
train_start_time))
if valid_set:
return train_losses, val_losses
else:
return train_losses
def predict(self, x, tree_ensemble=None, num_iteration=None):
if tree_ensemble is None:
tree_ensemble = self.tree_ensemble
return tree_ensemble.predict(x)
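# Minimal smoke test (illustrative sketch, not part of the library): fit GBT on
# a tiny synthetic regression problem. It assumes the parameter fields and the
# Tree.Dataset / loss-name conventions used by the experiment scripts in this
# package.
if __name__ == '__main__':
  import collections
  GBTParams = collections.namedtuple('GBTParams', [
      'regularizer_const', 'min_split_gain', 'max_depth', 'learning_rate',
      'num_trees', 'early_stopping_rounds', 'loss', 'use_hessian',
      'z_shrinkage_parameter'
  ])
  demo_params = GBTParams(
      regularizer_const=1, min_split_gain=0.01, max_depth=3, learning_rate=0.3,
      num_trees=5, early_stopping_rounds=5, loss='L2Loss', use_hessian=False,
      z_shrinkage_parameter=0.1)
  demo_x = np.random.rand(200, 5)
  demo_y = 2 * demo_x[:, 0] - demo_x[:, 1] + 0.05 * np.random.randn(200)
  demo_losses = GBT(demo_params).train(Tree.Dataset(demo_x, demo_y))
  print('Final training loss after {} trees: {:.6f}'.format(
      len(demo_losses), demo_losses[-1]))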
|
# Copyright 2020 The Google Authors. All Rights Reserved.
#
# Licensed under the MIT License (the "License");
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""AGBTB implements the method proposed in https://arxiv.org/pdf/1803.02042.pdf.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import sys
import time
import numpy as np
import tree as Tree
NUM_QUANTILE = 100
LARGE_NUMBER = sys.maxsize
np.random.seed(0)
np.set_printoptions(threshold=np.inf)
def combine_f_with_h(ensemble_f, ensemble_h):
assert len(ensemble_f) == 2 * len(ensemble_h) - 1
new_models = copy.copy(ensemble_f.models)
new_models.append(ensemble_h.models[-1])
new_coefficients = np.append(ensemble_f.coefficients, 0)
new_coefficients[1:len(new_coefficients):2] = (
new_coefficients[1:len(new_coefficients):2] + ensemble_h.coefficients)
return Tree.TreeEnsemble(new_models, new_coefficients)
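# Worked example of the interleaving above (illustrative): if ensemble_f holds
# models [f0, f1, f2] with coefficients [a0, a1, a2] and ensemble_h holds
# models [h0, h1] with coefficients [b0, b1], the combined ensemble has models
# [f0, f1, f2, h1] and coefficients [a0, a1 + b0, a2, b1]; i.e. the
# coefficients of ensemble_h are added onto the odd-indexed positions.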
class AGBTB(Tree.BoostedTrees):
"""Accelerated Gradient Boosted Trees in previous paper.
See https://arxiv.org/pdf/1803.02042.pdf for details.
Typical usage example:
method = AGBTB(params)
method.train(train_data, valid_set=test_data)
"""
def train(self, train_set, valid_set=None, early_stopping_rounds=5):
ensemble_f = Tree.TreeEnsemble()
ensemble_g = Tree.TreeEnsemble()
ensemble_h = Tree.TreeEnsemble()
learning_rate = self.params.learning_rate
best_iteration = 0
best_val_loss = LARGE_NUMBER
train_start_time = time.time()
train_losses = np.array([])
val_losses = np.array([])
train_f_output = np.zeros(len(train_set.y))
train_g_output = np.zeros(len(train_set.y))
train_h_output = np.zeros(len(train_set.y))
if valid_set:
val_f_output = np.zeros(len(valid_set.y))
val_g_output = np.zeros(len(valid_set.y))
val_h_output = np.zeros(len(valid_set.y))
for iter_cnt in range(self.params.num_trees):
iter_start_time = time.time()
theta = 2/(iter_cnt+2)
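      # theta = 2 / (iter_cnt + 2) is the momentum weight commonly used in
      # Nesterov-style accelerated schemes.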
if ensemble_f.models:
ensemble_g = combine_f_with_h((1-theta) * ensemble_f,
theta * ensemble_h)
train_g_output = (1-theta) * train_f_output + theta * train_h_output
grad, hessian = self._calc_gradient_and_hessian(train_set, train_g_output)
learner_f = self._build_learner(train_set, grad, hessian)
ensemble_f = ensemble_g.add(learner_f, learning_rate)
ensemble_h.append(learner_f, 1 / theta * learning_rate)
train_f_output = self._update_output(
train_set, learner_f, learning_rate, train_g_output)
train_h_output = self._update_output(
train_set, learner_f, 1 / theta * learning_rate, train_h_output)
train_loss = self._calc_loss_from_output(train_set, train_f_output)
train_losses = np.append(train_losses, train_loss)
if valid_set:
val_g_output = (1-theta) * val_f_output + theta * val_h_output
val_f_output = self._update_output(
valid_set, learner_f, learning_rate, val_g_output)
val_h_output = self._update_output(
valid_set, learner_f, 1 / theta * learning_rate, val_h_output)
val_loss = self._calc_loss_from_output(valid_set, val_f_output)
val_losses = np.append(val_losses, val_loss)
val_loss_str = '{:.10f}'.format(val_loss) if val_loss else '-'
else:
val_loss_str = ''
print(
"Iter {:>3}, Train's loss: {:.10f}, Valid's loss: {}, Elapsed: {:.2f} secs"
.format(iter_cnt, train_loss, val_loss_str,
time.time() - iter_start_time))
if valid_set and val_loss is not None and val_loss < best_val_loss:
best_val_loss = val_loss
best_iteration = iter_cnt
      if (valid_set and
          iter_cnt - best_iteration >= self.params.early_stopping_rounds):
print('Early stopping, best iteration is:')
print("Iter {:>3}, Train's loss: {:.10f}".format(
best_iteration, best_val_loss))
break
self.tree_ensemble = ensemble_f
self.best_iteration = best_iteration
print('Training finished. Elapsed: {:.2f} secs'.
format(time.time() - train_start_time))
if valid_set:
return train_losses, val_losses
else:
return train_losses
def predict(self, x, ensemble=None, num_iteration=None):
if ensemble is None:
ensemble = self.tree_ensemble
return ensemble.predict(x)
|
# Copyright 2020 The Google Authors. All Rights Reserved.
#
# Licensed under the MIT License (the "License");
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""Run vanilla AGBM, save the results and plot the figures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from absl import app
from absl import flags
import matplotlib.pyplot as plt
import numpy as np
import scipy.io
import sklearn.datasets
from sklearn.model_selection import train_test_split
from agbt_b import AGBTB
import functional as F
from tree import Dataset
from tensorflow.python.platform import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string('data_folder', None, 'The directory of datasets.')
flags.DEFINE_enum('dataset_name', 'all_datasets', [
'all_datasets', 'a1a', 'w1a', 'housing', 'w8a', 'a9a', 'colon', 'Year',
'rcv1'
], ('The name of instances. '
'`all_datasets` means all of the instances in the folder.'))
flags.DEFINE_enum('loss', 'L2Loss', ['L2Loss', 'LogisticLoss'],
'The loss function.')
flags.DEFINE_integer(
'early_stopping_rounds', 100000,
    ('Stop the algorithm if the validation loss does not improve after this '
'number of iterations.'))
flags.DEFINE_float(
'z_shrinkage_parameter', 0.1,
'The shrinkage parameter in the z-update in accelerated method.')
flags.DEFINE_string('output_dir', None,
'The directory where output will be written.')
flags.DEFINE_integer('max_depth', 4, 'Maximal depth of a tree.')
flags.DEFINE_integer('num_trees', 20, 'Number of boosting iterations.')
flags.DEFINE_float('min_split_gain', 0.01, 'Minimal gain for splitting a leaf.')
flags.DEFINE_float('learning_rate', 0.3, 'Learning rate.')
flags.DEFINE_float('regularizer_const', 1, 'Regularizer constant.')
flags.DEFINE_boolean('use_hessian', False, 'Whether to use Hessian.')
TEST_SIZE = 0.2
RANDOM_STATE = 1
LOSS = {'L2Loss': F.L2Loss, 'LogisticLoss': F.LogisticLoss}
def set_up_data(data_folder, dataset_name):
path = os.path.join(data_folder, dataset_name + '.txt')
data = sklearn.datasets.load_svmlight_file(gfile.Open(path, mode='rb'))
x = np.asarray(data[0].todense())
y = np.array(data[1])
return train_test_split(x, y, test_size=TEST_SIZE, random_state=RANDOM_STATE)
def save_output(output_dict, name, params):
  out_dir = os.path.join(FLAGS.output_dir, 'output')
  if not gfile.Exists(out_dir):
    gfile.MakeDirs(out_dir)
  matfile_path = out_dir + '/VAGBM_{:s}_lr_{:s}_min_split_gain_{:s}_num_trees_{:s}.mat'.format(
name,
str(params.learning_rate).replace('.', ''),
str(params.min_split_gain).replace('.', ''),
str(params.num_trees).replace('.', ''),
)
scipy.io.savemat(gfile.Open(matfile_path, 'wb'), mdict=output_dict)
return 0
def plot_figures(output_dict, name, params):
"""Plots the figure from the output."""
figure_dir = os.path.join(FLAGS.output_dir, 'figures')
if not gfile.Exists(figure_dir):
gfile.MakeDirs(figure_dir)
fig = plt.figure()
plt.plot(output_dict['gbt_train_losses'], label='gbt')
plt.plot(output_dict['agbt_b_train_losses'], label='agbt_b')
plt.plot(output_dict['agbt_train_losses_1'], label='agbt1')
plt.plot(output_dict['agbt_train_losses_2'], label='agbt01')
plt.plot(output_dict['agbt_train_losses_3'], label='agbt001')
plt.legend()
fig.savefig(
gfile.Open(
figure_dir +
'train_{:s}_lr_{:s}_min_split_gain_{:s}_num_trees_{:s}'.format(
name,
str(params.learning_rate).replace('.', ''),
str(params.min_split_gain).replace('.', ''),
str(params.num_trees).replace('.', ''),
), 'wb'))
fig = plt.figure()
plt.plot(output_dict['gbt_test_losses'], label='gbt')
plt.plot(output_dict['agbt_b_test_losses'], label='agbt_b')
plt.plot(output_dict['agbt_train_losses_1'], label='agbt1')
plt.plot(output_dict['agbt_train_losses_2'], label='agbt01')
plt.plot(output_dict['agbt_train_losses_3'], label='agbt001')
plt.legend()
fig.savefig(
gfile.Open(
figure_dir +
'test_{:s}_lr_{:s}_min_split_gain_{:s}_num_trees_{:s}'.format(
name,
str(params.learning_rate).replace('.', ''),
str(params.min_split_gain).replace('.', ''),
str(params.num_trees).replace('.', ''),
), 'wb'))
fig = plt.figure()
plt.plot(output_dict['gbt_train_losses'], label='gbt')
plt.plot(output_dict['agbt_b_train_losses'], label='agbt_b')
plt.plot(output_dict['agbt_train_losses_1'], label='agbt1')
plt.plot(output_dict['agbt_train_losses_2'], label='agbt01')
plt.plot(output_dict['agbt_train_losses_3'], label='agbt001')
plt.yscale('log')
plt.legend()
fig.savefig(
gfile.Open(
figure_dir +
'log_train_{:s}_lr_{:s}_min_split_gain_{:s}_num_trees_{:s}'.format(
name,
str(params.learning_rate).replace('.', ''),
str(params.min_split_gain).replace('.', ''),
str(params.num_trees).replace('.', ''),
), 'wb'))
fig = plt.figure()
plt.plot(output_dict['gbt_test_losses'], label='gbt')
plt.plot(output_dict['agbt_b_test_losses'], label='agbt_b')
plt.plot(output_dict['agbt_train_losses_1'], label='agbt1')
plt.plot(output_dict['agbt_train_losses_2'], label='agbt01')
plt.plot(output_dict['agbt_train_losses_3'], label='agbt001')
plt.yscale('log')
plt.legend()
fig.savefig(
gfile.Open(
figure_dir +
'log_test_{:s}_lr_{:s}_min_split_gain_{:s}_num_trees_{:s}'.format(
name,
str(params.learning_rate).replace('.', ''),
str(params.min_split_gain).replace('.', ''),
str(params.num_trees).replace('.', ''),
), 'wb'))
def main(argv):
del argv
if FLAGS.data_folder is None:
raise ValueError('Directory with downloaded datasets must be provided.')
if FLAGS.dataset_name == 'all_datasets':
names = ['a1a', 'w1a', 'housing']
else:
names = [FLAGS.dataset_name]
if FLAGS.output_dir is None:
raise ValueError('Output directory must be provided.')
for name in names:
x_train, x_test, y_train, y_test = set_up_data(FLAGS.data_folder, name)
train_data = Dataset(x_train, y_train)
test_data = Dataset(x_test, y_test)
GBTParams = collections.namedtuple('GBTParams', [
'regularizer_const', 'min_split_gain', 'max_depth', 'learning_rate',
'num_trees', 'early_stopping_rounds', 'loss', 'use_hessian',
'z_shrinkage_parameter'
])
params = GBTParams(
regularizer_const=FLAGS.regularizer_const,
min_split_gain=FLAGS.min_split_gain,
max_depth=FLAGS.max_depth,
learning_rate=FLAGS.learning_rate,
num_trees=FLAGS.num_trees,
early_stopping_rounds=FLAGS.early_stopping_rounds,
loss=FLAGS.loss,
use_hessian=FLAGS.use_hessian,
z_shrinkage_parameter=FLAGS.z_shrinkage_parameter)
params = params._replace(learning_rate=1)
agbt_b_method_1 = AGBTB(params)
agbt_b_train_losses_1, agbt_b_test_losses_1 = (
agbt_b_method_1.train(train_data, valid_set=test_data))
for i in range(len(agbt_b_train_losses_1)):
if agbt_b_train_losses_1[i] > 1e8:
agbt_b_train_losses_1[i] = 1e8
if agbt_b_test_losses_1[i] > 1e8:
agbt_b_test_losses_1[i] = 1e8
params = params._replace(learning_rate=0.1)
agbt_b_method_2 = AGBTB(params)
agbt_b_train_losses_2, agbt_b_test_losses_2 = (
agbt_b_method_2.train(train_data, valid_set=test_data))
params = params._replace(learning_rate=0.01)
agbt_b_method_3 = AGBTB(params)
agbt_b_train_losses_3, agbt_b_test_losses_3 = (
agbt_b_method_3.train(train_data, valid_set=test_data))
output_dict = {
'agbt_b_train_losses_1': agbt_b_train_losses_1,
'agbt_b_test_losses_1': agbt_b_test_losses_1,
'agbt_b_train_losses_2': agbt_b_train_losses_2,
'agbt_b_test_losses_2': agbt_b_test_losses_2,
'agbt_b_train_losses_3': agbt_b_train_losses_3,
'agbt_b_test_losses_3': agbt_b_test_losses_3
}
save_output(output_dict, name, params)
# plot_figures(output_dict, name, params)
if __name__ == '__main__':
app.run(main)
|
# Copyright 2020 The Google Authors. All Rights Reserved.
#
# Licensed under the MIT License (the "License");
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""Run tests with LIBSVM dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from absl import app
from absl import flags
import numpy as np
import sklearn.datasets
from sklearn.model_selection import train_test_split
from agbt import AGBT
from agbt_b import AGBTB
import functional as F
from gbt import GBT
from tree import Dataset
from tensorflow.python.platform import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("data_folder", None, "The directory of datasets.")
flags.DEFINE_enum("dataset_name", "all_datasets",
["all_datasets", "a1a", "w1a", "housing"],
("The name of instances."
"`all_datasets` means all of the instances in the folder."))
flags.DEFINE_enum("loss", "L2Loss", ["L2Loss", "LogisticLoss"],
"The loss function.")
flags.DEFINE_enum(
"method", "AGBT", ["GBT", "AGBT", "AGBTB"],
("The method to use. GBT is the standard gradient boosted tree. AGBT is our"
"proposed method and AGBTB is the method proposed by Biau et al."))
flags.DEFINE_integer(
"early_stopping_rounds", 100000,
("Stop the algorithm if the validation loss does not improve after this"
"number of iterations."))
flags.DEFINE_float(
"z_shrinkage_parameter", 0.1,
"The shrinkage parameter in the z-update in accelerated method.")
flags.DEFINE_integer("max_depth", 3, "Maximal depth of a tree.")
flags.DEFINE_integer("num_trees", 20, "Number of boosting iterations.")
flags.DEFINE_float("min_split_gain", 0.1, "Minimal gain for splitting a leaf.")
flags.DEFINE_float("learning_rate", 0.3, "Learning rate.")
flags.DEFINE_float("regularizer_const", 1, "Regularizer constant.")
flags.DEFINE_boolean("use_hessian", False, "Whether to use Hessian.")
TEST_SIZE = 0.2
RANDOM_STATE = 40
LOSS = {"L2Loss": F.L2Loss, "LogisticLoss": F.LogisticLoss}
def SetupData(data_folder, dataset_name):
path = os.path.join(data_folder, dataset_name + ".txt")
data = sklearn.datasets.load_svmlight_file(gfile.Open(path, mode="rb"))
x = np.asarray(data[0].todense())
y = np.array(data[1])
return train_test_split(x, y, test_size=TEST_SIZE, random_state=RANDOM_STATE)
def main(argv):
del argv
if FLAGS.data_folder is None:
raise ValueError("Directory with downloaded datasets must be provided.")
if FLAGS.dataset_name == "all_datasets":
names = ["a1a", "w1a", "housing"]
else:
names = [FLAGS.dataset_name]
for name in names:
x_train, x_test, y_train, y_test = SetupData(FLAGS.data_folder, name)
train_data = Dataset(x_train, y_train)
test_data = Dataset(x_test, y_test)
GBTParams = collections.namedtuple("GBTParams", [
"regularizer_const", "min_split_gain", "max_depth", "learning_rate",
"num_trees", "early_stopping_rounds", "loss", "use_hessian",
"z_shrinkage_parameter"
])
params = GBTParams(
regularizer_const=FLAGS.regularizer_const,
min_split_gain=FLAGS.min_split_gain,
max_depth=FLAGS.max_depth,
learning_rate=FLAGS.learning_rate,
num_trees=FLAGS.num_trees,
early_stopping_rounds=FLAGS.early_stopping_rounds,
loss=FLAGS.loss,
use_hessian=FLAGS.use_hessian,
z_shrinkage_parameter=FLAGS.z_shrinkage_parameter)
if FLAGS.method == "GBT":
print("Start training using GBT...")
method = GBT(params)
elif FLAGS.method == "AGBT":
print("Start training using AGBT...")
method = AGBT(params)
elif FLAGS.method == "AGBTB":
print("Start training using AGBTB...")
method = AGBTB(params)
method.train(train_data, valid_set=test_data)
print("Start predicting...")
y_pred = []
for x in x_test:
y_pred.append(method.predict(x, num_iteration=method.best_iteration))
if params.loss == "L2Loss":
loss = F.L2Loss(params.use_hessian)
elif params.loss == "LogisticLoss":
loss = F.LogisticLoss(params.use_hessian)
print("The mean loss of prediction is:",
np.mean(loss.loss_value(np.array(y_pred), np.array(y_test))))
if __name__ == "__main__":
app.run(main)
|
# Copyright 2020 The Google Authors. All Rights Reserved.
#
# Licensed under the MIT License (the "License");
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""Script to run cross validation experiments and plot the results."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from absl import app
from absl import flags
import numpy as np
from scipy.stats import randint as sp_randint
import sklearn.datasets
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
from agbt import AGBT
from agbt_b import AGBTB
import functional as F
from gbt import GBT
from tree import Dataset
from tensorflow.python.platform import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("data_folder", None, "The directory of datasets.")
flags.DEFINE_string("output_dir", None,
"The directory where output will be written.")
flags.DEFINE_enum("dataset_name", "housing", [
"all_datasets", "a1a", "w1a", "housing", "w8a", "a9a", "colon", "Year",
"rcv1", "german", "sonar", "eunite", "diabetes"
], ("The name of instances."
"`all_datasets` means all of the instances in the folder."))
flags.DEFINE_enum("loss", "L2Loss", ["L2Loss", "LogisticLoss"],
"The loss function.")
flags.DEFINE_enum(
"method", "AGBT", ["GBT", "AGBT", "AGBTB"],
("The method to use. GBT is the standard gradient boosted tree. AGBT is our"
"proposed method and AGBTB is the method proposed by Biau et al."))
flags.DEFINE_integer(
"early_stopping_rounds", 100000,
("Stop the algorithm if the validation loss does not improve after this"
"number of iterations."))
flags.DEFINE_integer("cv_random_state", 100,
("The random state for cross validation."))
flags.DEFINE_float(
"z_shrinkage_parameter", 0.1,
"The shrinkage parameter in the z-update in accelerated method.")
flags.DEFINE_integer("max_depth", 4, "Maximal depth of a tree.")
flags.DEFINE_integer("num_trees", 20, "Number of boosting iterations.")
flags.DEFINE_integer(
"num_iteration", 10,
"Number of iterations in random search of the parameters.")
flags.DEFINE_float("min_split_gain", 0.01, "Minimal gain for splitting a leaf.")
flags.DEFINE_float("learning_rate", 0.3, "Learning rate.")
flags.DEFINE_float("regularizer_const", 1, "Regularizer constant.")
flags.DEFINE_boolean("use_hessian", False, "Whether to use Hessian.")
TEST_SIZE = 0.2
RANDOM_STATE = 1
LOSS = {"L2Loss": F.L2Loss, "LogisticLoss": F.LogisticLoss}
def SetupData(data_folder, dataset_name):
path = os.path.join(data_folder, dataset_name + ".txt")
with gfile.Open(path, "rb") as f:
data = sklearn.datasets.load_svmlight_file(f)
x = np.asarray(data[0].todense())
y = np.array(data[1])
return train_test_split(x, y, test_size=TEST_SIZE, random_state=RANDOM_STATE)
def main(argv):
del argv
if FLAGS.output_dir is None:
raise ValueError("Output directory must be provided.")
if FLAGS.data_folder is None:
raise ValueError("Directory with downloaded datasets must be provided.")
if FLAGS.dataset_name == "all_datasets":
names = ["a1a", "w1a", "housing"]
else:
names = [FLAGS.dataset_name]
for name in names:
x_train, x_test, y_train, y_test = SetupData(FLAGS.data_folder, name)
train_data = Dataset(x_train, y_train)
GBTParams = collections.namedtuple("GBTParams", [
"regularizer_const", "min_split_gain", "max_depth", "learning_rate",
"num_trees", "early_stopping_rounds", "loss", "use_hessian",
"z_shrinkage_parameter"
])
params = GBTParams(
regularizer_const=FLAGS.regularizer_const,
min_split_gain=FLAGS.min_split_gain,
max_depth=FLAGS.max_depth,
learning_rate=FLAGS.learning_rate,
num_trees=FLAGS.num_trees,
early_stopping_rounds=FLAGS.early_stopping_rounds,
loss=FLAGS.loss,
use_hessian=FLAGS.use_hessian,
z_shrinkage_parameter=FLAGS.z_shrinkage_parameter)
param_dist = {"max_depth": [4], "min_split_gain": [0.1, 0.01, 0.001]}
if FLAGS.loss == "L2Loss":
param_dist["learning_rate"] = [0.01, 0.03, 0.1, 0.3, 1]
elif FLAGS.loss == "LogisticLoss":
param_dist["learning_rate"] = [0.04, 0.12, 0.4, 1.2, 4]
if FLAGS.method == "GBT":
print("Start training using GBT...")
method = GBT(params)
param_dist["num_trees"] = sp_randint(10, 360)
if FLAGS.method == "AGBT":
print("Start training using AGBT...")
method = AGBT(params)
param_dist["z_shrinkage_parameter"] = ([
0.01, 0.02, 0.03, 0.05, 0.1, 0.2, 0.3, 0.5, 1
])
param_dist["num_trees"] = sp_randint(5, 180)
if FLAGS.method == "AGBTB":
print("Start training using AGBTB...")
method = AGBTB(params)
param_dist["num_trees"] = sp_randint(10, 360)
random_search = RandomizedSearchCV(
method,
param_distributions=param_dist,
n_iter=FLAGS.num_iteration,
cv=5,
random_state=FLAGS.cv_random_state)
random_search.fit(train_data.x, train_data.y)
with gfile.Open(
FLAGS.output_dir + "/" + FLAGS.dataset_name + "_" + FLAGS.method + "_" +
str(FLAGS.cv_random_state) + ".txt", "w") as f:
f.write("The best parameter setting of {} is: {}.\n".format(
FLAGS.method, str(random_search.best_params_)))
f.write("The training error with the best {} model is: {:.10f}.\n".format(
FLAGS.method, -random_search.best_estimator_.score(x_train, y_train)))
f.write("The testing error with the best {} model is: {:.10f}".format(
FLAGS.method, -random_search.best_estimator_.score(x_test, y_test)))
print("The best parameter setting of {} is: {}.\n".format(
FLAGS.method, str(random_search.best_params_)))
print("The training error with the best {} model is: {:.10f}.\n".format(
FLAGS.method, -random_search.best_estimator_.score(x_train, y_train)))
print("The testing error with the best {} model is: {:.10f}".format(
FLAGS.method, -random_search.best_estimator_.score(x_test, y_test)))
if __name__ == "__main__":
app.run(main)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import importlib
import re
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
from typing_extensions import Literal
import utils as ut
@dataclass
class AdversarialAttackSettings:
epsilon: float
norm: ut.NormType
step_size: float
n_steps: int = 20
n_averages: int = 1
  attack: Literal["pgd", "kwta"] = "pgd"
random_start: bool = True
def __repr__(self):
return (
f"{self.attack}_{self.norm}_{self.epsilon}_{self.step_size}_"
f"{self.n_steps}_{self.n_averages}_{self.random_start}"
)
@dataclass
class DecisionBoundaryBinarizationSettings:
epsilon: float
norm: ut.NormType
n_inner_points: int
n_boundary_points: int
adversarial_attack_settings: Optional[AdversarialAttackSettings]
n_boundary_adversarial_points: int = 0
n_far_off_boundary_points: int = 0
n_far_off_adversarial_points: int = 0
optimizer: str = "adam"
lr: float = 5e-2
class_weight: Optional[Union[Literal["balanced"], dict]] = None
def __repr__(self):
return (
f"{self.norm}_{self.epsilon}_{self.n_inner_points}_"
f"{self.n_boundary_points}_{self.n_far_off_boundary_points}_"
f"{self.adversarial_attack_settings}_{self.optimizer}_{self.lr}"
)
def __parse_structure_argument(
value,
argument_type: Union[Callable[[str], Any], type],
known_flags: Dict[str, Tuple[str, bool]],
argument_types: Dict[str, Callable],
):
"""
Recursively parses structured arguments encoded as a string.
  Args:
    value: String encoding of the structured arguments.
    argument_type: Class to store values in.
known_flags: Map between name and default value of flags.
argument_types: Map between argument names and argument constructors
for variables.
Returns:
Object created based on string.
"""
arguments = re.findall(r'(?:[^\s,"]|"(?:\\.|[^"])*")+', value)
kwargs = {}
for argument in arguments:
parts = argument.split("=")
if len(parts) > 2:
parts = [parts[0], "=".join(parts[1:])]
if len(parts) != 2:
# argument is a flag
if argument not in known_flags:
        raise argparse.ArgumentTypeError(
            f"invalid argument/unknown flag: `{argument}`"
        )
else:
kwargs[known_flags[argument][0]] = known_flags[argument][1]
else:
key, value = parts
value = value.replace(r"\"", '"')
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
if key in argument_types:
kwargs[key] = argument_types[key](value)
else:
raise argparse.ArgumentTypeError(
f"invalid argument `{argument}` for type `{argument_type}`"
)
try:
return argument_type(**kwargs)
except Exception as ex:
    raise argparse.ArgumentTypeError(f"Could not create type: {ex}")
def parse_adversarial_attack_argument(value):
"""Parse a string defining a AdversarialAttackSettings object."""
return __parse_structure_argument(
value,
AdversarialAttackSettings,
{},
{
"norm": str,
"n_steps": int,
"epsilon": float,
"step_size": float,
"attack": str,
"n_averages": int,
"random_start": lambda x: x.lower() == "true",
},
)
def parse_classifier_argument(value):
"""Parse a string describing a classifier object."""
class_name = value.split(".")[-1]
module_path = ".".join(value.split(".")[:-1])
module = importlib.import_module(module_path)
return getattr(module, class_name)
def parse_decision_boundary_binarization_argument(value):
"""Parse a string defining a DecisionBoundaryBinarizationSettings object."""
return __parse_structure_argument(
value,
DecisionBoundaryBinarizationSettings,
{},
{
"norm": str,
"epsilon": float,
"n_boundary_points": int,
"n_inner_points": int,
"adversarial_attack_settings": lambda x: parse_adversarial_attack_argument(
x
),
"optimizer": str,
"lr": float,
"class_weight": str,
},
)
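# Illustrative usage of the flat "key=value,..." encoding parsed above; the
# numeric values are arbitrary examples, not recommended settings.
if __name__ == "__main__":
  print(parse_adversarial_attack_argument(
      "norm=linf,epsilon=0.0314,step_size=0.005,n_steps=20"))
  print(parse_decision_boundary_binarization_argument(
      'norm=linf,epsilon=0.0314,n_inner_points=999,n_boundary_points=1,'
      'adversarial_attack_settings="norm=linf,epsilon=0.0314,'
      'step_size=0.005,n_steps=20"'))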
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import BytesIO
from typing import Any
from typing import Callable
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms.functional as torchvision_functional
from PIL import Image
from torch.nn import init
from jpeg import DifferentiableJPEG
from torchvision.models.resnet import resnet50
from torchvision.models.inception import inception_v3
class Lambda(nn.Module):
def __init__(self, function: Callable):
super().__init__()
self.function = function
def forward(self, x, **kwargs):
return self.function(x, **kwargs)
class InputNormalization(nn.Module):
def __init__(self, module: nn.Module, mean: torch.Tensor, std: torch.Tensor):
super().__init__()
self.module = module
self.register_buffer("mean", mean[..., None, None])
self.register_buffer("std", std[..., None, None])
def forward(self, x, *args, **kwargs):
return self.module(
torchvision_functional.normalize(x, self.mean, self.std, False), *args,
**kwargs)
class Detector(nn.Module):
def __init__(self, encoder: Optional[nn.Module] = None,
n_features_encoder: int = 0, classifier: Optional[nn.Module] = None,
n_features_classifier: int = 0, ):
super().__init__()
assert encoder is not None or classifier is not None
self.encoder = encoder
self.classifier = classifier
n_features = n_features_encoder + n_features_classifier
self.head = nn.Sequential(
nn.Linear(n_features, n_features * 4),
nn.ReLU(),
nn.Linear(n_features * 4, n_features * 4),
nn.ReLU(),
nn.Linear(n_features * 4, n_features * 4),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(n_features * 4, n_features),
nn.ReLU(),
nn.Linear(n_features, 2),
)
def train(self, mode: bool = True) -> nn.Module:
if self.encoder is not None:
self.encoder.train(mode)
self.head.train(mode)
self.training = mode
# keep classifier always in test mode
if self.classifier is not None:
self.classifier.train(False)
return self
def forward(self, x):
features = []
if self.encoder is not None:
features.append(self.encoder(x))
if self.classifier is not None:
features.append(self.classifier(x))
if len(features) > 1:
features = torch.cat(features, 1)
else:
features = features[0]
return self.head(features)
class ScaledLogitsModule(nn.Module):
def __init__(self, module: nn.Module, scale: float):
super().__init__()
self.module = module
self.scale = scale
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs) * self.scale
class GaussianNoiseInputModule(nn.Module):
def __init__(self, module: nn.Module, stddev: float):
super().__init__()
self.stddev = stddev
self.module = module
def forward(self, x, *args, **kwargs):
x = x + torch.randn_like(x) * self.stddev
return self.module(x, *args, **kwargs)
class __GaussianNoiseGradientFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, stddev):
ctx.intermediate_results = stddev
return input
@staticmethod
def backward(ctx, grad_output):
stddev = ctx.intermediate_results
grad_input = grad_output + torch.randn_like(grad_output) * stddev
return grad_input, None
gaussian_noise_gradient = __GaussianNoiseGradientFunction.apply
class GaussianNoiseGradientModule(nn.Module):
def __init__(self, module: nn.Module, stddev: float):
super().__init__()
self.module = module
self.stddev = stddev
def forward(self, x, *args, **kwargs):
return gaussian_noise_gradient(self.module(x, *args, **kwargs), self.stddev)
class __JPEGForwardIdentityBackwardFunction(torch.autograd.Function):
@staticmethod
def forward(ctx: Any, input: torch.Tensor, quality: int) -> torch.Tensor:
res = []
for x in input.permute(0, 2, 3, 1).detach().cpu().numpy():
output = BytesIO()
x = (np.clip(x, 0, 1) * 255).astype(np.uint8)
Image.fromarray(x).save(output, 'JPEG', quality=quality)
x = Image.open(output)
res.append(np.array(x).transpose(2, 0, 1) / 255.0)
res = torch.Tensor(np.array(res)).to(input.device)
return res
@staticmethod
def backward(ctx: Any, grad_output: torch.Tensor) -> torch.Tensor:
return grad_output, None
jpeg_forward_identity_backward = __JPEGForwardIdentityBackwardFunction.apply
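# The function above applies (non-differentiable) JPEG compression in the
# forward pass and passes gradients through unchanged in the backward pass,
# i.e. a straight-through / BPDA-style approximation of the JPEG operation.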
class __LambdaForwardIdentityBackward(torch.autograd.Function):
@staticmethod
def forward(ctx: Any, input: torch.Tensor,
function: Callable) -> torch.Tensor:
return function(input)
@staticmethod
def backward(ctx: Any, grad_output: torch.Tensor) -> torch.Tensor:
return grad_output, None, None
lambda_forward_identity_backward = __LambdaForwardIdentityBackward.apply
class JPEGForwardIdentityBackwardModule(nn.Module):
def __init__(self, module: nn.Module, quality: int, size: int, legacy=False):
super().__init__()
self.module = module
if legacy:
self.jpeg = lambda x: jpeg_forward_identity_backward(x, quality)
else:
self.jpeg_module = DifferentiableJPEG(size, size, True, quality=quality)
self.jpeg = lambda x: lambda_forward_identity_backward(x,
self.jpeg_module)
def forward(self, x, *args, **kwargs):
return self.module(self.jpeg(x), *args, **kwargs)
class DifferentiableJPEGModule(nn.Module):
def __init__(self, module: nn.Module, quality: int, size: int):
super().__init__()
self.module = module
self.jpeg = DifferentiableJPEG(size, size, True, quality=quality)
def forward(self, x, *args, **kwargs):
return self.module(self.jpeg(x), *args, **kwargs)
class __GausianBlurForwardIdentityBackwardFunction(torch.autograd.Function):
@staticmethod
def forward(ctx: Any, input: torch.Tensor, kernel_size: int,
stddev: float) -> torch.Tensor:
return torchvision_functional.gaussian_blur(input, kernel_size, stddev)
@staticmethod
def backward(ctx: Any, grad_output: torch.Tensor) -> torch.Tensor:
return grad_output, None, None
gaussian_blur_forward_identity_backward = __GausianBlurForwardIdentityBackwardFunction.apply
class GausianBlurForwardIdentityBackwardModule(nn.Module):
def __init__(self, module: nn.Module, kernel_size: int, stddev: float):
super().__init__()
self.module = module
self.kernel_size = kernel_size
self.stddev = stddev
def forward(self, x, *args, **kwargs):
return self.module(
gaussian_blur_forward_identity_backward(x, self.kernel_size,
self.stddev), *args, **kwargs)
class __UniversalSingularValueThresholding(torch.autograd.Function):
"""Universal Singular Value Thresholding (USVT) """
@staticmethod
def forward(ctx: Any, input: torch.Tensor, me_channel_concat: bool = True,
maskp: float = 0.5, svdprob: float = 0.8):
device = input.device
batch_num, c, h, w = input.size()
output = torch.zeros_like(input).cpu().numpy()
for i in range(batch_num):
img = (input[i] * 2 - 1).cpu().numpy()
if me_channel_concat:
img = np.concatenate((np.concatenate((img[0], img[1]), axis=1), img[2]),
axis=1)
mask = np.random.binomial(1, maskp, h * w * c).reshape(h, w * c)
p_obs = len(mask[mask == 1]) / (h * w * c)
if svdprob is not None:
u, sigma, v = np.linalg.svd(img * mask)
S = np.zeros((h, w))
for j in range(int(svdprob * h)):
S[j][j] = sigma[j]
S = np.concatenate((S, np.zeros((h, w * 2))), axis=1)
W = np.dot(np.dot(u, S), v) / p_obs
W[W < -1] = -1
W[W > 1] = 1
est_matrix = (W + 1) / 2
for channel in range(c):
output[i, channel] = est_matrix[:, channel * h:(channel + 1) * h]
else:
est_matrix = ((img * mask) + 1) / 2
for channel in range(c):
output[i, channel] = est_matrix[:, channel * h:(channel + 1) * h]
else:
mask = np.random.binomial(1, maskp, h * w).reshape(h, w)
p_obs = len(mask[mask == 1]) / (h * w)
for channel in range(c):
u, sigma, v = np.linalg.svd(img[channel] * mask)
S = np.zeros((h, w))
for j in range(int(svdprob * h)):
S[j][j] = sigma[j]
W = np.dot(np.dot(u, S), v) / p_obs
W[W < -1] = -1
W[W > 1] = 1
output[i, channel] = (W + 1) / 2
output = torch.from_numpy(output).float().to(device)
return output
@staticmethod
def backward(ctx: Any, grad_output: torch.Tensor):
return grad_output, None, None, None
universal_singular_value_thresholding = __UniversalSingularValueThresholding.apply
class UVSTModule(nn.Module):
"""Apply Universal Singular Value Thresholding as suggested in ME-Net:
Chatterjee, S. et al. Matrix estimation by universal singular value thresholding. 2015."""
def __init__(self, module: nn.Module, me_channel_concat: bool = True,
maskp: float = 0.5, svdprob: float = 0.8):
super().__init__()
self.module = module
self.me_channel_concat = me_channel_concat
self.maskp = maskp
self.svdprob = svdprob
def forward(self, x, *args, **kwargs):
x = universal_singular_value_thresholding(x, self.me_channel_concat,
self.maskp, self.svdprob)
return self.module(x, *args, **kwargs)
class _ThermometerEncodingFunction(torch.autograd.Function):
@staticmethod
def forward(ctx: Any, input: torch.Tensor, l: int) -> torch.Tensor:
ctx.intermediate_results = input, l
return _ThermometerEncodingFunction.tau(input, l)
@staticmethod
def tau_hat(x, l):
x_hat = torch.unsqueeze(x, 2)
k = torch.arange(l, dtype=x.dtype, device=x.device)
k = k.view((1, 1, -1, 1, 1))
y = torch.minimum(torch.maximum(x_hat - k / l, torch.zeros_like(x_hat)),
torch.ones_like(x_hat))
shape = list(x.shape)
shape[1] = -1
y = y.view(shape)
return y
@staticmethod
def tau(x, l):
return torch.ceil(_ThermometerEncodingFunction.tau_hat(x, l))
@staticmethod
def backward(ctx: Any, grad_output: torch.Tensor) -> torch.Tensor:
input, l = ctx.intermediate_results
with torch.enable_grad():
value_input = _ThermometerEncodingFunction.tau_hat(input.requires_grad_(),
l)
grad_output = torch.autograd.grad(
(value_input,), (input,), (grad_output,))[0].detach()
return grad_output, None
thermometer_encoding = _ThermometerEncodingFunction.apply
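# Worked example: with l=4 levels, a pixel value of 0.6 is compared against the
# thresholds k/l for k=0..3 (0.0, 0.25, 0.5, 0.75); tau_hat gives
# [0.6, 0.35, 0.1, 0.0] and the ceil in tau yields the thermometer code
# [1, 1, 1, 0]. The custom backward differentiates through the soft tau_hat
# instead of the non-differentiable ceil.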
class ThermometerEncodingModule(nn.Module):
def __init__(self, l: int, differentiable: bool):
super().__init__()
self._l = l
    self.differentiable = differentiable
if differentiable:
self.apply_fn = lambda x: thermometer_encoding(x, l)
else:
# TODO
# self.apply_fn = lambda y: lambda_forward_identity_backward(
# y, lambda x: thermometer_encoding(x, l))
self.apply_fn = lambda x: thermometer_encoding(x, l)
@property
def l(self):
return self._l
def forward(self, x):
    if self.differentiable:
with torch.no_grad():
return self.apply_fn(x)
else:
return self.apply_fn(x)
'''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class _CifarResNetBasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(_CifarResNetBasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class _CifarResNetBottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super().__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1,
bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x, fake_relu=False):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
return F.relu(out)
class _CifarResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10, n_input_channels=3):
super(_CifarResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(n_input_channels, 64, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x, features_only=False, features_and_logits=False):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
if features_and_logits:
return out, self.linear(out)
if not features_only:
out = self.linear(out)
return out
def cifar_resnet18(num_classes=10):
"""Resnet18 architecture adapted for small resolutions
Taken from https://github.com/kuangliu/pytorch-cifar"""
return _CifarResNet(_CifarResNetBasicBlock, [2, 2, 2, 2],
num_classes=num_classes)
def cifar_resnet50(num_classes=10):
"""Resnet50 architecture adapted for small resolutions
Taken from https://github.com/kuangliu/pytorch-cifar"""
return _CifarResNet(_CifarResNetBottleneck, [3, 4, 6, 3],
num_classes=num_classes)
class _ThermometerCifarResNet(nn.Module):
def __init__(self, num_classes: int, l: int, differentiable: bool):
super().__init__()
self.encoder = ThermometerEncodingModule(l, differentiable)
self.model = _CifarResNet(_CifarResNetBasicBlock, [2, 2, 2, 2],
num_classes=num_classes, n_input_channels=l * 3)
@property
def l(self):
return self.encoder.l
def forward(self, x, features_only: bool = False, skip_encoder: bool = False):
if not skip_encoder:
x = self.encoder(x)
return self.model(x, features_only)
# Taken from https://github.com/meliketoy/wide-resnet.pytorch
class WideResNetBasicBlock(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, stride=1):
super(WideResNetBasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1,
bias=True)
self.dropout = nn.Dropout(p=dropout_rate)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
)
def forward(self, x):
out = self.dropout(self.conv1(F.relu(self.bn1(x))))
out = self.conv2(F.relu(self.bn2(out)))
out += self.shortcut(x)
return out
# Taken from https://github.com/meliketoy/wide-resnet.pytorch
class WideResNet(nn.Module):
def __init__(self, depth, widen_factor, dropout_rate, num_classes=10,
n_input_channels=3):
super(WideResNet, self).__init__()
self.in_planes = 16
assert ((depth - 4) % 6 == 0), 'WideResNet depth should be 6n+4'
n = (depth - 4) / 6
k = widen_factor
nStages = [16, 16 * k, 32 * k, 64 * k]
self.conv1 = WideResNet.conv3x3(n_input_channels, nStages[0])
self.layer1 = self._wide_layer(WideResNetBasicBlock, nStages[1], n,
dropout_rate, stride=1)
self.layer2 = self._wide_layer(WideResNetBasicBlock, nStages[2], n,
dropout_rate, stride=2)
self.layer3 = self._wide_layer(WideResNetBasicBlock, nStages[3], n,
dropout_rate, stride=2)
self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
self.linear = nn.Linear(nStages[3], num_classes)
# initialize weights
self.apply(WideResNet.__conv_init)
@staticmethod
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=True)
@staticmethod
def __conv_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.xavier_uniform_(m.weight, gain=np.sqrt(2))
init.constant_(m.bias, 0)
elif classname.find('BatchNorm') != -1:
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
strides = [stride] + [1] * (int(num_blocks) - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, dropout_rate, stride))
self.in_planes = planes
return nn.Sequential(*layers)
def forward(self, x, features_only: bool = False):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
if not features_only:
out = self.linear(out)
return out
class _ThermometerCifarWideResNet344(nn.Module):
def __init__(self, num_classes: int, l: int, differentiable: bool):
super().__init__()
self.encoder = ThermometerEncodingModule(l, differentiable)
self.model = WideResNet(depth=34, widen_factor=4, dropout_rate=0.3,
num_classes=num_classes, n_input_channels=l * 3)
@property
def l(self):
return self.encoder.l
def forward(self, x, features_only: bool = False, skip_encoder: bool = False):
if not skip_encoder:
x = self.encoder(x)
return self.model(x, features_only)
def thermometer_encoding_cifar_resnet18(num_classes=10, l=10,
differentiable=True):
"""Resnet18 architecture adapted for small resolutions
Taken from https://github.com/kuangliu/pytorch-cifar"""
return _ThermometerCifarResNet(num_classes=num_classes, l=l,
differentiable=differentiable)
def thermometer_encoding_cifar_wideresnet344(num_classes=10, l=10,
differentiable=True):
"""WideResnet architecture.
Taken from https://github.com/meliketoy/wide-resnet.pytorch"""
return _ThermometerCifarWideResNet344(num_classes=num_classes, l=l,
differentiable=differentiable)
def non_differentiable_10_thermometer_encoding_cifar_resnet18(num_classes=10):
return thermometer_encoding_cifar_resnet18(num_classes=num_classes,
l=10, differentiable=False)
def differentiable_10_thermometer_encoding_cifar_resnet18(num_classes=10):
return thermometer_encoding_cifar_resnet18(num_classes=num_classes,
l=10, differentiable=True)
def non_differentiable_16_thermometer_encoding_cifar_resnet18(num_classes=10):
return thermometer_encoding_cifar_resnet18(num_classes=num_classes,
l=16, differentiable=False)
def differentiable_16_thermometer_encoding_cifar_resnet18(num_classes=10):
return thermometer_encoding_cifar_resnet18(num_classes=num_classes,
l=16, differentiable=True)
def non_differentiable_16_thermometer_encoding_cifar_wideresnet344(
num_classes=10):
return thermometer_encoding_cifar_wideresnet344(num_classes=num_classes,
l=16, differentiable=False)
def differentiable_16_thermometer_encoding_cifar_wideresnet344(num_classes=10):
return thermometer_encoding_cifar_wideresnet344(num_classes=num_classes,
l=16, differentiable=True)
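# Illustrative usage sketch (hypothetical `_example_*` helper, added only for
# illustration): builds the plain and the thermometer-encoded CIFAR ResNet-18
# defined above and runs a dummy CIFAR-sized batch through both. This assumes
# ThermometerEncodingModule (defined earlier in this file) accepts images in
# [0, 1] and emits l * 3 channels.
def _example_cifar_resnet_usage():
    import torch
    model = cifar_resnet18(num_classes=10)
    thermo_model = differentiable_16_thermometer_encoding_cifar_resnet18(num_classes=10)
    x = torch.rand(4, 3, 32, 32)
    logits = model(x)
    features = model(x, features_only=True)
    thermo_logits = thermo_model(x)
    # Expected: logits [4, 10], features [4, feature_dim], thermo_logits [4, 10].
    print(logits.shape, features.shape, thermo_logits.shape)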
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import torch
import torch.utils.data
from typing_extensions import Literal
NormType = Union[Literal["linf"], Literal["l2"], Literal["l1"]]
LabelRandomization = Union[Literal["random"], Literal["systematically"], Literal[None]]
def clipping_aware_rescaling_l2_torch(
x0: torch.Tensor, delta: torch.Tensor, target_l2: Union[float, torch.Tensor]
):
"""Rescale delta such that it exactly lies target_l2 away in l2 from x0 after clipping.
Adapted from https://github.com/jonasrauber/clipping-aware-rescaling/.
Args:
x0: Tensor containing the base samples.
delta: Tensor containing the perturbations to add to x0.
target_l2: Target l2 distance.
Returns:
Tensor containing required rescaling factors.
"""
N = x0.shape[0]
assert delta.shape[0] == N
delta2 = delta.pow(2).reshape((N, -1))
space = torch.where(delta >= 0, 1 - x0, x0).reshape((N, -1)).type(delta.dtype)
f2 = space.pow(2) / torch.max(delta2, 1e-20 * torch.ones_like(delta2))
f2_sorted, ks = torch.sort(f2, dim=-1)
m = torch.cumsum(delta2.gather(dim=-1, index=ks.flip(dims=(1,))), dim=-1).flip(
dims=(1,)
)
dx = f2_sorted[:, 1:] - f2_sorted[:, :-1]
dx = torch.cat((f2_sorted[:, :1], dx), dim=-1)
dy = m * dx
y = torch.cumsum(dy, dim=-1)
if not issubclass(type(target_l2), torch.Tensor):
target_l2 = torch.ones(len(x0)).to(x0.device) * target_l2
assert len(target_l2) == len(
x0
), f"Inconsistent length of `target_l2`. Must have length {len(x0)}."
assert len(target_l2.shape) == 1, "Inconsistent shape of `target_l2` (must be 1D)."
target_l2 = target_l2.view((-1, 1, 1, 1)).expand(*x0.shape)
target_l2 = target_l2.view(len(target_l2), -1)
target_l2 = target_l2.type(delta.dtype)
c = y >= target_l2**2
# work-around to get first nonzero element in each row
f = torch.arange(c.shape[-1], 0, -1, device=c.device)
v, j = torch.max(c.long() * f, dim=-1)
rows = torch.arange(0, N)
eps2 = f2_sorted[rows, j] - (y[rows, j] - target_l2[rows, j] ** 2) / m[rows, j]
# it can happen that for certain rows even the largest j is not large enough
# (i.e. v == 0), then we will just use it (without any correction) as it's
# the best we can do (this should also be the only cases where m[j] can be
# 0 and they are thus not a problem)
eps2 = torch.where(v == 0, f2_sorted[:, -1], eps2)
eps = torch.sqrt(eps2)
eps = eps.reshape((-1,) + (1,) * (len(x0.shape) - 1))
return eps
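# Illustrative usage sketch (hypothetical `_example_*` helper, added only for
# illustration): rescales a random perturbation so that, after adding it to x0
# and clipping to [0, 1], the perturbed image lies at (approximately) the
# requested l2 distance from x0.
def _example_clipping_aware_rescaling_l2():
    x0 = torch.rand(2, 3, 32, 32)
    delta = torch.randn_like(x0)
    eps = clipping_aware_rescaling_l2_torch(x0, delta, target_l2=1.0)
    x_adv = torch.clamp(x0 + eps * delta, 0, 1)
    # Expected: l2 distances close to 1.0 for each sample.
    print(torch.norm((x_adv - x0).flatten(1), p=2, dim=1))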
def clipping_aware_rescaling_l1_torch(
x0: torch.Tensor, delta: torch.Tensor, target_l1: Union[float, torch.Tensor]
):
"""Rescale delta such that it exactly lies target_l1 away in l1 from x0 after clipping.
Adapted from https://github.com/jonasrauber/clipping-aware-rescaling/.
Args:
x0: Tensor containing the base samples.
delta: Tensor containing the perturbations to add to x0.
target_l1: Target l1 distance.
Returns:
Tensor containing required rescaling factors.
"""
N = x0.shape[0]
assert delta.shape[0] == N
delta2 = delta.abs().reshape((N, -1))
space = torch.where(delta >= 0, 1 - x0, x0).reshape((N, -1)).type(delta.dtype)
f2 = space.abs() / torch.max(delta2, 1e-20 * torch.ones_like(delta2))
f2_sorted, ks = torch.sort(f2, dim=-1)
m = torch.cumsum(delta2.gather(dim=-1, index=ks.flip(dims=(1,))), dim=-1).flip(
dims=(1,)
)
dx = f2_sorted[:, 1:] - f2_sorted[:, :-1]
dx = torch.cat((f2_sorted[:, :1], dx), dim=-1)
dy = m * dx
y = torch.cumsum(dy, dim=-1)
if not issubclass(type(target_l1), torch.Tensor):
target_l1 = torch.ones(len(x0)).to(x0.device) * target_l1
assert len(target_l1) == len(
x0
), f"Inconsistent length of `target_l1`. Must have length {len(x0)}."
assert len(target_l1.shape) == 1, "Inconsistent shape of `target_l1` (must be 1D)."
target_l1 = target_l1.view((-1, 1, 1, 1)).expand(*x0.shape)
target_l1 = target_l1.view(len(target_l1), -1)
target_l1 = target_l1.type(delta.dtype)
c = y >= target_l1
# Work-around to get first nonzero element in each row.
f = torch.arange(c.shape[-1], 0, -1, device=c.device)
v, j = torch.max(c.long() * f, dim=-1)
rows = torch.arange(0, N)
eps2 = f2_sorted[rows, j] - (y[rows, j] - target_l1[rows, j]) / m[rows, j]
# It can happen that for certain rows even the largest j is not large enough
# (i.e. v == 0), then we will just use it (without any correction) as it's
# the best we can do (this should also be the only cases where m[j] can be
# 0 and they are thus not a problem).
eps = torch.where(v == 0, f2_sorted[:, -1], eps2)
eps = eps.reshape((-1,) + (1,) * (len(x0.shape) - 1))
return eps
def clipping_aware_rescaling_linf_torch(
x0: torch.Tensor, delta: torch.Tensor, target_linf: Union[float, torch.Tensor]
):
"""Rescale delta such that it exactly lies target_linf away in l2inf from x0 after clipping.
Adapted from https://github.com/jonasrauber/clipping-aware-rescaling/.
Args:
x0: Tensor containing the base samples.
delta: Tensor containing the perturbations to add to x0.
target_linf: Target linf distance.
Returns:
Tensor containing required rescaling factors.
"""
N = x0.shape[0]
assert delta.shape[0] == N
if not issubclass(type(target_linf), torch.Tensor):
target_linf = torch.ones(len(x0)).to(x0.device) * target_linf
assert len(target_linf) == len(
x0
), f"Inconsistent length of `target_linf`. Must have length {len(x0)}."
assert (
len(target_linf.shape) == 1
), "Inconsistent shape of `target_linf` (must be 1D)."
target_linf = target_linf.view((-1, 1, 1, 1)).expand(*x0.shape)
target_linf = target_linf.view(len(target_linf), -1)
target_linf = target_linf.type(delta.dtype)
delta2 = delta.abs().reshape((N, -1))
space = torch.where(delta >= 0, 1 - x0, x0).reshape((N, -1)).type(delta.dtype)
space_mask = space < target_linf
if torch.any(torch.all(space_mask, dim=-1)):
print("Not possible to rescale delta yield set Linf distance")
delta2[space_mask] = 0
delta2_sorted, _ = torch.sort(delta2, dim=-1, descending=True)
eps = target_linf[:, 0] / delta2_sorted[:, 0]
eps = eps.view(-1, 1, 1, 1)
return eps
def clipping_aware_rescaling(
x0: torch.Tensor,
delta: torch.Tensor,
target_distance: Union[float, torch.Tensor],
norm: NormType,
growing: bool = True,
shrinking: bool = True,
return_delta: bool = False,
):
"""Rescale delta such that it exactly lies target_distance away from x0 after clipping.
Adapted from https://github.com/jonasrauber/clipping-aware-rescaling/.
Args:
x0: Tensor containing the base samples.
delta: Tensor containing the perturbations to add to x0.
target_distance: Target distance.
norm: Norm for measuring the distance between x0 and delta.
growing: If True, delta is allowed to grow.
shrinking: If True, delta is allowed to shrink.
return_delta: Return rescaled delta in addition to x0
plus rescaled delta.
Returns:
If return_delta, Tuple of (x0 plus rescaled delta, rescaled delta), otherwise
only x0 plus rescaled delta.
"""
if norm == "linf":
eps = clipping_aware_rescaling_linf_torch(x0, delta, target_distance)
elif norm == "l2":
eps = clipping_aware_rescaling_l2_torch(x0, delta, target_distance)
elif norm == "l1":
eps = clipping_aware_rescaling_l1_torch(x0, delta, target_distance)
else:
raise ValueError("Invalid norm")
if not shrinking:
eps = torch.clamp_min(eps, 1.0)
if not growing:
eps = torch.clamp_max(eps, 1.0)
x = x0 + eps * delta
x = torch.clamp(x, 0, 1)
if return_delta:
return x, eps * delta
else:
return x
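# Illustrative usage sketch (hypothetical `_example_*` helper, added only for
# illustration): uses the generic dispatcher above to place a perturbed sample
# on the linf ball of radius 8/255 around x0, also returning the rescaled
# perturbation itself.
def _example_clipping_aware_rescaling_dispatch():
    x0 = torch.rand(2, 3, 32, 32)
    delta = torch.randn_like(x0)
    x_adv, rescaled_delta = clipping_aware_rescaling(
        x0, delta, target_distance=8 / 255, norm="linf", return_delta=True
    )
    # Expected: linf distances of roughly 8/255 for each sample.
    print((x_adv - x0).flatten(1).abs().max(1)[0])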
def normalize(x: torch.Tensor, norm: NormType):
"""Normalize data to have unit norm.
Args:
x: Data to normalize.
norm: Norm to use.
Returns:
Normalized x.
"""
if norm == "linf":
x = torch.sign(x)
elif norm in ("l2", "l1"):
x /= torch.norm(x, p=1 if norm == "l1" else 2, keepdim=True, dim=(1, 2, 3))
else:
raise ValueError("Invalid norm:", norm)
return x
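# Illustrative usage sketch (hypothetical `_example_*` helper, added only for
# illustration): projects a batch of gradient-like tensors onto unit-norm
# directions, as done for one step of a normalized gradient attack. Note that
# for "l2"/"l1" the division happens in-place on the input tensor.
def _example_normalize():
    g = torch.randn(2, 3, 32, 32)
    g_unit = normalize(g.clone(), norm="l2")
    # Expected: l2 norms close to 1.0 for each sample.
    print(torch.norm(g_unit.flatten(1), p=2, dim=1))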
class RandomizeLabelsDataset(torch.utils.data.Dataset):
def __init__(
self,
base: torch.utils.data.Dataset,
mode: LabelRandomization,
label_map: Optional[Dict[int, int]] = None,
n_classes: int = 10,
):
if not n_classes > 0:
raise ValueError("n_classes must be > 0.")
if mode is None and label_map is None:
raise ValueError("If mode is None, label_map must not be None.")
if mode not in (None, "random", "systematically"):
raise ValueError("mode must be one of None, random, systematically.")
self.base = base
self.mode = mode
if label_map is None:
if mode == "random":
labels = np.random.randint(low=0, high=n_classes, size=len(base))
elif mode == "systematically":
labels = [
(a + b) % n_classes for a, b in enumerate(list(range(n_classes)))
]
random.shuffle(labels)
label_map = {i: labels[i] for i in range(len(labels))}
self.label_map = label_map
def __getitem__(self, item):
x, y = self.base[item]
if self.mode == "random":
y = self.label_map[item]
elif self.mode == "systematically":
y = self.label_map[y]
else:
raise ValueError()
return x, y
def __len__(self):
return len(self.base)
def __repr__(self):
return f"RandomizeLabelsDataset(base_dataset: {repr(self.base)}, mode: {self.mode})"
def build_dataloader_from_arrays(x: np.ndarray, y: np.ndarray, batch_size: int = 1):
"""Wrap two arrays in a dataset and data loader.
Args:
x: Array containing input data.
y: Array containing target data.
batch_size: Batch size of the newly created data loader.
Returns:
Dataloader based on x,y.
"""
x_tensor = torch.tensor(x, device="cpu", dtype=torch.float32)
y_tensor = torch.tensor(y, device="cpu", dtype=torch.long)
dataset = torch.utils.data.TensorDataset(x_tensor, y_tensor)
dataloader = torch.utils.data.DataLoader(dataset, batch_size)
return dataloader
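# Illustrative usage sketch (hypothetical `_example_*` helper, added only for
# illustration): turns two numpy arrays into a batched data loader using the
# helper above.
def _example_build_dataloader_from_arrays():
    x = np.random.rand(16, 3, 32, 32).astype(np.float32)
    y = np.random.randint(0, 10, size=16)
    loader = build_dataloader_from_arrays(x, y, batch_size=4)
    for xb, yb in loader:
        # Expected: torch.Size([4, 3, 32, 32]) torch.Size([4])
        print(xb.shape, yb.shape)
        break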
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Makes print and tqdm work better together.
Based on the idea presented in https://stackoverflow.com/a/37243211
"""
import contextlib
import sys
import time
import warnings
import tqdm
from tqdm import tqdm
__all__ = ["tqdm_print"]
class __DummyFile(object):
file = None
def __init__(self, file):
self.file = file
def write(self, x):
# Avoid print() second call (useless \n)
if len(x.rstrip()) > 0:
with tqdm.external_write_mode():
tqdm.write(x, file=self.file)
def flush(self):
pass
@contextlib.contextmanager
def tqdm_print(include_warnings=True):
"""Makes sure printing text/showing warnings does not interrupt a
progressbar but just moves it to the bottom by wrapping stdout and
passing all write statements through tqdm.write."""
save_stdout = sys.stdout
sys.stdout = __DummyFile(sys.stdout)
if include_warnings:
def redirected_showwarning(message, category, filename, lineno,
file=sys.stdout, line=None):
if file is None:
file = sys.stdout
save_showwarning(message, category, filename, lineno, file, line)
save_showwarning = warnings.showwarning
warnings.showwarning = redirected_showwarning
try:
yield
finally:
# restore stdout
sys.stdout = save_stdout
if include_warnings:
warnings.showwarning = save_showwarning
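# Illustrative usage sketch (hypothetical `_example_*` helper, added only for
# illustration): any print() issued inside the context manager is routed
# through tqdm.write, so an active progress bar is not visually broken.
def _example_tqdm_print():
    with tqdm_print():
        for i in tqdm(range(3)):
            print(f"step {i}")
            time.sleep(0.1)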
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""From https://github.com/deepmind/deepmind-research/blob/master/adversarial_robustness/pytorch/model_zoo.py"""
from typing import Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
CIFAR10_MEAN = (0.4914, 0.4822, 0.4465)
CIFAR10_STD = (0.2471, 0.2435, 0.2616)
CIFAR100_MEAN = (0.5071, 0.4865, 0.4409)
CIFAR100_STD = (0.2673, 0.2564, 0.2762)
class _Swish(torch.autograd.Function):
"""Custom implementation of swish."""
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
i = ctx.saved_tensors[0]
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class Swish(nn.Module):
"""Module using custom implementation."""
def forward(self, input_tensor):
return _Swish.apply(input_tensor)
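# Illustrative check sketch (hypothetical `_example_*` helper, added only for
# illustration): the custom backward of _Swish should match the gradient of the
# plain expression x * sigmoid(x) as computed by autograd.
def _example_check_swish_gradient():
    x = torch.randn(5, dtype=torch.double, requires_grad=True)
    (g_custom,) = torch.autograd.grad(Swish()(x).sum(), x)
    (g_autograd,) = torch.autograd.grad((x * torch.sigmoid(x)).sum(), x)
    # Expected: True
    print(torch.allclose(g_custom, g_autograd))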
class _Block(nn.Module):
"""WideResNet Block."""
def __init__(self, in_planes, out_planes, stride, activation_fn=nn.ReLU):
super().__init__()
self.batchnorm_0 = nn.BatchNorm2d(in_planes)
self.relu_0 = activation_fn()
# We manually pad to obtain the same effect as `SAME` (necessary when
# `stride` is different than 1).
self.conv_0 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=0, bias=False)
self.batchnorm_1 = nn.BatchNorm2d(out_planes)
self.relu_1 = activation_fn()
self.conv_1 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.has_shortcut = in_planes != out_planes
if self.has_shortcut:
self.shortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1,
stride=stride, padding=0, bias=False)
else:
self.shortcut = None
self._stride = stride
def forward(self, x):
if self.has_shortcut:
x = self.relu_0(self.batchnorm_0(x))
else:
out = self.relu_0(self.batchnorm_0(x))
v = x if self.has_shortcut else out
if self._stride == 1:
v = F.pad(v, (1, 1, 1, 1))
elif self._stride == 2:
v = F.pad(v, (0, 1, 0, 1))
else:
raise ValueError('Unsupported `stride`.')
out = self.conv_0(v)
out = self.relu_1(self.batchnorm_1(out))
out = self.conv_1(out)
out = torch.add(self.shortcut(x) if self.has_shortcut else x, out)
return out
class _BlockGroup(nn.Module):
"""WideResNet block group."""
def __init__(self, num_blocks, in_planes, out_planes, stride,
activation_fn=nn.ReLU):
super().__init__()
block = []
for i in range(num_blocks):
block.append(
_Block(i == 0 and in_planes or out_planes,
out_planes,
i == 0 and stride or 1,
activation_fn=activation_fn))
self.block = nn.Sequential(*block)
def forward(self, x):
return self.block(x)
class WideResNet(nn.Module):
"""WideResNet."""
def __init__(self,
num_classes: int = 10,
depth: int = 28,
width: int = 10,
activation_fn: nn.Module = nn.ReLU,
mean: Union[Tuple[float, ...], float] = CIFAR10_MEAN,
std: Union[Tuple[float, ...], float] = CIFAR10_STD,
padding: int = 0,
num_input_channels: int = 3):
super().__init__()
self.mean = torch.tensor(mean).view(num_input_channels, 1, 1)
self.std = torch.tensor(std).view(num_input_channels, 1, 1)
self.mean_cuda = None
self.std_cuda = None
self.padding = padding
num_channels = [16, 16 * width, 32 * width, 64 * width]
assert (depth - 4) % 6 == 0
num_blocks = (depth - 4) // 6
self.init_conv = nn.Conv2d(num_input_channels, num_channels[0],
kernel_size=3, stride=1, padding=1, bias=False)
self.layer = nn.Sequential(
_BlockGroup(num_blocks, num_channels[0], num_channels[1], 1,
activation_fn=activation_fn),
_BlockGroup(num_blocks, num_channels[1], num_channels[2], 2,
activation_fn=activation_fn),
_BlockGroup(num_blocks, num_channels[2], num_channels[3], 2,
activation_fn=activation_fn))
self.batchnorm = nn.BatchNorm2d(num_channels[3])
self.relu = activation_fn()
self.logits = nn.Linear(num_channels[3], num_classes)
self.num_channels = num_channels[3]
def forward(self, x, features_only=False, features_and_logits=False):
if self.padding > 0:
x = F.pad(x, (self.padding,) * 4)
if x.is_cuda:
if self.mean_cuda is None:
self.mean_cuda = self.mean.cuda()
self.std_cuda = self.std.cuda()
out = (x - self.mean_cuda) / self.std_cuda
else:
out = (x - self.mean) / self.std
out = self.init_conv(out)
out = self.layer(out)
out = self.relu(self.batchnorm(out))
out = F.avg_pool2d(out, 8)
features = out.view(-1, self.num_channels)
if features_only:
return features
logits = self.logits(features)
if features_and_logits:
return features, logits
return logits
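# Illustrative usage sketch (hypothetical `_example_*` helper, added only for
# illustration): instantiates a small WideResNet variant of the class above and
# demonstrates the three forward-pass modes used throughout this code base.
def _example_wideresnet_forward_modes():
    model = WideResNet(num_classes=10, depth=28, width=1, activation_fn=Swish)
    x = torch.rand(2, 3, 32, 32)
    logits = model(x)
    features = model(x, features_only=True)
    features2, logits2 = model(x, features_and_logits=True)
    # Expected: [2, 10], [2, 64], [2, 64], [2, 10]
    print(logits.shape, features.shape, features2.shape, logits2.shape)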
class _PreActBlock(nn.Module):
"""Pre-activation ResNet Block."""
def __init__(self, in_planes, out_planes, stride, activation_fn=nn.ReLU):
super().__init__()
self._stride = stride
self.batchnorm_0 = nn.BatchNorm2d(in_planes)
self.relu_0 = activation_fn()
# We manually pad to obtain the same effect as `SAME` (necessary when
# `stride` is different than 1).
self.conv_2d_1 = nn.Conv2d(in_planes, out_planes, kernel_size=3,
stride=stride, padding=0, bias=False)
self.batchnorm_1 = nn.BatchNorm2d(out_planes)
self.relu_1 = activation_fn()
self.conv_2d_2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.has_shortcut = stride != 1 or in_planes != out_planes
if self.has_shortcut:
self.shortcut = nn.Conv2d(in_planes, out_planes, kernel_size=3,
stride=stride, padding=0, bias=False)
def _pad(self, x):
if self._stride == 1:
x = F.pad(x, (1, 1, 1, 1))
elif self._stride == 2:
x = F.pad(x, (0, 1, 0, 1))
else:
raise ValueError('Unsupported `stride`.')
return x
def forward(self, x):
out = self.relu_0(self.batchnorm_0(x))
shortcut = self.shortcut(self._pad(x)) if self.has_shortcut else x
out = self.conv_2d_1(self._pad(out))
out = self.conv_2d_2(self.relu_1(self.batchnorm_1(out)))
return out + shortcut
class PreActResNet(nn.Module):
"""Pre-activation ResNet."""
def __init__(self,
num_classes: int = 10,
depth: int = 18,
width: int = 0, # Used to make the constructor consistent.
activation_fn: nn.Module = nn.ReLU,
mean: Union[Tuple[float, ...], float] = CIFAR10_MEAN,
std: Union[Tuple[float, ...], float] = CIFAR10_STD,
padding: int = 0,
num_input_channels: int = 3):
super().__init__()
if width != 0:
raise ValueError('Unsupported `width`.')
self.mean = torch.tensor(mean).view(num_input_channels, 1, 1)
self.std = torch.tensor(std).view(num_input_channels, 1, 1)
self.mean_cuda = None
self.std_cuda = None
self.padding = padding
self.conv_2d = nn.Conv2d(num_input_channels, 64, kernel_size=3, stride=1,
padding=1, bias=False)
if depth == 18:
num_blocks = (2, 2, 2, 2)
elif depth == 34:
num_blocks = (3, 4, 6, 3)
else:
raise ValueError('Unsupported `depth`.')
self.layer_0 = self._make_layer(64, 64, num_blocks[0], 1, activation_fn)
self.layer_1 = self._make_layer(64, 128, num_blocks[1], 2, activation_fn)
self.layer_2 = self._make_layer(128, 256, num_blocks[2], 2, activation_fn)
self.layer_3 = self._make_layer(256, 512, num_blocks[3], 2, activation_fn)
self.batchnorm = nn.BatchNorm2d(512)
self.relu = activation_fn()
self.logits = nn.Linear(512, num_classes)
def _make_layer(self, in_planes, out_planes, num_blocks, stride,
activation_fn):
layers = []
for i, stride in enumerate([stride] + [1] * (num_blocks - 1)):
layers.append(
_PreActBlock(i == 0 and in_planes or out_planes,
out_planes,
stride,
activation_fn))
return nn.Sequential(*layers)
def forward(self, x, features_only=False, features_and_logits=False):
if self.padding > 0:
x = F.pad(x, (self.padding,) * 4)
if x.is_cuda:
if self.mean_cuda is None:
self.mean_cuda = self.mean.cuda()
self.std_cuda = self.std.cuda()
out = (x - self.mean_cuda) / self.std_cuda
else:
out = (x - self.mean) / self.std
out = self.conv_2d(out)
out = self.layer_0(out)
out = self.layer_1(out)
out = self.layer_2(out)
out = self.layer_3(out)
out = self.relu(self.batchnorm(out))
out = F.avg_pool2d(out, 4)
features = out.view(out.size(0), -1)
if features_only:
return features
logits = self.logits(features)
if features_and_logits:
return features, logits
return logits
def wideresnet_28_10(num_classes=10):
return WideResNet(num_classes, 28, 10, activation_fn=Swish, mean=CIFAR10_MEAN,
std=CIFAR10_STD)
def preactresnet_18(num_classes=10):
return PreActResNet(num_classes=num_classes, depth=18, activation_fn=Swish,
mean=CIFAR10_MEAN, std=CIFAR10_STD) |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import traceback
import sys
import warnings
from typing import Callable
from typing import List
from torch.utils.data import SequentialSampler
from typing_extensions import Literal
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC, SVC
import torch
import torch.utils.data
from torch.utils.data import DataLoader
import tqdm
import argparse_utils as aut
import networks
import utils as ut
__all__ = ["interior_boundary_discrimination_attack"]
LogitRescalingType = Optional[
Union[Literal["fixed"], Literal["adaptive"], Literal["tight"]]
]
SolutionGoodnessType = Union[Literal["perfect"], Literal["good"], None]
OptimizerType = Union[
Literal["sklearn"], Literal["sklearn-svm"], Literal["sgd"], Literal["adam"]
]
class __KwargsSequential(torch.nn.Sequential):
"""
Modification of a torch.nn.Sequential model that allows kwargs in the
forward pass. These will be passed to the first module of the network.
"""
def forward(self, x, **kwargs):
for idx, module in enumerate(self):
if idx == 0:
x = module(x, **kwargs)
else:
x = module(x)
return x
def _create_raw_data(
x: torch.Tensor,
y: torch.Tensor,
n_inner_points: int,
n_boundary_points: int,
n_boundary_adversarial_points: int,
n_far_off_boundary_points: int,
n_far_off_adversarial_points: int,
batch_size: int,
fill_batches_for_verification: bool,
verify_valid_inner_input_data_fn: Optional[Callable],
verify_valid_boundary_input_data_fn: Optional[Callable],
get_boundary_adversarials_fn: Optional[
Callable[[torch.Tensor, torch.Tensor, int, float], torch.Tensor]
],
device: str,
epsilon: float,
norm: ut.NormType,
n_boundary_classes: int = 1,
eta: float = 0.95,
xi: float = 1.50,
include_original: bool = True,
rejection_resampling_max_repetitions: int = 10,
sample_boundary_from_corners: bool = False,
) -> Tuple[DataLoader, DataLoader, DataLoader, int]:
"""Creates the raw training data in image space. Label 0 corresponds to
inner points and label 1 to boundary points."""
def _sample_inner_points(n_samples):
# We want to keep the original data point -> only generate n-1 new points
x_inner = torch.repeat_interleave(torch.unsqueeze(x, 0), n_samples, 0)
if norm == "linf":
# Random noise in [-1, 1].
delta_inner = 2 * torch.rand_like(x_inner) - 1.0
# Random noise in [-eps*eta, eps*eta]
delta_inner = delta_inner * eta * epsilon
elif norm == "l2":
# sample uniformly in ball with max radius eta*epsilon
delta_inner = torch.randn_like(x_inner)
delta_inner /= torch.norm(delta_inner, p=2, dim=[1, 2, 3], keepdim=True)
delta_inner *= torch.pow(
torch.rand(
(len(delta_inner), 1, 1, 1),
dtype=delta_inner.dtype,
device=delta_inner.device,
),
1 / np.prod(x.shape[1:]),
)
delta_inner *= epsilon * eta
else:
raise ValueError
if norm != "linf":
_, delta_inner = ut.clipping_aware_rescaling(
x_inner,
delta_inner,
target_distance=epsilon * eta,
norm=norm,
shrinking=True,
return_delta=True,
)
x_inner = torch.clamp(x_inner + delta_inner, 0, 1)
y_inner = torch.zeros(len(x_inner), dtype=torch.long, device=device)
return x_inner, y_inner
def _sample_boundary_points(n_samples, distance=epsilon):
x_boundary = torch.unsqueeze(x, 0).repeat(
tuple([n_samples] + [1] * len(x.shape))
)
if norm == "linf":
if sample_boundary_from_corners:
delta_boundary = torch.randint(
0,
2,
size=x_boundary.shape,
device=x_boundary.device,
dtype=x_boundary.dtype,
)
delta_boundary = (delta_boundary * 2.0 - 1.0) * distance
else:
delta_boundary = (torch.rand_like(x_boundary) * 2.0 - 1.0) * distance
elif norm == "l2":
# sample uniformly on sphere with radius epsilon
delta_boundary = torch.randn_like(x_boundary)
delta_boundary /= torch.norm(
delta_boundary, p=2, dim=[1, 2, 3], keepdim=True
)
delta_boundary *= distance
else:
raise ValueError
if not sample_boundary_from_corners:
_, delta_boundary = ut.clipping_aware_rescaling(
x_boundary,
delta_boundary,
target_distance=distance,
norm=norm,
growing=True,
shrinking=True,
return_delta=True,
)
x_boundary = torch.clamp(x_boundary + delta_boundary, 0, 1)
y_boundary = torch.ones(len(x_boundary), dtype=torch.long, device=device)
return x_boundary, y_boundary
def _create_boundary_data():
# TODO(zimmerrol): Extend this logic for multiple boundary classes
n_random_boundary_samples = n_boundary_points - n_boundary_adversarial_points
if n_random_boundary_samples == 0:
x_random, y_random = None, None
else:
if verify_valid_boundary_input_data_fn is None:
x_random, y_random = _sample_boundary_points(n_random_boundary_samples)
else:
x_random, y_random = _rejection_resampling(
_sample_boundary_points,
n_random_boundary_samples,
verify_valid_boundary_input_data_fn,
n_repetitions=rejection_resampling_max_repetitions,
)
if n_random_boundary_samples == n_boundary_points:
# do not have to add any special adversarial points anymore
x_total, y_total = x_random, y_random
else:
x_adv = get_boundary_adversarials_fn(
x.clone(), y, n_boundary_adversarial_points, epsilon
)
y_adv = torch.ones(len(x_adv), dtype=y.dtype, device=y.device)
if x_random is not None:
x_total = torch.cat((x_random, x_adv))
y_total = torch.cat((y_random, y_adv))
else:
x_total, y_total = x_adv, y_adv
if n_boundary_classes > 1:
raise NotImplementedError("n_boundary_classes > 1 is not yet implemented.")
if n_far_off_boundary_points > 0:
# add examples that have magnitude larger than epsilon but can be used
# e.g., by logit matching attacks as a reference point
n_random_far_off_samples = (
n_far_off_boundary_points - n_far_off_adversarial_points
)
if n_random_far_off_samples == 0:
x_faroff_random, y_faroff_random = None, None
else:
if verify_valid_boundary_input_data_fn is None:
x_faroff_random, y_faroff_random = _sample_boundary_points(
n_random_far_off_samples * n_boundary_classes,
distance=xi * epsilon,
)
else:
x_faroff_random, y_faroff_random = _rejection_resampling(
functools.partial(
_sample_boundary_points, distance=xi * epsilon
),
n_random_far_off_samples * n_boundary_classes,
verify_valid_boundary_input_data_fn,
n_repetitions=rejection_resampling_max_repetitions,
)
if n_boundary_classes > 1:
raise NotImplementedError(
"n_boundary_classes > 1 is not yet implemented."
)
if n_far_off_adversarial_points > 0:
x_faroff_adv = get_boundary_adversarials_fn(
x.clone(), y, n_far_off_adversarial_points, epsilon
)
y_faroff_adv = torch.ones(
len(x_faroff_adv), dtype=y.dtype, device=y.device
)
if x_faroff_random is not None:
x_faroff = torch.cat((x_faroff_random, x_faroff_adv))
y_faroff = torch.cat((y_faroff_random, y_faroff_adv))
else:
x_faroff, y_faroff = x_faroff_adv, y_faroff_adv
else:
x_faroff, y_faroff = x_faroff_random, y_faroff_random
x_total = torch.cat((x_total, x_faroff))
y_total = torch.cat((y_total, y_faroff))
return x_total, y_total
def _create_inner_data():
if include_original:
n_random_points = n_inner_points - 1
else:
n_random_points = n_inner_points
if n_random_points > 0:
if verify_valid_inner_input_data_fn is None:
x_random, y_random = _sample_inner_points(n_random_points)
else:
x_random, y_random = _rejection_resampling(
_sample_inner_points,
n_random_points,
verify_valid_inner_input_data_fn,
n_repetitions=rejection_resampling_max_repetitions,
)
if include_original:
x_total = torch.cat((torch.unsqueeze(x, 0), x_random))
y_total = torch.zeros(
len(y_random) + 1, dtype=y_random.dtype, device=y_random.device
)
else:
x_total, y_total = x_random, y_random
else:
x_total = torch.unsqueeze(x, 0)
y_total = torch.zeros(1, dtype=y_boundary.dtype, device=y_boundary.device)
return x_total, y_total
def _rejection_resampling(
sampling_fn, n_samples, verify_valid_input_data_fn, n_repetitions=10
):
"""Resample & replace until all samples returned by the sampling_fn are
valid according to verify_valid_input_data_fn."""
# Do not waste time by running a non-full batch.
if fill_batches_for_verification:
n_sampling_samples = max(n_samples, batch_size)
else:
n_sampling_samples = n_samples
x, y = sampling_fn(n_sampling_samples)
x_valid_mask = verify_valid_input_data_fn(x)
for i in range(n_repetitions + 1):
if np.sum(x_valid_mask) >= n_samples:
# found enough samples
# now restrict x to the valid samples
# and x and y such that their length matches n_samples
x = x[x_valid_mask]
x = x[:n_samples]
y = y[:n_samples]
return x, y
if i == n_repetitions:
raise RuntimeError(
f"Rejection resampling failed after {n_repetitions} " f"rounds."
)
# check how many samples to be replaced
n_x_invalid = len(x_valid_mask) - np.sum(x_valid_mask)
# generate new samples
c = sampling_fn(n_sampling_samples)[0]
# check how many of them are valid and are needed
c_valid_mask = verify_valid_input_data_fn(c)
c = c[c_valid_mask][:n_x_invalid]
c_valid_mask = c_valid_mask[c_valid_mask][:n_x_invalid]
n_x_invalid_c_valid = min(n_x_invalid, len(c))
# Replace samples and update the mask. Chained advanced indexing
# (x[~x_valid_mask][...] = ...) would only write into a copy, so use
# explicit indices instead.
invalid_indices = np.where(~x_valid_mask)[0][:n_x_invalid_c_valid]
x[torch.from_numpy(invalid_indices).to(x.device)] = c[:n_x_invalid_c_valid]
x_valid_mask[invalid_indices] = c_valid_mask[:n_x_invalid_c_valid]
if not n_inner_points > 0:
raise ValueError("n_inner_points must be > 0.")
if not n_boundary_points > 0:
raise ValueError("n_boundary_points must be > 0.")
if not n_boundary_classes == 1:
raise NotImplementedError("More than 1 boundary class is not yet supported.")
if not n_far_off_adversarial_points >= 0:
raise ValueError("n_far_off_adversarial_points must not be negative.")
if not n_far_off_boundary_points >= 0:
raise ValueError("n_far_off_boundary_points must not be negative.")
if not n_boundary_adversarial_points >= 0:
raise ValueError("n_boundary_adversarial_points must not be negative.")
x = x.to(device)
y = y.to(device)
(x_boundary, y_boundary) = _create_boundary_data()
(x_inner, y_inner) = _create_inner_data()
x = torch.cat((x_inner, x_boundary))
y = torch.cat((y_inner, y_boundary))
dataset = torch.utils.data.TensorDataset(x, y)
dataset_boundary = torch.utils.data.TensorDataset(x_boundary, y_boundary)
dataset_inner = torch.utils.data.TensorDataset(x_inner, y_inner)
dataloader = torch.utils.data.DataLoader(
dataset, shuffle=False, batch_size=batch_size
)
dataloader_boundary = torch.utils.data.DataLoader(
dataset_boundary, shuffle=False, batch_size=batch_size
)
dataloader_inner = torch.utils.data.DataLoader(
dataset_inner, shuffle=False, batch_size=batch_size
)
return dataloader, dataloader_boundary, dataloader_inner, len(x)
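# Illustrative sketch (hypothetical `_example_*` helper, added only for
# illustration) of the l2 sampling trick used by _sample_inner_points above:
# a normalized Gaussian direction is uniform on the sphere, and scaling its
# radius by eps * U**(1/d) with U ~ Uniform(0, 1) yields a point that is
# uniformly distributed inside the l2 ball of radius eps.
def _example_uniform_l2_ball_sample(eps: float = 0.5, shape=(3, 32, 32)):
    d = int(np.prod(shape))
    direction = torch.randn(1, *shape)
    direction /= torch.norm(direction.flatten(1), p=2, dim=1).view(-1, 1, 1, 1)
    radius = eps * torch.rand(1).pow(1.0 / d)
    delta = direction * radius
    # Expected: a single l2 norm that is at most eps.
    print(torch.norm(delta.flatten(1), p=2, dim=1))
    return delta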
def _get_data_features_and_maybe_logits(
classifier: Callable,
raw_data_loader: torch.utils.data.DataLoader,
get_logits: bool,
device: str,
include_raw_data: bool = False,
raw_data_loader_boundary: Optional[torch.utils.data.DataLoader] = None,
raw_data_loader_inner: Optional[torch.utils.data.DataLoader] = None,
n_repetitions_boundary: Optional[int] = None,
n_repetitions_inner: Optional[int] = None,
) -> Tuple[torch.utils.data.DataLoader, torch.Tensor, int, int]:
"""
Collects the intermediate features for a classifier and creates a new data
loader consisting only of these features.
Args:
classifier: Classifier to use as a feature extractor.
raw_data_loader: Data loader that contains images which
shall be mapped to intermediate features.
get_logits: Extract not only features but also logits
device: torch device.
include_raw_data: Include raw images in the data loader.
Returns:
Data loader mapping intermediate features to class labels.
"""
all_features = []
all_logits = [] if get_logits else None
all_labels = []
all_images = []
def _process_dataloader(dataloader: DataLoader):
with torch.no_grad():
for x, y in dataloader:
x_ = x.to(device)
if get_logits:
features, logits = classifier(x_, features_and_logits=True)
all_logits.append(logits)
else:
features = classifier(x_, features_only=True)
all_features.append(features.detach())
all_labels.append(y)
if include_raw_data:
all_images.append(x)
_process_dataloader(raw_data_loader)
if n_repetitions_boundary is not None:
raw_data_loader_boundary = torch.utils.data.DataLoader(
torch.utils.data.TensorDataset(
torch.repeat_interleave(
raw_data_loader_boundary.dataset.tensors[0],
n_repetitions_boundary,
0,
),
torch.repeat_interleave(
raw_data_loader_boundary.dataset.tensors[1],
n_repetitions_boundary,
0,
),
),
batch_size=raw_data_loader_boundary.batch_size,
)
_process_dataloader(raw_data_loader_boundary)
if n_repetitions_inner is not None:
raw_data_loader_inner = torch.utils.data.DataLoader(
torch.utils.data.TensorDataset(
torch.repeat_interleave(
raw_data_loader_inner.dataset.tensors[0], n_repetitions_inner, 0
),
torch.repeat_interleave(
raw_data_loader_inner.dataset.tensors[1], n_repetitions_inner, 0
),
),
batch_size=raw_data_loader_inner.batch_size,
)
_process_dataloader(raw_data_loader_inner)
all_features = torch.cat(all_features, 0)
if get_logits:
all_logits = torch.cat(all_logits, 0)
all_labels = torch.cat(all_labels, 0)
if include_raw_data:
all_images = torch.cat(all_images)
if len(all_features.shape) > 2:
warnings.warn(
f"Features are not vectors but higher dimensional "
f"({len(all_features.shape) - 1})"
)
if include_raw_data:
dataset = torch.utils.data.TensorDataset(all_features, all_labels, all_images)
else:
dataset = torch.utils.data.TensorDataset(all_features, all_labels)
dataloader = torch.utils.data.DataLoader(
dataset,
shuffle=not isinstance(raw_data_loader.sampler, SequentialSampler),
batch_size=raw_data_loader.batch_size,
)
return dataloader, all_logits, all_features.shape[-1], all_features.shape[0]
def _train_logistic_regression_classifier(
n_features: int,
train_loader: DataLoader,
classifier_logits: Optional[torch.Tensor],
optimizer: OptimizerType,
lr: float,
device: str,
n_classes: int = 2,
rescale_logits: LogitRescalingType = "fixed",
decision_boundary_closeness: Optional[float] = None,
solution_goodness: SolutionGoodnessType = "perfect",
class_weight: Optional[Union[Literal["balanced"], dict]] = None,
) -> torch.nn.Module:
"""
Trains a logistic regression model.
Args:
n_features: Feature dimensionality.
train_loader: Data loader containing the data to fit model on.
classifier_logits: Logits of the underlying classifier; will be used for logit
rescaling.
optimizer: Type of optimizer to use.
lr: Learning rate (only applies to explicit gradient-descent optimizer).
device: torch device.
rescale_logits: Rescale weights of model such that the logits have
at most unit absolute magnitude.
decision_boundary_closeness: (optional) The larger this value, the closer
the decision boundary will be placed to the boundary sample(s).
Returns:
Logistic regression model.
"""
if rescale_logits == "adaptive" and classifier_logits is None:
raise ValueError("classifier_logits must be supplied for adaptive rescaling")
def get_accuracy() -> Tuple[float, float, float]:
"""
:return: (total accuracy, accuracy for inner samples, accuracy for outer samples)
"""
# calculate accuracy
n_correct = {0: 0, 1: 0}
n_total = {0: 0, 1: 0}
with torch.no_grad():
for x, y in train_loader:
x = x.to(device)
logits = binary_classifier(x)
for k in n_total.keys():
n_correct[k] += (
(logits.argmax(-1).cpu() == y.cpu())
.float()[y == k]
.sum()
.item()
)
n_total[k] += len(x[y == k])
accuracy = (n_correct[0] + n_correct[1]) / (n_total[0] + n_total[1])
accuracy_inner = n_correct[0] / n_total[0]
accuracy_outer = n_correct[1] / n_total[1]
return accuracy, accuracy_inner, accuracy_outer
if not n_classes == 2:
raise NotImplementedError("Currently only supports 1 boundary class")
if optimizer.startswith("sklearn"):
if optimizer == "sklearn":
# Use logistic regression of sklearn to speed up fitting.
regression = LogisticRegression(
penalty="none",
max_iter=max(1000, int(lr)),
multi_class="multinomial",
class_weight=class_weight,
)
elif optimizer == "sklearn-svm":
# Since the problem should be perfectly possible to solve, C should not
# have any effect.
regression = LinearSVC(
penalty="l2", C=10e5, max_iter=max(1000, int(lr)), multi_class="ovr"
)
else:
raise ValueError("Invalid optimizer choice.")
regression.fit(
train_loader.dataset.tensors[0].cpu().numpy(),
train_loader.dataset.tensors[1].cpu().numpy(),
)
binary_classifier = torch.nn.Linear(n_features, n_classes)
binary_classifier.weight.data = torch.Tensor(
np.concatenate((-regression.coef_, regression.coef_), 0)
)
binary_classifier.bias.data = torch.Tensor(
np.concatenate((-regression.intercept_, regression.intercept_), 0)
)
binary_classifier = binary_classifier.to(device)
accuracy, accuracy_inner, accuracy_outer = get_accuracy()
if solution_goodness is not None and accuracy < 1.0:
raise_error = solution_goodness == "perfect"
raise_error |= (
solution_goodness == "good"
and (accuracy_inner == 0 or accuracy_outer == 0)
)
message = (
f"sklearn solver failed to find perfect solution, "
f"Accuracy = {accuracy:.4f} instead of 1.0; "
f"{accuracy_inner:.4f} and {accuracy_outer:.4f} for "
f"inner and boundary points."
)
if raise_error:
raise RuntimeError(message)
else:
warnings.warn(message)
else:
binary_classifier = torch.nn.Linear(n_features, n_classes).to(device)
criterion = torch.nn.CrossEntropyLoss()
optimizer = {"sgd": torch.optim.SGD, "adam": torch.optim.Adam}[optimizer](
lr=lr, params=binary_classifier.parameters()
)
epoch = 0
while True:
epoch += 1
for x, y in train_loader:
optimizer.zero_grad()
x = x.to(device)
y = y.to(device)
logits = binary_classifier(x)
loss = criterion(logits, y)
loss.backward()
optimizer.step()
if epoch > 50000:
raise RuntimeError(
f"Could not fit binary discriminator in 50k iterations "
f"(Loss = {loss.item()}."
"Consider using different settings for the optimizer."
)
accuracy, _, _ = get_accuracy()
# stop training once perfect accuracy is achieved
if accuracy == 1.0:
break
if rescale_logits is not None or decision_boundary_closeness is not None:
# Get value range of binarized logits.
with torch.no_grad():
logits = binary_classifier(
train_loader.dataset.tensors[0].to(device)
).detach()
if decision_boundary_closeness is not None:
logit_differences = logits[:, 0] - logits[:, 1]
lowest_difference = np.min(logit_differences.cpu().numpy())
binary_classifier.bias.data[0] -= (
decision_boundary_closeness * lowest_difference / 2
)
binary_classifier.bias.data[1] += (
decision_boundary_closeness * lowest_difference / 2
)
if rescale_logits is not None:
binarized_logit_range = (
logits.detach().cpu().numpy().max()
- logits.detach().cpu().numpy().min()
)
if rescale_logits == "fixed":
target_logit_range = 1.0
logit_offset = 0.0
logit_rescaling_factor = binarized_logit_range / target_logit_range
elif rescale_logits == "adaptive":
# Rescale the binarized logits such that they have the same value range
# i.e., min and max value match.
target_logit_range = (
classifier_logits.detach().cpu().numpy().max()
- classifier_logits.detach().cpu().numpy().min()
)
logit_rescaling_factor = binarized_logit_range / target_logit_range
logit_offset = (
classifier_logits.detach().cpu().numpy().min()
- logits.detach().cpu().numpy().min() / logit_rescaling_factor
)
elif rescale_logits == "tight":
# Rescale/shift weights such that the distance between the decision
# boundary and the boundary data is small.
# Calculate distance of boundary points to decision boundary.
distances = binary_classifier(
train_loader.dataset.tensors[0].to(device)[
train_loader.dataset.tensors[1].to(device) != 0
]
)[:, 1:].cpu()
min_distance = distances.min()
# Move decision boundary close to true boundary points
logit_rescaling_factor = 1.0
logit_offset = torch.tensor(
[+0.999 * min_distance.item(), -0.999 * min_distance.item()],
device=device,
)
else:
raise ValueError(f"Invalid value for rescale_logits: {rescale_logits}")
binary_classifier.bias.data /= logit_rescaling_factor
binary_classifier.bias.data += logit_offset
binary_classifier.weight.data /= logit_rescaling_factor
return binary_classifier
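# Illustrative sketch (hypothetical `_example_*` helper, added only for
# illustration) of the "fixed" logit rescaling performed above: dividing both
# weight and bias of a linear readout by the observed logit range shrinks the
# logit range to (at most) one while leaving the argmax decision unchanged.
def _example_fixed_logit_rescaling():
    readout = torch.nn.Linear(4, 2)
    features = torch.randn(16, 4)
    logits = readout(features)
    logit_range = (logits.max() - logits.min()).item()
    readout.weight.data /= logit_range
    readout.bias.data /= logit_range
    rescaled = readout(features)
    # Expected: a value close to 1.0, and True.
    print((rescaled.max() - rescaled.min()).item())
    print(torch.equal(logits.argmax(-1), rescaled.argmax(-1)))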
def _get_interior_boundary_discriminator_and_dataloaders(
classifier: torch.nn.Module,
x: torch.Tensor,
y: torch.Tensor,
linearization_settings: aut.DecisionBoundaryBinarizationSettings,
device: str,
batch_size: int = 512,
rescale_logits: LogitRescalingType = "fixed",
n_samples_evaluation: int = 0,
n_samples_asr_evaluation: int = 0,
verify_valid_inner_training_data_fn: Optional[
Callable[[torch.Tensor], np.ndarray]
] = None,
verify_valid_boundary_training_data_fn: Optional[
Callable[[torch.Tensor], np.ndarray]
] = None,
verify_valid_inner_validation_data_fn: Optional[
Callable[[torch.Tensor], np.ndarray]
] = None,
verify_valid_boundary_validation_data_fn: Optional[
Callable[[torch.Tensor], np.ndarray]
] = None,
get_boundary_adversarials_fn: Optional[
Callable[[torch.Tensor, torch.Tensor, float], np.ndarray]
] = None,
fill_batches_for_verification: bool = False,
far_off_distance: float = 1.25,
rejection_resampling_max_repetitions: int = 10,
train_classifier_fn: Callable[
[int, DataLoader, DataLoader, torch.Tensor, str, LogitRescalingType],
torch.nn.Module,
] = None,
decision_boundary_closeness: Optional[float] = None,
n_inference_repetitions_boundary: Optional[int] = None,
n_inference_repetitions_inner: Optional[int] = None,
relative_inner_boundary_gap: Optional[float] = 0.05,
sample_training_data_from_corners: bool = False,
) -> Tuple[
Tuple[torch.nn.Module, torch.nn.Module],
Tuple[DataLoader, DataLoader],
Tuple[float, float, float, float, bool, bool],
]:
"""
Creates a number of perturbed images, obtains the features for these images
and trains a linear, binary discriminator of these samples.
Args:
classifier: The classifier that will be used as a feature encoder.
x: The single clean image to apply the method on.
y: The ground-truth classification label of x.
linearization_settings: How to construct the binary classifier.
device: torch device
batch_size: Max batch size allowed to use
rescale_logits: Rescale weights of linear classifier such that logits
have a max scale of 1
n_samples_evaluation: Number of random samples to use for evaluation
verify_valid_inner_training_data_fn: Can be used for e.g. detector-based defenses.
Check whether all input points used for training/testing are actually valid
and won't get filtered out by the model/detector.
verify_valid_boundary_training_data_fn: See verify_valid_inner_training_data_fn but for boundary samples.
verify_valid_inner_validation_data_fn: See
verify_valid_inner_training_data_fn but for calculating the validation
scores, i.e. the random ASR.
verify_valid_boundary_validation_data_fn: See
verify_valid_boundary_training_data_fn but for calculating the validation
scores, i.e. the random ASR.
get_boundary_adversarials_fn: If given, use this function to
generate all but one of the boundary samples. This can be used for
evaluating detector-based evaluation functions.
fill_batches_for_verification: If computational cost of verification_fn
does not depend on the batch size, set this to True.
far_off_distance: Relative multiplier (in terms of epsilon) controlling
the distance between clean and far off training samples.
decision_boundary_closeness: (optional) The larger this value, the closer
the decision boundary will be placed to the boundary sample(s).
n_inference_repetitions_boundary: (optional) How often to repeat
inference for boundary samples for obtaining their features.
n_inference_repetitions_inner: (optional) How often to repeat
inference for inner samples for obtaining their features.
relative_inner_boundary_gap: (optional) Gap between interior and
boundary data relative to epsilon (i.e. a value of 0 means boundary points
can lie directly next to inner points)
sample_training_data_from_corners: Sample training data from the corners
of the epsilon ball; this setting is only possible when using linf norm.
Returns:
Tuple containing ((binary discriminator between interior and boundary points,
binary readout only),
(dataset of perturbed images, dataset of features of perturbed images),
(validation accuracies of inner/boundary/boundary surface/boundary corner points,
random attack success rate of surface/corner points))
"""
if sample_training_data_from_corners and linearization_settings.norm != "linf":
raise ValueError("Corners are only defined for linf norm.")
# Check if model is compatible with this check.
try:
with torch.no_grad():
if rescale_logits == "adaptive":
classifier(
torch.ones((1, 1, 1, 1), device=device), features_and_logits=True
)
else:
classifier(torch.ones((1, 1, 1, 1), device=device), features_only=True)
except TypeError as e:
message = str(e)
if "unexpected keyword argument 'features_only'" in message:
raise ValueError(
"model does not support `features_only` flag in forward pass."
)
if "unexpected keyword argument 'features_and_logits'" in message:
raise ValueError(
"model does not support `features_and_logits` flag in forward pass."
)
except Exception:
pass
(
raw_train_loader,
raw_train_loader_boundary,
raw_train_loader_inner,
n_raw_training_samples,
) = _create_raw_data(
x,
y,
linearization_settings.n_inner_points,
linearization_settings.n_boundary_points,
linearization_settings.n_boundary_adversarial_points,
linearization_settings.n_far_off_boundary_points,
linearization_settings.n_far_off_adversarial_points,
batch_size=batch_size,
fill_batches_for_verification=fill_batches_for_verification,
verify_valid_inner_input_data_fn=verify_valid_inner_training_data_fn,
verify_valid_boundary_input_data_fn=verify_valid_boundary_training_data_fn,
get_boundary_adversarials_fn=get_boundary_adversarials_fn,
device=device,
epsilon=linearization_settings.epsilon,
norm=linearization_settings.norm,
n_boundary_classes=1,
include_original=True,
xi=far_off_distance,
eta=1.0 - relative_inner_boundary_gap,
rejection_resampling_max_repetitions=rejection_resampling_max_repetitions,
sample_boundary_from_corners=sample_training_data_from_corners,
)
# Get features to train binary classifier on.
(
train_loader,
logits,
n_features,
n_training_samples,
) = _get_data_features_and_maybe_logits(
classifier,
raw_train_loader,
rescale_logits == "adaptive",
device,
include_raw_data=False,
n_repetitions_boundary=n_inference_repetitions_boundary,
n_repetitions_inner=n_inference_repetitions_inner,
raw_data_loader_boundary=raw_train_loader_boundary,
raw_data_loader_inner=raw_train_loader_inner,
)
if not (n_features > n_training_samples):
warnings.warn(
f"Feature dimension ({n_features}) should not be smaller than the "
f"number of training samples ({n_training_samples})",
RuntimeWarning,
)
# finally train new binary classifier on features
if train_classifier_fn is None:
binary_classifier = _train_logistic_regression_classifier(
n_features,
train_loader,
logits,
linearization_settings.optimizer,
linearization_settings.lr,
device,
class_weight=linearization_settings.class_weight,
rescale_logits=rescale_logits,
decision_boundary_closeness=decision_boundary_closeness,
)
linearized_model = __KwargsSequential(
networks.Lambda(
lambda x, **kwargs: classifier(x, features_only=True, **kwargs)
),
binary_classifier,
)
else:
binary_classifier = None
linearized_model = train_classifier_fn(
n_features,
train_loader,
raw_train_loader,
logits,
device,
rescale_logits=rescale_logits,
)
# evaluate on another set of random samples (we are only interested in the
# performance of points inside the epsilon ball)
if n_samples_evaluation > 0:
raw_validation_loader, _, _, _ = _create_raw_data(
x,
y,
n_samples_evaluation,
n_samples_evaluation,
0,
0,
0,
batch_size=batch_size,
fill_batches_for_verification=fill_batches_for_verification,
# TODO(zimmerrol): check if this makes sense. The motivation to remove this here
# was that we moved the check down to where the accuracy is calculated.
verify_valid_boundary_input_data_fn=None,
verify_valid_inner_input_data_fn=None,
get_boundary_adversarials_fn=get_boundary_adversarials_fn,
device=device,
epsilon=linearization_settings.epsilon,
norm=linearization_settings.norm,
n_boundary_classes=1,
include_original=False,
xi=far_off_distance,
rejection_resampling_max_repetitions=rejection_resampling_max_repetitions,
eta=1.0,
sample_boundary_from_corners=False,
)
_, raw_validation_loader_corners, _, _ = _create_raw_data(
x,
y,
1,
n_samples_evaluation,
0,
0,
0,
batch_size=batch_size,
fill_batches_for_verification=fill_batches_for_verification,
verify_valid_boundary_input_data_fn=None,
verify_valid_inner_input_data_fn=None,
get_boundary_adversarials_fn=get_boundary_adversarials_fn,
device=device,
epsilon=linearization_settings.epsilon,
norm=linearization_settings.norm,
n_boundary_classes=1,
include_original=False,
xi=far_off_distance,
rejection_resampling_max_repetitions=rejection_resampling_max_repetitions,
eta=1.0,
sample_boundary_from_corners=True,
)
raw_validation_loader = torch.utils.data.DataLoader(
torch.utils.data.TensorDataset(
torch.cat(
(
raw_validation_loader.dataset.tensors[0],
raw_validation_loader_corners.dataset.tensors[0],
),
0,
),
torch.cat(
(
raw_validation_loader.dataset.tensors[1],
raw_validation_loader_corners.dataset.tensors[1],
),
0,
),
),
batch_size=raw_validation_loader.batch_size,
shuffle=False,
)
# Get features to test binary classifier on.
validation_loader, _, _, _ = _get_data_features_and_maybe_logits(
classifier, raw_validation_loader, False, device, include_raw_data=True
)
inner_correctly_classified = []
boundary_correctly_classified = []
for it, (x_features, y, x_images) in enumerate(validation_loader):
x_features = x_features.to(device)
x_images = x_images.to(device)
# If we use a custom train method use the raw images for validation as
# it is possible that the classifier has no simple linear readout.
# TODO(zimmerrol): also allow detector-like models here
# if the verify_valid_input_data_fn is used, this shouldn't be a
# concern anymore since all samples generated here have already passed
# the detector
with torch.no_grad():
if binary_classifier is not None:
y_pred = binary_classifier(x_features).argmax(-1).to("cpu")
else:
y_pred = linearized_model(x_images).argmax(-1).to("cpu")
# Flag predictions for invalid data points such that they are not
# counted as correctly classified samples. Chained indexing
# (y_pred[y == 0][...] = -1) would write into a copy, so build the
# indices explicitly.
if verify_valid_inner_validation_data_fn is not None:
is_valid_input = verify_valid_inner_validation_data_fn(
x_images[y == 0]
)
inner_indices = torch.where(y == 0)[0]
y_pred[inner_indices[~torch.as_tensor(is_valid_input, dtype=torch.bool)]] = -1
if verify_valid_boundary_validation_data_fn is not None:
is_valid_input = verify_valid_boundary_validation_data_fn(
x_images[y == 1]
)
boundary_indices = torch.where(y == 1)[0]
y_pred[boundary_indices[~torch.as_tensor(is_valid_input, dtype=torch.bool)]] = -1
inner_correctly_classified += (y_pred[y == 0] == 0).numpy().tolist()
boundary_correctly_classified += (y_pred[y == 1] == 1).numpy().tolist()
inner_correctly_classified = np.array(inner_correctly_classified)
boundary_correctly_classified = np.array(boundary_correctly_classified)
validation_accuracy_inner = float(np.mean(inner_correctly_classified))
validation_accuracy_boundary = float(np.mean(boundary_correctly_classified))
validation_accuracy_boundary_surface = float(
np.mean(boundary_correctly_classified[:n_samples_evaluation])
)
validation_accuracy_boundary_corners = float(
np.mean(boundary_correctly_classified[n_samples_evaluation:])
)
random_attack_success_inner = (
np.mean(inner_correctly_classified[:n_samples_asr_evaluation]) < 1.0
)
random_attack_success_boundary_surface = (
np.mean(
boundary_correctly_classified[:n_samples_evaluation][
:n_samples_asr_evaluation
]
)
> 0.0
)
random_attack_success_boundary_corners = (
np.mean(
boundary_correctly_classified[n_samples_evaluation:][
:n_samples_asr_evaluation
]
)
> 0.0
)
random_attack_success_boundary_corners = np.logical_or(
random_attack_success_inner, random_attack_success_boundary_corners
)
random_attack_success_boundary_surface = np.logical_or(
random_attack_success_inner, random_attack_success_boundary_surface
)
validation_accuracies_and_asr = (
validation_accuracy_inner,
validation_accuracy_boundary,
validation_accuracy_boundary_surface,
validation_accuracy_boundary_corners,
random_attack_success_boundary_surface,
random_attack_success_boundary_corners,
)
else:
validation_accuracies_and_asr = None
return (
(linearized_model, binary_classifier),
(raw_train_loader, train_loader),
validation_accuracies_and_asr,
)
def __wrap_assert_get_boundary_adversarials_fn(
fn: Callable[[torch.Tensor, torch.Tensor, int, float], np.ndarray],
norm: ut.NormType,
) -> Callable[[torch.Tensor, torch.Tensor, int, float], np.ndarray]:
"""Make sure adversarial examples really lie on the epsilon ball boundary
(or are within a relative distance of 1%)."""
def inner(x: torch.Tensor, y: torch.Tensor, n: int, epsilon: float):
x_ = fn(x, y, n, epsilon)
delta = (x_ - x).cpu()
if norm == "linf":
distance = torch.abs(delta).flatten(1).max(1)[0].numpy()
elif norm in ("l2", "l1"):
distance = torch.norm(
delta, p=1 if norm == "l1" else 2, keepdim=False, dim=[1, 2, 3]
).numpy()
else:
raise ValueError(f"Unknown norm: {norm}")
# TODO(zimmerrol): Verify whether 1% tolerance is sensible.
assert np.all(np.isclose(distance, epsilon, atol=0.01 * epsilon)), (
f"Magnitude of boundary adversarial examples ({distance}) "
f"does not match target distance ({epsilon})."
)
return x_
return inner
def interior_boundary_discrimination_attack(
classifier: torch.nn.Module,
test_loader: torch.utils.data.DataLoader,
attack_fn: Callable[
[torch.nn.Module, torch.utils.data.DataLoader, dict],
Tuple[np.ndarray, Tuple[torch.Tensor, torch.Tensor]],
],
linearization_settings: aut.DecisionBoundaryBinarizationSettings,
n_samples: int,
device: str,
batch_size: int = 512,
rescale_logits: LogitRescalingType = "fixed",
n_samples_evaluation: int = 0,
n_samples_asr_evaluation: int = 0,
verify_valid_inner_training_data_fn: Optional[
Callable[[torch.Tensor], np.ndarray]
] = None,
verify_valid_boundary_training_data_fn: Optional[
Callable[[torch.Tensor], np.ndarray]
] = None,
verify_valid_inner_validation_data_fn: Optional[
Callable[[torch.Tensor], np.ndarray]
] = None,
verify_valid_boundary_validation_data_fn: Optional[
Callable[[torch.Tensor], np.ndarray]
] = None,
get_boundary_adversarials_fn: Optional[
Callable[[torch.Tensor, torch.Tensor, int, float], np.ndarray]
] = None,
fill_batches_for_verification: bool = True,
far_off_distance: float = 1.50,
rejection_resampling_max_repetitions: int = 10,
train_classifier_fn: Callable[
[int, DataLoader, torch.Tensor, str, LogitRescalingType], torch.nn.Module
] = None,
fail_on_exception: bool = False,
decision_boundary_closeness: Optional[float] = None,
n_inference_repetitions_boundary: Optional[int] = None,
n_inference_repetitions_inner: Optional[int] = None,
relative_inner_boundary_gap: Optional[float] = 0.05,
sample_training_data_from_corners: bool = False,
) -> List[Tuple[bool, float, float, Tuple[float, float, float, float, bool, bool]]]:
    """
    Performs the binarization test. That is, it replaces the last linear layer
    of the classifier with a binary classifier that distinguishes between images
    of different perturbation magnitude.
Args:
classifier: The classifier that will be used as a feature encoder.
test_loader: Data loader of the data to run the test on.
attack_fn: Function performing an adversarial attack on a classifier and
dataset passed as arguments.
linearization_settings: How to construct the binarized classifier.
n_samples: Number of samples to perform this test on.
device: torch device
batch_size: Max batch size allowed to use
rescale_logits: Rescale weights of linear classifier such that logits
have a max scale of 1
n_samples_evaluation: Number of random samples to use for evaluation
n_samples_asr_evaluation: Number of random samples used to calculate
the ASR of a random attacker
      verify_valid_inner_training_data_fn: Can be used, e.g., for
        detector-based defenses. Checks whether all input points used for
        training/testing are actually valid and won't get filtered out by the
        model/detector.
verify_valid_boundary_training_data_fn: See
verify_valid_inner_training_data_fn but for boundary samples.
verify_valid_inner_validation_data_fn: See
verify_valid_inner_training_data_fn but for calculating the validation
scores, i.e. the random ASR.
verify_valid_boundary_validation_data_fn: See
verify_valid_boundary_training_data_fn but for calculating the validation
scores, i.e. the random ASR.
      get_boundary_adversarials_fn: If given, use this function to
        generate all but one of the boundary samples. This can be useful when
        evaluating detector-based defenses.
fill_batches_for_verification: If computational cost of verification_fn
does not depend on the batch size, set this to True.
far_off_distance: Relative multiplier (in terms of epsilon) controlling
the distance between clean and far off training samples.
rejection_resampling_max_repetitions: How often to resample to satisfy
constraints on training samples.
train_classifier_fn: Callback that trains a readout classifier on a set of
features.
      fail_on_exception: Raise an exception if a single sample fails, or keep
        running and report the failures at the end.
decision_boundary_closeness: (optional) The larger this value, the closer
the decision boundary will be placed to the boundary sample(s).
n_inference_repetitions_boundary: (optional) How often to repeat
inference for boundary samples for obtaining their features.
n_inference_repetitions_inner: (optional) How often to repeat
inference for inner samples for obtaining their features.
relative_inner_boundary_gap: (optional) Gap between interior and
boundary data (in pixel space) relative to epsilon (i.e. a value of 0 means
boundary points can lie directly next to inner points)
      sample_training_data_from_corners: Sample training data from the corners
        of the epsilon ball; this setting is only meaningful for the Linf norm.
    Returns:
      List containing tuples of (attack successful, logit diff of the result of
      attack_fn, logit diff of the best training sample, (validation accuracy
      inner, validation accuracy boundary, validation accuracy boundary surface,
      validation accuracy boundary corners, random attack success rate on the
      boundary surface, random attack success rate on the boundary corners))
"""
if get_boundary_adversarials_fn is not None and (
linearization_settings.n_boundary_adversarial_points == 0
and linearization_settings.n_far_off_adversarial_points == 0
):
warnings.warn(
"get_boundary_adversarials_fn is set but number of boundary "
"and far-off adversarial examples is set to 0",
UserWarning,
)
results = []
data_iterator = iter(test_loader)
current_batch_x = None
current_batch_y = None
current_batch_index = 0
if get_boundary_adversarials_fn is not None:
# Make sure this function really returns boundary adversarials.
get_boundary_adversarials_fn = __wrap_assert_get_boundary_adversarials_fn(
get_boundary_adversarials_fn, linearization_settings.norm
)
# Show warnings only once
warnings_shown_for_messages = []
for i in tqdm.tqdm(range(n_samples)):
if current_batch_x is None or current_batch_index == len(current_batch_x) - 1:
try:
# Only use input and label.
current_batch_x, current_batch_y = next(data_iterator)
except StopIteration:
warnings.warn(
f"Could only gather {i} and not the "
f"{n_samples} requested samples."
)
break
current_batch_index = 0
else:
current_batch_index += 1
# Get current item/input data.
x = current_batch_x[current_batch_index]
y = current_batch_y[current_batch_index]
setup_successful = False
with warnings.catch_warnings(record=True) as ws:
try:
(
(binary_discriminator, binary_linear_layer),
(image_loader, feature_loader),
validation_accuracies,
) = _get_interior_boundary_discriminator_and_dataloaders(
classifier,
x,
y,
linearization_settings,
device,
rescale_logits=rescale_logits,
n_samples_evaluation=n_samples_evaluation,
n_samples_asr_evaluation=n_samples_asr_evaluation,
batch_size=batch_size,
verify_valid_inner_training_data_fn=verify_valid_inner_training_data_fn,
verify_valid_boundary_training_data_fn=verify_valid_boundary_training_data_fn,
verify_valid_inner_validation_data_fn=verify_valid_inner_validation_data_fn,
verify_valid_boundary_validation_data_fn=verify_valid_boundary_validation_data_fn,
get_boundary_adversarials_fn=get_boundary_adversarials_fn,
fill_batches_for_verification=fill_batches_for_verification,
far_off_distance=far_off_distance,
rejection_resampling_max_repetitions=rejection_resampling_max_repetitions,
train_classifier_fn=train_classifier_fn,
decision_boundary_closeness=decision_boundary_closeness,
n_inference_repetitions_boundary=n_inference_repetitions_boundary,
n_inference_repetitions_inner=n_inference_repetitions_inner,
relative_inner_boundary_gap=relative_inner_boundary_gap,
sample_training_data_from_corners=sample_training_data_from_corners,
)
setup_successful = True
except RuntimeError as ex:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname, lineno, fnname, code = traceback.extract_tb(exc_tb)[-1]
if fail_on_exception:
raise ex
else:
warnings.warn(f"Exception caught: {fname}:{lineno}({fnname}): {ex}")
for w in ws:
if str(w.message) not in warnings_shown_for_messages:
warnings_shown_for_messages.append(str(w.message))
warnings.warn(str(w.message), w.category)
if not setup_successful:
continue
attack_loader = torch.utils.data.DataLoader(
torch.utils.data.TensorDataset(
torch.unsqueeze(x, 0), torch.zeros(1, dtype=torch.long)
),
shuffle=False,
batch_size=1,
)
if linearization_settings.n_far_off_boundary_points == 0:
attack_kwargs = {}
else:
attack_kwargs = dict(
reference_points_x=image_loader.dataset.tensors[0][
-linearization_settings.n_far_off_boundary_points * 1 :
],
reference_points_y=image_loader.dataset.tensors[1][
-linearization_settings.n_far_off_boundary_points * 1 :
],
)
with warnings.catch_warnings(record=True) as ws:
attack_successful, (x_adv, logits_adv) = attack_fn(
binary_discriminator, attack_loader, attack_kwargs
)
for w in ws:
if str(w.message) not in warnings_shown_for_messages:
warnings_shown_for_messages.append(str(w.message))
warnings.warn(f"{w.filename}:{w.lineno}:{w.message}", w.category)
logit_diff_adv = (logits_adv[:, 1] - logits_adv[:, 0]).item()
# Now compare the result of the attack (x_adv) with the training samples
# in terms of their confidence.
# For this, first get logits of binary discriminator for data it was
# trained on, but only do this for samples of the adversarial class (y = 1).
logits_training = []
if train_classifier_fn is None:
for x, y in feature_loader:
with torch.no_grad():
x = x[y == 1]
if len(x) == 0:
continue
logits_training.append(binary_linear_layer(x.to(device)).cpu())
else:
for x, y in image_loader:
with torch.no_grad():
x = x[y == 1]
if len(x) == 0:
continue
logits_training.append(binary_discriminator(x.to(device)).cpu())
logits_training = torch.cat(logits_training, 0)
        # Now get the training sample with max confidence (alternatively, we
        # could also just compute the distance to the linear boundary for all
        # samples and pick the one with max distance).
logit_diff_training = torch.max(
logits_training[:, 1] - logits_training[:, 0]
).item()
result = (
attack_successful,
logit_diff_adv,
logit_diff_training,
validation_accuracies,
)
results.append(result)
return results
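# A minimal sketch (hypothetical, not the attack used in the evaluation scripts)
# of an attack_fn compatible with interior_boundary_discrimination_attack: it
# perturbs the single test point with uniform noise of magnitude `epsilon` (an
# assumed parameter) and reports whether the binarized classifier is fooled,
# together with the perturbed input and its logits.
def _example_random_noise_attack_fn(epsilon: float = 8 / 255):
    def attack_fn(model, data_loader, attack_kwargs):
        device = next(model.parameters()).device
        x, y = next(iter(data_loader))
        x, y = x.to(device), y.to(device)
        x_adv = torch.clamp(x + (torch.rand_like(x) * 2 - 1) * epsilon, 0, 1)
        with torch.no_grad():
            logits = model(x_adv)
        attack_successful = (logits.argmax(-1) != y).cpu().numpy()
        return attack_successful, (x_adv, logits)

    return attack_fn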
def format_result(
scores_logit_differences_and_validation_accuracies,
n_samples,
indent=0,
title="interior-vs-boundary discrimination",
):
"""Formats the result of the interior-vs-boundary discriminator test"""
if len(scores_logit_differences_and_validation_accuracies) == 0:
test_result = (np.nan, np.nan, np.nan, np.nan)
else:
scores = [it[0] for it in scores_logit_differences_and_validation_accuracies]
validation_scores = [
it[3] for it in scores_logit_differences_and_validation_accuracies
]
if validation_scores[0] is None:
validation_scores = (np.nan, np.nan, np.nan)
else:
validation_scores = np.array(validation_scores)
validation_scores = tuple(np.mean(validation_scores, 0))
logit_differences = [
(it[1], it[2]) for it in scores_logit_differences_and_validation_accuracies
]
logit_differences = np.array(logit_differences)
relative_performance = (logit_differences[:, 0] - logit_differences[:, 1]) / (
logit_differences[:, 1] + 1e-12
)
test_result = (
np.mean(scores),
np.mean(relative_performance),
np.std(relative_performance),
validation_scores,
)
indent = "\t" * indent
    return (
        "{0}{1}, ASR: {2}\n"
"{0}\tNormalized Logit-Difference-Improvement: {3} +- {4}\n"
"{0}\tValidation Accuracy (I, B, BS, BC, R. ASR S, R. ASR C): {5}\n"
"{0}\tSetup failed for {6}/{7} samples".format(
indent,
title,
*test_result,
n_samples - len(scores_logit_differences_and_validation_accuracies),
n_samples,
)
)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2021 Michael R Lomnitz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import torch.nn as nn
# Local
import torchvision.transforms
from jpeg.compression import compress_jpeg
from jpeg.decompression import decompress_jpeg
from jpeg.utils import diff_round, quality_to_factor
class DifferentiableJPEG(nn.Module):
def __init__(self, height, width, differentiable=True, quality=80):
""" Initialize the DiffJPEG layer
Args:
height: Original image height
width: Original image width
differentiable: If true uses custom differentiable
rounding function, if false uses standard torch.round
quality: Quality factor for jpeg compression scheme.
"""
super(DifferentiableJPEG, self).__init__()
if differentiable:
rounding = diff_round
else:
rounding = torch.round
factor = quality_to_factor(quality)
self.compress = compress_jpeg(rounding=rounding, factor=factor)
self.decompress = decompress_jpeg(height, width, rounding=rounding,
factor=factor)
def forward(self, x):
y, cb, cr = self.compress(x)
recovered = self.decompress(y, cb, cr)
return recovered
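# A minimal usage sketch (illustrative only): the layer approximates JPEG
# compression while remaining differentiable, so gradients of a downstream loss
# reach the input image. The 32x32 size and quality of 80 are arbitrary
# example values.
if __name__ == "__main__":
    x = torch.rand(1, 3, 32, 32, requires_grad=True)
    jpeg = DifferentiableJPEG(height=32, width=32, differentiable=True, quality=80)
    out = jpeg(x)
    out.mean().backward()
    print(out.shape, float(x.grad.abs().sum()))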
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2021 Michael R Lomnitz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import itertools
import numpy as np
# PyTorch
import torch
import torch.nn as nn
# Local
from . import utils
class y_dequantize(nn.Module):
"""Dequantize Y channel
Args:
image(tensor): batch x height x width
factor(float): compression factor
Returns:
batch x height x width
"""
def __init__(self, factor=1):
super(y_dequantize, self).__init__()
self.y_table = utils.y_table
self.factor = factor
def forward(self, image):
return image * (self.y_table * self.factor)
class c_dequantize(nn.Module):
"""Dequantize CbCr channel
Args:
image(tensor): batch x height x width
factor(float): compression factor
Returns:
batch x height x width
"""
def __init__(self, factor=1):
super(c_dequantize, self).__init__()
self.factor = factor
self.c_table = utils.c_table
def forward(self, image):
return image * (self.c_table * self.factor)
class idct_8x8(nn.Module):
"""Inverse discrete Cosine Transformation
Args:
dcp(tensor): batch x height x width
Returns:
batch x height x width
"""
def __init__(self):
super(idct_8x8, self).__init__()
alpha = np.array([1. / np.sqrt(2)] + [1] * 7)
self.alpha = nn.Parameter(torch.from_numpy(np.outer(alpha, alpha)).float(),
requires_grad=False)
tensor = np.zeros((8, 8, 8, 8), dtype=np.float32)
for x, y, u, v in itertools.product(range(8), repeat=4):
tensor[x, y, u, v] = np.cos((2 * u + 1) * x * np.pi / 16) * np.cos(
(2 * v + 1) * y * np.pi / 16)
self.tensor = nn.Parameter(torch.from_numpy(tensor).float(),
requires_grad=False)
def forward(self, image):
image = image * self.alpha
result = 0.25 * torch.tensordot(image, self.tensor, dims=2) + 128
result.view(image.shape)
return result
class block_merging(nn.Module):
    """Merge patches into image
    Args:
        patches(tensor): batch x h*w/64 x 8 x 8
        height(int)
        width(int)
    Returns:
        image(tensor): batch x height x width
    """
def __init__(self):
super(block_merging, self).__init__()
def forward(self, patches, height, width):
k = 8
batch_size = patches.shape[0]
image_reshaped = patches.view(batch_size, height // k, width // k, k, k)
image_transposed = image_reshaped.permute(0, 1, 3, 2, 4)
return image_transposed.contiguous().view(batch_size, height, width)
class chroma_upsampling(nn.Module):
"""Upsample chroma layers
Args:
y(tensor): y channel image
cb(tensor): cb channel
cr(tensor): cr channel
Returns:
batch x height x width x 3
"""
def __init__(self):
super(chroma_upsampling, self).__init__()
def forward(self, y, cb, cr):
def repeat(x, k=2):
height, width = x.shape[1:3]
x = x.unsqueeze(-1)
x = x.repeat(1, 1, k, k)
x = x.view(-1, height * k, width * k)
return x
cb = repeat(cb)
cr = repeat(cr)
return torch.cat([y.unsqueeze(3), cb.unsqueeze(3), cr.unsqueeze(3)], dim=3)
class ycbcr_to_rgb_jpeg(nn.Module):
"""Converts YCbCr image to RGB JPEG"""
def __init__(self):
super(ycbcr_to_rgb_jpeg, self).__init__()
matrix = np.array(
[[1., 0., 1.402], [1, -0.344136, -0.714136], [1, 1.772, 0]],
dtype=np.float32).T
self.shift = nn.Parameter(torch.tensor([0, -128., -128.]),
requires_grad=False)
self.matrix = nn.Parameter(torch.from_numpy(matrix), requires_grad=False)
def forward(self, image):
result = torch.tensordot(image + self.shift, self.matrix, dims=1)
# result = torch.from_numpy(result)
result.view(image.shape)
return result.permute(0, 3, 1, 2)
class decompress_jpeg(nn.Module):
    """Full JPEG decompression algorithm
Args:
compressed(dict(tensor)): batch x h*w/64 x 8 x 8
rounding(function): rounding function to use
factor(float): Compression factor
Returns:
batch x 3 x height x width
"""
def __init__(self, height, width, rounding=torch.round, factor=1):
super(decompress_jpeg, self).__init__()
self.c_dequantize = c_dequantize(factor=factor)
self.y_dequantize = y_dequantize(factor=factor)
self.idct = idct_8x8()
self.merging = block_merging()
self.chroma = chroma_upsampling()
self.colors = ycbcr_to_rgb_jpeg()
self.height, self.width = height, width
def forward(self, y, cb, cr):
components = {'y': y, 'cb': cb, 'cr': cr}
for k in components.keys():
if k in ('cb', 'cr'):
comp = self.c_dequantize(components[k])
height, width = int(self.height / 2), int(self.width / 2)
else:
comp = self.y_dequantize(components[k])
height, width = self.height, self.width
comp = self.idct(comp)
components[k] = self.merging(comp, height, width)
image = self.chroma(components['y'], components['cb'], components['cr'])
image = self.colors(image)
image = torch.min(255 * torch.ones_like(image),
torch.max(torch.zeros_like(image), image))
return image / 255
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2021 Michael R Lomnitz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import itertools
import numpy as np
# PyTorch
import torch
import torch.nn as nn
# Local
from . import utils
class rgb_to_ycbcr_jpeg(nn.Module):
"""Converts RGB image to YCbCr
Args:
image(tensor): batch x 3 x height x width
Returns:
result(tensor): batch x height x width x 3
"""
def __init__(self):
super(rgb_to_ycbcr_jpeg, self).__init__()
matrix = np.array(
[[0.299, 0.587, 0.114], [-0.168736, -0.331264, 0.5],
[0.5, -0.418688, -0.081312]], dtype=np.float32).T
self.shift = nn.Parameter(torch.tensor([0., 128., 128.]),
requires_grad=False)
#
self.matrix = nn.Parameter(torch.from_numpy(matrix),
requires_grad=False)
def forward(self, image):
image = image.permute(0, 2, 3, 1)
result = torch.tensordot(image, self.matrix, dims=1) + self.shift
# result = torch.from_numpy(result)
result.view(image.shape)
return result
class chroma_subsampling(nn.Module):
    """Chroma subsampling on CbCr channels
Args:
image(tensor): batch x height x width x 3
Returns:
y(tensor): batch x height x width
cb(tensor): batch x height/2 x width/2
cr(tensor): batch x height/2 x width/2
"""
def __init__(self):
super(chroma_subsampling, self).__init__()
def forward(self, image):
image_2 = image.permute(0, 3, 1, 2).clone()
avg_pool = nn.AvgPool2d(kernel_size=2, stride=(2, 2),
count_include_pad=False)
cb = avg_pool(image_2[:, 1, :, :].unsqueeze(1))
cr = avg_pool(image_2[:, 2, :, :].unsqueeze(1))
cb = cb.permute(0, 2, 3, 1)
cr = cr.permute(0, 2, 3, 1)
return image[:, :, :, 0], cb.squeeze(3), cr.squeeze(3)
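# Shape sanity sketch (illustrative, not part of the original module):
# 4:2:0-style subsampling keeps Y at full resolution and halves Cb/Cr spatially.
def _demo_chroma_subsampling_shapes():
    y, cb, cr = chroma_subsampling()(torch.zeros(1, 32, 32, 3))
    # y: (1, 32, 32), cb: (1, 16, 16), cr: (1, 16, 16)
    return y.shape, cb.shape, cr.shape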
class block_splitting(nn.Module):
""" Splitting image into patches
Input:
image(tensor): batch x height x width
Output:
        patch(tensor): batch x h*w/64 x 8 x 8
"""
def __init__(self):
super(block_splitting, self).__init__()
self.k = 8
def forward(self, image):
height, width = image.shape[1:3]
batch_size = image.shape[0]
image_reshaped = image.view(batch_size, height // self.k, self.k, -1,
self.k)
image_transposed = image_reshaped.permute(0, 1, 3, 2, 4)
return image_transposed.contiguous().view(batch_size, -1, self.k, self.k)
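# Shape sanity sketch (illustrative): an image whose sides are divisible by 8 is
# split into non-overlapping 8x8 patches, e.g. (2, 32, 32) -> (2, 16, 8, 8)
# since 32 * 32 / 64 = 16.
def _demo_block_splitting_shapes():
    patches = block_splitting()(torch.zeros(2, 32, 32))
    return patches.shape  # torch.Size([2, 16, 8, 8])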
class dct_8x8(nn.Module):
"""Discrete Cosine Transformation
Args:
image(tensor): batch x height x width
Returns:
dcp(tensor): batch x height x width
"""
def __init__(self):
super(dct_8x8, self).__init__()
tensor = np.zeros((8, 8, 8, 8), dtype=np.float32)
for x, y, u, v in itertools.product(range(8), repeat=4):
tensor[x, y, u, v] = np.cos((2 * x + 1) * u * np.pi / 16) * np.cos(
(2 * y + 1) * v * np.pi / 16)
alpha = np.array([1. / np.sqrt(2)] + [1] * 7)
#
self.tensor = nn.Parameter(torch.from_numpy(tensor).float(),
requires_grad=False)
self.scale = nn.Parameter(
torch.from_numpy(np.outer(alpha, alpha) * 0.25).float(),
requires_grad=False)
def forward(self, image):
image = image - 128
result = self.scale * torch.tensordot(image, self.tensor, dims=2)
result.view(image.shape)
return result
class y_quantize(nn.Module):
"""JPEG Quantization for Y channel
Args:
image(tensor): batch x height x width
rounding(function): rounding function to use
factor(float): Degree of compression
Returns:
image(tensor): batch x height x width
"""
def __init__(self, rounding, factor=1):
super(y_quantize, self).__init__()
self.rounding = rounding
self.factor = factor
self.y_table = utils.y_table
def forward(self, image):
image = image.float() / (self.y_table * self.factor)
image = self.rounding(image)
return image
class c_quantize(nn.Module):
"""JPEG Quantization for CrCb channels
Args:
image(tensor): batch x height x width
rounding(function): rounding function to use
factor(float): Degree of compression
Returns:
image(tensor): batch x height x width
"""
def __init__(self, rounding, factor=1):
super(c_quantize, self).__init__()
self.rounding = rounding
self.factor = factor
self.c_table = utils.c_table
def forward(self, image):
image = image.float() / (self.c_table * self.factor)
image = self.rounding(image)
return image
class compress_jpeg(nn.Module):
    """Full JPEG compression algorithm
Args:
imgs(tensor): batch x 3 x height x width
rounding(function): rounding function to use
factor(float): Compression factor
Returns:
compressed(dict(tensor)): batch x h*w/64 x 8 x 8
"""
def __init__(self, rounding=torch.round, factor=1):
super(compress_jpeg, self).__init__()
self.l1 = nn.Sequential(
rgb_to_ycbcr_jpeg(),
chroma_subsampling()
)
self.l2 = nn.Sequential(
block_splitting(),
dct_8x8()
)
self.c_quantize = c_quantize(rounding=rounding, factor=factor)
self.y_quantize = y_quantize(rounding=rounding, factor=factor)
def forward(self, image):
y, cb, cr = self.l1(image * 255)
components = {'y': y, 'cb': cb, 'cr': cr}
for k in components.keys():
comp = self.l2(components[k])
if k in ('cb', 'cr'):
comp = self.c_quantize(comp)
else:
comp = self.y_quantize(comp)
components[k] = comp
return components['y'], components['cb'], components['cr']
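# Shape sanity sketch (illustrative): for inputs whose sides are divisible by 16,
# compress_jpeg returns quantized 8x8 DCT blocks per channel: y has shape
# (B, H*W/64, 8, 8) and cb/cr have shape (B, (H/2)*(W/2)/64, 8, 8).
def _demo_compress_jpeg_shapes():
    y, cb, cr = compress_jpeg()(torch.zeros(1, 3, 32, 32))
    # y: (1, 16, 8, 8), cb: (1, 4, 8, 8), cr: (1, 4, 8, 8)
    return y.shape, cb.shape, cr.shape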
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Taken from
# https://github.com/mlomnitz/DiffJPEG
from jpeg.jpeg_module import DifferentiableJPEG
__all__ = ["DifferentiableJPEG"] |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2021 Michael R Lomnitz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
# PyTorch
import torch
import torch.nn as nn
y_table = np.array(
[[16, 11, 10, 16, 24, 40, 51, 61], [12, 12, 14, 19, 26, 58, 60,
55], [14, 13, 16, 24, 40, 57, 69, 56],
[14, 17, 22, 29, 51, 87, 80, 62], [18, 22, 37, 56, 68, 109, 103,
77], [24, 35, 55, 64, 81, 104, 113, 92],
[49, 64, 78, 87, 103, 121, 120, 101], [72, 92, 95, 98, 112, 100, 103, 99]],
dtype=np.float32).T
y_table = nn.Parameter(torch.from_numpy(y_table))
#
c_table = np.empty((8, 8), dtype=np.float32)
c_table.fill(99)
c_table[:4, :4] = np.array([[17, 18, 24, 47], [18, 21, 26, 66],
[24, 26, 56, 99], [47, 66, 99, 99]]).T
c_table = nn.Parameter(torch.from_numpy(c_table))
def diff_round(x):
"""
Differentiable rounding function
Args:
x: Tensor.
Returns:
Rounded tensor.
"""
return torch.round(x) + (x - torch.round(x))**3
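# A minimal sketch of why diff_round matters: its value equals torch.round(x)
# plus a small cubic correction, while its gradient, 3 * (x - round(x))**2, is
# non-zero almost everywhere (torch.round itself has a zero gradient almost
# everywhere).
def _demo_diff_round_gradient():
    x = torch.tensor([0.2, 0.5, 0.8], requires_grad=True)
    diff_round(x).sum().backward()
    return x.grad  # approximately tensor([0.1200, 0.7500, 0.1200])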
def quality_to_factor(quality):
"""Calculate factor corresponding to quality
Args:
quality: Quality for jpeg compression
Returns:
Compression factor
"""
if quality < 50:
quality = 5000. / quality
else:
quality = 200. - quality*2
return quality / 100.
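# A small sanity sketch: quality 50 maps to factor 1.0; higher quality yields
# smaller factors (finer quantization), lower quality yields larger ones.
def _demo_quality_to_factor():
    # {10: 5.0, 50: 1.0, 80: 0.4, 95: 0.1} (up to floating-point rounding)
    return {q: quality_to_factor(q) for q in (10, 50, 80, 95)}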
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from typing import Callable
from typing import List
import numpy as np
import torch
import torch.utils.data
import torchvision
from torch.nn import functional as F
from torchvision import transforms
import active_tests.logit_matching
import argparse_utils as aut
import networks
from attacks import pgd
def parse_arguments():
parser = argparse.ArgumentParser(
"CIFAR-10 (Defense w/ Detector) Evaluation Script")
parser.add_argument("-bs", "--batch-size", default=128, type=int)
parser.add_argument("-ns", "--n-samples", default=512, type=int)
parser.add_argument("-i", "--input", required=True, type=str)
parser.add_argument("-d", "--device", default=None, type=str)
parser.add_argument("-a", "--adversarial-attack",
type=aut.parse_adversarial_attack_argument,
default=None)
parser.add_argument("-l", "--logit-matching",
type=aut.parse_logit_matching_argument,
default=None)
args = parser.parse_args()
if args.adversarial_attack is not None:
print("Performing adversarial attack:", args.adversarial_attack)
if args.logit_matching is not None:
print("Performing logit matching:", args.logit_matching)
return args
def setup_dataloader(batch_size: int) -> torch.utils.data.DataLoader:
transform_test = transforms.Compose([
transforms.ToTensor(),
])
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True,
transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=True, num_workers=8)
return testloader
def main():
args = parse_arguments()
classifier = networks.cifar_resnet18(num_classes=10)
detector = networks.Detector(n_features_classifier=10, classifier=classifier)
state_dict = torch.load(args.input)
classifier.load_state_dict(state_dict["classifier"])
detector.load_state_dict(state_dict["detector"])
classifier.train(False)
detector.train(False)
test_loader = setup_dataloader(args.batch_size)
if args.device is None:
args.device = "cuda" if torch.cuda.is_available() else "cpu"
classifier = classifier.to(args.device)
detector = detector.to(args.device)
if args.adversarial_attack is not None:
print("faulty adversarial evaluation 1, ASR:",
run_faulty_adversarial_evaluation(classifier, detector, test_loader,
args.adversarial_attack,
args.n_samples,
args.device))
print("correct adversarial evaluation, ASR:",
run_correct_adversarial_evaluation(classifier, detector, test_loader,
args.adversarial_attack,
args.n_samples,
args.device))
if args.logit_matching is not None:
print("logit matching (dataset):",
run_logit_matching_evaluation(classifier, detector, test_loader,
args.logit_matching,
args.n_samples,
args.device))
def run_faulty_adversarial_evaluation(classifier: torch.nn.Module,
detector: torch.nn.Module,
test_loader: torch.utils.data.DataLoader,
adversarial_attack_settings: aut.AdversarialAttackSettings,
n_samples: int, device: str) -> float:
def eval_batch(x: torch.Tensor, y: torch.Tensor) -> List[bool]:
x_adv = pgd.pgd(classifier, x, y, targeted=False,
n_steps=adversarial_attack_settings.n_steps,
step_size=adversarial_attack_settings.step_size,
epsilon=adversarial_attack_settings.epsilon,
norm=adversarial_attack_settings.norm,
random_start=True)[0]
with torch.no_grad():
not_detected = detector(x_adv).argmax(-1) == torch.zeros_like(y)
incorrectly_classified = classifier(x_adv).argmax(-1) != y
successful_attack = torch.logical_and(not_detected,
incorrectly_classified).cpu().detach().numpy().tolist()
return successful_attack
return run_adversarial_evaluation(eval_batch, test_loader, n_samples, device)
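# Note: run_faulty_adversarial_evaluation above attacks only the classifier's
# cross-entropy loss and merely checks the detector afterwards, whereas
# run_correct_adversarial_evaluation below jointly optimizes against the
# classifier and the detector, which is the attack a detector-based defense
# actually has to withstand.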
def run_correct_adversarial_evaluation(classifier: torch.nn.Module,
detector: torch.nn.Module,
test_loader: torch.utils.data.DataLoader,
adversarial_attack_settings: aut.AdversarialAttackSettings,
n_samples: int, device: str) -> float:
def eval_batch(x: torch.Tensor, y: torch.Tensor) -> List[bool]:
alpha = 0.5
def loss_fn(x, y):
classifier_logits = classifier(x)
detector_logits = detector(x)
      # Only maximize the CE w.r.t. the GT label for correctly classified samples.
classifier_mask = classifier_logits.argmax(-1).detach() == y
      # Only minimize the CE w.r.t. the 0 label (i.e., not adversarial) for
      # samples detected as adversarial examples.
detector_mask = detector_logits.argmax(-1).detach() != 0
classifier_loss = -(F.cross_entropy(
classifier_logits,
y, reduction="none") * classifier_mask).mean()
detector_loss = (F.cross_entropy(
detector_logits, torch.zeros_like(y),
reduction="none") * detector_mask).mean()
return (1 - alpha) * classifier_loss + alpha * detector_loss
x_adv = pgd.general_pgd(x=x, y=y,
loss_fn=loss_fn,
is_adversarial_fn=lambda x, y: torch.logical_and(
classifier(x).argmax(-1) != y,
detector(x).argmax(
-1) == torch.zeros_like(y)),
n_steps=adversarial_attack_settings.n_steps,
step_size=adversarial_attack_settings.step_size,
epsilon=adversarial_attack_settings.epsilon,
norm=adversarial_attack_settings.norm,
random_start=True)[0]
with torch.no_grad():
not_detected = detector(x_adv).argmax(-1) == torch.zeros_like(y)
incorrectly_classified = classifier(x_adv).argmax(-1) != y
successful_attack = torch.logical_and(not_detected,
incorrectly_classified).cpu().detach().numpy().tolist()
return successful_attack
return run_adversarial_evaluation(eval_batch, test_loader, n_samples, device)
def run_adversarial_evaluation(
batch_eval_fn: Callable[[torch.tensor, torch.Tensor], List[bool]],
test_loader: torch.utils.data.DataLoader, n_samples: int,
device: str) -> float:
"""
  :param batch_eval_fn: Function that attacks a batch (x, y) and returns a list
      of booleans indicating for each sample whether the attack succeeded.
  :param test_loader: Data loader to draw samples from.
  :param n_samples: Number of samples to evaluate on.
  :param device: torch device
  :return: Attack Success Rate (ASR)
"""
results = []
for x, y in test_loader:
x = x.to(device)
y = y.to(device)
results += batch_eval_fn(x, y)
if len(results) >= n_samples:
break
results = results[:n_samples]
return np.mean(np.array(results).astype(np.float32))
def run_logit_matching_evaluation(classifier: Callable, detector: Callable,
test_loader: torch.utils.data.DataLoader,
logit_matching_settings: aut.LogitMatchingSettings, n_samples: int,
device: str):
merged_logits_fn = lambda x: torch.cat((classifier(x), detector(x)), 1)
results = []
for x, y in test_loader:
x = x.to(device)
results += active_tests.logit_matching.dataset_samples_logit_matching(
merged_logits_fn, x, logit_matching_settings.n_steps,
logit_matching_settings.step_size)
if len(results) >= n_samples:
break
results = results[:n_samples]
results = np.sqrt(np.array(results).sum(-1))
print(results)
if __name__ == "__main__":
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import textwrap
from typing import List
from typing import Tuple
from typing import Union
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.data
import torchvision
import tqdm
from torchvision import transforms
from typing_extensions import Literal
import warnings
import active_tests.decision_boundary_binarization as dbl
import argparse_utils
import argparse_utils as aut
import networks
from attacks import adaptive_kwta_attack
from attacks import autopgd
from attacks import fab
from attacks import pgd
from attacks import thermometer_ls_pgd
LossType = Union[Literal["ce"], Literal["logit-diff"]]
def parse_arguments():
"""Parse arguments."""
parser = argparse.ArgumentParser("Evaluation Script")
parser.add_argument(
"-ds", "--dataset", choices=("cifar10", "imagenet"), default="cifar10"
)
parser.add_argument("-bs", "--batch-size", default=128, type=int)
parser.add_argument("-ns", "--n-samples", default=512, type=int)
parser.add_argument("-i", "--input", required=True, type=str)
parser.add_argument("-d", "--device", default=None, type=str)
parser.add_argument(
"-c",
"--classifier",
default="networks.cifar_resnet18",
type=argparse_utils.parse_classifier_argument,
)
parser.add_argument("-cin", "--classifier-input-noise", default=0.0, type=float)
parser.add_argument("-cgn", "--classifier-gradient-noise", default=0.0, type=float)
parser.add_argument("-cls", "--classifier-logit-scale", default=1.0, type=float)
parser.add_argument(
"-cinorm", "--classifier-input-normalization", action="store_true"
)
parser.add_argument(
"-cijq",
"--classifier-input-jpeg-quality",
default=100,
type=int,
help="Setting a negative value leads to a differentiable JPEG version",
)
parser.add_argument(
"-cigb", "--classifier-input-gaussian-blur-stddev", default=0.0, type=float
)
parser.add_argument(
"-a",
"--adversarial-attack",
type=aut.parse_adversarial_attack_argument,
default=None,
)
parser.add_argument(
"-dbl",
"--decision-boundary-binarization",
type=aut.parse_decision_boundary_binarization_argument,
default=None,
)
parser.add_argument("--dbl-sample-from-corners", action="store_true")
parser.add_argument("-nfs", "--n-final-softmax", default=1, type=int)
parser.add_argument("-ciusvt", "--classifier-input-usvt", action="store_true")
parser.add_argument("--no-ce-loss", action="store_true")
parser.add_argument("--no-logit-diff-loss", action="store_true")
parser.add_argument("--no-clean-evaluation", action="store_true")
args = parser.parse_args()
assert not (
args.no_ce_loss and args.no_logit_diff_loss
), "Only one loss can be disabled"
print("Detected type of tests to run:")
if args.adversarial_attack is not None:
print("\tadversarial attack:", args.adversarial_attack)
if args.decision_boundary_binarization is not None:
print(
"\tinterior-vs-boundary discrimination:",
args.decision_boundary_binarization,
)
print()
return args
def setup_dataloader(
dataset: Union[Literal["cifar10", "imagenet"]], batch_size: int
) -> torch.utils.data.DataLoader:
if dataset == "cifar10":
transform_test = transforms.Compose([transforms.ToTensor()])
create_dataset_fn = lambda download: torchvision.datasets.CIFAR10(
root="./data/cifar10",
train=False,
download=download,
transform=transform_test,
)
elif dataset == "imagenet":
transform_test = transforms.Compose(
[transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()]
)
create_dataset_fn = lambda _: torchvision.datasets.ImageNet(
root="./data/imagenet", split="val", transform=transform_test
)
else:
raise ValueError("Invalid value for dataset.")
try:
testset = create_dataset_fn(False)
    except Exception:
        # The dataset is not available locally yet; retry with download enabled.
testset = create_dataset_fn(True)
testloader = torch.utils.data.DataLoader(
testset, batch_size=batch_size, shuffle=True, num_workers=8
)
return testloader
def main():
args = parse_arguments()
if args.input == "pretrained":
assert args.dataset == "imagenet"
classifier = args.classifier(pretrained=True)
print("Base Classifier:", args.classifier.__name__)
else:
classifier = args.classifier(
**({"pretrained": False} if args.dataset == "imagenet" else {})
)
print("Base Classifier:", args.classifier.__name__)
print("Loading checkpoint:", args.input)
state_dict = torch.load(args.input, map_location="cpu")
if "classifier" in state_dict:
classifier_state_dict = state_dict["classifier"]
else:
classifier_state_dict = state_dict
try:
classifier.load_state_dict(classifier_state_dict)
except RuntimeError as ex:
print(
f"Could not load weights due to error: "
f"{textwrap.shorten(str(ex), width=50, placeholder='...')}"
)
print("Trying to remap weights by removing 'module.' namespace")
modified_classifier_state_dict = {
(k[len("module.") :] if k.startswith("module.") else k): v
for k, v in classifier_state_dict.items()
}
try:
classifier.load_state_dict(modified_classifier_state_dict)
print("Successfully loaded renamed weights.")
except RuntimeError:
print("Remapping weights did also not work. Initial error:")
raise ex
classifier.train(False)
test_loader = setup_dataloader(args.dataset, args.batch_size)
if args.device is None:
args.device = "cuda" if torch.cuda.is_available() else "cpu"
classifier = classifier.to(args.device)
if args.classifier_input_normalization:
if args.dataset == "cifar10":
classifier = networks.InputNormalization(
classifier,
torch.tensor([0.4914, 0.4822, 0.4465]),
torch.tensor([0.2023, 0.1994, 0.2010]),
)
elif args.dataset == "imagenet":
classifier = networks.InputNormalization(
classifier,
torch.tensor([0.485, 0.456, 0.406]),
torch.tensor([0.229, 0.224, 0.225]),
)
else:
raise ValueError("Unknown dataset.")
if args.classifier_input_noise > 0:
classifier = networks.GaussianNoiseInputModule(
classifier, args.classifier_input_noise
)
if args.classifier_gradient_noise > 0:
classifier = networks.GaussianNoiseGradientModule(
classifier, args.classifier_gradient_noise
)
if args.classifier_input_jpeg_quality != 100:
if args.classifier_input_jpeg_quality > 0:
classifier = networks.JPEGForwardIdentityBackwardModule(
classifier,
args.classifier_input_jpeg_quality,
size=32 if args.dataset == "cifar10" else 224,
legacy=True,
)
print("Using (slow) legacy JPEG mode")
else:
classifier = networks.DifferentiableJPEGModule(
classifier,
args.classifier_input_jpeg_quality,
size=32 if args.dataset == "cifar10" else 224,
)
classifier = classifier.to(args.device)
if args.classifier_input_gaussian_blur_stddev > 0:
classifier = networks.GausianBlurForwardIdentityBackwardModule(
classifier, 3, args.classifier_input_gaussian_blur_stddev
)
if args.classifier_input_usvt:
classifier = networks.UVSTModule(classifier)
if args.classifier_logit_scale > 1:
classifier = networks.ScaledLogitsModule(
classifier, args.classifier_logit_scale
)
if args.n_final_softmax > 1:
classifier = torch.nn.Sequential(
classifier, *[torch.nn.Softmax() for _ in range(args.n_final_softmax)]
)
classifier = classifier.to(args.device)
if not args.no_clean_evaluation:
print(
"clean evaluation, Accuracy: {0}\n\tclass accuracy: {1}\n\tclass histogram: {3}".format(
*run_clean_evaluation(classifier, test_loader, args.device)
)
)
if args.adversarial_attack is not None:
print("adversarial evaluation:")
if not args.no_ce_loss:
print(
"\tadversarial evaluation (ce loss), ASR:",
run_adversarial_evaluation(
classifier,
test_loader,
"ce",
args.adversarial_attack,
args.n_samples,
args.device,
),
)
if not args.no_logit_diff_loss:
print(
"\tadversarial evaluation (logit-diff loss), ASR:",
run_adversarial_evaluation(
classifier,
test_loader,
"logit-diff",
args.adversarial_attack,
args.n_samples,
args.device,
),
)
max_eps_adversarial_attack_settings = copy.deepcopy(args.adversarial_attack)
        # Set epsilon to 0.5, then rescale the step size so that its ratio to
        # epsilon stays the same.
max_eps_adversarial_attack_settings.epsilon = 0.50
max_eps_adversarial_attack_settings.step_size = (
args.adversarial_attack.step_size / args.adversarial_attack.epsilon * 0.5
)
if not args.no_ce_loss:
print(
"\tadversarial evaluation (ce loss, eps = 0.5), ASR:",
run_adversarial_evaluation(
classifier,
test_loader,
"ce",
max_eps_adversarial_attack_settings,
args.n_samples,
args.device,
),
)
if not args.no_logit_diff_loss:
print(
"\tadversarial evaluation (logit-diff loss, eps = 0.5), ASR:",
run_adversarial_evaluation(
classifier,
test_loader,
"logit-diff",
max_eps_adversarial_attack_settings,
args.n_samples,
args.device,
),
)
if args.decision_boundary_binarization is not None:
print("decision boundary binarization:")
if not args.no_ce_loss:
print(
run_decision_boundary_binarization(
classifier,
test_loader,
"ce",
args.decision_boundary_binarization,
args.n_samples,
args.device,
args.batch_size,
"interior-vs-boundary discrimination (ce loss)",
args.dbl_sample_from_corners,
)
)
if not args.no_logit_diff_loss:
print(
run_decision_boundary_binarization(
classifier,
test_loader,
"logit-diff",
args.decision_boundary_binarization,
args.n_samples,
args.device,
args.batch_size,
"interior-vs-boundary discrimination (logit-diff loss)",
args.dbl_sample_from_corners,
)
)
def run_clean_evaluation(
classifier: torch.nn.Module,
test_loader: torch.utils.data.DataLoader,
device: str,
n_classes: int = 10,
) -> Tuple[float, List[float], np.ndarray, List[int]]:
"""
Perform evaluation of classifier on clean data.
Args:
classifier: Classifier to evaluate.
test_loader: Dataloader to perform evaluation on.
device: torch device
n_classes: Number of classes in the dataset.
Returns
Accuracy, Accuracy per class, Correctly classified per sample,
Histogram of predicted labels
"""
n_correct = 0
n_total = 0
class_histogram_correct = {}
class_histogram_total = {}
class_histogram_predicted = {}
if n_classes is not None:
for i in range(n_classes):
class_histogram_correct[i] = 0
class_histogram_total[i] = 0
class_histogram_predicted[i] = 0
correctly_classified = []
pbar = tqdm.tqdm(test_loader, leave=False)
for x, y in pbar:
x = x.to(device)
y = y.to(device)
with torch.no_grad():
y_pred = classifier(x).argmax(-1)
n_correct += (y_pred == y).long().sum().item()
n_total += len(x)
correctly_classified.append((y_pred == y).detach().cpu())
for y_, y_pred_ in zip(
y.detach().cpu().numpy(), y_pred.detach().cpu().numpy()
):
if y_ not in class_histogram_correct:
class_histogram_correct[y_] = 0
class_histogram_correct[y_] += int(y_ == y_pred_)
if y_ not in class_histogram_total:
class_histogram_total[y_] = 0
class_histogram_total[y_] += 1
if y_pred_ not in class_histogram_predicted:
class_histogram_predicted[y_pred_] = 0
class_histogram_predicted[y_pred_] += 1
pbar.set_description(f"Accuracy = {n_correct / n_total:.4f}")
correctly_classified = torch.cat(correctly_classified).numpy()
class_histogram_correct = [
class_histogram_correct[k] for k in sorted(class_histogram_correct.keys())
]
class_histogram_total = [
class_histogram_total[k] for k in sorted(class_histogram_total.keys())
]
class_histogram_accuracy = [
a / b if b > 0 else np.nan
for a, b in zip(class_histogram_correct, class_histogram_total)
]
class_histogram_predicted = [
class_histogram_predicted[k] for k in sorted(class_histogram_predicted.keys())
]
return (
n_correct / n_total,
class_histogram_accuracy,
correctly_classified,
class_histogram_predicted,
)
def run_decision_boundary_binarization(
classifier: torch.nn.Module,
test_loader: torch.utils.data.DataLoader,
loss: LossType,
linearization_settings: aut.DecisionBoundaryBinarizationSettings,
n_samples: int,
device: str,
batch_size: int,
    title: str = "interior-vs-boundary discrimination",
sample_training_data_from_corners: bool = False,
) -> str:
"""Perform the binarization test for a classifier.
Args:
classifier: Classifier to evaluate.
test_loader: Test dataloader.
loss: Loss to use in the adversarial attack during the test.
linearization_settings: Settings of the test.
n_samples: Number of samples to perform test on.
device: Torch device.
batch_size: Batch size.
title: Name of the experiment that will be shown in log.
sample_training_data_from_corners: Sample boundary samples from
corners or surfaces.
Returns:
String summarizing the results of the test.
"""
def attack_fn(
model: torch.nn.Module, data_loader: torch.utils.data.DataLoader, attack_kwargs
):
result = run_adversarial_evaluation(
model,
data_loader,
loss,
linearization_settings.adversarial_attack_settings,
n_samples=1,
device=device,
return_samples=True,
n_classes=2,
early_stopping=True,
)
# return ASR, (x_adv, logits(x_adv))
return result[0], (result[1][1], result[1][2])
scores_logit_differences_and_validation_accuracies_and_asr = dbl.interior_boundary_discrimination_attack(
classifier,
test_loader,
attack_fn,
linearization_settings,
n_samples,
device,
n_samples_evaluation=200, # was set to n_samples
n_samples_asr_evaluation=linearization_settings.adversarial_attack_settings.n_steps,
rescale_logits="adaptive",
decision_boundary_closeness=0.9999,
sample_training_data_from_corners=sample_training_data_from_corners,
batch_size=batch_size,
)
return dbl.format_result(
scores_logit_differences_and_validation_accuracies_and_asr,
n_samples,
title=title,
)
def run_adversarial_evaluation(
classifier: torch.nn.Module,
test_loader: torch.utils.data.DataLoader,
loss: LossType,
adversarial_attack_settings: aut.AdversarialAttackSettings,
n_samples: int,
device: str,
randomly_targeted: bool = False,
n_classes: int = 10,
return_samples: bool = False,
early_stopping: bool = True,
) -> Tuple[float, ...]:
"""
Perform an adversarial evaluation of a classifier.
Args:
classifier: Classifier to evaluate.
test_loader: Test dataloader.
loss: Loss to use in adversarial attack.
adversarial_attack_settings: Settings of adversarial evaluation.
n_samples: Number of samples to evaluate robustness on.
        device: Torch device.
        randomly_targeted: Whether to use random targets for the attack.
        n_classes: Number of classes in the dataset (relevant for random targets).
        return_samples: Whether to also return the clean/perturbed samples and
            their logits.
early_stopping: Stop once all samples have successfully been attacked
Returns:
Either only Attack Success Rate (ASR) or Tuple containing ASR and
clean/perturbed samples as well as their logits.
"""
loss_per_sample = adversarial_attack_settings.attack == "kwta"
if loss == "ce":
sign = 1 if randomly_targeted else -1
if loss_per_sample:
reduction = "none"
else:
reduction = "sum"
loss_fn = lambda x, y: sign * F.cross_entropy(x, y, reduction=reduction)
elif loss == "logit-diff":
sign = -1 if randomly_targeted else 1
def loss_fn(logits, y):
gt_logits = logits[range(len(y)), y]
other = torch.max(
logits - 2 * torch.max(logits) * F.one_hot(y, logits.shape[-1]), -1
)[0]
value = sign * (gt_logits - other)
if not loss_per_sample:
value = value.sum()
return value
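    # Note: the "logit-diff" loss is the margin between the ground-truth logit
    # and the largest other logit; subtracting 2 * max(logits) * one_hot(y)
    # inside the max simply excludes the ground-truth class. With the sign
    # conventions above, the attack minimizes this value, i.e. it drives the
    # margin negative (misclassification) in the untargeted case.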
if adversarial_attack_settings.attack == "kwta":
if loss != "logit-diff":
warnings.warn(
"Adaptive attack for kWTA originally uses logit "
"differences and not CE loss",
RuntimeWarning,
)
n_attacked = 0
attack_successful = []
clean_samples = []
perturbed_samples = []
clean_or_target_labels = []
predicted_logits = []
for x, y in test_loader:
x = x[: max(1, min(len(x), n_samples - n_attacked))]
y = y[: max(1, min(len(y), n_samples - n_attacked))]
x = x.to(device)
y = y.to(device)
if randomly_targeted:
y = (y + torch.randint_like(y, 0, n_classes)) % n_classes
if adversarial_attack_settings.attack == "pgd":
x_adv = pgd.general_pgd(
loss_fn=lambda x, y: loss_fn(classifier(x), y),
is_adversarial_fn=lambda x, y: classifier(x).argmax(-1) == y
if randomly_targeted
else classifier(x).argmax(-1) != y,
x=x,
y=y,
n_steps=adversarial_attack_settings.n_steps,
step_size=adversarial_attack_settings.step_size,
epsilon=adversarial_attack_settings.epsilon,
norm=adversarial_attack_settings.norm,
early_stopping=early_stopping,
n_averaging_steps=adversarial_attack_settings.n_averages,
random_start=adversarial_attack_settings.random_start,
)[0]
elif adversarial_attack_settings.attack == "autopgd":
temp = autopgd.auto_pgd(
model=classifier,
x=x,
y=y,
n_steps=adversarial_attack_settings.n_steps,
epsilon=adversarial_attack_settings.epsilon,
norm=adversarial_attack_settings.norm,
targeted=randomly_targeted,
n_averaging_steps=adversarial_attack_settings.n_averages,
)
x_adv = temp[0]
if randomly_targeted:
y = temp[-1]
elif adversarial_attack_settings.attack == "autopgd+":
temp = autopgd.auto_pgd(
model=classifier,
x=x,
y=y,
n_steps=adversarial_attack_settings.n_steps,
epsilon=adversarial_attack_settings.epsilon,
norm=adversarial_attack_settings.norm,
# from https://github.com/fra31/auto-attack/blob/
# 6482e4d6fbeeb51ae9585c41b16d50d14576aadc/autoattack/
# autoattack.py#L281
n_restarts=4,
targeted=randomly_targeted,
n_averaging_steps=adversarial_attack_settings.n_averages,
)
x_adv = temp[0]
if randomly_targeted:
y = temp[-1]
elif adversarial_attack_settings.attack == "fab":
temp = fab.fab(
model=classifier,
x=x,
y=y,
n_steps=adversarial_attack_settings.n_steps,
epsilon=adversarial_attack_settings.epsilon,
norm=adversarial_attack_settings.norm,
targeted=randomly_targeted,
n_restarts=5,
)
x_adv = temp[0]
if randomly_targeted:
y = temp[-1]
elif adversarial_attack_settings.attack == "kwta":
x_adv = adaptive_kwta_attack.gradient_estimator_pgd(
model=classifier,
loss_fn=lambda x, y: loss_fn(classifier(x), y),
x=x,
y=y,
n_steps=adversarial_attack_settings.n_steps,
step_size=adversarial_attack_settings.step_size,
epsilon=adversarial_attack_settings.epsilon,
norm=adversarial_attack_settings.norm,
random_start=True,
early_stopping=early_stopping,
targeted=randomly_targeted,
)[0]
elif adversarial_attack_settings.attack == "thermometer-lspgd":
if hasattr(classifier, "l"):
l = classifier.l
else:
l = 16
warnings.warn(
"Could not determine thermometer parameter l; "
"using default of 16",
RuntimeWarning,
)
x_adv = thermometer_ls_pgd.general_thermometer_ls_pgd(
loss_fn=lambda x, y: loss_fn(classifier(x, skip_encoder=True), y),
is_adversarial_fn=lambda x, y: classifier(x).argmax(-1) == y
if randomly_targeted
else classifier(x).argmax(-1) != y,
x=x,
y=y,
n_steps=adversarial_attack_settings.n_steps,
step_size=adversarial_attack_settings.step_size,
epsilon=adversarial_attack_settings.epsilon,
norm=adversarial_attack_settings.norm,
random_start=True,
early_stopping=early_stopping,
temperature=1.0,
annealing_factor=1.0, # 1.0/1.2,
n_restarts=0,
l=l,
)[0]
else:
raise ValueError(
f"Unknown adversarial attack "
f"({adversarial_attack_settings.attack})."
)
with torch.no_grad():
logits = classifier(x_adv)
if randomly_targeted:
correctly_classified = logits.argmax(-1) == y
attack_successful += (
correctly_classified.cpu().detach().numpy().tolist()
)
else:
incorrectly_classified = logits.argmax(-1) != y
attack_successful += (
incorrectly_classified.cpu().detach().numpy().tolist()
)
clean_samples.append(x.cpu())
perturbed_samples.append(x_adv.cpu())
clean_or_target_labels.append(y.cpu())
predicted_logits.append(logits.cpu())
n_attacked += len(x)
if n_attacked >= n_samples:
break
attack_successful = np.array(attack_successful)
clean_samples = np.concatenate(clean_samples, 0)
perturbed_samples = np.concatenate(perturbed_samples, 0)
clean_or_target_labels = np.concatenate(clean_or_target_labels, 0)
predicted_logits = np.concatenate(predicted_logits, 0)
attack_successful = attack_successful[:n_samples]
clean_samples = clean_samples[:n_samples]
perturbed_samples = perturbed_samples[:n_samples]
clean_or_target_labels = clean_or_target_labels[:n_samples]
predicted_logits = predicted_logits[:n_samples]
result = [np.mean(attack_successful).astype(np.float32)]
if return_samples:
result += [
(clean_samples, perturbed_samples, predicted_logits, clean_or_target_labels)
]
return tuple(result)
if __name__ == "__main__":
warnings.filterwarnings("ignore", category=UserWarning, module="torch")
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modified_cleverhans.devtools.version import dev_version
# Attach a hex digest to the version string to keep track of changes
# in the development branch
__version__ = '2.0.0-' + dev_version()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import sys
import warnings
from . import utils
def data_mnist(datadir='/tmp/', train_start=0, train_end=60000, test_start=0,
test_end=10000):
"""
Load and preprocess MNIST dataset
:param datadir: path to folder where data should be stored
    :param train_start: index of the first training set example to keep
    :param train_end: index one past the last training set example (exclusive)
    :param test_start: index of the first test set example to keep
    :param test_end: index one past the last test set example (exclusive)
:return: tuple of four arrays containing training data, training labels,
testing data and testing labels.
"""
assert isinstance(train_start, int)
assert isinstance(train_end, int)
assert isinstance(test_start, int)
assert isinstance(test_end, int)
if 'tensorflow' in sys.modules:
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(datadir, one_hot=True, reshape=False)
X_train = np.vstack((mnist.train.images, mnist.validation.images))
Y_train = np.vstack((mnist.train.labels, mnist.validation.labels))
X_test = mnist.test.images
Y_test = mnist.test.labels
else:
warnings.warn("CleverHans support for Theano is deprecated and "
"will be dropped on 2017-11-08.")
import keras
from keras.datasets import mnist
from keras.utils import np_utils
# These values are specific to MNIST
img_rows = 28
img_cols = 28
nb_classes = 10
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
if keras.backend.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
X_train = X_train[train_start:train_end]
Y_train = Y_train[train_start:train_end]
X_test = X_test[test_start:test_end]
Y_test = Y_test[test_start:test_end]
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
return X_train, Y_train, X_test, Y_test
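# Example (not part of the original module): a minimal usage sketch showing
# how the start/end indices carve out subsets. Running it requires TensorFlow
# (or Keras) to be importable and will download MNIST into `datadir`.
def _example_data_mnist_subset():
    # Load only the first 1000 training and 200 test examples.
    X_train, Y_train, X_test, Y_test = data_mnist(
        datadir='/tmp/', train_start=0, train_end=1000,
        test_start=0, test_end=200)
    assert X_train.shape[0] == 1000 and Y_train.shape[0] == 1000
    assert X_test.shape[0] == 200 and Y_test.shape[0] == 200
    return X_train, Y_train, X_test, Y_test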
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
class Model(object):
"""
An abstract interface for model wrappers that exposes model symbols
needed for making an attack. This abstraction removes the dependency on
any specific neural network package (e.g. Keras) from the core
code of CleverHans. It can also simplify exposing the hidden features of a
model when a specific package does not directly expose them.
"""
__metaclass__ = ABCMeta
def __init__(self):
pass
def __call__(self, *args, **kwargs):
"""
For compatibility with functions used as model definitions (taking
an input tensor and returning the tensor giving the output
of the model on that input).
"""
return self.get_probs(*args, **kwargs)
def get_layer(self, x, reuse, layer):
"""
Expose the hidden features of a model given a layer name.
:param x: A symbolic representation of the network input
:param layer: The name of the hidden layer to return features at.
:return: A symbolic representation of the hidden features
:raise: NoSuchLayerError if `layer` is not in the model.
"""
# Return the symbolic representation for this layer.
output = self.fprop(x, reuse)
try:
requested = output[layer]
except KeyError:
raise NoSuchLayerError()
return requested
def get_logits(self, x, reuse):
"""
:param x: A symbolic representation of the network input
:return: A symbolic representation of the output logits (i.e., the
values fed as inputs to the softmax layer).
"""
return self.get_layer(x, reuse, 'logits')
def get_probs(self, x, reuse=True):
"""
:param x: A symbolic representation of the network input
:return: A symbolic representation of the output probabilities (i.e.,
the output values produced by the softmax layer).
"""
try:
return self.get_layer(x, reuse, 'probs')
except NoSuchLayerError:
import tensorflow as tf
return tf.nn.softmax(self.get_logits(x, True))
def get_layer_names(self):
"""
:return: a list of names for the layers that can be exposed by this
model abstraction.
"""
if hasattr(self, 'layer_names'):
return self.layer_names
raise NotImplementedError('`get_layer_names` not implemented.')
def fprop(self, x, reuse):
"""
Exposes all the layers of the model returned by get_layer_names.
:param x: A symbolic representation of the network input
:return: A dictionary mapping layer names to the symbolic
representation of their output.
"""
raise NotImplementedError('`fprop` not implemented.')
# special call for the ensemble model
def ensemble_call(self, *args, **kwargs):
"""
For compatibility with functions used as model definitions (taking
an input tensor and returning the tensor giving the output
of the model on that input).
"""
return self.get_ensemblepreds(*args, **kwargs)
def get_ensemblepreds(self, x, reuse=True):
"""
:param x: A symbolic representation of the network input
:return: A symbolic representation of the ensemble output predictions
"""
try:
return self.get_layer(x, reuse, 'combined')
except NoSuchLayerError:
raise NotImplementedError('`combinedLayer` not implemented.')
# Returns the average probability of the models that were finally used in the prediction after max voting
def get_combinedAvgCorrectProbs(self, x, reuse=True):
"""
:param x: A symbolic representation of the network input
:return: A symbolic representation of the output probabilities (i.e.,
the output values produced by the softmax layer).
"""
try:
return self.get_layer(x, reuse, 'combinedAvgCorrectProb')
except NoSuchLayerError:
raise NotImplementedError('`combinedAvgCorrectProbLayer` not implemented.')
# special functions for the teacher model in training with distillation
def get_teacher_logits(self, x, reuse):
"""
:param x: A symbolic representation of the network input
:return: A symbolic representation of the output logits (i.e., the
values fed as inputs to the softmax layer).
"""
return self.get_layer(x, reuse, 'teacher_logits')
def get_teacher_probs(self, x, reuse=True):
"""
:param x: A symbolic representation of the network input
:return: A symbolic representation of the output probabilities (i.e.,
the output values produced by the softmax layer).
"""
try:
return self.get_layer(x, reuse, 'teacher_probs')
except NoSuchLayerError:
import tensorflow as tf
return tf.nn.softmax(self.get_teacher_logits(x, True))
def teacher_call(self, *args, **kwargs):
"""
For compatibility with functions used as model definitions (taking
an input tensor and returning the tensor giving the output
of the model on that input).
"""
return self.get_teacher_probs(*args, **kwargs)
class CallableModelWrapper(Model):
def __init__(self, callable_fn, output_layer):
"""
Wrap a callable function that takes a tensor as input and returns
a tensor as output with the given layer name.
:param callable_fn: The callable function taking a tensor and
returning a given layer as output.
:param output_layer: A string of the output layer returned by the
function. (Usually either "probs" or "logits".)
"""
self.output_layer = output_layer
self.callable_fn = callable_fn
def get_layer_names(self):
return [self.output_layer]
def fprop(self, x, reuse):
return {self.output_layer: self.callable_fn(x)}
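# Example (not part of the original module): a minimal sketch of wrapping a
# plain callable with CallableModelWrapper. `logits_fn` is a hypothetical
# function mapping an input tensor to a logits tensor; because the wrapper
# only exposes a 'logits' layer, get_probs() falls back to applying
# tf.nn.softmax to those logits.
def _example_callable_wrapper(logits_fn):
    wrapper = CallableModelWrapper(logits_fn, 'logits')
    assert wrapper.get_layer_names() == ['logits']
    return wrapper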
class NoSuchLayerError(ValueError):
"""Raised when a layer that does not exist is requested."""
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import numpy as np
from collections import OrderedDict
from six.moves import xrange
import warnings
import logging
known_number_types = (int, float, np.float16, np.float32, np.float64,
                      np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64)
class _ArgsWrapper(object):
"""
Wrapper that allows attribute access to dictionaries
"""
def __init__(self, args):
if not isinstance(args, dict):
args = vars(args)
self.args = args
def __getattr__(self, name):
return self.args.get(name)
class AccuracyReport(object):
"""
An object summarizing the accuracy results for experiments involving
training on clean examples or adversarial examples, then evaluating
on clean or adversarial examples.
"""
def __init__(self):
self.clean_train_clean_eval = 0.
self.clean_train_adv_eval = 0.
self.adv_train_clean_eval = 0.
self.adv_train_adv_eval = 0.
# Training data accuracy results to be used by tutorials
self.train_clean_train_clean_eval = 0.
self.train_clean_train_adv_eval = 0.
self.train_adv_train_clean_eval = 0.
self.train_adv_train_adv_eval = 0.
def batch_indices(batch_nb, data_length, batch_size):
"""
This helper function computes a batch start and end index
:param batch_nb: the batch number
:param data_length: the total length of the data being parsed by batches
:param batch_size: the number of inputs in each batch
:return: pair of (start, end) indices
"""
# Batch start and end index
start = int(batch_nb * batch_size)
end = int((batch_nb + 1) * batch_size)
# When there are not enough inputs left, we reuse some to complete the
# batch
if end > data_length:
shift = end - data_length
start -= shift
end -= shift
return start, end
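# Example (not part of the original module): batch_indices always returns a
# full batch by reusing earlier examples when the data runs out. With 10
# examples and a batch size of 4, the third batch (batch_nb=2) would span
# indices 8..12, so it is shifted back to cover indices 6..10 instead.
def _example_batch_indices():
    assert batch_indices(0, 10, 4) == (0, 4)
    assert batch_indices(1, 10, 4) == (4, 8)
    assert batch_indices(2, 10, 4) == (6, 10)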
def other_classes(nb_classes, class_ind):
"""
Returns a list of class indices excluding the class indexed by class_ind
:param nb_classes: number of classes in the task
:param class_ind: the class index to be omitted
:return: list of class indices excluding the class indexed by class_ind
"""
if class_ind < 0 or class_ind >= nb_classes:
error_str = "class_ind must be within the range (0, nb_classes - 1)"
raise ValueError(error_str)
other_classes_list = list(range(nb_classes))
other_classes_list.remove(class_ind)
return other_classes_list
def to_categorical(y, num_classes=None):
"""
Converts a class vector (integers) to binary class matrix.
This is adapted from the Keras function with the same name.
:param y: class vector to be converted into a matrix
(integers from 0 to num_classes).
:param num_classes: num_classes: total number of classes.
:return: A binary matrix representation of the input.
"""
y = np.array(y, dtype='int').ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes))
categorical[np.arange(n), y] = 1
return categorical
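# Example (not part of the original module): to_categorical turns a vector of
# integer class labels into one-hot rows.
def _example_to_categorical():
    onehot = to_categorical([0, 2, 1], num_classes=3)
    assert onehot.shape == (3, 3)
    assert np.array_equal(np.argmax(onehot, axis=1), np.array([0, 2, 1]))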
def random_targets(gt, nb_classes):
"""
Take in an array of correct labels and randomly select a different label
for each label in the array. This is typically used to randomly select a
target class in targeted adversarial examples attacks (i.e., when the
search algorithm takes in both a source class and target class to compute
the adversarial example).
:param gt: the ground truth (correct) labels. They can be provided as a
1D vector or 2D array of one-hot encoded labels.
:param nb_classes: The number of classes for this task. The random class
                       will be chosen between 0 and nb_classes - 1 such that it
is different from the correct class.
:return: A numpy array holding the randomly-selected target classes
encoded as one-hot labels.
"""
# If the ground truth labels are encoded as one-hot, convert to labels.
if len(gt.shape) == 2:
gt = np.argmax(gt, axis=1)
# This vector will hold the randomly selected labels.
result = np.zeros(gt.shape, dtype=np.int32)
for class_ind in xrange(nb_classes):
# Compute all indices in that class.
in_cl = gt == class_ind
size = np.sum(in_cl)
# Compute the set of potential targets for this class.
potential_targets = other_classes(nb_classes, class_ind)
# Draw with replacement random targets among the potential targets.
result[in_cl] = np.random.choice(potential_targets, size=size)
# Encode vector of random labels as one-hot labels.
result = to_categorical(result, nb_classes)
result = result.astype(np.int32)
return result
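# Example (not part of the original module): each one-hot target returned by
# random_targets differs from the corresponding ground-truth class.
def _example_random_targets():
    gt = np.array([0, 1, 2, 1])
    targets = random_targets(gt, nb_classes=3)
    assert targets.shape == (4, 3)
    assert not np.any(np.argmax(targets, axis=1) == gt)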
def pair_visual(original, adversarial, figure=None):
"""
This function displays two images: the original and the adversarial sample
:param original: the original input
    :param adversarial: the input after perturbations have been applied
:param figure: if we've already displayed images, use the same plot
:return: the matplot figure to reuse for future samples
"""
import matplotlib.pyplot as plt
# Ensure our inputs are of proper shape
assert(len(original.shape) == 2 or len(original.shape) == 3)
# To avoid creating figures per input sample, reuse the sample plot
if figure is None:
plt.ion()
figure = plt.figure()
figure.canvas.set_window_title('Cleverhans: Pair Visualization')
# Add the images to the plot
    perturbations = adversarial - original
    for index, image in enumerate((original, perturbations, adversarial)):
figure.add_subplot(1, 3, index + 1)
plt.axis('off')
# If the image is 2D, then we have 1 color channel
if len(image.shape) == 2:
plt.imshow(image, cmap='gray')
else:
plt.imshow(image)
# Give the plot some time to update
plt.pause(0.01)
# Draw the plot and return
plt.show()
return figure
def grid_visual(data):
"""
This function displays a grid of images to show full misclassification
    :param data: grid data of the form:
[nb_classes : nb_classes : img_rows : img_cols : nb_channels]
:return: if necessary, the matplot figure to reuse
"""
import matplotlib.pyplot as plt
# Ensure interactive mode is disabled and initialize our graph
plt.ioff()
figure = plt.figure()
figure.canvas.set_window_title('Cleverhans: Grid Visualization')
# Add the images to the plot
num_cols = data.shape[0]
num_rows = data.shape[1]
num_channels = data.shape[4]
current_row = 0
for y in xrange(num_rows):
for x in xrange(num_cols):
figure.add_subplot(num_rows, num_cols, (x + 1) + (y * num_cols))
plt.axis('off')
if num_channels == 1:
plt.imshow(data[x, y, :, :, 0], cmap='gray')
else:
plt.imshow(data[x, y, :, :, :])
# Draw the plot and return
plt.show()
return figure
def conv_2d(*args, **kwargs):
from modified_cleverhans.utils_keras import conv_2d
warnings.warn("utils.conv_2d is deprecated and may be removed on or after"
" 2018-01-05. Switch to utils_keras.conv_2d.")
return conv_2d(*args, **kwargs)
def cnn_model(*args, **kwargs):
from modified_cleverhans.utils_keras import cnn_model
warnings.warn("utils.cnn_model is deprecated and may be removed on or"
" after 2018-01-05. Switch to utils_keras.cnn_model.")
return cnn_model(*args, **kwargs)
def set_log_level(level, name="cleverhans"):
"""
Sets the threshold for the cleverhans logger to level
:param level: the logger threshold. You can find values here:
https://docs.python.org/2/library/logging.html#levels
:param name: the name used for the cleverhans logger
"""
logging.getLogger(name).setLevel(level)
def create_logger(name):
"""
Create a logger object with the given name.
If this is the first time that we call this method, then initialize the
formatter.
"""
base = logging.getLogger("cleverhans")
if len(base.handlers) == 0:
ch = logging.StreamHandler()
formatter = logging.Formatter('[%(levelname)s %(asctime)s %(name)s] ' +
'%(message)s')
ch.setFormatter(formatter)
base.addHandler(ch)
return base
def deterministic_dict(normal_dict):
"""
Returns a version of `normal_dict` whose iteration order is always the same
"""
out = OrderedDict()
for key in sorted(normal_dict.keys()):
out[key] = normal_dict[key]
return out
def parse_model_settings(model_path):
tokens = model_path.split('/')
precision_list = ['bin', 'binsc', 'fp']
precision = ''
start_index = 0
adv = False
for p in precision_list:
if p in tokens:
start_index = tokens.index(p)
precision = p
try:
nb_filters = int(tokens[start_index + 1].split('_')[1])
batch_size = int(tokens[start_index + 2].split('_')[1])
learning_rate = float(tokens[start_index + 3].split('_')[1])
nb_epochs = int(tokens[start_index + 4].split('_')[1])
adv_index = start_index + 5
if adv_index < len(tokens):
adv = True if 'adv' in tokens[adv_index] else False
print("Got %s model" % precision)
print("Got %d filters" % nb_filters)
print("Got batch_size %d" % batch_size)
        print("Got learning_rate %f" % learning_rate)
print("Got %d epochs" % nb_epochs)
    except (IndexError, ValueError):
print("Could not parse tokens!")
sys.exit(1)
return nb_filters, batch_size, learning_rate, nb_epochs, adv
def build_model_save_path(root_path, batch_size, nb_filters, lr, epochs, adv, delay):
    # NOTE: this helper relies on a `precision` name (presumably one of the
    # precision strings used in parse_model_settings) being available in the
    # surrounding scope; it is not defined in this module.
    model_path = os.path.join(root_path, precision)
model_path += 'k_' + str(nb_filters) + '/'
model_path += 'bs_' + str(batch_size) + '/'
model_path += 'lr_' + str(lr) + '/'
model_path += 'ep_' + str(epochs)
if adv:
model_path += '/adv_%d' % delay
# optionally create this dir if it does not already exist,
# otherwise, increment
model_path = create_dir_if_not_exists(model_path)
return model_path
def create_dir_if_not_exists(path):
if not os.path.exists(path):
path += '/1'
os.makedirs(path)
else:
        sub_dirs = next(os.walk(path))[1]
        digits = [s for s in sub_dirs if s.isdigit()]
sub = '/' + str(int(max(digits)) + 1) if len(digits) > 0 else '/1'
path += sub
os.makedirs(path)
print('Logging to:%s' % path)
return path
def build_targeted_dataset(X_test, Y_test, indices, nb_classes, img_rows, img_cols, img_channels):
"""
    Build a dataset for targeted attacks: each source image is repeated
    nb_classes - 1 times, and target labels are assigned that do not overlap
    with the true label.
:param X_test: clean source images
:param Y_test: true labels for X_test
:param indices: indices of source samples to use
:param nb_classes: number of classes in classification problem
:param img_rows: number of pixels along rows of image
    :param img_cols: number of pixels along columns of image
    :param img_channels: number of color channels in the image
"""
nb_samples = len(indices)
nb_target_classes = nb_classes - 1
X = X_test[indices]
Y = Y_test[indices]
adv_inputs = np.array(
[[instance] * nb_target_classes for
instance in X], dtype=np.float32)
adv_inputs = adv_inputs.reshape(
(nb_samples * nb_target_classes, img_rows, img_cols, img_channels))
true_labels = np.array(
[[instance] * nb_target_classes for
instance in Y], dtype=np.float32)
true_labels = true_labels.reshape(
nb_samples * nb_target_classes, nb_classes)
target_labels = np.zeros((nb_samples * nb_target_classes, nb_classes))
for n in range(nb_samples):
one_hot = np.zeros((nb_target_classes, nb_classes))
one_hot[np.arange(nb_target_classes), np.arange(nb_classes)
!= np.argmax(Y[n])] = 1.0
start = n * nb_target_classes
end = start + nb_target_classes
target_labels[start:end] = one_hot
return adv_inputs, true_labels, target_labels
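# Example (not part of the original module): a shape-only sketch of
# build_targeted_dataset on dummy data. With 10 classes, every selected
# source image is repeated 9 times, once per candidate target class.
def _example_build_targeted_dataset():
    X = np.random.rand(5, 28, 28, 1).astype(np.float32)
    Y = to_categorical(np.arange(5), num_classes=10)
    adv_inputs, true_labels, target_labels = build_targeted_dataset(
        X, Y, indices=[0, 1, 2], nb_classes=10,
        img_rows=28, img_cols=28, img_channels=1)
    assert adv_inputs.shape == (27, 28, 28, 1)
    assert true_labels.shape == (27, 10)
    assert target_labels.shape == (27, 10)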
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Model construction utilities based on keras
"""
from .model import Model
import keras
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from distutils.version import LooseVersion
if LooseVersion(keras.__version__) >= LooseVersion('2.0.0'):
from keras.layers import Conv2D
else:
from keras.layers import Convolution2D
def conv_2d(filters, kernel_shape, strides, padding, input_shape=None):
"""
Defines the right convolutional layer according to the
version of Keras that is installed.
    :param filters: (required integer) the dimensionality of the output
                    space (i.e. the number of output filters in the
                    convolution)
    :param kernel_shape: (required tuple or list of 2 integers) specifies
                         the height and width of the 2D convolution kernel
    :param strides: (required tuple or list of 2 integers) specifies the
                    strides of the convolution along the width and height
:param padding: (required string) can be either 'valid' (no padding around
input or feature map) or 'same' (pad to ensure that the
output feature map size is identical to the layer input)
:param input_shape: (optional) give input shape if this is the first
layer of the model
:return: the Keras layer
"""
if LooseVersion(keras.__version__) >= LooseVersion('2.0.0'):
if input_shape is not None:
return Conv2D(filters=filters, kernel_size=kernel_shape,
strides=strides, padding=padding,
input_shape=input_shape)
else:
return Conv2D(filters=filters, kernel_size=kernel_shape,
strides=strides, padding=padding)
else:
if input_shape is not None:
return Convolution2D(filters, kernel_shape[0], kernel_shape[1],
subsample=strides, border_mode=padding,
input_shape=input_shape)
else:
return Convolution2D(filters, kernel_shape[0], kernel_shape[1],
subsample=strides, border_mode=padding)
def cnn_model(logits=False, input_ph=None, img_rows=28, img_cols=28,
channels=1, nb_filters=64, nb_classes=10):
"""
Defines a CNN model using Keras sequential model
:param logits: If set to False, returns a Keras model, otherwise will also
return logits tensor
:param input_ph: The TensorFlow tensor for the input
(needed if returning logits)
("ph" stands for placeholder but it need not actually be a
placeholder)
    :param img_rows: number of rows in the image
:param img_cols: number of columns in the image
:param channels: number of color channels (e.g., 1 for MNIST)
:param nb_filters: number of convolutional filters per layer
:param nb_classes: the number of output classes
    :return: the Keras model (and the logits tensor if `logits` is True)
"""
model = Sequential()
# Define the layers successively (convolution layers are version dependent)
if keras.backend.image_dim_ordering() == 'th':
input_shape = (channels, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, channels)
layers = [conv_2d(nb_filters, (8, 8), (2, 2), "same",
input_shape=input_shape),
Activation('relu'),
conv_2d((nb_filters * 2), (6, 6), (2, 2), "valid"),
Activation('relu'),
conv_2d((nb_filters * 2), (5, 5), (1, 1), "valid"),
Activation('relu'),
Flatten(),
Dense(nb_classes)]
for layer in layers:
model.add(layer)
if logits:
logits_tensor = model(input_ph)
model.add(Activation('softmax'))
if logits:
return model, logits_tensor
else:
return model
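# Example (not part of the original module): a minimal sketch of building the
# CNN defined above for 32x32 RGB inputs. Requires a Keras version compatible
# with the (older) API used in this file.
def _example_cnn_model():
    model = cnn_model(img_rows=32, img_cols=32, channels=3,
                      nb_filters=32, nb_classes=10)
    # The final layer is a softmax, so per-sample predictions sum to one.
    return model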
class KerasModelWrapper(Model):
"""
An implementation of `Model` that wraps a Keras model. It
specifically exposes the hidden features of a model by creating new models.
The symbolic graph is reused and so there is little overhead. Splitting
in-place operations can incur an overhead.
"""
def __init__(self, model=None):
"""
Create a wrapper for a Keras model
:param model: A Keras model
"""
super(KerasModelWrapper, self).__init__()
if model is None:
raise ValueError('model argument must be supplied.')
self.model = model
self.keras_model = None
def _get_softmax_name(self):
"""
Looks for the name of the softmax layer.
:return: Softmax layer name
"""
for i, layer in enumerate(self.model.layers):
cfg = layer.get_config()
if 'activation' in cfg and cfg['activation'] == 'softmax':
return layer.name
raise Exception("No softmax layers found")
def _get_logits_name(self):
"""
Looks for the name of the layer producing the logits.
:return: name of layer producing the logits
"""
softmax_name = self._get_softmax_name()
softmax_layer = self.model.get_layer(softmax_name)
node = softmax_layer.inbound_nodes[0]
logits_name = node.inbound_layers[0].name
return logits_name
def get_logits(self, x):
"""
:param x: A symbolic representation of the network input.
:return: A symbolic representation of the logits
"""
logits_name = self._get_logits_name()
return self.get_layer(x, logits_name)
def get_probs(self, x):
"""
:param x: A symbolic representation of the network input.
:return: A symbolic representation of the probs
"""
name = self._get_softmax_name()
return self.get_layer(x, name)
def get_layer_names(self):
"""
:return: Names of all the layers kept by Keras
"""
layer_names = [x.name for x in self.model.layers]
return layer_names
def fprop(self, x):
"""
Exposes all the layers of the model returned by get_layer_names.
:param x: A symbolic representation of the network input
:return: A dictionary mapping layer names to the symbolic
representation of their output.
"""
from keras.models import Model as KerasModel
if self.keras_model is None:
# Get the input layer
new_input = self.model.get_input_at(0)
# Make a new model that returns each of the layers as output
out_layers = [x_layer.output for x_layer in self.model.layers]
self.keras_model = KerasModel(new_input, out_layers)
# and get the outputs for that model on the input x
outputs = self.keras_model(x)
# Keras only returns a list for outputs of length >= 1, if the model
# is only one layer, wrap a list
if len(self.model.layers) == 1:
outputs = [outputs]
# compute the dict to return
fprop_dict = dict(zip(self.get_layer_names(), outputs))
return fprop_dict
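# Example (not part of the original module): a sketch of wrapping a Keras
# model so its layers can be inspected. Only get_layer_names() is exercised
# here; note that this wrapper overrides fprop/get_logits with signatures
# that differ from the reuse-aware methods of the imported Model base class.
def _example_keras_model_wrapper():
    wrapper = KerasModelWrapper(cnn_model())
    # Layer names mirror those of the underlying Keras model.
    return wrapper.get_layer_names()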
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
import numpy as np
import six
import time
import warnings
from collections import OrderedDict
from .utils import batch_indices, _ArgsWrapper
import theano
import theano.tensor as T
import keras
floatX = theano.config.floatX
_TEST_PHASE = np.uint8(0)
_TRAIN_PHASE = np.uint8(1)
def get_or_compute_grads(loss_or_grads, params):
    """From Lasagne: return `loss_or_grads` unchanged if it is already a list
    of gradient expressions, otherwise differentiate it w.r.t. `params`."""
if isinstance(loss_or_grads, list):
return loss_or_grads
else:
return theano.grad(loss_or_grads, params)
def adadelta(loss_or_grads, params, learning_rate=1.0, rho=0.95, epsilon=1e-6):
""" From Lasagne
"""
grads = get_or_compute_grads(loss_or_grads, params)
updates = OrderedDict()
# Using theano constant to prevent upcasting of float32
one = T.constant(1)
for param, grad in zip(params, grads):
value = param.get_value(borrow=True)
# accu: accumulate gradient magnitudes
accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),
broadcastable=param.broadcastable)
# delta_accu: accumulate update magnitudes (recursively!)
delta_accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),
broadcastable=param.broadcastable)
# update accu (as in rmsprop)
accu_new = rho * accu + (one - rho) * grad ** 2
updates[accu] = accu_new
# compute parameter update, using the 'old' delta_accu
update = (grad * T.sqrt(delta_accu + epsilon) /
T.sqrt(accu_new + epsilon))
updates[param] = param - learning_rate * update
# update delta_accu (as accu, but accumulating updates)
delta_accu_new = rho * delta_accu + (one - rho) * update ** 2
updates[delta_accu] = delta_accu_new
return updates
def model_loss(y, model, mean=True):
"""
Define loss of Theano graph
:param y: correct labels
    :param model: output of the model
    :param mean: boolean indicating whether to return the mean of the loss
                 or a vector of per-sample losses
    :return: mean of the loss if `mean` is True, otherwise a vector with the
             per-sample loss
"""
warnings.warn("CleverHans support for Theano is deprecated and "
"will be dropped on 2017-11-08.")
from_logits = "softmax" not in str(model).lower()
if from_logits:
model = T.nnet.softmax(model)
out = T.nnet.categorical_crossentropy(model, y)
if mean:
out = T.mean(out)
return out
def th_model_train(x, y, predictions, params, X_train, Y_train, save=False,
predictions_adv=None, evaluate=None, args=None):
"""
Train a Theano graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param params: model trainable weights
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
    :param save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate`,
`batch_size`
:return: True if model trained
"""
warnings.warn("CleverHans support for Theano is deprecated and "
"will be dropped on 2017-11-08.")
args = _ArgsWrapper(args or {})
print("Starting model training using Theano.")
# Define loss
loss = model_loss(y, predictions)
if predictions_adv is not None:
loss = (loss + model_loss(y, predictions_adv)) / 2
print("Defined optimizer.")
train_step = theano.function(
inputs=[x, y],
outputs=[loss],
givens={keras.backend.learning_phase(): _TRAIN_PHASE},
allow_input_downcast=True,
on_unused_input='ignore',
updates=adadelta(
loss, params, learning_rate=args.learning_rate, rho=0.95,
epsilon=1e-08)
)
for epoch in six.moves.xrange(args.nb_epochs):
print("Epoch " + str(epoch))
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_train)
prev = time.time()
for batch in range(nb_batches):
# Compute batch start and end indices
start, end = batch_indices(batch, len(X_train), args.batch_size)
# Perform one training step
train_step(X_train[start:end], Y_train[start:end])
assert end >= len(X_train) # Check that all examples were used
cur = time.time()
print("\tEpoch took " + str(cur - prev) + " seconds")
prev = cur
if evaluate is not None:
evaluate()
return True
def th_model_eval(x, y, model, X_test, Y_test, args=None):
"""
Compute the accuracy of a Theano model on some data
:param x: input placeholder
:param y: output placeholder (for labels)
:param model: model output predictions
    :param X_test: numpy array with test inputs
    :param Y_test: numpy array with test labels
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
:return: a float with the accuracy value
"""
warnings.warn("CleverHans support for Theano is deprecated and "
"will be dropped on 2017-11-08.")
args = _ArgsWrapper(args or {})
# Define symbol for accuracy
acc_value = keras.metrics.categorical_accuracy(y, model)
# Keras 2.0 categorical_accuracy no longer calculates the mean internally
# T.mean is called in here and is backward compatible with previous
# versions of Keras
acc_value = T.mean(acc_value)
# Init result var
accuracy = 0.0
nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_test)
eval_step = theano.function(
inputs=[x, y],
outputs=acc_value,
givens={keras.backend.learning_phase(): _TEST_PHASE},
on_unused_input="ignore",
allow_input_downcast=True,
updates=None
)
for batch in range(nb_batches):
if batch % 100 == 0 and batch > 0:
print("Batch " + str(batch))
# Must not use the `batch_indices` function here, because it
# repeats some examples.
# It's acceptable to repeat during training, but not eval.
start = batch * args.batch_size
end = min(len(X_test), start + args.batch_size)
cur_batch_size = end - start
# The last batch may be smaller than all others, so we need to
# account for variable batch size here
accuracy += cur_batch_size * \
eval_step(X_test[start:end], Y_test[start:end])
assert end >= len(X_test)
# Divide by number of examples to get final value
accuracy /= len(X_test)
return accuracy
def batch_eval(th_inputs, th_outputs, numpy_inputs, args=None):
"""
A helper function that computes a tensor on numpy inputs by batches.
:param th_inputs:
:param th_outputs:
:param numpy_inputs:
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
"""
warnings.warn("CleverHans support for Theano is deprecated and "
"will be dropped on 2017-11-08.")
args = _ArgsWrapper(args or {})
n = len(numpy_inputs)
assert n > 0
assert n == len(th_inputs)
m = numpy_inputs[0].shape[0]
for i in six.moves.xrange(1, n):
assert numpy_inputs[i].shape[0] == m
out = []
for _ in th_outputs:
out.append([])
eval_step = theano.function(
inputs=th_inputs,
outputs=th_outputs,
givens={keras.backend.learning_phase(): _TEST_PHASE},
allow_input_downcast=True,
updates=None
)
for start in six.moves.xrange(0, m, args.batch_size):
batch = start // args.batch_size
if batch % 100 == 0 and batch > 0:
print("Batch " + str(batch))
# Compute batch start and end indices
start = batch * args.batch_size
end = start + args.batch_size
numpy_input_batches = [numpy_input[start:end]
for numpy_input in numpy_inputs]
cur_batch_size = numpy_input_batches[0].shape[0]
assert cur_batch_size <= args.batch_size
for e in numpy_input_batches:
assert e.shape[0] == cur_batch_size
numpy_output_batches = eval_step(*numpy_input_batches)
for e in numpy_output_batches:
assert e.shape[0] == cur_batch_size, e.shape
for out_elem, numpy_output_batch in zip(out, numpy_output_batches):
out_elem.append(numpy_output_batch)
out = [np.concatenate(x, axis=0) for x in out]
for e in out:
assert e.shape[0] == m, e.shape
return out
def model_argmax(x, predictions, sample):
"""
Helper function that computes the current class prediction
:param x: the input placeholder
:param predictions: the model's symbolic output
:param sample: (1 x 1 x img_rows x img_cols) numpy array with sample input
:return: the argmax output of predictions, i.e. the current predicted class
"""
warnings.warn("CleverHans support for Theano is deprecated and "
"will be dropped on 2017-11-08.")
probabilities = theano.function(
inputs=[x],
outputs=predictions,
givens={keras.backend.learning_phase(): _TEST_PHASE},
allow_input_downcast=True,
updates=None
    )(sample)
return np.argmax(probabilities)
def l2_batch_normalize(x, epsilon=1e-12):
"""
Helper function to normalize a batch of vectors.
:param x: the input placeholder
:param epsilon: stabilizes division
:return: the batch of l2 normalized vector
"""
epsilon = np.asarray(epsilon, dtype=floatX)
x_shape = x.shape
x = T.reshape(x, (x.shape[0], -1))
x /= (epsilon + T.max(T.abs_(x), 1, keepdims=True))
square_sum = T.sum(T.sqr(x), 1, keepdims=True)
x /= T.sqrt(np.sqrt(epsilon) + square_sum)
return x.reshape(x_shape)
def kl_with_logits(q_logits, p_logits):
"""Helper function to compute kl-divergence KL(q || p)
"""
q = T.nnet.softmax(q_logits)
q_log = T.nnet.logsoftmax(q_logits)
p_log = T.nnet.logsoftmax(p_logits)
loss = T.sum(q * (q_log - p_log), axis=1)
return loss
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import theano
import warnings
from theano import gradient, tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from . import utils_th
floatX = theano.config.floatX
def fgsm(x, predictions, eps, clip_min=None, clip_max=None):
return fgm(x, predictions, y=None, eps=eps, ord=np.inf, clip_min=clip_min,
clip_max=clip_max)
def fgm(x, predictions, y=None, eps=0.3, ord=np.inf, clip_min=None,
clip_max=None):
"""
Theano implementation of the Fast Gradient
Sign method.
:param x: the input placeholder
:param predictions: the model's output tensor
:param y: the output placeholder. Use None (the default) to avoid the
label leaking effect.
:param eps: the epsilon (input variation parameter)
:param ord: (optional) Order of the norm (mimics Numpy).
Possible values: np.inf (other norms not implemented yet).
:param clip_min: optional parameter that can be used to set a minimum
value for components of the example returned
:param clip_max: optional parameter that can be used to set a maximum
value for components of the example returned
:return: a tensor for the adversarial example
"""
warnings.warn("CleverHans support for Theano is deprecated and "
"will be dropped on 2017-11-08.")
assert ord == np.inf, "Theano implementation not available for this norm."
eps = np.asarray(eps, dtype=floatX)
if y is None:
# Using model predictions as ground truth to avoid label leaking
y = T.eq(predictions, T.max(predictions, axis=1, keepdims=True))
y = T.cast(y, utils_th.floatX)
y = y / T.sum(y, 1, keepdims=True)
# Compute loss
loss = utils_th.model_loss(y, predictions, mean=True)
# Define gradient of loss wrt input
grad = T.grad(loss, x)
# Take sign of gradient
signed_grad = T.sgn(grad)
# Multiply by constant epsilon
scaled_signed_grad = eps * signed_grad
# Add perturbation to original example to obtain adversarial example
adv_x = gradient.disconnected_grad(x + scaled_signed_grad)
# If clipping is needed, reset all values outside of [clip_min, clip_max]
if (clip_min is not None) and (clip_max is not None):
adv_x = T.clip(adv_x, clip_min, clip_max)
return adv_x
def vatm(model, x, predictions, eps, num_iterations=1, xi=1e-6,
clip_min=None, clip_max=None, seed=12345):
"""
Theano implementation of the perturbation method used for virtual
adversarial training: https://arxiv.org/abs/1507.00677
:param model: the model which returns the network unnormalized logits
:param x: the input placeholder
:param predictions: the model's unnormalized output tensor
:param eps: the epsilon (input variation parameter)
:param num_iterations: the number of iterations
:param xi: the finite difference parameter
:param clip_min: optional parameter that can be used to set a minimum
value for components of the example returned
:param clip_max: optional parameter that can be used to set a maximum
value for components of the example returned
:param seed: the seed for random generator
:return: a tensor for the adversarial example
"""
eps = np.asarray(eps, dtype=floatX)
xi = np.asarray(xi, dtype=floatX)
rng = RandomStreams(seed=seed)
d = rng.normal(size=x.shape, dtype=x.dtype)
for i in range(num_iterations):
d = xi * utils_th.l2_batch_normalize(d)
logits_d = model(x + d)
kl = utils_th.kl_with_logits(predictions, logits_d)
Hd = T.grad(kl.sum(), d)
d = gradient.disconnected_grad(Hd)
d = eps * utils_th.l2_batch_normalize(d)
adv_x = gradient.disconnected_grad(x + d)
if (clip_min is not None) and (clip_max is not None):
adv_x = T.clip(adv_x, clip_min, clip_max)
return adv_x
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import numpy as np
from six.moves import xrange
import tensorflow as tf
import warnings
from . import utils_tf
from . import utils
_logger = utils.create_logger("cleverhans.attacks.tf")
def fgsm(x, predictions, eps=0.3, clip_min=None, clip_max=None):
return fgm(x, predictions, y=None, eps=eps, ord=np.inf, clip_min=clip_min,
clip_max=clip_max)
def fgm(x, preds, y=None, eps=0.3, ord=np.inf,
clip_min=None, clip_max=None,
targeted=False):
"""
TensorFlow implementation of the Fast Gradient Method.
:param x: the input placeholder
:param preds: the model's output tensor (the attack expects the
probabilities, i.e., the output of the softmax)
:param y: (optional) A placeholder for the model labels. If targeted
is true, then provide the target label. Otherwise, only provide
this parameter if you'd like to use true labels when crafting
adversarial samples. Otherwise, model predictions are used as
labels to avoid the "label leaking" effect (explained in this
paper: https://arxiv.org/abs/1611.01236). Default is None.
Labels should be one-hot-encoded.
:param eps: the epsilon (input variation parameter)
:param ord: (optional) Order of the norm (mimics NumPy).
Possible values: np.inf, 1 or 2.
:param clip_min: Minimum float value for adversarial example components
:param clip_max: Maximum float value for adversarial example components
:param targeted: Is the attack targeted or untargeted? Untargeted, the
default, will try to make the label incorrect. Targeted
will instead try to move in the direction of being more
like y.
:return: a tensor for the adversarial example
"""
if y is None:
# Using model predictions as ground truth to avoid label leaking
preds_max = tf.reduce_max(preds, 1, keep_dims=True)
y = tf.to_float(tf.equal(preds, preds_max))
y = tf.stop_gradient(y)
y = y / tf.reduce_sum(y, 1, keep_dims=True)
# Compute loss
loss = utils_tf.model_loss(y, preds, mean=False)
if targeted:
loss = -loss
# Define gradient of loss wrt input
grad, = tf.gradients(loss, x)
if ord == np.inf:
# Take sign of gradient
normalized_grad = tf.sign(grad)
# The following line should not change the numerical results.
# It applies only because `normalized_grad` is the output of
# a `sign` op, which has zero derivative anyway.
# It should not be applied for the other norms, where the
# perturbation has a non-zero derivative.
normalized_grad = tf.stop_gradient(normalized_grad)
elif ord == 1:
red_ind = list(xrange(1, len(x.get_shape())))
normalized_grad = grad / tf.reduce_sum(tf.abs(grad),
reduction_indices=red_ind,
keep_dims=True)
elif ord == 2:
red_ind = list(xrange(1, len(x.get_shape())))
square = tf.reduce_sum(tf.square(grad),
reduction_indices=red_ind,
keep_dims=True)
normalized_grad = grad / tf.sqrt(square)
else:
raise NotImplementedError("Only L-inf, L1 and L2 norms are "
"currently implemented.")
# Multiply by constant epsilon
scaled_grad = eps * normalized_grad
    _logger.debug("scaled_grad shape: %s", scaled_grad.get_shape())
# Add perturbation to original example to obtain adversarial example
adv_x = x + scaled_grad
# If clipping is needed, reset all values outside of [clip_min, clip_max]
if (clip_min is not None) and (clip_max is not None):
adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
return adv_x
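# Example (not part of the original module): a hypothetical sketch of building
# an FGSM attack graph with fgm. `x` is an input placeholder and `preds` the
# matching softmax output tensor of some model.
def _example_fgm_graph(x, preds):
    adv_x = fgm(x, preds, eps=0.3, ord=np.inf, clip_min=0., clip_max=1.)
    return adv_x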
def vatm(model, x, logits, eps, num_iterations=1, xi=1e-6,
clip_min=None, clip_max=None, scope=None):
"""
Tensorflow implementation of the perturbation method used for virtual
adversarial training: https://arxiv.org/abs/1507.00677
:param model: the model which returns the network unnormalized logits
:param x: the input placeholder
:param logits: the model's unnormalized output tensor (the input to
the softmax layer)
:param eps: the epsilon (input variation parameter)
:param num_iterations: the number of iterations
:param xi: the finite difference parameter
:param clip_min: optional parameter that can be used to set a minimum
value for components of the example returned
:param clip_max: optional parameter that can be used to set a maximum
value for components of the example returned
    :param scope: (optional) name scope for the created TF operations
:return: a tensor for the adversarial example
"""
with tf.name_scope(scope, "virtual_adversarial_perturbation"):
d = tf.random_normal(tf.shape(x))
for i in range(num_iterations):
d = xi * utils_tf.l2_batch_normalize(d)
logits_d = model.get_logits(x + d, reuse=True)
kl = utils_tf.kl_with_logits(logits, logits_d)
Hd = tf.gradients(kl, d)[0]
d = tf.stop_gradient(Hd)
d = eps * utils_tf.l2_batch_normalize(d)
adv_x = x + d
if (clip_min is not None) and (clip_max is not None):
adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
return adv_x
def apply_perturbations(i, j, X, increase, theta, clip_min, clip_max):
"""
    TensorFlow implementation for applying perturbations to input features
    based on saliency maps
:param i: index of first selected feature
:param j: index of second selected feature
:param X: a matrix containing our input features for our sample
:param increase: boolean; true if we are increasing pixels, false otherwise
:param theta: delta for each feature adjustment
    :param clip_min: minimum value for a feature in our sample
    :param clip_max: maximum value for a feature in our sample
    :return: a perturbed input feature matrix for a target class
"""
# perturb our input sample
if increase:
X[0, i] = np.minimum(clip_max, X[0, i] + theta)
X[0, j] = np.minimum(clip_max, X[0, j] + theta)
else:
X[0, i] = np.maximum(clip_min, X[0, i] - theta)
X[0, j] = np.maximum(clip_min, X[0, j] - theta)
return X
def saliency_map(grads_target, grads_other, search_domain, increase):
"""
TensorFlow implementation for computing saliency maps
:param grads_target: a matrix containing forward derivatives for the
target class
:param grads_other: a matrix where every element is the sum of forward
derivatives over all non-target classes at that index
:param search_domain: the set of input indices that we are considering
:param increase: boolean; true if we are increasing pixels, false otherwise
:return: (i, j, search_domain) the two input indices selected and the
updated search domain
"""
# Compute the size of the input (the number of features)
nf = len(grads_target)
# Remove the already-used input features from the search space
invalid = list(set(range(nf)) - search_domain)
increase_coef = (2 * int(increase) - 1)
grads_target[invalid] = - increase_coef * np.max(np.abs(grads_target))
grads_other[invalid] = increase_coef * np.max(np.abs(grads_other))
# Create a 2D numpy array of the sum of grads_target and grads_other
target_sum = grads_target.reshape((1, nf)) + grads_target.reshape((nf, 1))
other_sum = grads_other.reshape((1, nf)) + grads_other.reshape((nf, 1))
# Create a mask to only keep features that match saliency map conditions
if increase:
scores_mask = ((target_sum > 0) & (other_sum < 0))
else:
scores_mask = ((target_sum < 0) & (other_sum > 0))
# Create a 2D numpy array of the scores for each pair of candidate features
scores = scores_mask * (-target_sum * other_sum)
# A pixel can only be selected (and changed) once
np.fill_diagonal(scores, 0)
# Extract the best two pixels
best = np.argmax(scores)
p1, p2 = best % nf, best // nf
# Remove used pixels from our search domain
search_domain.discard(p1)
search_domain.discard(p2)
return p1, p2, search_domain
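# Example (not part of the original module): a toy sketch of saliency_map on
# random gradients for a 16-feature input; it returns two selected feature
# indices and removes them from the search domain.
def _example_saliency_map():
    nf = 16
    grads_target = np.random.randn(nf)
    grads_other = np.random.randn(nf)
    domain = set(range(nf))
    i, j, domain = saliency_map(grads_target, grads_other, domain,
                                increase=True)
    assert i not in domain and j not in domain
    return i, j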
def jacobian(sess, x, grads, target, X, nb_features, nb_classes, feed=None):
"""
    TensorFlow implementation of the forward derivative / Jacobian
:param x: the input placeholder
:param grads: the list of TF gradients returned by jacobian_graph()
:param target: the target misclassification class
:param X: numpy array with sample input
:param nb_features: the number of features in the input
:return: matrix of forward derivatives flattened into vectors
"""
# Prepare feeding dictionary for all gradient computations
feed_dict = {x: X}
if feed is not None:
feed_dict.update(feed)
# Initialize a numpy array to hold the Jacobian component values
jacobian_val = np.zeros((nb_classes, nb_features), dtype=np.float32)
# Compute the gradients for all classes
for class_ind, grad in enumerate(grads):
run_grad = sess.run(grad, feed_dict)
jacobian_val[class_ind] = np.reshape(run_grad, (1, nb_features))
# Sum over all classes different from the target class to prepare for
# saliency map computation in the next step of the attack
other_classes = utils.other_classes(nb_classes, target)
grad_others = np.sum(jacobian_val[other_classes, :], axis=0)
return jacobian_val[target], grad_others
def jacobian_graph(predictions, x, nb_classes):
"""
    Create the Jacobian graph to be run later in a TF session
:param predictions: the model's symbolic output (linear output,
pre-softmax)
:param x: the input placeholder
:param nb_classes: the number of classes the model has
    :return: a list of TF gradient tensors, one per output class
"""
# This function will return a list of TF gradients
list_derivatives = []
# Define the TF graph elements to compute our derivatives for each class
for class_ind in xrange(nb_classes):
derivatives, = tf.gradients(predictions[:, class_ind], x)
list_derivatives.append(derivatives)
return list_derivatives
def jsma(sess, x, predictions, grads, sample, target, theta, gamma, clip_min,
clip_max, feed=None):
"""
TensorFlow implementation of the JSMA (see https://arxiv.org/abs/1511.07528
for details about the algorithm design choices).
:param sess: TF session
:param x: the input placeholder
:param predictions: the model's symbolic output (the attack expects the
probabilities, i.e., the output of the softmax, but will
also work with logits typically)
:param grads: symbolic gradients
:param sample: numpy array with sample input
:param target: target class for sample input
:param theta: delta for each feature adjustment
:param gamma: a float between 0 - 1 indicating the maximum distortion
percentage
:param clip_min: minimum value for components of the example returned
:param clip_max: maximum value for components of the example returned
:return: an adversarial sample
"""
# Copy the source sample and define the maximum number of features
# (i.e. the maximum number of iterations) that we may perturb
adv_x = copy.copy(sample)
# count the number of features. For MNIST, 1x28x28 = 784; for
# CIFAR, 3x32x32 = 3072; etc.
nb_features = np.product(adv_x.shape[1:])
# reshape sample for sake of standardization
original_shape = adv_x.shape
adv_x = np.reshape(adv_x, (1, nb_features))
# compute maximum number of iterations
max_iters = np.floor(nb_features * gamma / 2)
# Find number of classes based on grads
nb_classes = len(grads)
increase = bool(theta > 0)
# Compute our initial search domain. We optimize the initial search domain
# by removing all features that are already at their maximum values (if
# increasing input features---otherwise, at their minimum value).
if increase:
search_domain = set([i for i in xrange(nb_features)
if adv_x[0, i] < clip_max])
else:
search_domain = set([i for i in xrange(nb_features)
if adv_x[0, i] > clip_min])
# Initialize the loop variables
iteration = 0
adv_x_original_shape = np.reshape(adv_x, original_shape)
current = utils_tf.model_argmax(sess, x, predictions, adv_x_original_shape,
feed=feed)
_logger.debug("Starting JSMA attack up to {} iterations".format(max_iters))
# Repeat this main loop until we have achieved misclassification
while (current != target and iteration < max_iters and
len(search_domain) > 1):
# Reshape the adversarial example
adv_x_original_shape = np.reshape(adv_x, original_shape)
# Compute the Jacobian components
grads_target, grads_others = jacobian(sess, x, grads, target,
adv_x_original_shape,
nb_features, nb_classes,
feed=feed)
if iteration % ((max_iters + 1) // 5) == 0 and iteration > 0:
_logger.debug("Iteration {} of {}".format(iteration,
int(max_iters)))
# Compute the saliency map for each of our target classes
# and return the two best candidate features for perturbation
i, j, search_domain = saliency_map(
grads_target, grads_others, search_domain, increase)
# Apply the perturbation to the two input features selected previously
adv_x = apply_perturbations(
i, j, adv_x, increase, theta, clip_min, clip_max)
# Update our current prediction by querying the model
current = utils_tf.model_argmax(sess, x, predictions,
adv_x_original_shape, feed=feed)
# Update loop variables
iteration = iteration + 1
if current == target:
_logger.info("Attack succeeded using {} iterations".format(iteration))
else:
_logger.info(("Failed to find adversarial example " +
"after {} iterations").format(iteration))
# Compute the ratio of pixels perturbed by the algorithm
percent_perturbed = float(iteration * 2) / nb_features
# Report success when the adversarial example is misclassified in the
# target class
if current == target:
return np.reshape(adv_x, original_shape), 1, percent_perturbed
else:
return np.reshape(adv_x, original_shape), 0, percent_perturbed
def jsma_batch(sess, x, pred, grads, X, theta, gamma, clip_min, clip_max,
nb_classes, y_target=None, feed=None, **kwargs):
"""
Applies the JSMA to a batch of inputs
:param sess: TF session
:param x: the input placeholder
:param pred: the model's symbolic output
:param grads: symbolic gradients
:param X: numpy array with sample inputs
:param theta: delta for each feature adjustment
:param gamma: a float between 0 - 1 indicating the maximum distortion
percentage
:param clip_min: minimum value for components of the example returned
:param clip_max: maximum value for components of the example returned
:param nb_classes: number of model output classes
:param y_target: target class for sample input
:return: adversarial examples
"""
if 'targets' in kwargs:
        warnings.warn('The targets parameter is deprecated, use y_target. '
                      'targets will be removed on 2018-02-03.')
y_target = kwargs['targets']
X_adv = np.zeros(X.shape)
for ind, val in enumerate(X):
val = np.expand_dims(val, axis=0)
if y_target is None:
# No y_target provided, randomly choose from other classes
from .utils_tf import model_argmax
gt = model_argmax(sess, x, pred, val, feed=feed)
# Randomly choose from the incorrect classes for each sample
from .utils import random_targets
target = random_targets(gt, nb_classes)[0]
else:
target = y_target[ind]
X_adv[ind], _, _ = jsma(sess, x, pred, grads, val, np.argmax(target),
theta, gamma, clip_min, clip_max, feed=feed)
return np.asarray(X_adv, dtype=np.float32)
def jacobian_augmentation(sess, x, X_sub_prev, Y_sub, grads, lmbda,
keras_phase=None, feed=None):
"""
Augment an adversary's substitute training set using the Jacobian
of a substitute model to generate new synthetic inputs.
See https://arxiv.org/abs/1602.02697 for more details.
See cleverhans_tutorials/mnist_blackbox.py for example use case
:param sess: TF session in which the substitute model is defined
:param x: input TF placeholder for the substitute model
:param X_sub_prev: substitute training data available to the adversary
at the previous iteration
:param Y_sub: substitute training labels available to the adversary
at the previous iteration
:param grads: Jacobian symbolic graph for the substitute
(should be generated using attacks_tf.jacobian_graph)
:param lmbda: perturbation size used to scale the signed Jacobian when
creating each new synthetic point
:param keras_phase: (deprecated) if not None, holds keras learning_phase
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs (e.g., for a Keras learning phase)
:return: augmented substitute data (will need to be labeled by oracle)
"""
assert len(x.get_shape()) == len(np.shape(X_sub_prev))
assert len(grads) >= np.max(Y_sub) + 1
assert len(X_sub_prev) == len(Y_sub)
if keras_phase is not None:
warnings.warn("keras_phase argument is deprecated and will be removed"
" on 2017-09-28. Instead, use K.set_learning_phase(0) at"
" the start of your script and serve with tensorflow.")
# Prepare input_shape (outside loop) for feeding dictionary below
input_shape = list(x.get_shape())
input_shape[0] = 1
# Create new numpy array for adversary training data
# with twice as many components on the first dimension.
X_sub = np.vstack([X_sub_prev, X_sub_prev])
# For each input from the previous substitute training iteration
for ind, input in enumerate(X_sub_prev):
# Select gradient corresponding to the label predicted by the oracle
grad = grads[Y_sub[ind]]
# Prepare feeding dictionary
feed_dict = {x: np.reshape(input, input_shape)}
if feed is not None:
feed_dict.update(feed)
# Compute sign matrix
grad_val = sess.run([tf.sign(grad)], feed_dict=feed_dict)[0]
# Create new synthetic point in adversary substitute training set
X_sub[2 * ind] = X_sub[ind] + lmbda * grad_val
# Return augmented training data (needs to be labeled afterwards)
return X_sub
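# Hypothetical usage sketch (not part of the original module): a single
# Jacobian-based dataset augmentation step for a black-box substitute model,
# as in cleverhans_tutorials/mnist_blackbox.py. It assumes `sess`, the
# substitute's input placeholder `x`, its output `preds`, numpy arrays
# `X_sub`/`Y_sub` holding the current substitute training set, and the
# `jacobian_graph` helper defined elsewhere in this module.
def _example_jacobian_augmentation_step(sess, x, preds, X_sub, Y_sub,
                                        nb_classes=10, lmbda=0.1):
    grads = jacobian_graph(preds, x, nb_classes)
    # Doubles the dataset; the new points still have to be labeled by
    # querying the oracle (the black-box model under attack).
    return jacobian_augmentation(sess, x, X_sub, Y_sub, grads, lmbda)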
class CarliniWagnerL2(object):
def __init__(self, sess, model, batch_size, confidence,
targeted, learning_rate,
binary_search_steps, max_iterations,
abort_early, initial_const,
clip_min, clip_max, num_labels, shape):
"""
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param sess: a TF session.
:param model: a cleverhans.model.Model object.
:param batch_size: Number of attacks to run simultaneously.
:param confidence: Confidence of adversarial examples: higher produces
examples with larger l2 distortion, but more
strongly classified as adversarial.
:param targeted: boolean controlling the behavior of the adversarial
examples produced. If set to False, they will be
misclassified in any wrong class. If set to True,
they will be misclassified in a chosen target class.
:param learning_rate: The learning rate for the attack algorithm.
Smaller values produce better results but are
slower to converge.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
constant between norm of the perturbation
and confidence of the classification.
:param max_iterations: The maximum number of iterations. Setting this
to a larger value will produce lower distortion
results. Using only a few iterations requires
a larger learning rate, and will produce larger
distortion results.
:param abort_early: If true, allows early aborts if gradient descent
is unable to make progress (i.e., gets stuck in
a local minimum).
:param initial_const: The initial tradeoff-constant to use to tune the
relative importance of size of the perturbation
and confidence of classification.
If binary_search_steps is large, the initial
constant is not important. A smaller value of
this constant gives lower distortion results.
:param clip_min: (optional float) Minimum input component value.
:param clip_max: (optional float) Maximum input component value.
:param num_labels: the number of classes in the model's output.
:param shape: the shape of the model's input tensor.
"""
self.sess = sess
self.TARGETED = targeted
self.LEARNING_RATE = learning_rate
self.MAX_ITERATIONS = max_iterations
self.BINARY_SEARCH_STEPS = binary_search_steps
self.ABORT_EARLY = abort_early
self.CONFIDENCE = confidence
self.initial_const = initial_const
self.batch_size = batch_size
self.clip_min = clip_min
self.clip_max = clip_max
self.model = model
self.repeat = binary_search_steps >= 10
self.shape = shape = tuple([batch_size] + list(shape))
# the variable we're going to optimize over
modifier = tf.Variable(np.zeros(shape, dtype=np.float32))
# these are variables to be more efficient in sending data to tf
self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32,
name='timg')
self.tlab = tf.Variable(np.zeros((batch_size, num_labels)),
dtype=tf.float32, name='tlab')
self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32,
name='const')
# and here's what we use to assign them
self.assign_timg = tf.placeholder(tf.float32, shape,
name='assign_timg')
self.assign_tlab = tf.placeholder(tf.float32, (batch_size, num_labels),
name='assign_tlab')
self.assign_const = tf.placeholder(tf.float32, [batch_size],
name='assign_const')
# the resulting instance, tanh'd to keep bounded from clip_min
# to clip_max
self.newimg = (tf.tanh(modifier + self.timg) + 1) / 2
self.newimg = self.newimg * (clip_max - clip_min) + clip_min
# prediction BEFORE-SOFTMAX of the model
self.output = model.get_logits(self.newimg, reuse=True)
# distance to the input data
self.other = (tf.tanh(self.timg) + 1) / \
2 * (clip_max - clip_min) + clip_min
self.l2dist = tf.reduce_sum(tf.square(self.newimg - self.other),
list(range(1, len(shape))))
# compute the probability of the label class versus the maximum other
real = tf.reduce_sum((self.tlab) * self.output, 1)
other = tf.reduce_max(
(1 - self.tlab) * self.output - self.tlab * 10000,
1)
if self.TARGETED:
# if targeted, optimize for making the other class most likely
loss1 = tf.maximum(0.0, other - real + self.CONFIDENCE)
else:
# if untargeted, optimize for making this class least likely.
loss1 = tf.maximum(0.0, real - other + self.CONFIDENCE)
# sum up the losses
self.loss2 = tf.reduce_sum(self.l2dist)
self.loss1 = tf.reduce_sum(self.const * loss1)
self.loss = self.loss1 + self.loss2
# Setup the adam optimizer and keep track of variables we're creating
start_vars = set(x.name for x in tf.global_variables())
optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE)
self.train = optimizer.minimize(self.loss, var_list=[modifier])
end_vars = tf.global_variables()
new_vars = [x for x in end_vars if x.name not in start_vars]
# these are the variables to initialize when we run
self.setup = []
self.setup.append(self.timg.assign(self.assign_timg))
self.setup.append(self.tlab.assign(self.assign_tlab))
self.setup.append(self.const.assign(self.assign_const))
self.init = tf.variables_initializer(var_list=[modifier] + new_vars)
def attack(self, imgs, targets, phase):
"""
Perform the L_2 attack on the given instances for the given targets.
If self.TARGETED is True, then targets holds the target labels.
If self.TARGETED is False, then targets holds the original class labels.
"""
r = []
for i in range(0, len(imgs), self.batch_size):
_logger.debug(("Running CWL2 attack on instance " +
"{} of {}").format(i, len(imgs)))
r.extend(self.attack_batch(imgs[i:i + self.batch_size],
targets[i:i + self.batch_size], phase))
return np.array(r)
def attack_batch(self, imgs, labs, phase):
"""
Run the attack on a batch of instances and labels.
"""
def compare(x, y):
if not isinstance(x, (float, int, np.int64)):
x = np.copy(x)
if self.TARGETED:
x[y] -= self.CONFIDENCE
else:
x[y] += self.CONFIDENCE
x = np.argmax(x)
if self.TARGETED:
return x == y
else:
return x != y
batch_size = self.batch_size
oimgs = np.clip(imgs, self.clip_min, self.clip_max)
# re-scale instances to be within range [0, 1]
imgs = (imgs - self.clip_min) / (self.clip_max - self.clip_min)
imgs = np.clip(imgs, 0, 1)
# now convert to [-1, 1]
imgs = (imgs * 2) - 1
# convert to tanh-space
imgs = np.arctanh(imgs * .999999)
# set the lower and upper bounds accordingly
lower_bound = np.zeros(batch_size)
CONST = np.ones(batch_size) * self.initial_const
upper_bound = np.ones(batch_size) * 1e10
# placeholders for the best l2, score, and instance attack found so far
o_bestl2 = [1e10] * batch_size
o_bestscore = [-1] * batch_size
o_bestattack = np.copy(oimgs)
for outer_step in range(self.BINARY_SEARCH_STEPS):
# completely reset adam's internal state.
self.sess.run(self.init)
batch = imgs[:batch_size]
batchlab = labs[:batch_size]
bestl2 = [1e10] * batch_size
bestscore = [-1] * batch_size
_logger.debug(" Binary search step {} of {}".
format(outer_step, self.BINARY_SEARCH_STEPS))
# On the last binary search step (if we run many steps), repeat the search
# using the known upper bound as the constant.
if self.repeat and outer_step == self.BINARY_SEARCH_STEPS - 1:
CONST = upper_bound
# set the variables so that we don't have to send them over again
self.sess.run(self.setup, {self.assign_timg: batch,
self.assign_tlab: batchlab,
self.assign_const: CONST,
phase: False})
prev = 1e6
for iteration in range(self.MAX_ITERATIONS):
# perform the attack
_, l, l2s, scores, nimg = self.sess.run([self.train,
self.loss,
self.l2dist,
self.output,
self.newimg],
feed_dict={phase: False})
if iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
_logger.debug((" Iteration {} of {}: loss={:.3g} " +
"l2={:.3g} f={:.3g}")
.format(iteration, self.MAX_ITERATIONS,
l, np.mean(l2s), np.mean(scores)))
# check if we should abort search if we're getting nowhere.
if self.ABORT_EARLY and \
iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
if l > prev * .9999:
msg = " Failed to make progress; stop early"
_logger.debug(msg)
break
prev = l
# adjust the best result found so far
for e, (l2, sc, ii) in enumerate(zip(l2s, scores, nimg)):
lab = np.argmax(batchlab[e])
if l2 < bestl2[e] and compare(sc, lab):
bestl2[e] = l2
bestscore[e] = np.argmax(sc)
if l2 < o_bestl2[e] and compare(sc, lab):
o_bestl2[e] = l2
o_bestscore[e] = np.argmax(sc)
o_bestattack[e] = ii
# adjust the constant as needed
for e in range(batch_size):
if compare(bestscore[e], np.argmax(batchlab[e])) and \
bestscore[e] != -1:
# success, divide const by two
upper_bound[e] = min(upper_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
# failure, either multiply by 10 if no solution found yet
# or do binary search with the known upper bound
lower_bound[e] = max(lower_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
CONST[e] *= 10
_logger.debug(" Successfully generated adversarial examples " +
"on {} of {} instances.".
format(sum(upper_bound < 1e9), batch_size))
o_bestl2 = np.array(o_bestl2)
mean = np.mean(np.sqrt(o_bestl2[o_bestl2 < 1e9]))
_logger.debug(" Mean successful distortion: {:.4g}".format(mean))
# return the best solution found
o_bestl2 = np.array(o_bestl2)
return o_bestattack
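# Hypothetical usage sketch (not part of the original module): driving the raw
# CarliniWagnerL2 helper above directly instead of going through the Attack
# wrapper. It assumes `sess`, a cleverhans.model.Model whose get_logits accepts
# reuse=True, a tf.bool `phase` placeholder present in the graph, numpy inputs
# `X` in [0, 1] whose length is a multiple of batch_size, and one-hot target
# labels `Y_target`.
def _example_raw_cwl2_usage(sess, model, phase, X, Y_target, nb_classes=10):
    attack = CarliniWagnerL2(sess, model, batch_size=10, confidence=0,
                             targeted=True, learning_rate=5e-3,
                             binary_search_steps=5, max_iterations=1000,
                             abort_early=True, initial_const=1e-2,
                             clip_min=0., clip_max=1.,
                             num_labels=nb_classes, shape=X.shape[1:])
    # Returns a numpy array of adversarial examples, one per input.
    return attack.attack(X, Y_target, phase)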
class ElasticNetMethod(object):
def __init__(self, sess, model, beta,
batch_size, confidence,
targeted, learning_rate,
binary_search_steps, max_iterations,
abort_early, initial_const,
clip_min, clip_max, num_labels, shape):
"""
EAD Attack with the EN Decision Rule
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param sess: a TF session.
:param model: a cleverhans.model.Model object.
:param beta: Trades off L2 distortion with L1 distortion: higher
produces examples with lower L1 distortion, at the
cost of higher L2 (and typically Linf) distortion
:param batch_size: Number of attacks to run simultaneously.
:param confidence: Confidence of adversarial examples: higher produces
examples with larger l2 distortion, but more
strongly classified as adversarial.
:param targeted: boolean controlling the behavior of the adversarial
examples produced. If set to False, they will be
misclassified in any wrong class. If set to True,
they will be misclassified in a chosen target class.
:param learning_rate: The learning rate for the attack algorithm.
Smaller values produce better results but are
slower to converge.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
constant between norm of the perturbation
and confidence of the classification.
:param max_iterations: The maximum number of iterations. Setting this
to a larger value will produce lower distortion
results. Using only a few iterations requires
a larger learning rate, and will produce larger
distortion results.
:param abort_early: If true, allows early abort when the total
loss starts to increase (greatly speeds up attack,
but hurts performance, particularly on ImageNet)
:param initial_const: The initial tradeoff-constant to use to tune the
relative importance of size of the perturbation
and confidence of classification.
If binary_search_steps is large, the initial
constant is not important. A smaller value of
this constant gives lower distortion results.
:param clip_min: (optional float) Minimum input component value.
:param clip_max: (optional float) Maximum input component value.
:param num_labels: the number of classes in the model's output.
:param shape: the shape of the model's input tensor.
"""
self.sess = sess
self.TARGETED = targeted
self.LEARNING_RATE = learning_rate
self.MAX_ITERATIONS = max_iterations
self.BINARY_SEARCH_STEPS = binary_search_steps
self.ABORT_EARLY = abort_early
self.CONFIDENCE = confidence
self.initial_const = initial_const
self.batch_size = batch_size
self.clip_min = clip_min
self.clip_max = clip_max
self.model = model
self.beta = beta
self.beta_t = tf.cast(self.beta, tf.float32)
self.repeat = binary_search_steps >= 10
self.shape = shape = tuple([batch_size] + list(shape))
# these are variables to be more efficient in sending data to tf
self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32,
name='timg')
self.newimg = tf.Variable(np.zeros(shape), dtype=tf.float32,
name='newimg')
self.slack = tf.Variable(np.zeros(shape), dtype=tf.float32,
name='slack')
self.tlab = tf.Variable(np.zeros((batch_size, num_labels)),
dtype=tf.float32, name='tlab')
self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32,
name='const')
# and here's what we use to assign them
self.assign_timg = tf.placeholder(tf.float32, shape,
name='assign_timg')
self.assign_newimg = tf.placeholder(tf.float32, shape,
name='assign_newimg')
self.assign_slack = tf.placeholder(tf.float32, shape,
name='assign_slack')
self.assign_tlab = tf.placeholder(tf.float32, (batch_size,
num_labels),
name='assign_tlab')
self.assign_const = tf.placeholder(tf.float32, [batch_size],
name='assign_const')
self.global_step = tf.Variable(0, trainable=False)
self.global_step_t = tf.cast(self.global_step, tf.float32)
"""Fast Iterative Shrinkage Thresholding"""
"""--------------------------------"""
self.zt = tf.divide(self.global_step_t,
self.global_step_t + tf.cast(3, tf.float32))
cond1 = tf.cast(tf.greater(tf.subtract(self.slack, self.timg),
self.beta_t), tf.float32)
cond2 = tf.cast(tf.less_equal(tf.abs(tf.subtract(self.slack,
self.timg)),
self.beta_t), tf.float32)
cond3 = tf.cast(tf.less(tf.subtract(self.slack, self.timg),
tf.negative(self.beta_t)), tf.float32)
upper = tf.minimum(tf.subtract(self.slack, self.beta_t),
tf.cast(self.clip_max, tf.float32))
lower = tf.maximum(tf.add(self.slack, self.beta_t),
tf.cast(self.clip_min, tf.float32))
self.assign_newimg = tf.multiply(cond1, upper)
self.assign_newimg += tf.multiply(cond2, self.timg)
self.assign_newimg += tf.multiply(cond3, lower)
self.assign_slack = self.assign_newimg
self.assign_slack += tf.multiply(self.zt,
self.assign_newimg - self.newimg)
self.setter = tf.assign(self.newimg, self.assign_newimg)
self.setter_y = tf.assign(self.slack, self.assign_slack)
"""--------------------------------"""
# prediction BEFORE-SOFTMAX of the model
self.output = model.get_logits(self.newimg, reuse=True)
self.output_y = model.get_logits(self.slack, reuse=True)
# distance to the input data
self.l2dist = tf.reduce_sum(tf.square(self.newimg - self.timg),
list(range(1, len(shape))))
self.l2dist_y = tf.reduce_sum(tf.square(self.slack - self.timg),
list(range(1, len(shape))))
self.l1dist = tf.reduce_sum(tf.abs(self.newimg - self.timg),
list(range(1, len(shape))))
self.l1dist_y = tf.reduce_sum(tf.abs(self.slack - self.timg),
list(range(1, len(shape))))
self.elasticdist = self.l2dist + tf.multiply(self.l1dist,
self.beta_t)
self.elasticdist_y = self.l2dist_y + tf.multiply(self.l1dist_y,
self.beta_t)
# compute the probability of the label class versus the maximum other
real = tf.reduce_sum((self.tlab) * self.output, 1)
real_y = tf.reduce_sum((self.tlab) * self.output_y, 1)
other = tf.reduce_max((1 - self.tlab) * self.output -
(self.tlab * 10000), 1)
other_y = tf.reduce_max((1 - self.tlab) * self.output_y -
(self.tlab * 10000), 1)
if self.TARGETED:
# if targeted, optimize for making the other class most likely
loss1 = tf.maximum(0.0, other - real + self.CONFIDENCE)
loss1_y = tf.maximum(0.0, other_y - real_y + self.CONFIDENCE)
else:
# if untargeted, optimize for making this class least likely.
loss1 = tf.maximum(0.0, real - other + self.CONFIDENCE)
loss1_y = tf.maximum(0.0, real_y - other_y + self.CONFIDENCE)
# sum up the losses
self.loss21 = tf.reduce_sum(self.l1dist)
self.loss21_y = tf.reduce_sum(self.l1dist_y)
self.loss2 = tf.reduce_sum(self.l2dist)
self.loss2_y = tf.reduce_sum(self.l2dist_y)
self.loss1 = tf.reduce_sum(self.const * loss1)
self.loss1_y = tf.reduce_sum(self.const * loss1_y)
self.loss_opt = self.loss1_y + self.loss2_y
self.loss = self.loss1 + self.loss2 + \
tf.multiply(self.beta_t, self.loss21)
self.learning_rate = tf.train.polynomial_decay(self.LEARNING_RATE,
self.global_step,
self.MAX_ITERATIONS,
0, power=0.5)
# Setup the optimizer and keep track of variables we're creating
start_vars = set(x.name for x in tf.global_variables())
optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
self.train = optimizer.minimize(self.loss_opt,
var_list=[self.slack],
global_step=self.global_step)
end_vars = tf.global_variables()
new_vars = [x for x in end_vars if x.name not in start_vars]
# these are the variables to initialize when we run
self.setup = []
self.setup.append(self.timg.assign(self.assign_timg))
self.setup.append(self.tlab.assign(self.assign_tlab))
self.setup.append(self.const.assign(self.assign_const))
self.init = tf.variables_initializer(var_list=[self.global_step] +
[self.slack] + [self.newimg] +
new_vars)
def attack(self, imgs, targets):
"""
Perform the EAD attack on the given instances for the given targets.
If self.TARGETED is True, then targets holds the target labels.
If self.TARGETED is False, then targets holds the original class labels.
"""
r = []
for i in range(0, len(imgs), self.batch_size):
_logger.debug(("Running EAD attack on instance " +
"{} of {}").format(i, len(imgs)))
r.extend(self.attack_batch(imgs[i:i + self.batch_size],
targets[i:i + self.batch_size]))
return np.array(r)
def attack_batch(self, imgs, labs):
"""
Run the attack on a batch of instances and labels.
"""
def compare(x, y):
if not isinstance(x, (float, int, np.int64)):
x = np.copy(x)
if self.TARGETED:
x[y] -= self.CONFIDENCE
else:
x[y] += self.CONFIDENCE
x = np.argmax(x)
if self.TARGETED:
return x == y
else:
return x != y
batch_size = self.batch_size
imgs = np.clip(imgs, self.clip_min, self.clip_max)
# set the lower and upper bounds accordingly
lower_bound = np.zeros(batch_size)
CONST = np.ones(batch_size) * self.initial_const
upper_bound = np.ones(batch_size) * 1e10
# placeholders for the best en, score, and instance attack found so far
o_besten = [1e10] * batch_size
o_bestscore = [-1] * batch_size
o_bestattack = np.copy(imgs)
for outer_step in range(self.BINARY_SEARCH_STEPS):
# completely reset the optimizer's internal state.
self.sess.run(self.init)
batch = imgs[:batch_size]
batchlab = labs[:batch_size]
besten = [1e10] * batch_size
bestscore = [-1] * batch_size
_logger.debug(" Binary search step {} of {}".
format(outer_step, self.BINARY_SEARCH_STEPS))
# On the last binary search step (if we run many steps), repeat the search
# using the known upper bound as the constant.
if self.repeat and outer_step == self.BINARY_SEARCH_STEPS - 1:
CONST = upper_bound
# set the variables so that we don't have to send them over again
self.sess.run(self.setup, {self.assign_timg: batch,
self.assign_tlab: batchlab,
self.assign_const: CONST})
self.sess.run(self.setter, feed_dict={self.assign_newimg: batch})
self.sess.run(self.setter_y, feed_dict={self.assign_slack: batch})
prev = 1e6
for iteration in range(self.MAX_ITERATIONS):
# perform the attack
self.sess.run([self.train])
self.sess.run([self.setter, self.setter_y])
l, l2s, l1s, elastic = self.sess.run([self.loss,
self.l2dist,
self.l1dist,
self.elasticdist])
scores, nimg = self.sess.run([self.output, self.newimg])
if iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
_logger.debug((" Iteration {} of {}: loss={:.3g} " +
"l2={:.3g} l1={:.3g} f={:.3g}")
.format(iteration, self.MAX_ITERATIONS,
l, np.mean(l2s), np.mean(l1s),
np.mean(scores)))
# check if we should abort search if we're getting nowhere.
if self.ABORT_EARLY and \
iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
if l > prev * .9999:
msg = " Failed to make progress; stop early"
_logger.debug(msg)
break
prev = l
# adjust the best result found so far
for e, (en, sc, ii) in enumerate(zip(elastic, scores, nimg)):
lab = np.argmax(batchlab[e])
if en < besten[e] and compare(sc, lab):
besten[e] = en
bestscore[e] = np.argmax(sc)
if en < o_besten[e] and compare(sc, lab):
o_besten[e] = en
o_bestscore[e] = np.argmax(sc)
o_bestattack[e] = ii
# adjust the constant as needed
for e in range(batch_size):
if compare(bestscore[e], np.argmax(batchlab[e])) and \
bestscore[e] != -1:
# success, divide const by two
upper_bound[e] = min(upper_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
# failure, either multiply by 10 if no solution found yet
# or do binary search with the known upper bound
lower_bound[e] = max(lower_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
CONST[e] *= 10
_logger.debug(" Successfully generated adversarial examples " +
"on {} of {} instances.".
format(sum(upper_bound < 1e9), batch_size))
o_besten = np.array(o_besten)
mean = np.mean(np.sqrt(o_besten[o_besten < 1e9]))
_logger.debug(" Elastic Mean successful distortion: {:.4g}".
format(mean))
# return the best solution found
o_besten = np.array(o_besten)
return o_bestattack
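# Hypothetical usage sketch (not part of the original module): the raw EAD
# helper above, under the same assumptions as the CarliniWagnerL2 sketch
# (cleverhans.model.Model with reuse=True logits, inputs in [0, 1] whose
# length is a multiple of batch_size, one-hot target labels).
def _example_raw_ead_usage(sess, model, X, Y_target, nb_classes=10):
    attack = ElasticNetMethod(sess, model, beta=1e-3, batch_size=10,
                              confidence=0, targeted=True,
                              learning_rate=1e-2, binary_search_steps=9,
                              max_iterations=1000, abort_early=False,
                              initial_const=1e-3, clip_min=0., clip_max=1.,
                              num_labels=nb_classes, shape=X.shape[1:])
    return attack.attack(X, Y_target)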
def deepfool_batch(sess, x, pred, logits, grads, X, nb_candidate, overshoot,
max_iter, clip_min, clip_max, nb_classes, feed=None):
"""
Applies DeepFool to a batch of inputs
:param sess: TF session
:param x: The input placeholder
:param pred: The model's sorted symbolic output of logits, only the top
nb_candidate classes are contained
:param logits: The model's unnormalized output tensor (the input to
the softmax layer)
:param grads: Symbolic gradients of the top nb_candidate classes, produced
from gradient_graph
:param X: Numpy array with sample inputs
:param nb_candidate: The number of classes to test against, i.e.,
DeepFool only considers nb_candidate classes when
attacking (which accelerates the attack). The
nb_candidate classes are chosen according to the
prediction confidence during implementation.
:param overshoot: A termination criterion to prevent vanishing updates
:param max_iter: Maximum number of iteration for DeepFool
:param clip_min: Minimum value for components of the example returned
:param clip_max: Maximum value for components of the example returned
:param nb_classes: Number of model output classes
:return: Adversarial examples
"""
X_adv = deepfool_attack(sess, x, pred, logits, grads, X, nb_candidate,
overshoot, max_iter, clip_min, clip_max, feed=feed)
return np.asarray(X_adv, dtype=np.float32)
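# Hypothetical usage sketch (not part of the original module): preparing the
# sorted top-k logits and stacked Jacobian that deepfool_batch above expects.
# Building `pred`/`grads` with tf.nn.top_k and jacobian_graph is an assumption
# about the intended calling convention (the DeepFool Attack wrapper builds
# them in a similar way); `sess`, `x`, the model's `logits` tensor, and a numpy
# batch `X_batch` are assumed to exist.
def _example_deepfool_batch_usage(sess, x, logits, X_batch, nb_candidate=10):
    nb_classes = logits.get_shape().as_list()[-1]
    # Keep only the nb_candidate largest logits, sorted in decreasing order.
    preds_sorted = tf.reshape(tf.nn.top_k(logits, k=nb_candidate)[0],
                              [-1, nb_candidate])
    # Stack the candidate gradients: shape [batch, nb_candidate, ...].
    grads = tf.stack(jacobian_graph(preds_sorted, x, nb_candidate), axis=1)
    return deepfool_batch(sess, x, preds_sorted, logits, grads, X_batch,
                          nb_candidate=nb_candidate, overshoot=0.02,
                          max_iter=50, clip_min=0., clip_max=1.,
                          nb_classes=nb_classes)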
def deepfool_attack(sess, x, predictions, logits, grads, sample, nb_candidate,
overshoot, max_iter, clip_min, clip_max, feed=None):
"""
TensorFlow implementation of DeepFool.
Paper link: see https://arxiv.org/pdf/1511.04599.pdf
:param sess: TF session
:param x: The input placeholder
:param predictions: The model's sorted symbolic output of logits, only the
top nb_candidate classes are contained
:param logits: The model's unnormalized output tensor (the input to
the softmax layer)
:param grads: Symbolic gradients of the top nb_candidate classes, produced
from gradient_graph
:param sample: Numpy array with sample input
:param nb_candidate: The number of classes to test against, i.e.,
DeepFool only considers nb_candidate classes when
attacking (which accelerates the attack). The
nb_candidate classes are chosen according to the
prediction confidence during implementation.
:param overshoot: A termination criterion to prevent vanishing updates
:param max_iter: Maximum number of iteration for DeepFool
:param clip_min: Minimum value for components of the example returned
:param clip_max: Maximum value for components of the example returned
:return: Adversarial examples
"""
import copy
adv_x = copy.copy(sample)
# Initialize the loop variables
iteration = 0
current = utils_tf.model_argmax(sess, x, logits, adv_x, feed=feed)
if current.shape == ():
current = np.array([current])
w = np.squeeze(np.zeros(sample.shape[1:])) # same shape as original image
r_tot = np.zeros(sample.shape)
original = current # use original label as the reference
_logger.debug("Starting DeepFool attack up to {} iterations".
format(max_iter))
# Repeat this main loop until we have achieved misclassification
while (np.any(current == original) and iteration < max_iter):
if iteration % 5 == 0 and iteration > 0:
_logger.info("Attack result at iteration {} is {}".format(
iteration,
current))
gradients = sess.run(grads, feed_dict={x: adv_x})
predictions_val = sess.run(predictions, feed_dict={x: adv_x})
for idx in range(sample.shape[0]):
pert = np.inf
if current[idx] != original[idx]:
continue
for k in range(1, nb_candidate):
w_k = gradients[idx, k, ...] - gradients[idx, 0, ...]
f_k = predictions_val[idx, k] - predictions_val[idx, 0]
# adding value 0.00001 to prevent f_k = 0
pert_k = (abs(f_k) + 0.00001) / np.linalg.norm(w_k.flatten())
if pert_k < pert:
pert = pert_k
w = w_k
r_i = pert * w / np.linalg.norm(w)
r_tot[idx, ...] = r_tot[idx, ...] + r_i
adv_x = np.clip(r_tot + sample, clip_min, clip_max)
current = utils_tf.model_argmax(sess, x, logits, adv_x, feed=feed)
if current.shape == ():
current = np.array([current])
# Update loop variables
iteration = iteration + 1
# need more revision, including info like how many succeed
_logger.info("Attack result at iteration {} is {}".format(iteration,
current))
_logger.info("{} out of {}".format(sum(current != original),
sample.shape[0]) +
" becomes adversarial examples at iteration {}".format(
iteration))
# need to clip this image into the given range
adv_x = np.clip((1 + overshoot) * r_tot + sample, clip_min, clip_max)
return adv_x
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
import numpy as np
from six.moves import xrange
import warnings
import collections
import modified_cleverhans.utils as utils
from modified_cleverhans.model import Model, CallableModelWrapper
_logger = utils.create_logger("cleverhans.attacks")
class Attack(object):
"""
Abstract base class for all attack classes.
"""
__metaclass__ = ABCMeta
def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10):
"""
:param model: An instance of the cleverhans.model.Model class.
:param back: The backend to use. Either 'tf' (default) or 'th'
(support for Theano is however deprecated and will
be removed on 2017-11-08).
:param sess: The tf session to run graphs in (use None for Theano)
:param model_type: (EMPIR) which kind of model is wrapped: 'default' for
a single model, or 'ensembleThree' for the combined
three-model ensemble.
:param num_classes: (EMPIR) number of output classes of the model.
"""
if not(back == 'tf' or back == 'th'):
raise ValueError("Backend argument must either be 'tf' or 'th'.")
if back == 'th' and sess is not None:
raise Exception("A session should not be provided when using th.")
elif back == 'tf' and sess is None:
import tensorflow as tf
sess = tf.get_default_session()
if not isinstance(model, Model):
if hasattr(model, '__call__'):
warnings.warn("CleverHans support for supplying a callable"
" instead of an instance of the"
" cleverhans.model.Model class is"
" deprecated and will be dropped on 2018-01-11.")
else:
raise ValueError("The model argument should be an instance of"
" the cleverhans.model.Model class.")
if back == 'th':
warnings.warn("CleverHans support for Theano is deprecated and "
"will be dropped on 2017-11-08.")
# Prepare attributes
self.model = model
self.back = back
self.sess = sess
self.model_type = model_type # for EMPIR added model_type
self.num_classes = num_classes # for EMPIR added number of classes
# We are going to keep track of old graphs and cache them.
self.graphs = {}
# When calling generate_np, arguments in the following set should be
# fed into the graph, as they are not structural items that require
# generating a new graph.
# This dict should map names of arguments to the types they should
# have.
# (Usually, the target class will be a feedable keyword argument.)
self.feedable_kwargs = {}
# When calling generate_np, arguments in the following set should NOT
# be fed into the graph, as they ARE structural items that require
# generating a new graph.
# This list should contain the names of the structural arguments.
self.structural_kwargs = []
def generate(self, x, phase, **kwargs):
"""
Generate the attack's symbolic graph for adversarial examples. This
method should be overridden in any child class that implements an
attack that is expressible symbolically. Otherwise, it will wrap the
numerical implementation as a symbolic operator.
:param x: The model's symbolic inputs.
:param **kwargs: optional parameters used by child classes.
:return: A symbolic representation of the adversarial examples.
"""
if self.back == 'th':
raise NotImplementedError('Theano version not implemented.')
# the set of arguments that are structural properties of the attack
# if these arguments are different, we must construct a new graph
fixed = dict((k, v) for k, v in kwargs.items()
if k in self.structural_kwargs)
# the set of arguments that are passed as placeholders to the graph
# on each call, and can change without constructing a new graph
feedable = dict((k, v) for k, v in kwargs.items()
if k in self.feedable_kwargs)
if len(fixed) + len(feedable) < len(kwargs):
warnings.warn("Supplied extra keyword arguments that are not "
"used in the graph computation. They have been "
"ignored.")
if not all(isinstance(value, collections.Hashable)
for value in fixed.values()):
# we have received a fixed value that isn't hashable
# this means we can't cache this graph for later use,
# and it will have to be discarded later
hash_key = None
else:
# create a unique key for this set of fixed parameters
hash_key = tuple(sorted(fixed.items()))
if hash_key not in self.graphs:
self.construct_graph(phase, fixed, feedable, x, hash_key)
x, new_kwargs, x_adv = self.graphs[hash_key]
return x_adv
def construct_graph(self, phase, fixed, feedable, x_val, hash_key):
"""
Construct the graph required to run the attack through generate_np.
:param phase: placeholder for the Boolean training phase (fed as False
when the attack graph is run)
:param fixed: Structural elements that require defining a new graph.
:param feedable: Arguments that can be fed to the same graph when
they take different values.
:param x_val: inputs used to infer the shape of the input placeholder
:param hash_key: the key used to store this graph in our cache
"""
# try our very best to create a TF placeholder for each of the
# feedable keyword arguments, and check the types are one of
# the allowed types
import tensorflow as tf
class_name = str(self.__class__).split(".")[-1][:-2]
_logger.info("Constructing new graph for attack " + class_name)
# remove the None arguments, they are just left blank
for k in list(feedable.keys()):
if feedable[k] is None:
del feedable[k]
# process all of the rest and create placeholders for them
new_kwargs = dict(x for x in fixed.items())
for name, value in feedable.items():
given_type = self.feedable_kwargs[name]
if isinstance(value, np.ndarray):
new_shape = [None] + list(value.shape[1:])
new_kwargs[name] = tf.placeholder(given_type, new_shape)
elif isinstance(value, utils.known_number_types):
new_kwargs[name] = tf.placeholder(given_type, shape=[])
else:
raise ValueError("Could not identify type of argument " +
name + ": " + str(value))
# x is a special placeholder we always want to have
x_shape = [None] + list(x_val.shape)[1:]
x = tf.placeholder(tf.float32, shape=x_shape)
# now we generate the graph that we want
x_adv = self.generate(x, phase, **new_kwargs)
self.graphs[hash_key] = (x, new_kwargs, x_adv)
if len(self.graphs) >= 10:
warnings.warn("Calling generate_np() with multiple different "
"structural paramaters is inefficient and should"
" be avoided. Calling generate() is preferred.")
def generate_np(self, x_val, phase, **kwargs):
"""
Generate adversarial examples and return them as a NumPy array.
Sub-classes *should not* implement this method unless they must
perform special handling of arguments.
:param x_val: A NumPy array with the original inputs.
:param phase: placeholder for the Boolean training phase (fed as False)
:param **kwargs: optional parameters used by child classes.
:return: A NumPy array holding the adversarial examples.
"""
if self.back == 'th':
raise NotImplementedError('Theano version not implemented.')
if self.sess is None:
raise ValueError("Cannot use `generate_np` when no `sess` was"
" provided")
# the set of arguments that are structural properties of the attack
# if these arguments are different, we must construct a new graph
fixed = dict((k, v) for k, v in kwargs.items()
if k in self.structural_kwargs)
# the set of arguments that are passed as placeholders to the graph
# on each call, and can change without constructing a new graph
feedable = dict((k, v) for k, v in kwargs.items()
if k in self.feedable_kwargs)
if len(fixed) + len(feedable) < len(kwargs):
warnings.warn("Supplied extra keyword arguments that are not "
"used in the graph computation. They have been "
"ignored.")
if not all(isinstance(value, collections.Hashable)
for value in fixed.values()):
# we have received a fixed value that isn't hashable
# this means we can't cache this graph for later use,
# and it will have to be discarded later
hash_key = None
else:
# create a unique key for this set of fixed parameters
hash_key = tuple(sorted(fixed.items()))
if hash_key not in self.graphs:
self.construct_graph(phase, fixed, feedable, x_val, hash_key)
x, new_kwargs, x_adv = self.graphs[hash_key]
feed_dict = {x: x_val, phase: False}
for name in feedable:
feed_dict[new_kwargs[name]] = feedable[name]
return self.sess.run(x_adv, feed_dict)
def get_or_guess_labels(self, x, kwargs):
"""
Get the label to use in generating an adversarial example for x.
The kwargs are fed directly from the kwargs of the attack.
If 'y' is in kwargs, then assume it's an untargeted attack and
use that as the label.
If 'y_target' is in kwargs, then assume it's a targeted attack and
use that as the label.
Otherwise, use the model's prediction as the label and perform an
untargeted attack.
"""
import tensorflow as tf
if 'y' in kwargs and 'y_target' in kwargs:
raise ValueError("Can not set both 'y' and 'y_target'.")
elif 'y' in kwargs:
labels = kwargs['y']
elif 'y_target' in kwargs:
labels = kwargs['y_target']
else:
if self.model_type == 'ensembleThree':
preds = self.model.get_ensemblepreds(x, reuse=True)
original_predictions = tf.to_float(tf.one_hot(preds, self.num_classes)) # preds just gives the class number above
else:
preds = self.model.get_probs(x, reuse=True)
preds_max = tf.reduce_max(preds, 1, keep_dims=True)
original_predictions = tf.to_float(tf.equal(preds,
preds_max))
labels = tf.stop_gradient(original_predictions)
if isinstance(labels, np.ndarray):
nb_classes = labels.shape[1]
else:
nb_classes = labels.get_shape().as_list()[1]
return labels, nb_classes
def parse_params(self, params=None):
"""
Takes in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
:param params: a dictionary of attack-specific parameters
:return: True when parsing was successful
"""
return True
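# Hypothetical sketch (not part of the original module): a minimal Attack
# subclass showing how feedable_kwargs (turned into placeholders and fed on
# every generate_np call) and structural_kwargs (changing them forces a new
# graph to be built and cached) fit together. The random-sign "attack" below
# is purely illustrative and assumes the TF backend.
class _ExampleRandomSignAttack(Attack):
    def __init__(self, model, back='tf', sess=None):
        super(_ExampleRandomSignAttack, self).__init__(model, back, sess)
        self.feedable_kwargs = {'eps': np.float32}  # may differ on every call
        self.structural_kwargs = ['ord']  # changing it rebuilds the graph

    def generate(self, x, phase, **kwargs):
        import tensorflow as tf
        assert self.parse_params(**kwargs)
        # Perturb the input by eps in a random signed direction.
        return x + self.eps * tf.sign(tf.random_normal(tf.shape(x)))

    def parse_params(self, eps=0.3, ord=np.inf, **kwargs):
        self.eps = eps
        self.ord = ord
        return True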
class FastGradientMethod(Attack):
"""
This attack was originally implemented by Goodfellow et al. (2015) with the
infinity norm (and is known as the "Fast Gradient Sign Method"). This
implementation extends the attack to other norms, and is therefore called
the Fast Gradient Method.
Paper link: https://arxiv.org/abs/1412.6572
"""
def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10):
"""
Create a FastGradientMethod instance.
Note: the model parameter should be an instance of the
cleverhans.model.Model abstraction provided by CleverHans.
"""
super(FastGradientMethod, self).__init__(model, back, sess, model_type, num_classes)
self.feedable_kwargs = {'eps': np.float32,
'y': np.float32,
'y_target': np.float32,
'clip_min': np.float32,
'clip_max': np.float32}
self.structural_kwargs = ['ord']
if not isinstance(self.model, Model):
self.model = CallableModelWrapper(self.model, 'probs')
def generate(self, x, phase, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param eps: (optional float) attack step size (input variation)
:param ord: (optional) Order of the norm (mimics NumPy).
Possible values: np.inf, 1 or 2.
:param y: (optional) A tensor with the model labels. Only provide
this parameter if you'd like to use true labels when crafting
adversarial samples. Otherwise, model predictions are used as
labels to avoid the "label leaking" effect (explained in this
paper: https://arxiv.org/abs/1611.01236). Default is None.
Labels should be one-hot-encoded.
:param y_target: (optional) A tensor with the labels to target. Leave
y_target=None if y is also set. Labels should be
one-hot-encoded.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
if self.back == 'tf':
from .attacks_tf import fgm
else:
from .attacks_th import fgm
labels, nb_classes = self.get_or_guess_labels(x, kwargs)
if self.model_type == 'ensembleThree': ## for EMPIR: extra if condition for covering the multiple combined model case
return fgm(x, self.model.get_combinedAvgCorrectProbs(x, reuse=True), y=labels, eps=self.eps,
ord=self.ord, clip_min=self.clip_min,
clip_max=self.clip_max,
targeted=(self.y_target is not None))
else:
return fgm(x, self.model.get_probs(x, reuse=True), y=labels, eps=self.eps,
ord=self.ord, clip_min=self.clip_min,
clip_max=self.clip_max,
targeted=(self.y_target is not None))
def parse_params(self, eps=0.3, ord=np.inf, y=None, y_target=None,
clip_min=None, clip_max=None, **kwargs):
"""
Takes in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
Attack-specific parameters:
:param eps: (optional float) attack step size (input variation)
:param ord: (optional) Order of the norm (mimics NumPy).
Possible values: np.inf, 1 or 2.
:param y: (optional) A tensor with the model labels. Only provide
this parameter if you'd like to use true labels when crafting
adversarial samples. Otherwise, model predictions are used as
labels to avoid the "label leaking" effect (explained in this
paper: https://arxiv.org/abs/1611.01236). Default is None.
Labels should be one-hot-encoded.
:param y_target: (optional) A tensor with the labels to target. Leave
y_target=None if y is also set. Labels should be
one-hot-encoded.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
# Save attack-specific parameters
self.eps = eps
self.ord = ord
self.y = y
self.y_target = y_target
self.clip_min = clip_min
self.clip_max = clip_max
if self.y is not None and self.y_target is not None:
raise ValueError("Must not set both y and y_target")
# Check if order of the norm is acceptable given current implementation
if self.ord not in [np.inf, int(1), int(2)]:
raise ValueError("Norm order must be either np.inf, 1, or 2.")
if self.back == 'th' and self.ord != np.inf:
raise NotImplementedError("The only FastGradientMethod norm "
"implemented for Theano is np.inf.")
return True
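# Hypothetical usage sketch (not part of the original module): crafting FGSM
# adversarial examples through generate_np. Assumes `sess`, a
# cleverhans.model.Model instance `model` whose get_probs accepts reuse=True,
# a tf.bool `phase` placeholder used when the model graph was built, and a
# numpy test batch `X_test` scaled to [0, 1].
def _example_fgsm_usage(sess, model, phase, X_test):
    fgsm = FastGradientMethod(model, back='tf', sess=sess)
    return fgsm.generate_np(X_test, phase, eps=0.3,
                            clip_min=0., clip_max=1.)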
class BasicIterativeMethod(Attack):
"""
The Basic Iterative Method (Kurakin et al. 2016). The original paper used
hard labels for this attack; no label smoothing.
Paper link: https://arxiv.org/pdf/1607.02533.pdf
"""
def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10):
"""
Create a BasicIterativeMethod instance.
Note: the model parameter should be an instance of the
cleverhans.model.Model abstraction provided by CleverHans.
"""
super(BasicIterativeMethod, self).__init__(model, back, sess, model_type, num_classes)
self.feedable_kwargs = {'eps': np.float32,
'eps_iter': np.float32,
'y': np.float32,
'y_target': np.float32,
'clip_min': np.float32,
'clip_max': np.float32}
self.structural_kwargs = ['ord', 'nb_iter']
if not isinstance(self.model, Model):
self.model = CallableModelWrapper(self.model, 'probs')
def generate(self, x, phase, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param eps: (required float) maximum distortion of adversarial example
compared to original input
:param eps_iter: (required float) step size for each attack iteration
:param nb_iter: (required int) Number of attack iterations.
:param y: (optional) A tensor with the model labels.
:param y_target: (optional) A tensor with the labels to target. Leave
y_target=None if y is also set. Labels should be
one-hot-encoded.
:param ord: (optional) Order of the norm (mimics Numpy).
Possible values: np.inf, 1 or 2.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
import tensorflow as tf
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
# Initialize loop variables
eta = 0
# Fix labels to the first model predictions for loss computation
if self.model_type == 'ensembleThree':
model_preds = self.model.get_combinedAvgCorrectProbs(x, reuse=True)
else:
model_preds = self.model.get_probs(x, reuse=True)
preds_max = tf.reduce_max(model_preds, 1, keep_dims=True)
if self.y_target is not None:
y = self.y_target
targeted = True
elif self.y is not None:
y = self.y
targeted = False
else:
y = tf.to_float(tf.equal(model_preds, preds_max))
y = tf.stop_gradient(y)
targeted = False
y_kwarg = 'y_target' if targeted else 'y'
fgm_params = {'eps': self.eps_iter, y_kwarg: y, 'ord': self.ord,
'clip_min': self.clip_min, 'clip_max': self.clip_max}
for i in range(self.nb_iter):
FGM = FastGradientMethod(self.model, back=self.back,
sess=self.sess)
# FGM = FastGradientMethod(self.model, back=self.back, model_type=self.model_type,
# num_classes=self.num_classes, sess=self.sess)
# Compute this step's perturbation
eta = FGM.generate(x + eta, phase, **fgm_params) - x
# Clipping perturbation eta to self.ord norm ball
if self.ord == np.inf:
eta = tf.clip_by_value(eta, -self.eps, self.eps)
elif self.ord in [1, 2]:
reduc_ind = list(xrange(1, len(eta.get_shape())))
if self.ord == 1:
norm = tf.reduce_sum(tf.abs(eta),
reduction_indices=reduc_ind,
keep_dims=True)
elif self.ord == 2:
norm = tf.sqrt(tf.reduce_sum(tf.square(eta),
reduction_indices=reduc_ind,
keep_dims=True))
eta = eta * self.eps / norm
# Define adversarial example (and clip if necessary)
adv_x = x + eta
if self.clip_min is not None and self.clip_max is not None:
adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
return adv_x
def parse_params(self, eps=0.3, eps_iter=0.05, nb_iter=10, y=None,
ord=np.inf, clip_min=None, clip_max=None,
y_target=None, **kwargs):
"""
Takes in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
Attack-specific parameters:
:param eps: (required float) maximum distortion of adversarial example
compared to original input
:param eps_iter: (required float) step size for each attack iteration
:param nb_iter: (required int) Number of attack iterations.
:param y: (optional) A tensor with the model labels.
:param y_target: (optional) A tensor with the labels to target. Leave
y_target=None if y is also set. Labels should be
one-hot-encoded.
:param ord: (optional) Order of the norm (mimics Numpy).
Possible values: np.inf, 1 or 2.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
# Save attack-specific parameters
self.eps = eps
self.eps_iter = eps_iter
self.nb_iter = nb_iter
self.y = y
self.y_target = y_target
self.ord = ord
self.clip_min = clip_min
self.clip_max = clip_max
if self.y is not None and self.y_target is not None:
raise ValueError("Must not set both y and y_target")
# Check if order of the norm is acceptable given current implementation
if self.ord not in [np.inf, 1, 2]:
raise ValueError("Norm order must be either np.inf, 1, or 2.")
if self.back == 'th':
error_string = "BasicIterativeMethod is not implemented in Theano"
raise NotImplementedError(error_string)
return True
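# Hypothetical usage sketch (not part of the original module): building the
# symbolic BIM graph once and evaluating it later. Assumes a
# cleverhans.model.Model `model` (get_probs accepting reuse=True), `sess`, an
# input placeholder `x`, and a tf.bool `phase` placeholder already exist.
def _example_bim_graph(model, sess, x, phase):
    bim = BasicIterativeMethod(model, back='tf', sess=sess)
    adv_x = bim.generate(x, phase, eps=0.3, eps_iter=0.05, nb_iter=10,
                         clip_min=0., clip_max=1.)
    # Evaluate later with e.g. sess.run(adv_x, {x: X_batch, phase: False}).
    return adv_x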
class SaliencyMapMethod(Attack):
"""
The Jacobian-based Saliency Map Method (Papernot et al. 2016).
Paper link: https://arxiv.org/pdf/1511.07528.pdf
"""
def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10):
"""
Create a SaliencyMapMethod instance.
Note: the model parameter should be an instance of the
cleverhans.model.Model abstraction provided by CleverHans.
"""
super(SaliencyMapMethod, self).__init__(model, back, sess, model_type, num_classes)
if not isinstance(self.model, Model):
self.model = CallableModelWrapper(self.model, 'probs')
if self.back == 'th':
error = "Theano version of SaliencyMapMethod not implemented."
raise NotImplementedError(error)
import tensorflow as tf
self.feedable_kwargs = {'y_target': tf.float32,
'phase': tf.bool}
self.structural_kwargs = ['theta', 'gamma',
'clip_max', 'clip_min']
def generate(self, x, phase, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param theta: (optional float) Perturbation introduced to modified
components (can be positive or negative)
:param gamma: (optional float) Maximum percentage of perturbed features
:param clip_min: (optional float) Minimum component value for clipping
:param clip_max: (optional float) Maximum component value for clipping
:param y_target: (optional) Target tensor if the attack is targeted
"""
import tensorflow as tf
from .attacks_tf import jacobian_graph, jsma_batch
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
# Define Jacobian graph wrt to this input placeholder
if self.model_type == 'ensembleThree':
preds = self.model.get_combinedAvgCorrectProbs(x, reuse=True)
else:
preds = self.model.get_probs(x, reuse=True)
nb_classes = preds.get_shape().as_list()[-1]
grads = jacobian_graph(preds, x, nb_classes)
# Define appropriate graph (targeted / random target labels)
if self.y_target is not None:
def jsma_wrap(x_val, y_target):
return jsma_batch(self.sess, x, preds, grads, x_val,
self.theta, self.gamma, self.clip_min,
self.clip_max, nb_classes,
y_target=y_target, feed={phase: False})
# Attack is targeted, target placeholder will need to be fed
wrap = tf.py_func(jsma_wrap, [x, self.y_target], tf.float32)
else:
def jsma_wrap(x_val):
return jsma_batch(self.sess, x, preds, grads, x_val,
self.theta, self.gamma, self.clip_min,
self.clip_max, nb_classes,
y_target=None, feed={phase: False})
# Attack is untargeted, target values will be chosen at random
wrap = tf.py_func(jsma_wrap, [x], tf.float32)
return wrap
def parse_params(self, theta=1., gamma=np.inf, nb_classes=None,
clip_min=0., clip_max=1., y_target=None, **kwargs):
"""
Takes in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
Attack-specific parameters:
:param theta: (optional float) Perturbation introduced to modified
components (can be positive or negative)
:param gamma: (optional float) Maximum percentage of perturbed features
:param nb_classes: (optional int) Number of model output classes
:param clip_min: (optional float) Minimum component value for clipping
:param clip_max: (optional float) Maximum component value for clipping
:param y_target: (optional) Target tensor if the attack is targeted
"""
if nb_classes is not None:
warnings.warn("The nb_classes argument is depricated and will "
"be removed on 2018-02-11")
self.theta = theta
self.gamma = gamma
self.clip_min = clip_min
self.clip_max = clip_max
self.y_target = y_target
return True
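# Hypothetical usage sketch (not part of the original module): untargeted JSMA
# through the high-level wrapper (a random wrong target is chosen per sample).
# Assumes `sess`, a cleverhans.model.Model `model`, a tf.bool `phase`
# placeholder, and a numpy batch `X_test` in [0, 1].
def _example_jsma_wrapper_usage(sess, model, phase, X_test):
    jsma = SaliencyMapMethod(model, back='tf', sess=sess)
    return jsma.generate_np(X_test, phase, theta=1., gamma=0.1,
                            clip_min=0., clip_max=1.)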
class VirtualAdversarialMethod(Attack):
"""
This attack was originally proposed by Miyato et al. (2016) and was used
for virtual adversarial training.
Paper link: https://arxiv.org/abs/1507.00677
"""
def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10):
"""
Note: the model parameter should be an instance of the
cleverhans.model.Model abstraction provided by CleverHans.
"""
super(VirtualAdversarialMethod, self).__init__(model, back, sess, model_type, num_classes)
if self.back == 'th':
error = "For the Theano version of VAM please call vatm directly."
raise NotImplementedError(error)
import tensorflow as tf
self.feedable_kwargs = {'eps': tf.float32, 'xi': tf.float32,
'clip_min': tf.float32,
'clip_max': tf.float32}
self.structural_kwargs = ['num_iterations']
if not isinstance(self.model, Model):
self.model = CallableModelWrapper(self.model, 'logits')
def generate(self, x, phase, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param eps: (optional float) the epsilon (input variation parameter)
:param num_iterations: (optional) the number of iterations
:param xi: (optional float) the finite difference parameter
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
return vatm(self.model, x, self.model.get_logits(x), eps=self.eps,
num_iterations=self.num_iterations, xi=self.xi,
clip_min=self.clip_min, clip_max=self.clip_max)
def parse_params(self, eps=2.0, num_iterations=1, xi=1e-6, clip_min=None,
clip_max=None, **kwargs):
"""
Takes in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
Attack-specific parameters:
:param eps: (optional float) the epsilon (input variation parameter)
:param num_iterations: (optional) the number of iterations
:param xi: (optional float) the finite difference parameter
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
# Save attack-specific parameters
self.eps = eps
self.num_iterations = num_iterations
self.xi = xi
self.clip_min = clip_min
self.clip_max = clip_max
return True
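# Hypothetical usage sketch (not part of the original module): building the
# virtual adversarial perturbation graph. Assumes a cleverhans.model.Model
# `model` exposing get_logits, `sess`, an input placeholder `x`, a tf.bool
# `phase` placeholder, and that the vatm helper used by generate() is defined
# elsewhere in this module.
def _example_vam_graph(model, sess, x, phase):
    vam = VirtualAdversarialMethod(model, back='tf', sess=sess)
    return vam.generate(x, phase, eps=2.0, num_iterations=1, xi=1e-6,
                        clip_min=0., clip_max=1.)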
class CarliniWagnerL2(Attack):
"""
This attack was originally proposed by Carlini and Wagner. It is an
iterative attack that finds adversarial examples on many defenses that
are robust to other attacks.
Paper link: https://arxiv.org/abs/1608.04644
At a high level, this attack is an iterative attack using Adam and
a specially-chosen loss function to find adversarial examples with
lower distortion than other attacks. This comes at the cost of speed,
as this attack is often much slower than others.
"""
def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10):
"""
Note: the model parameter should be an instance of the
cleverhans.model.Model abstraction provided by CleverHans.
"""
super(CarliniWagnerL2, self).__init__(model, back, sess, model_type, num_classes)
if self.back == 'th':
raise NotImplementedError('Theano version not implemented.')
import tensorflow as tf
self.feedable_kwargs = {'y': tf.float32,
'y_target': tf.float32,
'phase': tf.bool}
self.structural_kwargs = ['batch_size', 'confidence',
'targeted', 'learning_rate',
'binary_search_steps', 'max_iterations',
'abort_early', 'initial_const',
'clip_min', 'clip_max']
if not isinstance(self.model, Model):
self.model = CallableModelWrapper(self.model, 'logits')
def generate(self, x, phase, **kwargs):
"""
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param x: (required) A tensor with the inputs.
:param y: (optional) A tensor with the true labels for an untargeted
attack. If None (and y_target is None) then use the
original labels the classifier assigns.
:param y_target: (optional) A tensor with the target labels for a
targeted attack.
:param confidence: Confidence of adversarial examples: higher produces
examples with larger l2 distortion, but more
strongly classified as adversarial.
:param batch_size: Number of attacks to run simultaneously.
:param learning_rate: The learning rate for the attack algorithm.
Smaller values produce better results but are
slower to converge.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
constant between norm of the perturbation
and confidence of the classification.
:param max_iterations: The maximum number of iterations. Setting this
to a larger value will produce lower distortion
results. Using only a few iterations requires
a larger learning rate, and will produce larger
distortion results.
:param abort_early: If true, allows early aborts if gradient descent
is unable to make progress (i.e., gets stuck in
a local minimum).
:param initial_const: The initial tradeoff-constant to use to tune the
relative importance of size of the perturbation
and confidence of classification.
If binary_search_steps is large, the initial
constant is not important. A smaller value of
this constant gives lower distortion results.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
import tensorflow as tf
from .attacks_tf import CarliniWagnerL2 as CWL2
self.parse_params(**kwargs)
labels, nb_classes = self.get_or_guess_labels(x, kwargs)
attack = CWL2(self.sess, self.model, self.batch_size,
self.confidence, 'y_target' in kwargs,
self.learning_rate, self.binary_search_steps,
self.max_iterations, self.abort_early,
self.initial_const, self.clip_min, self.clip_max,
nb_classes, x.get_shape().as_list()[1:])
def cw_wrap(x_val, y_val):
return np.array(attack.attack(x_val, y_val, phase), dtype=np.float32)
wrap = tf.py_func(cw_wrap, [x, labels], tf.float32)
return wrap
def parse_params(self, y=None, y_target=None, nb_classes=None,
batch_size=1, confidence=0,
learning_rate=5e-3,
binary_search_steps=5, max_iterations=1000,
abort_early=True, initial_const=1e-2,
clip_min=0, clip_max=1):
# ignore the y and y_target argument
if nb_classes is not None:
warnings.warn("The nb_classes argument is depricated and will "
"be removed on 2018-02-11")
self.batch_size = batch_size
self.confidence = confidence
self.learning_rate = learning_rate
self.binary_search_steps = binary_search_steps
self.max_iterations = max_iterations
self.abort_early = abort_early
self.initial_const = initial_const
self.clip_min = clip_min
self.clip_max = clip_max
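# Hypothetical usage sketch (not part of the original module): targeted
# Carlini-Wagner L2 through the high-level wrapper. Assumes `sess`, a
# cleverhans.model.Model `model` whose get_logits accepts reuse=True, a
# tf.bool `phase` placeholder, numpy inputs `X_test` in [0, 1] whose length is
# a multiple of batch_size, and one-hot target labels `Y_target`.
def _example_cw_wrapper_usage(sess, model, phase, X_test, Y_target):
    cw = CarliniWagnerL2(model, back='tf', sess=sess)
    return cw.generate_np(X_test, phase, y_target=Y_target,
                          batch_size=10, max_iterations=1000,
                          binary_search_steps=5, learning_rate=5e-3,
                          initial_const=1e-2, clip_min=0., clip_max=1.)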
class ElasticNetMethod(Attack):
"""
This attack features L1-oriented adversarial examples and includes
the C&W L2 attack as a special case (when beta is set to 0).
Adversarial examples attain similar performance to those
generated by the C&W L2 attack, and more importantly,
have improved transferability properties and
complement adversarial training.
Paper link: https://arxiv.org/abs/1709.04114
"""
def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10):
"""
Note: the model parameter should be an instance of the
cleverhans.model.Model abstraction provided by CleverHans.
"""
super(ElasticNetMethod, self).__init__(model, back, sess, model_type, num_classes)
if self.back == 'th':
raise NotImplementedError('Theano version not implemented.')
import tensorflow as tf
self.feedable_kwargs = {'y': tf.float32,
'y_target': tf.float32}
self.structural_kwargs = ['beta', 'batch_size', 'confidence',
'targeted', 'learning_rate',
'binary_search_steps', 'max_iterations',
'abort_early', 'initial_const',
'clip_min', 'clip_max']
if not isinstance(self.model, Model):
self.model = CallableModelWrapper(self.model, 'logits')
def generate(self, x, phase, **kwargs):
"""
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param x: (required) A tensor with the inputs.
:param y: (optional) A tensor with the true labels for an untargeted
attack. If None (and y_target is None) then use the
original labels the classifier assigns.
:param y_target: (optional) A tensor with the target labels for a
targeted attack.
:param beta: Trades off L2 distortion with L1 distortion: higher
produces examples with lower L1 distortion, at the
cost of higher L2 (and typically Linf) distortion
:param confidence: Confidence of adversarial examples: higher produces
examples with larger l2 distortion, but more
strongly classified as adversarial.
:param batch_size: Number of attacks to run simultaneously.
:param learning_rate: The learning rate for the attack algorithm.
Smaller values produce better results but are
slower to converge.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
constant between norm of the perturbation
and confidence of the classification.
:param max_iterations: The maximum number of iterations. Setting this
to a larger value will produce lower distortion
results. Using only a few iterations requires
a larger learning rate, and will produce larger
distortion results.
:param abort_early: If true, allows early abort when the total
loss starts to increase (greatly speeds up attack,
but hurts performance, particularly on ImageNet)
:param initial_const: The initial tradeoff-constant to use to tune the
relative importance of size of the perturbation
and confidence of classification.
If binary_search_steps is large, the initial
constant is not important. A smaller value of
this constant gives lower distortion results.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
import tensorflow as tf
self.parse_params(**kwargs)
from .attacks_tf import ElasticNetMethod as EAD
labels, nb_classes = self.get_or_guess_labels(x, kwargs)
attack = EAD(self.sess, self.model, self.beta,
self.batch_size, self.confidence,
'y_target' in kwargs, self.learning_rate,
self.binary_search_steps, self.max_iterations,
self.abort_early, self.initial_const,
self.clip_min, self.clip_max,
nb_classes, x.get_shape().as_list()[1:])
def ead_wrap(x_val, y_val):
return np.array(attack.attack(x_val, y_val), dtype=np.float32)
wrap = tf.py_func(ead_wrap, [x, labels], tf.float32)
return wrap
def parse_params(self, y=None, y_target=None,
nb_classes=None, beta=1e-3,
batch_size=9, confidence=0,
learning_rate=1e-2,
binary_search_steps=9, max_iterations=1000,
abort_early=False, initial_const=1e-3,
clip_min=0, clip_max=1):
# ignore the y and y_target argument
if nb_classes is not None:
warnings.warn("The nb_classes argument is depricated and will "
"be removed on 2018-02-11")
self.beta = beta
self.batch_size = batch_size
self.confidence = confidence
self.learning_rate = learning_rate
self.binary_search_steps = binary_search_steps
self.max_iterations = max_iterations
self.abort_early = abort_early
self.initial_const = initial_const
self.clip_min = clip_min
self.clip_max = clip_max
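# Illustrative usage sketch (not part of the original module), assuming a
# cleverhans-style `model`, a TF `sess`, an input tensor `x` and a
# training-phase placeholder `phase` supplied by the calling script. beta
# trades L2 distortion against L1 distortion; beta=0 recovers the C&W L2
# attack as a special case.
def _example_elastic_net_method(model, sess, x, phase):
    ead = ElasticNetMethod(model, back='tf', sess=sess)
    return ead.generate(x, phase,
                        beta=1e-3,
                        batch_size=9,
                        binary_search_steps=9,
                        max_iterations=1000,
                        clip_min=0., clip_max=1.)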
class DeepFool(Attack):
"""
DeepFool is an untargeted & iterative attack which is based on an
iterative linearization of the classifier. The implementation here
is w.r.t. the L2 norm.
Paper link: "https://arxiv.org/pdf/1511.04599.pdf"
"""
def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10):
"""
Create a DeepFool instance.
"""
super(DeepFool, self).__init__(model, back, sess, model_type, num_classes)
if self.back == 'th':
raise NotImplementedError('Theano version not implemented.')
self.structural_kwargs = ['over_shoot', 'max_iter', 'clip_max',
'clip_min', 'nb_candidate']
if not isinstance(self.model, Model):
self.model = CallableModelWrapper(self.model, 'logits')
def generate(self, x, phase, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param nb_candidate: The number of classes to test against, i.e.,
DeepFool only considers nb_candidate classes when
attacking (which speeds up the attack). The
nb_candidate classes are chosen according to the
model's prediction confidence.
:param overshoot: A termination criterion to prevent vanishing updates
:param max_iter: Maximum number of iterations for DeepFool
:param nb_classes: The number of model output classes
:param clip_min: Minimum component value for clipping
:param clip_max: Maximum component value for clipping
"""
import tensorflow as tf
from .attacks_tf import jacobian_graph, deepfool_batch
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
# Define graph wrt to this input placeholder
logits = self.model.get_logits(x)
self.nb_classes = logits.get_shape().as_list()[-1]
assert self.nb_candidate <= self.nb_classes,\
'nb_candidate should not be greater than nb_classes'
preds = tf.reshape(tf.nn.top_k(logits, k=self.nb_candidate)[0],
[-1, self.nb_candidate])
# grads will be the shape [batch_size, nb_candidate, image_size]
grads = tf.stack(jacobian_graph(preds, x, self.nb_candidate), axis=1)
# Define graph
def deepfool_wrap(x_val):
return deepfool_batch(self.sess, x, preds, logits, grads, x_val,
self.nb_candidate, self.overshoot,
self.max_iter, self.clip_min, self.clip_max,
self.nb_classes)
return tf.py_func(deepfool_wrap, [x], tf.float32)
def parse_params(self, nb_candidate=10, overshoot=0.02, max_iter=50,
nb_classes=None, clip_min=0., clip_max=1., **kwargs):
"""
:param nb_candidate: The number of classes to test against, i.e.,
DeepFool only considers nb_candidate classes when
attacking (which speeds up the attack). The
nb_candidate classes are chosen according to the
model's prediction confidence.
:param overshoot: A termination criterion to prevent vanishing updates
:param max_iter: Maximum number of iterations for DeepFool
:param nb_classes: The number of model output classes
:param clip_min: Minimum component value for clipping
:param clip_max: Maximum component value for clipping
"""
if nb_classes is not None:
warnings.warn("The nb_classes argument is depricated and will "
"be removed on 2018-02-11")
self.nb_candidate = nb_candidate
self.overshoot = overshoot
self.max_iter = max_iter
self.clip_min = clip_min
self.clip_max = clip_max
return True
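# Illustrative usage sketch (not part of the original module), assuming a
# cleverhans-style `model`, a TF `sess`, an input tensor `x` and a
# training-phase placeholder `phase` from the calling script. DeepFool is
# untargeted, so only the candidate-class count, the overshoot and the
# iteration budget need to be chosen.
def _example_deepfool(model, sess, x, phase):
    df = DeepFool(model, back='tf', sess=sess)
    return df.generate(x, phase,
                       nb_candidate=10,
                       overshoot=0.02,
                       max_iter=50,
                       clip_min=0., clip_max=1.)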
def fgsm(x, predictions, eps, back='tf', clip_min=None, clip_max=None):
"""
A wrapper for the Fast Gradient Sign Method.
It calls the right function, depending on the
user's backend.
:param x: the input
:param predictions: the model's output
(Note: in the original paper that introduced this
attack, the loss was computed by comparing the
model predictions with the hard labels (from the
dataset). Instead, this version implements the loss
by comparing the model predictions with the most
likely class. This tweak is recommended since the
discovery of label leaking in the following paper:
https://arxiv.org/abs/1611.01236)
:param eps: the epsilon (input variation parameter)
:param back: switch between TensorFlow ('tf') and
Theano ('th') implementation
:param clip_min: optional parameter that can be used to set a minimum
value for components of the example returned
:param clip_max: optional parameter that can be used to set a maximum
value for components of the example returned
:return: a tensor for the adversarial example
"""
warnings.warn("attacks.fgsm is deprecated and will be removed on "
"2017-09-27. Instantiate an object from FastGradientMethod.")
if back == 'tf':
# Compute FGSM using TensorFlow
from .attacks_tf import fgm
return fgm(x, predictions, y=None, eps=eps, ord=np.inf,
clip_min=clip_min, clip_max=clip_max)
elif back == 'th':
# Compute FGSM using Theano
from .attacks_th import fgm
return fgm(x, predictions, eps, clip_min=clip_min, clip_max=clip_max)
def vatm(model, x, logits, eps, back='tf', num_iterations=1, xi=1e-6,
clip_min=None, clip_max=None):
"""
A wrapper for the perturbation methods used for virtual adversarial
training: https://arxiv.org/abs/1507.00677
It calls the right function, depending on the
user's backend.
:param model: the model which returns the network unnormalized logits
:param x: the input placeholder
:param logits: the model's unnormalized output tensor
:param eps: the epsilon (input variation parameter)
:param num_iterations: the number of iterations
:param xi: the finite difference parameter
:param clip_min: optional parameter that can be used to set a minimum
value for components of the example returned
:param clip_max: optional parameter that can be used to set a maximum
value for components of the example returned
:return: a tensor for the adversarial example
"""
if back == 'tf':
# Compute VATM using TensorFlow
from .attacks_tf import vatm as vatm_tf
return vatm_tf(model, x, logits, eps, num_iterations=num_iterations,
xi=xi, clip_min=clip_min, clip_max=clip_max)
elif back == 'th':
# Compute VATM using Theano
from .attacks_th import vatm as vatm_th
return vatm_th(model, x, logits, eps, num_iterations=num_iterations,
xi=xi, clip_min=clip_min, clip_max=clip_max)
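# Illustrative usage sketch (not part of the original module) for the vatm
# wrapper above, assuming `model`, the input placeholder `x` and its logits
# tensor come from the calling script's graph.
def _example_vatm(model, x, logits):
    # eps bounds the virtual-adversarial perturbation; xi is the finite
    # difference step used when approximating the worst-case direction.
    return vatm(model, x, logits, eps=2.0, back='tf',
                num_iterations=1, xi=1e-6,
                clip_min=0., clip_max=1.)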
def jsma(sess, x, predictions, grads, sample, target, theta, gamma=np.inf,
increase=True, back='tf', clip_min=None, clip_max=None):
"""
A wrapper for the Jacobian-based saliency map approach.
It calls the right function, depending on the
user's backend.
:param sess: TF session
:param x: the input
:param predictions: the model's symbolic output (linear output,
pre-softmax)
:param sample: (1 x 1 x img_rows x img_cols) numpy array with sample input
:param target: target class for input sample
:param theta: delta for each feature adjustment
:param gamma: a float between 0 and 1 indicating the maximum
distortion percentage
:param increase: boolean; true if we are increasing pixels, false otherwise
:param back: switch between TensorFlow ('tf') and
Theano ('th') implementation
:param clip_min: optional parameter that can be used to set a minimum
value for components of the example returned
:param clip_max: optional parameter that can be used to set a maximum
value for components of the example returned
:return: an adversarial sample
"""
warnings.warn("attacks.jsma is deprecated and will be removed on "
"2017-09-27. Instantiate an object from SaliencyMapMethod.")
if back == 'tf':
# Compute Jacobian-based saliency map attack using TensorFlow
from .attacks_tf import jsma
return jsma(sess, x, predictions, grads, sample, target, theta, gamma,
clip_min, clip_max)
elif back == 'th':
raise NotImplementedError("Theano jsma not implemented.")
class MadryEtAl(Attack):
"""
The Projected Gradient Descent Attack (Madry et al. 2016).
Paper link: https://arxiv.org/pdf/1706.06083.pdf
"""
def __init__(self, model, back='tf', sess=None, model_type='default', num_classes=10,
attack_type='vanilla'):
"""
Create a MadryEtAl instance.
"""
super(MadryEtAl, self).__init__(model, back, sess, model_type, num_classes)
self.feedable_kwargs = {'eps': np.float32,
'eps_iter': np.float32,
'y': np.float32,
'y_target': np.float32,
'clip_min': np.float32,
'clip_max': np.float32}
self.attack_type = attack_type
self.structural_kwargs = ['ord', 'nb_iter']
if not isinstance(self.model, Model):
self.model = CallableModelWrapper(self.model, 'probs')
def generate(self, x, phase, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param eps: (required float) maximum distortion of adversarial example
compared to original input
:param eps_iter: (required float) step size for each attack iteration
:param nb_iter: (required int) Number of attack iterations.
:param y: (optional) A tensor with the model labels.
:param y_target: (optional) A tensor with the labels to target. Leave
y_target=None if y is also set. Labels should be
one-hot-encoded.
:param ord: (optional) Order of the norm (mimics Numpy).
Possible values: np.inf, 1 or 2.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
labels, nb_classes = self.get_or_guess_labels(x, kwargs)
self.targeted = self.y_target is not None
# Initialize loop variables
adv_x = self.attack(x)
return adv_x
def parse_params(self, eps=0.3, eps_iter=0.01, nb_iter=40, y=None,
ord=np.inf, clip_min=None, clip_max=None,
y_target=None, **kwargs):
"""
Takes in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
Attack-specific parameters:
:param eps: (required float) maximum distortion of adversarial example
compared to original input
:param eps_iter: (required float) step size for each attack iteration
:param nb_iter: (required int) Number of attack iterations.
:param y: (optional) A tensor with the model labels.
:param y_target: (optional) A tensor with the labels to target. Leave
y_target=None if y is also set. Labels should be
one-hot-encoded.
:param ord: (optional) Order of the norm (mimics Numpy).
Possible values: np.inf, 1 or 2.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
# Save attack-specific parameters
self.eps = eps
self.eps_iter = eps_iter
self.nb_iter = nb_iter
self.y = y
self.y_target = y_target
self.ord = ord
self.clip_min = clip_min
self.clip_max = clip_max
if self.y is not None and self.y_target is not None:
raise ValueError("Must not set both y and y_target")
# Check if order of the norm is acceptable given current implementation
if self.ord not in [np.inf, 1, 2]:
raise ValueError("Norm order must be either np.inf, 1, or 2.")
if self.back == 'th':
error_string = ("ProjectedGradientDescentMethod is"
" not implemented in Theano")
raise NotImplementedError(error_string)
return True
def attack_single_step(self, x, eta, y):
"""
Given the original image and the perturbation computed so far, computes
a new perturbation.
:param x: A tensor with the original input.
:param eta: A tensor the same shape as x that holds the perturbation.
:param y: A tensor with the target labels or ground-truth labels.
"""
import tensorflow as tf
from modified_cleverhans.utils_tf import model_loss, clip_eta
adv_x = x + eta
if self.attack_type != "robust":
if self.model_type == 'ensembleThree':  # EMPIR: extra branch to cover the combined (three-model ensemble) case
preds = self.model.get_combinedAvgCorrectProbs(adv_x, reuse=True)
else:
preds = self.model.get_probs(adv_x, reuse=True)
loss = model_loss(y, preds)
else:
# modification from zimmerrol to make sure their loss was correctly implemented
preds = self.model.get_layer(adv_x, True, 'combined_logits')
preds = tf.reduce_sum(preds, 1)
loss = model_loss(y, preds)
if self.targeted:
loss = -loss
grad, = tf.gradients(loss, adv_x)
scaled_signed_grad = self.eps_iter * tf.sign(grad)
adv_x = adv_x + scaled_signed_grad
adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
eta = adv_x - x
eta = clip_eta(eta, self.ord, self.eps)
return x, eta
def attack(self, x, **kwargs):
"""
This method creates a symbolic graph that, given an input image,
first randomly perturbs the image. The perturbation is bounded to an
epsilon ball. Then multiple steps of gradient descent are performed
to increase the probability of a target label or decrease the
probability of the ground-truth label.
:param x: A tensor with the input image.
"""
import tensorflow as tf
from modified_cleverhans.utils_tf import clip_eta
eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps)
eta = clip_eta(eta, self.ord, self.eps)
if self.y is not None:
y = self.y
else:
if self.model_type == 'ensembleThree':  # EMPIR: extra branch to cover the ensemble model case
preds = self.model.get_combinedAvgCorrectProbs(x)
# default below
else:
preds = self.model.get_probs(x)
preds_max = tf.reduce_max(preds, 1, keep_dims=True)
y = tf.to_float(tf.equal(preds, preds_max))
y = y / tf.reduce_sum(y, 1, keep_dims=True)
y = tf.stop_gradient(y)
for i in range(self.nb_iter):
x, eta = self.attack_single_step(x, eta, y)
adv_x = x + eta
if self.clip_min is not None and self.clip_max is not None:
adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
return adv_x
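# Illustrative usage sketch (not part of the original module) for the
# MadryEtAl (PGD) attack above, assuming a cleverhans-style `model`, a TF
# `sess`, an input tensor `x` and a training-phase placeholder `phase`
# supplied by the calling script.
def _example_madry_etal(model, sess, x, phase):
    pgd = MadryEtAl(model, back='tf', sess=sess)
    # eps bounds the total perturbation, eps_iter is the per-step size and
    # nb_iter the number of attack iterations; ord selects the norm ball.
    return pgd.generate(x, phase,
                        eps=0.3, eps_iter=0.01, nb_iter=40,
                        ord=np.inf,
                        clip_min=0., clip_max=1.)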
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from distutils.version import LooseVersion
import math
import numpy as np
import os
from six.moves import xrange
import tensorflow as tf
import time
import warnings
import logging
from .utils import batch_indices, _ArgsWrapper, create_logger, set_log_level
FLAGS = tf.app.flags.FLAGS
_logger = create_logger("cleverhans.utils.tf")
class _FlagsWrapper(_ArgsWrapper):
"""
Wrapper that tries to find missing parameters in TensorFlow FLAGS
for backwards compatibility.
Plain _ArgsWrapper should be used instead if the support for FLAGS
is removed.
"""
def __getattr__(self, name):
val = self.args.get(name)
if val is None:
warnings.warn('Setting parameters ({}) from TensorFlow FLAGS is '
'deprecated.'.format(name))
val = FLAGS.__getattr__(name)
return val
def model_loss(y, model, mean=True):
"""
Define loss of TF graph
:param y: correct labels
:param model: output of the model
:param mean: boolean indicating whether should return mean of loss
or vector of losses for each input of the batch
:return: return mean of loss if True, otherwise return vector with per
sample loss
"""
op = model.op
if "softmax" in str(op).lower():
logits, = op.inputs
else:
logits = model
out = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)
if mean:
out = tf.reduce_mean(out)
return out
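# Minimal sketch (not part of the original module) showing how model_loss is
# wired into a graph; the placeholder shapes and the 10-class output are
# assumptions for illustration only.
def _example_model_loss():
    y = tf.placeholder(tf.float32, [None, 10])       # one-hot labels
    logits = tf.placeholder(tf.float32, [None, 10])  # pre-softmax model output
    per_example = model_loss(y, logits, mean=False)  # shape [batch_size]
    mean_loss = model_loss(y, logits, mean=True)     # scalar
    return per_example, mean_loss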
def initialize_uninitialized_global_variables(sess):
"""
Only initializes the variables of a TensorFlow session that were not
already initialized.
:param sess: the TensorFlow session
:return:
"""
# List all global variables
global_vars = tf.global_variables()
# Find initialized status for all variables
is_var_init = [tf.is_variable_initialized(var) for var in global_vars]
is_initialized = sess.run(is_var_init)
# List all variables that were not initialized previously
not_initialized_vars = [var for (var, init) in
zip(global_vars, is_initialized) if not init]
# Initialize all uninitialized variables found, if any
if len(not_initialized_vars):
sess.run(tf.variables_initializer(not_initialized_vars))
def sign_changes_count_op(prv_k, k):
print(k)
print(prv_k)
return tf.cast(np.product(k.shape[:]), tf.int32) - tf.reduce_sum(
tf.cast(tf.equal(prv_k, k), tf.int32))
def normalized_sign_changes_op(prv_k, k):
return 1.0 - tf.reduce_sum(tf.cast(tf.equal(
prv_k, k), tf.float32)) / tf.cast(np.product(k.shape[:]), tf.float32)
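# Minimal sketch (not part of the original module): the two ops above compare
# two snapshots of a (binarized) kernel and report how many entries changed,
# as a raw count and as a fraction in [0, 1]. The 3x3x1x8 kernel shape is an
# assumption for illustration only.
def _example_sign_change_ops():
    prv_k = tf.placeholder(tf.float32, [3, 3, 1, 8])  # previous kernel snapshot
    k = tf.placeholder(tf.float32, [3, 3, 1, 8])      # current kernel snapshot
    changed = sign_changes_count_op(prv_k, k)
    frac_changed = normalized_sign_changes_op(prv_k, k)
    return changed, frac_changed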
def create_kernel_placeholder(model, i):
return tf.placeholder(
tf.float32, [model.layers[i].kernels.shape[0], model.layers[i].kernels.shape[1],
model.layers[i].kernels.shape[2], model.layers[i].kernels.shape[3]])
def model_train(sess, x, y, predictions, X_train, Y_train, model=None, phase=None,
writer=None, save=False, predictions_adv=None, init_all=False,
evaluate=None, verbose=True, feed=None, args=None, rng=None):
"""
Train a TF graph
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param init_all: (boolean) If set to true, all TF variables in the session
are (re)initialized, otherwise only previously
uninitialized variables are initialized before training.
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param verbose: (boolean) all print statements disabled when set to False.
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate` and `batch_size`.
If save is True, should also contain `log_dir` and `filename`.
:param rng: Instance of numpy.random.RandomState
:return: True if model trained
"""
args = _FlagsWrapper(args or {})
# Check that necessary arguments were given (see doc above)
assert args.nb_epochs, "Number of epochs was not given in args dict"
assert args.learning_rate, "Learning rate was not given in args dict"
assert args.batch_size, "Batch size was not given in args dict"
if save:
assert args.log_dir, "Directory for save was not given in args dict"
assert args.filename, "Filename for save was not given in args dict"
if not verbose:
set_log_level(logging.WARNING)
warnings.warn("verbose argument is deprecated and will be removed"
" on 2018-02-11. Instead, use utils.set_log_level()."
" For backward compatibility, log_level was set to"
" logging.WARNING (30).")
if rng is None:
rng = np.random.RandomState()
# Define loss
loss = model_loss(y, predictions)
if predictions_adv is not None:
loss = (loss + model_loss(y, predictions_adv)) / 2
with tf.variable_scope(args.train_scope, reuse=args.reuse_global_step):
global_step = tf.get_variable(
"global_step", dtype=tf.int32, initializer=tf.constant(0), trainable=False)
train_step = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
train_step = train_step.minimize(loss)
if writer is not None:
assert args.loss_name, "Name of scalar summary loss"
training_summary = tf.summary.scalar(args.loss_name, loss)
merge_op = tf.summary.merge_all()
with sess.as_default():
if hasattr(tf, "global_variables_initializer"):
if init_all:
tf.global_variables_initializer().run()
else:
initialize_uninitialized_global_variables(sess)
else:
warnings.warn("Update your copy of tensorflow; future versions of "
"CleverHans may drop support for this version.")
sess.run(tf.initialize_all_variables())
init_step = sess.run(global_step)
for epoch in xrange(args.nb_epochs):
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_train)
# Indices to shuffle training set
index_shuf = list(range(len(X_train)))
rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
step = init_step + (epoch * nb_batches + batch)
# Compute batch start and end indices
start, end = batch_indices(
batch, len(X_train), args.batch_size)
# Perform one training step
feed_dict = {x: X_train[index_shuf[start:end]],
y: Y_train[index_shuf[start:end]],
phase: args.is_training}
if feed is not None:
feed_dict.update(feed)
sess.run(train_step, feed_dict=feed_dict)
if batch % 100 == 0:
if writer is not None:
loss_val, merged_summ = sess.run(
[loss, merge_op], feed_dict=feed_dict)
writer.add_summary(merged_summ, step)
writer.flush()
else:
loss_val = sess.run(loss, feed_dict=feed_dict)
#print('epoch %d, batch %d, step %d, loss %.4f' %
# (epoch, batch, step, loss_val))
assert end >= len(X_train) # Check that all examples were used
cur = time.time()
if verbose:
_logger.info("Epoch " + str(epoch) + " took " +
str(cur - prev) + " seconds")
if evaluate is not None:
evaluate()
#global_step = step
if save:
save_path = os.path.join(args.log_dir, args.filename)
#save_path = args.log_dir
saver = tf.train.Saver()
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
saver.save(sess, save_path, global_step=step)
_logger.info("Completed model training and saved at: " +
str(save_path))
else:
_logger.info("Completed model training.")
return True
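# Illustrative usage sketch (not part of the original module) for model_train
# above. The placeholders, the prediction tensor and the numpy arrays are
# assumptions from the calling script. Besides the documented keys, the args
# dict is also expected to provide `train_scope`, `reuse_global_step` and
# `is_training`, which model_train reads internally.
def _example_model_train(sess, x, y, phase, predictions, X_train, Y_train):
    train_params = {
        'nb_epochs': 10,
        'learning_rate': 1e-3,
        'batch_size': 128,
        'train_scope': 'train',
        'reuse_global_step': False,
        'is_training': True,
    }
    rng = np.random.RandomState([2017, 8, 30])
    return model_train(sess, x, y, predictions, X_train, Y_train,
                       phase=phase, args=train_params, rng=rng)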
# Variation of model_train for the teacher model in distillation
def model_train_teacher(sess, x, y, predictions, logits, temperature, X_train, Y_train, model=None, phase=None,
writer=None, save=False, predictions_adv=None, init_all=False,
evaluate=None, verbose=True, feed=None, args=None, rng=None):
"""
Train a TF graph
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param init_all: (boolean) If set to true, all TF variables in the session
are (re)initialized, otherwise only previously
uninitialized variables are initialized before training.
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param verbose: (boolean) all print statements disabled when set to False.
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate` and `batch_size`.
If save is True, should also contain `log_dir` and `filename`.
:param rng: Instance of numpy.random.RandomState
:return: numpy array with the teacher's temperature-scaled (soft) predictions on the training set
"""
args = _FlagsWrapper(args or {})
# Check that necessary arguments were given (see doc above)
assert args.nb_epochs, "Number of epochs was not given in args dict"
assert args.learning_rate, "Learning rate was not given in args dict"
assert args.batch_size, "Batch size was not given in args dict"
if save:
assert args.log_dir, "Directory for save was not given in args dict"
assert args.filename, "Filename for save was not given in args dict"
if not verbose:
set_log_level(logging.WARNING)
warnings.warn("verbose argument is deprecated and will be removed"
" on 2018-02-11. Instead, use utils.set_log_level()."
" For backward compatibility, log_level was set to"
" logging.WARNING (30).")
if rng is None:
rng = np.random.RandomState()
# Define loss
# loss = model_loss(y, predictions)
loss = model_loss_temp(y, predictions, temperature)
if predictions_adv is not None:
loss = (loss + model_loss(y, predictions_adv)) / 2
with tf.variable_scope(args.train_scope, reuse=args.reuse_global_step):
teacher_global_step = tf.get_variable(
"teacher_global_step", dtype=tf.int32, initializer=tf.constant(0), trainable=False)
train_step = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
train_step = train_step.minimize(loss)
scaled_preds = tf.nn.softmax(logits / temperature)
scaled_preds_train = np.zeros([len(X_train), np.size(Y_train, 1)])
if writer is not None:
assert args.loss_name, "Name of scalar summary loss"
training_summary = tf.summary.scalar(args.loss_name, loss)
merge_op = tf.summary.merge_all()
with sess.as_default():
if hasattr(tf, "global_variables_initializer"):
if init_all:
tf.global_variables_initializer().run()
else:
initialize_uninitialized_global_variables(sess)
else:
warnings.warn("Update your copy of tensorflow; future versions of "
"CleverHans may drop support for this version.")
sess.run(tf.initialize_all_variables())
init_step = sess.run(teacher_global_step)
for epoch in xrange(args.nb_epochs):
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_train)
# Indices to shuffle training set
index_shuf = list(range(len(X_train)))
rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
step = init_step + (epoch * nb_batches + batch)
# Compute batch start and end indices
start, end = batch_indices(
batch, len(X_train), args.batch_size)
# Perform one training step
feed_dict = {x: X_train[index_shuf[start:end]],
y: Y_train[index_shuf[start:end]],
phase: args.is_training}
if feed is not None:
feed_dict.update(feed)
sess.run(train_step, feed_dict=feed_dict)
if epoch == args.nb_epochs - 1:
scaled_predicted = sess.run(scaled_preds, feed_dict=feed_dict)
scaled_preds_train[start:end] = scaled_predicted
if batch % 100 == 0:
if writer is not None:
loss_val, merged_summ = sess.run(
[loss, merge_op], feed_dict=feed_dict)
writer.add_summary(merged_summ, step)
writer.flush()
else:
loss_val = sess.run(loss, feed_dict=feed_dict)
#print('epoch %d, batch %d, step %d, loss %.4f' %
# (epoch, batch, step, loss_val))
assert end >= len(X_train) # Check that all examples were used
cur = time.time()
if verbose:
_logger.info("Epoch " + str(epoch) + " took " +
str(cur - prev) + " seconds")
if evaluate is not None:
evaluate()
#teacher_global_step = step
if save:
save_path = os.path.join(args.log_dir, args.filename)
#save_path = args.log_dir
saver = tf.train.Saver()
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
saver.save(sess, save_path, global_step=step)
_logger.info("Completed model training and saved at: " +
str(save_path))
else:
_logger.info("Completed model training.")
return scaled_preds_train
# Variation of model_train for the student model in distillation: the loss combines hard labels with the teacher's temperature-scaled predictions
def model_train_student(sess, x, y, predictions, temperature, X_train, Y_train, y_teacher=None,
teacher_preds=None, alpha=0.5, beta=0.5, model=None, phase=None, writer=None, save=False,
predictions_adv=None, init_all=False, evaluate=None, verbose=True, feed=None, args=None, rng=None):
"""
Train a TF graph
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param init_all: (boolean) If set to true, all TF variables in the session
are (re)initialized, otherwise only previously
uninitialized variables are initialized before training.
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param verbose: (boolean) all print statements disabled when set to False.
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate` and `batch_size`.
If save is True, should also contain `log_dir` and `filename`.
:param rng: Instance of numpy.random.RandomState
:return: True if model trained
"""
args = _FlagsWrapper(args or {})
# Check that necessary arguments were given (see doc above)
assert args.nb_epochs, "Number of epochs was not given in args dict"
assert args.learning_rate, "Learning rate was not given in args dict"
assert args.batch_size, "Batch size was not given in args dict"
if save:
assert args.log_dir, "Directory for save was not given in args dict"
assert args.filename, "Filename for save was not given in args dict"
if not verbose:
set_log_level(logging.WARNING)
warnings.warn("verbose argument is deprecated and will be removed"
" on 2018-02-11. Instead, use utils.set_log_level()."
" For backward compatibility, log_level was set to"
" logging.WARNING (30).")
if rng is None:
rng = np.random.RandomState()
# Define loss
# Incorporating both hard and soft labels for training
if y_teacher is not None:
loss = alpha*model_loss(y, predictions) + beta*model_loss_temp(y_teacher, predictions, temperature)
else:
loss = model_loss(y, predictions)
with tf.variable_scope(args.train_scope, reuse=args.reuse_global_step):
global_step = tf.get_variable(
"global_step", dtype=tf.int32, initializer=tf.constant(0), trainable=False)
train_step = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
train_step = train_step.minimize(loss)
if writer is not None:
assert args.loss_name, "Name of scalar summary loss"
training_summary = tf.summary.scalar(args.loss_name, loss)
merge_op = tf.summary.merge_all()
with sess.as_default():
if hasattr(tf, "global_variables_initializer"):
if init_all:
tf.global_variables_initializer().run()
else:
initialize_uninitialized_global_variables(sess)
else:
warnings.warn("Update your copy of tensorflow; future versions of "
"CleverHans may drop support for this version.")
sess.run(tf.initialize_all_variables())
init_step = sess.run(global_step)
for epoch in xrange(args.nb_epochs):
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_train)
# Indices to shuffle training set
index_shuf = list(range(len(X_train)))
rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
step = init_step + (epoch * nb_batches + batch)
# Compute batch start and end indices
start, end = batch_indices(
batch, len(X_train), args.batch_size)
# Perform one training step
feed_dict = {x: X_train[index_shuf[start:end]],
y: Y_train[index_shuf[start:end]],
y_teacher: teacher_preds[index_shuf[start:end]],
phase: args.is_training}
if feed is not None:
feed_dict.update(feed)
sess.run(train_step, feed_dict=feed_dict)
if batch % 100 == 0:
if writer is not None:
loss_val, merged_summ = sess.run(
[loss, merge_op], feed_dict=feed_dict)
writer.add_summary(merged_summ, step)
writer.flush()
else:
loss_val = sess.run(loss, feed_dict=feed_dict)
#print('epoch %d, batch %d, step %d, loss %.4f' %
# (epoch, batch, step, loss_val))
assert end >= len(X_train) # Check that all examples were used
cur = time.time()
if verbose:
_logger.info("Epoch " + str(epoch) + " took " +
str(cur - prev) + " seconds")
if evaluate is not None:
evaluate()
#global_step = step
if save:
save_path = os.path.join(args.log_dir, args.filename)
#save_path = args.log_dir
saver = tf.train.Saver()
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
saver.save(sess, save_path, global_step=step)
_logger.info("Completed model training and saved at: " +
str(save_path))
else:
_logger.info("Completed model training.")
return True
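# Illustrative usage sketch (not part of the original module) for the
# distillation pipeline above: the teacher is trained first and its
# temperature-scaled predictions are then fed to the student as soft labels.
# All tensors, arrays and the args dict are assumptions from the calling
# script (the args dict needs the same extra keys as in model_train).
def _example_distillation(sess, x, y, y_teacher, phase,
                          teacher_preds, teacher_logits, student_preds,
                          X_train, Y_train, train_params):
    temperature = 10.0
    # Teacher pass: returns the temperature-scaled (soft) training labels.
    soft_labels = model_train_teacher(sess, x, y, teacher_preds,
                                      teacher_logits, temperature,
                                      X_train, Y_train, phase=phase,
                                      args=train_params)
    # Student pass: alpha weights the hard-label loss and beta the soft-label
    # (teacher) loss computed at the same temperature.
    return model_train_student(sess, x, y, student_preds, temperature,
                               X_train, Y_train, y_teacher=y_teacher,
                               teacher_preds=soft_labels,
                               alpha=0.5, beta=0.5,
                               phase=phase, args=train_params)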
def model_train_inpgrad_reg(sess, x, y, predictions, X_train, Y_train, model=None, phase=None,
writer=None, save=False, predictions_adv=None, init_all=False,
evaluate=None, l2dbl = 0, l2cs = 0, verbose=True, feed=None, args=None, rng=None):
"""
Train a TF graph
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param init_all: (boolean) If set to true, all TF variables in the session
are (re)initialized, otherwise only previously
uninitialized variables are initialized before training.
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param verbose: (boolean) all print statements disabled when set to False.
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate` and `batch_size`.
If save is True, should also contain `log_dir` and `filename`.
:param rng: Instance of numpy.random.RandomState
:return: True if model trained
"""
args = _FlagsWrapper(args or {})
# Check that necessary arguments were given (see doc above)
assert args.nb_epochs, "Number of epochs was not given in args dict"
assert args.learning_rate, "Learning rate was not given in args dict"
assert args.batch_size, "Batch size was not given in args dict"
if save:
assert args.log_dir, "Directory for save was not given in args dict"
assert args.filename, "Filename for save was not given in args dict"
if not verbose:
set_log_level(logging.WARNING)
warnings.warn("verbose argument is deprecated and will be removed"
" on 2018-02-11. Instead, use utils.set_log_level()."
" For backward compatibility, log_level was set to"
" logging.WARNING (30).")
if rng is None:
rng = np.random.RandomState()
# Define loss
loss = model_loss(y, predictions) + model_loss_inpgrad_reg(x, y, predictions, l2dbl, l2cs)
if predictions_adv is not None:
loss = (loss + model_loss(y, predictions_adv)) / 2
with tf.variable_scope(args.train_scope, reuse=args.reuse_global_step):
global_step = tf.get_variable(
"global_step", dtype=tf.int32, initializer=tf.constant(0), trainable=False)
train_step = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
train_step = train_step.minimize(loss)
if writer is not None:
assert args.loss_name, "Name of scalar summary loss"
training_summary = tf.summary.scalar(args.loss_name, loss)
merge_op = tf.summary.merge_all()
with sess.as_default():
if hasattr(tf, "global_variables_initializer"):
if init_all:
tf.global_variables_initializer().run()
else:
initialize_uninitialized_global_variables(sess)
else:
warnings.warn("Update your copy of tensorflow; future versions of "
"CleverHans may drop support for this version.")
sess.run(tf.initialize_all_variables())
init_step = sess.run(global_step)
for epoch in xrange(args.nb_epochs):
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_train)
# Indices to shuffle training set
index_shuf = list(range(len(X_train)))
rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
step = init_step + (epoch * nb_batches + batch)
# Compute batch start and end indices
start, end = batch_indices(
batch, len(X_train), args.batch_size)
# Perform one training step
feed_dict = {x: X_train[index_shuf[start:end]],
y: Y_train[index_shuf[start:end]],
phase: args.is_training}
if feed is not None:
feed_dict.update(feed)
sess.run(train_step, feed_dict=feed_dict)
if batch % 100 == 0:
if writer is not None:
loss_val, merged_summ = sess.run(
[loss, merge_op], feed_dict=feed_dict)
writer.add_summary(merged_summ, step)
writer.flush()
else:
loss_val = sess.run(loss, feed_dict=feed_dict)
#print('epoch %d, batch %d, step %d, loss %.4f' %
# (epoch, batch, step, loss_val))
assert end >= len(X_train) # Check that all examples were used
cur = time.time()
if verbose:
_logger.info("Epoch " + str(epoch) + " took " +
str(cur - prev) + " seconds")
if evaluate is not None:
evaluate()
#global_step = step
if save:
save_path = os.path.join(args.log_dir, args.filename)
#save_path = args.log_dir
saver = tf.train.Saver()
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
saver.save(sess, save_path, global_step=step)
_logger.info("Completed model training and saved at: " +
str(save_path))
else:
_logger.info("Completed model training.")
return True
# Imagenet training
def model_train_imagenet(sess, x, y, predictions, train_iterator, X_train, Y_train, model=None, phase=None,
writer=None, save=False, predictions_adv=None, init_all=False,
evaluate=None, verbose=True, feed=None, args=None, rng=None):
"""
Train a TF graph
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param init_all: (boolean) If set to true, all TF variables in the session
are (re)initialized, otherwise only previously
uninitialized variables are initialized before training.
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param verbose: (boolean) all print statements disabled when set to False.
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate` and `batch_size`.
If save is True, should also contain `log_dir` and `filename`.
:param rng: Instance of numpy.random.RandomState
:return: True if model trained
"""
args = _FlagsWrapper(args or {})
# Check that necessary arguments were given (see doc above)
assert args.nb_epochs, "Number of epochs was not given in args dict"
assert args.learning_rate, "Learning rate was not given in args dict"
assert args.batch_size, "Batch size was not given in args dict"
if save:
assert args.log_dir, "Directory for save was not given in args dict"
assert args.filename, "Filename for save was not given in args dict"
if not verbose:
set_log_level(logging.WARNING)
warnings.warn("verbose argument is deprecated and will be removed"
" on 2018-02-11. Instead, use utils.set_log_level()."
" For backward compatibility, log_level was set to"
" logging.WARNING (30).")
if rng is None:
rng = np.random.RandomState()
# Define loss
loss = model_loss(y, predictions)
if predictions_adv is not None:
loss = (loss + model_loss(y, predictions_adv)) / 2
with tf.variable_scope(args.train_scope, reuse=args.reuse_global_step):
global_step = tf.get_variable(
"global_step", dtype=tf.int32, initializer=tf.constant(0), trainable=False)
learning_rate_tensor = tf.placeholder(tf.float32, shape=[])
if args.lowprecision:
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_step = tf.train.AdamOptimizer(learning_rate=learning_rate_tensor, epsilon=1e-5) #Copied epsilon from dorefanet
train_step = train_step.minimize(loss)
# Find the batch norm variables
all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
batch_vars = [var for var in all_vars if ('batchNorm' in var.name) and (('moving_mean' in var.name) or ('moving_variance' in var.name))]  # Moving mean and moving variance variables, which are not trainable but still have to be restored
save_vars = tf.trainable_variables() + batch_vars  # Save both the batch-norm statistics and the trainable variables
else:
train_step = tf.train.AdamOptimizer(learning_rate=learning_rate_tensor, epsilon=1e-5) #Copied epsilon from dorefanet
train_step = train_step.minimize(loss)
if writer is not None:
assert args.loss_name, "Name of scalar summary loss"
training_summary = tf.summary.scalar(args.loss_name, loss)
merge_op = tf.summary.merge_all()
with sess.as_default():
if hasattr(tf, "global_variables_initializer"):
if init_all:
tf.global_variables_initializer().run()
else:
initialize_uninitialized_global_variables(sess)
else:
warnings.warn("Update your copy of tensorflow; future versions of "
"CleverHans may drop support for this version.")
sess.run(tf.initialize_all_variables())
init_step = sess.run(global_step)
step = init_step
for epoch in xrange(args.nb_epochs):
prev = time.time()
# Initialize the iterator
sess.run(train_iterator.initializer)
if args.lowprecision:
if (epoch == 60):
args.learning_rate = 4e-5
if (epoch == 75):
args.learning_rate = 8e-6
else: # for full-precision (FP) models, decrease the learning rate at different epochs, following the tensorpack reference model
if (epoch == 30) or (epoch == 60) or (epoch == 80):
args.learning_rate = args.learning_rate/10
# Try feeding new values till end
num_batches = 0
try:
while True:
# Perform one training epoch
X_array, Y_array = sess.run([X_train, Y_train]) # Get np arrays of X and Y
feed_dict = {x: X_array,
y: Y_array,
phase: args.is_training,
learning_rate_tensor: args.learning_rate}
sess.run(train_step, feed_dict=feed_dict)
loss_val = sess.run(loss, feed_dict=feed_dict)
num_batches = num_batches+1
except tf.errors.OutOfRangeError:
pass
print('epoch %d, loss %.4f' %
(epoch, loss_val))
cur = time.time()
if verbose:
_logger.info("Epoch " + str(epoch) + " took " +
str(cur - prev) + " seconds")
if evaluate is not None:
evaluate()
step = step + num_batches # Training steps in batches
# save every 10 epochs
if save and (epoch % 10 == 0):
save_path = os.path.join(args.log_dir, args.filename)
#save_path = args.log_dir
if args.lowprecision: # The batch norm variables also need to be saved
saver = tf.train.Saver(save_vars)
else:
saver = tf.train.Saver()
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
saver.save(sess, save_path, global_step=step)
_logger.info("Completed model training and saved at: " +
str(save_path))
# Save at the end as well
if save:
save_path = os.path.join(args.log_dir, args.filename)
#save_path = args.log_dir
if args.lowprecision: # The batch norm variables also need to be saved
saver = tf.train.Saver(save_vars)
else:
saver = tf.train.Saver()
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
saver.save(sess, save_path, global_step=step)
_logger.info("Completed model training and saved at: " +
str(save_path))
else:
_logger.info("Completed model training.")
return True
def model_eval(sess, x, y, predictions=None, X_test=None, Y_test=None, phase=None, writer=None,
feed=None, args=None, model=None):
"""
Compute the accuracy of a TF model on some data
:param sess: TF session to use when evaluating the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_test: numpy array with test inputs
:param Y_test: numpy array with test outputs
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
:param model: (deprecated) if not None, holds model output predictions
:return: a float with the accuracy value
"""
args = _FlagsWrapper(args or {})
assert args.batch_size, "Batch size was not given in args dict"
if X_test is None or Y_test is None:
raise ValueError("X_test argument and Y_test argument "
"must be supplied.")
if model is None and predictions is None:
raise ValueError("One of model argument "
"or predictions argument must be supplied.")
if model is not None:
warnings.warn("model argument is deprecated. "
"Switch to predictions argument. "
"model argument will be removed after 2018-01-05.")
if predictions is None:
predictions = model
else:
raise ValueError("Exactly one of model argument"
" and predictions argument should be specified.")
# Define accuracy symbolically
if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
correct_preds = tf.equal(tf.argmax(y, axis=-1),
tf.argmax(predictions, axis=-1))
else:
correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
tf.argmax(predictions,
axis=tf.rank(predictions) - 1))
acc_value = tf.reduce_mean(tf.to_float(correct_preds))
# Init result var
accuracy = 0.0
if writer is not None:
eval_summary = tf.summary.scalar('acc', acc_value)
with sess.as_default():
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_test)
for batch in range(nb_batches):
if batch % 100 == 0 and batch > 0:
_logger.debug("Batch " + str(batch))
# Must not use the `batch_indices` function here, because it
# repeats some examples.
# It's acceptable to repeat during training, but not eval.
start = batch * args.batch_size
end = min(len(X_test), start + args.batch_size)
cur_batch_size = end - start
# The last batch may be smaller than all others, so we need to
# account for variable batch size here
feed_dict = {x: X_test[start:end],
y: Y_test[start:end],
phase: False}
if feed is not None:
feed_dict.update(feed)
if writer is not None:
cur_acc, eval_summ = sess.run(
[acc_value, eval_summary], feed_dict=feed_dict)
writer.add_summary(eval_summ, batch)
writer.flush()
else:
cur_acc = acc_value.eval(feed_dict=feed_dict)
accuracy += (cur_batch_size * cur_acc)
assert end >= len(X_test)
# Divide by number of examples to get final value
accuracy /= len(X_test)
return accuracy
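# Illustrative usage sketch (not part of the original module) for model_eval
# above; the placeholders, the prediction tensor and the test arrays are
# assumptions from the calling script.
def _example_model_eval(sess, x, y, phase, predictions, X_test, Y_test):
    eval_params = {'batch_size': 128}
    acc = model_eval(sess, x, y, predictions=predictions,
                     X_test=X_test, Y_test=Y_test,
                     phase=phase, args=eval_params)
    _logger.info('Test accuracy: %0.4f' % acc)
    return acc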
def model_eval_ensemble(sess, x, y, predictions=None, X_test=None, Y_test=None, phase=None, writer=None,
feed=None, args=None, model=None):
"""
Compute the accuracy of a TF model on some data
:param sess: TF session to use when evaluating the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_test: numpy array with test inputs
:param Y_test: numpy array with test outputs
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
:param model: (deprecated) if not None, holds model output predictions
:return: a float with the accuracy value
"""
args = _FlagsWrapper(args or {})
assert args.batch_size, "Batch size was not given in args dict"
if X_test is None or Y_test is None:
raise ValueError("X_test argument and Y_test argument "
"must be supplied.")
if model is None and predictions is None:
raise ValueError("One of model argument "
"or predictions argument must be supplied.")
if model is not None:
warnings.warn("model argument is deprecated. "
"Switch to predictions argument. "
"model argument will be removed after 2018-01-05.")
if predictions is None:
predictions = model
else:
raise ValueError("Exactly one of model argument"
" and predictions argument should be specified.")
# Define accuracy symbolically
if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
correct_preds = tf.equal(tf.argmax(y, axis=-1),
predictions)
else:
correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
predictions)
acc_value = tf.reduce_mean(tf.to_float(correct_preds))
# Init result var
accuracy = 0.0
if writer is not None:
eval_summary = tf.summary.scalar('acc', acc_value)
with sess.as_default():
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_test)
for batch in range(nb_batches):
if batch % 100 == 0 and batch > 0:
_logger.debug("Batch " + str(batch))
# Must not use the `batch_indices` function here, because it
# repeats some examples.
# It's acceptable to repeat during training, but not eval.
start = batch * args.batch_size
end = min(len(X_test), start + args.batch_size)
cur_batch_size = end - start
# The last batch may be smaller than all others, so we need to
# account for variable batch size here
feed_dict = {x: X_test[start:end],
y: Y_test[start:end],
phase: False}
if feed is not None:
feed_dict.update(feed)
if writer is not None:
cur_acc, eval_summ = sess.run(
[acc_value, eval_summary], feed_dict=feed_dict)
writer.add_summary(eval_summ, batch)
writer.flush()
else:
cur_acc = acc_value.eval(feed_dict=feed_dict)
accuracy += (cur_batch_size * cur_acc)
assert end >= len(X_test)
# Divide by number of examples to get final value
accuracy /= len(X_test)
return accuracy
def model_eval_imagenet(sess, x, y, predictions=None, test_iterator=None, X_test=None, Y_test=None, phase=None, writer=None,
feed=None, args=None, model=None):
"""
Compute the accuracy of a TF model on some data
:param sess: TF session to use when evaluating the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_test: numpy array with test inputs
:param Y_test: numpy array with test outputs
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
:param model: (deprecated) if not None, holds model output predictions
:return: a float with the accuracy value
"""
args = _FlagsWrapper(args or {})
assert args.batch_size, "Batch size was not given in args dict"
if X_test is None or Y_test is None:
raise ValueError("X_test argument and Y_test argument "
"must be supplied.")
if model is None and predictions is None:
raise ValueError("One of model argument "
"or predictions argument must be supplied.")
if model is not None:
warnings.warn("model argument is deprecated. "
"Switch to predictions argument. "
"model argument will be removed after 2018-01-05.")
if predictions is None:
predictions = model
else:
raise ValueError("Exactly one of model argument"
" and predictions argument should be specified.")
# Define accuracy symbolically
if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
correct_preds = tf.equal(tf.argmax(y, axis=-1),
tf.argmax(predictions, axis=-1))
else:
correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
tf.argmax(predictions,
axis=tf.rank(predictions) - 1))
acc_value = tf.reduce_mean(tf.to_float(correct_preds))
# Init result var
accuracy = 0.0
if writer is not None:
eval_summary = tf.summary.scalar('acc', acc_value)
with sess.as_default():
# Initialize the iterator
sess.run(test_iterator.initializer)
# Try feeding new values till end
num_batches = 0
try:
while True:
X_array, Y_array = sess.run([X_test, Y_test])
feed_dict = {x: X_array,
y: Y_array,
phase: False}
if feed is not None:
feed_dict.update(feed)
num_batches = num_batches + 1
if writer is not None:
cur_acc, eval_summ = sess.run(
[acc_value, eval_summary], feed_dict=feed_dict)
writer.add_summary(eval_summ, num_batches)
writer.flush()
else:
cur_acc = acc_value.eval(feed_dict=feed_dict)
accuracy += cur_acc
except tf.errors.OutOfRangeError:
pass
# acc_value is already the mean over each batch, so average over batches
accuracy = accuracy/num_batches
return accuracy
def model_eval_adv_imagenet(sess, x, y, predictions=None, test_iterator=None, X_test=None, Y_test=None,
phase=None, writer=None, feed=None, attacker=None, args=None, model=None, attack_params=None):
"""
Compute the accuracy of a TF model on some data
:param sess: TF session to use when evaluating the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_test: numpy array with test inputs
:param Y_test: numpy array with test outputs
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param attacker: attack object whose generate_np method is used to craft
adversarial examples from each test batch
:param attack_params: dictionary of parameters passed to attacker.generate_np
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
:param model: (deprecated) if not None, holds model output predictions
:return: a float with the accuracy value
"""
args = _FlagsWrapper(args or {})
assert args.batch_size, "Batch size was not given in args dict"
if X_test is None or Y_test is None:
raise ValueError("X_test argument and Y_test argument "
"must be supplied.")
if model is None and predictions is None:
raise ValueError("One of model argument "
"or predictions argument must be supplied.")
if model is not None:
warnings.warn("model argument is deprecated. "
"Switch to predictions argument. "
"model argument will be removed after 2018-01-05.")
if predictions is None:
predictions = model
else:
raise ValueError("Exactly one of model argument"
" and predictions argument should be specified.")
# Define accuracy symbolically
if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
correct_preds = tf.equal(tf.argmax(y, axis=-1),
tf.argmax(predictions, axis=-1))
else:
correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
tf.argmax(predictions,
axis=tf.rank(predictions) - 1))
acc_value = tf.reduce_mean(tf.to_float(correct_preds))
# Init result var
accuracy = 0.0
if writer is not None:
eval_summary = tf.summary.scalar('acc', acc_value)
with sess.as_default():
# Initialize the iterator
sess.run(test_iterator.initializer)
num_batches = 0
try:
while True:
X_array, Y_array = sess.run([X_test, Y_test])
X_shape = X_array.shape
if X_array.shape[0] < args.batch_size: # Last batch discarded to avoid error with CW attack
break
# Generate the adversarial examples
X_adv_array = attacker.generate_np(X_array, phase, **attack_params)
feed_dict = {x: X_adv_array,
y: Y_array,
phase: False}
if feed is not None:
feed_dict.update(feed)
num_batches = num_batches + 1
if writer is not None:
cur_acc, eval_summ = sess.run(
[acc_value, eval_summary], feed_dict=feed_dict)
writer.add_summary(eval_summ, num_batches)
writer.flush()
else:
cur_acc = acc_value.eval(feed_dict=feed_dict)
accuracy += cur_acc
except tf.errors.OutOfRangeError:
pass
# acc_value is already the mean over each batch, so average over batches
accuracy = accuracy/num_batches
return accuracy
def model_eval_ensemble_imagenet(sess, x, y, predictions=None, test_iterator=None, X_test=None, Y_test=None, phase=None, writer=None,
feed=None, args=None, model=None):
"""
Compute the accuracy of a TF model on some data
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_test: tensor yielding batches of test inputs
:param Y_test: tensor yielding batches of test labels
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
:param model: (deprecated) if not None, holds model output predictions
:return: a float with the accuracy value
"""
args = _FlagsWrapper(args or {})
assert args.batch_size, "Batch size was not given in args dict"
if X_test is None or Y_test is None:
raise ValueError("X_test argument and Y_test argument "
"must be supplied.")
if model is None and (predictions is None):
raise ValueError("One of model argument "
"or both predictions argument must be supplied.")
if model is not None:
warnings.warn("model argument is deprecated. "
"Switch to predictions argument. "
"model argument will be removed after 2018-01-05.")
if predictions is None:
predictions = model
else:
raise ValueError("Exactly one of model argument"
" and predictions argument should be specified.")
# Define accuracy symbolically
if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
correct_preds = tf.equal(tf.argmax(y, axis=-1),
predictions)
else:
correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
predictions)
acc_value = tf.reduce_mean(tf.to_float(correct_preds))
# Init result var
accuracy = 0.0
if writer is not None:
eval_summary = tf.summary.scalar('acc', acc_value)
with sess.as_default():
# Initialize the iterator
sess.run(test_iterator.initializer)
# Try feeding new values till end
num_batches = 0
try:
while True:
X_array, Y_array = sess.run([X_test, Y_test])
feed_dict = {x: X_array,
y: Y_array,
phase: False}
if feed is not None:
feed_dict.update(feed)
num_batches = num_batches + 1
if writer is not None:
cur_acc, eval_summ = sess.run(
[acc_value, eval_summary], feed_dict=feed_dict)
writer.add_summary(eval_summ, num_batches)
writer.flush()
else:
cur_acc = acc_value.eval(feed_dict=feed_dict)
accuracy += cur_acc
except tf.errors.OutOfRangeError:
pass
# Divide by the number of batches to get the final value
accuracy = accuracy/num_batches
return accuracy
def model_eval_ensemble_adv_imagenet(sess, x, y, predictions=None, test_iterator=None, X_test=None, Y_test=None, phase=None, writer=None,
feed=None, attacker=None, args=None, model=None, attack_params=None):
"""
Compute the accuracy of an ensemble TF model on adversarial examples crafted from a dataset iterator
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_test: tensor yielding batches of test inputs
:param Y_test: tensor yielding batches of test labels
:param attacker: attack object whose generate_np method crafts the adversarial examples
:param attack_params: dictionary of keyword arguments passed to the attacker
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
:param model: (deprecated) if not None, holds model output predictions
:return: a float with the accuracy value
"""
args = _FlagsWrapper(args or {})
assert args.batch_size, "Batch size was not given in args dict"
if X_test is None or Y_test is None:
raise ValueError("X_test argument and Y_test argument "
"must be supplied.")
if model is None and (predictions is None):
raise ValueError("One of model argument "
"or both predictions argument must be supplied.")
if model is not None:
warnings.warn("model argument is deprecated. "
"Switch to predictions argument. "
"model argument will be removed after 2018-01-05.")
if predictions is None:
predictions = model
else:
raise ValueError("Exactly one of model argument"
" and predictions argument should be specified.")
# Define accuracy symbolically
if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
correct_preds = tf.equal(tf.argmax(y, axis=-1),
predictions)
else:
correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
predictions)
acc_value = tf.reduce_mean(tf.to_float(correct_preds))
# Init result var
accuracy = 0.0
if writer is not None:
eval_summary = tf.summary.scalar('acc', acc_value)
with sess.as_default():
# Initialize the iterator
sess.run(test_iterator.initializer)
num_batches = 0
try:
while True:
X_array, Y_array = sess.run([X_test, Y_test])
X_shape = X_array.shape
if X_array.shape[0] < args.batch_size:
break
# Generate the adversarial examples
X_adv_array = attacker.generate_np(X_array, phase, **attack_params)
feed_dict = {x: X_adv_array,
y: Y_array,
phase: False}
if feed is not None:
feed_dict.update(feed)
num_batches = num_batches + 1
if writer is not None:
cur_acc, eval_summ = sess.run(
[acc_value, eval_summary], feed_dict=feed_dict)
writer.add_summary(eval_summ, num_batches)
writer.flush()
else:
cur_acc = acc_value.eval(feed_dict=feed_dict)
accuracy += cur_acc
except tf.errors.OutOfRangeError:
pass
# Divide by the number of batches to get the final value
accuracy = accuracy/num_batches
return accuracy
def tf_model_load(sess, file_path=None):
"""
:param sess: the session object to restore
:param file_path: path to the directory holding the checkpoint to restore;
if None it is built from FLAGS.log_dir and FLAGS.filename
:return:
"""
with sess.as_default():
saver = tf.train.Saver()
if file_path is None:
file_path = os.path.join(FLAGS.log_dir, FLAGS.filename)
saver.restore(sess, tf.train.latest_checkpoint(file_path))
return True
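# Illustrative usage sketch (not part of the original library): restoring a saved
# model with tf_model_load. The checkpoint directory below is a placeholder path, and
# the model variables are assumed to already exist in the default graph.
def _example_tf_model_load_usage(sess):
    return tf_model_load(sess, file_path="/path/to/checkpoint_dir")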
def batch_eval(sess, tf_inputs, tf_outputs, numpy_inputs, feed=None,
args=None):
"""
A helper function that computes a tensor on numpy inputs by batches.
:param sess:
:param tf_inputs:
:param tf_outputs:
:param numpy_inputs:
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
"""
args = _FlagsWrapper(args or {})
assert args.batch_size, "Batch size was not given in args dict"
n = len(numpy_inputs)
assert n > 0
assert n == len(tf_inputs)
m = numpy_inputs[0].shape[0]
for i in xrange(1, n):
assert numpy_inputs[i].shape[0] == m
out = []
for _ in tf_outputs:
out.append([])
with sess.as_default():
for start in xrange(0, m, args.batch_size):
batch = start // args.batch_size
if batch % 100 == 0 and batch > 0:
_logger.debug("Batch " + str(batch))
# Compute batch start and end indices
start = batch * args.batch_size
end = start + args.batch_size
numpy_input_batches = [numpy_input[start:end]
for numpy_input in numpy_inputs]
cur_batch_size = numpy_input_batches[0].shape[0]
assert cur_batch_size <= args.batch_size
for e in numpy_input_batches:
assert e.shape[0] == cur_batch_size
feed_dict = dict(zip(tf_inputs, numpy_input_batches))
if feed is not None:
feed_dict.update(feed)
numpy_output_batches = sess.run(tf_outputs, feed_dict=feed_dict)
for e in numpy_output_batches:
assert e.shape[0] == cur_batch_size, e.shape
for out_elem, numpy_output_batch in zip(out, numpy_output_batches):
out_elem.append(numpy_output_batch)
out = [np.concatenate(x, axis=0) for x in out]
for e in out:
assert e.shape[0] == m, e.shape
return out
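# Illustrative usage sketch (not part of the original library): evaluating an output
# tensor over a large numpy array in batches with batch_eval. The placeholder x,
# output tensor preds and array X_data are hypothetical.
def _example_batch_eval_usage(sess, x, preds, X_data):
    eval_args = {'batch_size': 128}  # illustrative batch size
    # batch_eval returns one concatenated numpy array per requested output tensor
    probs, = batch_eval(sess, [x], [preds], [X_data], args=eval_args)
    return probs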
def model_argmax(sess, x, predictions, samples, feed=None):
"""
Helper function that computes the current class prediction
:param sess: TF session
:param x: the input placeholder
:param predictions: the model's symbolic output
:param samples: numpy array with input samples (dims must match x)
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:return: the argmax output of predictions, i.e. the current predicted class
"""
feed_dict = {x: samples}
if feed is not None:
feed_dict.update(feed)
probabilities = sess.run(predictions, feed_dict)
if samples.shape[0] == 1:
return np.argmax(probabilities)
else:
return np.argmax(probabilities, axis=1)
def l2_batch_normalize(x, epsilon=1e-12, scope=None):
"""
Helper function to normalize a batch of vectors.
:param x: the input placeholder
:param epsilon: stabilizes division
:return: the batch of l2 normalized vector
"""
with tf.name_scope(scope, "l2_batch_normalize") as scope:
x_shape = tf.shape(x)
x = tf.contrib.layers.flatten(x)
x /= (epsilon + tf.reduce_max(tf.abs(x), 1, keep_dims=True))
square_sum = tf.reduce_sum(tf.square(x), 1, keep_dims=True)
x_inv_norm = tf.rsqrt(np.sqrt(epsilon) + square_sum)
x_norm = tf.multiply(x, x_inv_norm)
return tf.reshape(x_norm, x_shape, scope)
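# Illustrative sketch (not part of the original library): l2_batch_normalize rescales
# each example in a batch to roughly unit L2 norm while preserving its shape, which is
# useful for normalizing perturbation directions.
def _example_l2_batch_normalize_usage():
    v = tf.random_normal([4, 8, 8, 3])  # hypothetical batch of perturbations
    return l2_batch_normalize(v)        # same shape, per-example L2 norm close to 1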
def kl_with_logits(p_logits, q_logits, scope=None,
loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES):
"""Helper function to compute kl-divergence KL(p || q)
"""
with tf.name_scope(scope, "kl_divergence") as name:
p = tf.nn.softmax(p_logits)
p_log = tf.nn.log_softmax(p_logits)
q_log = tf.nn.log_softmax(q_logits)
loss = tf.reduce_mean(tf.reduce_sum(p * (p_log - q_log), axis=1),
name=name)
tf.losses.add_loss(loss, loss_collection)
return loss
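# Illustrative sketch (not part of the original library): kl_with_logits computes the
# batch-averaged KL(softmax(p_logits) || softmax(q_logits)) and registers it as a
# regularization loss. The logit tensors below are hypothetical, e.g. from clean and
# adversarial forward passes of the same model.
def _example_kl_with_logits_usage(clean_logits, adv_logits):
    # stop_gradient keeps the clean distribution fixed when used as a target
    return kl_with_logits(tf.stop_gradient(clean_logits), adv_logits)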
def clip_eta(eta, ord, eps):
"""
Helper function to clip the perturbation to epsilon norm ball.
:param eta: A tensor with the current perturbation.
:param ord: Order of the norm (mimics Numpy).
Possible values: np.inf, 1 or 2.
:param eps: Epsilon, bound of the perturbation.
"""
# Clipping perturbation eta to self.ord norm ball
if ord not in [np.inf, 1, 2]:
raise ValueError('ord must be np.inf, 1, or 2.')
if ord == np.inf:
eta = tf.clip_by_value(eta, -eps, eps)
elif ord in [1, 2]:
reduc_ind = list(xrange(1, len(eta.get_shape())))
if ord == 1:
norm = tf.reduce_sum(tf.abs(eta),
reduction_indices=reduc_ind,
keep_dims=True)
elif ord == 2:
norm = tf.sqrt(tf.reduce_sum(tf.square(eta),
reduction_indices=reduc_ind,
keep_dims=True))
eta = eta * eps / norm
return eta
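# Illustrative sketch (not part of the original library): clip_eta projects a
# perturbation onto an eps-ball of the requested norm; the eps values are illustrative.
def _example_clip_eta_usage(eta):
    eta_inf = clip_eta(eta, ord=np.inf, eps=0.3)  # elementwise clip to [-0.3, 0.3]
    eta_l2 = clip_eta(eta, ord=2, eps=1.0)        # rescale each example's L2 norm to eps
    return eta_inf, eta_l2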
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions for keeping track of the version of CleverHans.
These functions provide a finer level of granularity than the
manually specified version string attached to each release.
"""
import hashlib
from cleverhans.devtools.list_files import list_files
def dev_version():
"""
Returns a hexdigest of all the python files in the module.
"""
m = hashlib.md5()
py_files = sorted(list_files(suffix=".py"))
for filename in py_files:
with open(filename, 'rb') as f:
content = f.read()
m.update(content)
return m.hexdigest()
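# Illustrative sketch (not part of the original file): printing the development
# version hash, e.g. to record exactly which source tree produced an experiment.
if __name__ == '__main__':
    print(dev_version())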
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functionality for building tests.
We have to call this file "checks" and not anything with "test" as a
substring or nosetests will execute it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import time
import unittest
class CleverHansTest(unittest.TestCase):
def setUp(self):
self.test_start = time.time()
# seed the randomness
np.random.seed(1234)
def tearDown(self):
print(self.id(), "took", time.time() - self.test_start, "seconds")
def assertClose(self, x, y, *args, **kwargs):
# self.assertTrue(np.allclose(x, y)) doesn't give a useful message
# on failure
assert np.allclose(x, y, *args, **kwargs), (x, y)
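# Illustrative sketch (not part of the original file): a minimal test case built on
# CleverHansTest, showing how the assertClose helper is used.
class _ExampleCleverHansTest(CleverHansTest):
    def test_assert_close_accepts_small_differences(self):
        a = np.zeros(3)
        self.assertClose(a, a + 1e-9)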
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for listing files that belong to the library."""
import logging
import cleverhans
import os
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
logger = logging.getLogger(__name__)
def list_files(suffix=""):
"""
Returns a list of all files in CleverHans with the given suffix.
Parameters
----------
suffix : str
Returns
-------
file_list : list
A list of all files in CleverHans whose filepath ends with `suffix`
"""
cleverhans_path = os.path.abspath(cleverhans.__path__[0])
repo_path = os.path.abspath(os.path.join(cleverhans_path, os.pardir))
file_list = _list_files(cleverhans_path, suffix)
tutorials_path = os.path.join(repo_path, "cleverhans_tutorials")
tutorials_files = _list_files(tutorials_path, suffix)
tutorials_files = [os.path.join(os.pardir, path) for path in
tutorials_files]
examples_path = os.path.join(repo_path, "examples")
examples_files = _list_files(examples_path, suffix)
examples_files = [os.path.join(os.pardir, path) for path in
examples_files]
file_list = file_list + tutorials_files + examples_files
return file_list
def _list_files(path, suffix=""):
"""
Returns a list of all files ending in `suffix` contained within `path`.
Parameters
----------
path : str
a filepath
suffix : str
Returns
-------
l : list
A list of all files ending in `suffix` contained within `path`.
(If `path` is a file rather than a directory, it is considered
to "contain" itself)
"""
if os.path.isdir(path):
incomplete = os.listdir(path)
complete = [os.path.join(path, entry) for entry in incomplete]
lists = [_list_files(subpath, suffix) for subpath in complete]
flattened = []
for l in lists:
for elem in l:
flattened.append(elem)
return flattened
else:
assert os.path.exists(path), "couldn't find file '%s'" % path
if path.endswith(suffix):
return [path]
return []
if __name__ == '__main__':
# Print all .py files in the library
result = list_files('.py')
for path in result:
logger.info(path)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for mocking up tests.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def random_feed_dict(rng, placeholders):
"""
Returns random data to be used with `feed_dict`.
:param rng: A numpy.random.RandomState instance
:param placeholders: List of tensorflow placeholders
:return: A dict mapping placeholders to random numpy values
"""
output = {}
for placeholder in placeholders:
if placeholder.dtype != 'float32':
raise NotImplementedError()
value = rng.randn(*placeholder.shape).astype('float32')
output[placeholder] = value
return output
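# Illustrative sketch (not part of the original file): building a random feed_dict for
# a float32 placeholder. Placeholder shapes must be fully defined so that
# rng.randn(*placeholder.shape) can materialize an array.
def _example_random_feed_dict_usage():
    import numpy as np
    import tensorflow as tf
    x = tf.placeholder(tf.float32, shape=(2, 3))
    return random_feed_dict(np.random.RandomState(0), [x])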
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract reference documentation from the NumPy source tree.
"""
from __future__ import print_function
import inspect
from nose.plugins.skip import SkipTest
import re
import sys
import six
class Reader(object):
"""A line-based string reader.
"""
def __init__(self, data):
"""
Parameters
----------
data : str
String with lines separated by '\n'.
"""
if isinstance(data, list):
self._str = data
else:
self._str = data.split('\n') # store string as list of lines
self.reset()
def __getitem__(self, n):
return self._str[n]
def reset(self):
self._l = 0 # current line nr
def read(self):
if not self.eof():
out = self[self._l]
self._l += 1
return out
else:
return ''
def seek_next_non_empty_line(self):
for l in self[self._l:]:
if l.strip():
break
else:
self._l += 1
def eof(self):
return self._l >= len(self._str)
def read_to_condition(self, condition_func):
start = self._l
for line in self[start:]:
if condition_func(line):
return self[start:self._l]
self._l += 1
if self.eof():
return self[start:self._l + 1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
def read_to_next_unindented_line(self):
def is_unindented(line):
return (line.strip() and (len(line.lstrip()) == len(line)))
return self.read_to_condition(is_unindented)
def peek(self, n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
return ''
def is_empty(self):
return not ''.join(self._str).strip()
def __iter__(self):
for line in self._str:
yield line
class NumpyDocString(object):
def __init__(self, docstring, name=None):
if name:
self.name = name
docstring = docstring.split('\n')
# De-indent paragraph
try:
indent = min(len(s) - len(s.lstrip()) for s in docstring
if s.strip())
except ValueError:
indent = 0
for n, line in enumerate(docstring):
docstring[n] = docstring[n][indent:]
self._doc = Reader(docstring)
self._parsed_data = {
'Signature': '',
'Summary': '',
'Extended Summary': [],
'Parameters': [],
'Other Parameters': [],
'Returns': [],
'Raises': [],
'Warns': [],
'See Also': [],
'Notes': [],
'References': '',
'Examples': '',
'index': {},
'Attributes': [],
'Methods': [],
}
self.section_order = []
self._parse()
def __getitem__(self, key):
return self._parsed_data[key]
def __setitem__(self, key, val):
if key not in self._parsed_data:
raise ValueError("Unknown section %s" % key)
else:
self._parsed_data[key] = val
def _is_at_section(self):
self._doc.seek_next_non_empty_line()
if self._doc.eof():
return False
l1 = self._doc.peek().strip() # e.g. Parameters
if l1.startswith('.. index::'):
return True
l2 = self._doc.peek(1).strip() # ----------
return (len(l1) == len(l2) and l2 == '-' * len(l1))
def _strip(self, doc):
i = 0
j = 0
for i, line in enumerate(doc):
if line.strip():
break
for j, line in enumerate(doc[::-1]):
if line.strip():
break
return doc[i:len(doc) - j]
def _read_to_next_section(self):
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and not self._doc.eof():
if not self._doc.peek(-1).strip(): # previous line was empty
section += ['']
section += self._doc.read_to_next_empty_line()
return section
def _read_sections(self):
while not self._doc.eof():
data = self._read_to_next_section()
name = data[0].strip()
if name.startswith('..'): # index section
yield name, data[1:]
elif len(data) < 2:
yield StopIteration
else:
yield name, self._strip(data[2:])
def _parse_param_list(self, content):
r = Reader(content)
params = []
while not r.eof():
header = r.read().strip()
if ' : ' in header:
arg_name, arg_type = header.split(' : ')[:2]
else:
arg_name, arg_type = header, ''
desc = r.read_to_next_unindented_line()
for n, line in enumerate(desc):
desc[n] = line.strip()
desc = desc # '\n'.join(desc)
params.append((arg_name, arg_type, desc))
return params
def _parse_see_also(self, content):
"""
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, func_name3
"""
functions = []
current_func = None
rest = []
for line in content:
if not line.strip():
continue
if ':' in line:
if current_func:
functions.append((current_func, rest))
r = line.split(':', 1)
current_func = r[0].strip()
r[1] = r[1].strip()
if r[1]:
rest = [r[1]]
else:
rest = []
elif not line.startswith(' '):
if current_func:
functions.append((current_func, rest))
current_func = None
rest = []
if ',' in line:
for func in line.split(','):
func = func.strip()
if func:
functions.append((func, []))
elif line.strip():
current_func = line.strip()
elif current_func is not None:
rest.append(line.strip())
if current_func:
functions.append((current_func, rest))
return functions
def _parse_index(self, section, content):
"""
.. index: default
:refguide: something, else, and more
"""
def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
section = section.split('::')
if len(section) > 1:
out['default'] = strip_each_in(section[1].split(','))[0]
for line in content:
line = line.split(':')
if len(line) > 2:
out[line[1]] = strip_each_in(line[2].split(','))
return out
def _parse_summary(self):
"""Grab signature (if given) and summary"""
summary = self._doc.read_to_next_empty_line()
summary_str = "\n".join([s.strip() for s in summary])
if re.compile('^([\w. ]+=)?[\w\.]+\(.*\)$').match(summary_str):
self['Signature'] = summary_str
if not self._is_at_section():
self['Summary'] = self._doc.read_to_next_empty_line()
elif re.compile('^[\w]+\n[-]+').match(summary_str):
self['Summary'] = ''
self._doc.reset()
else:
self['Summary'] = summary
if not self._is_at_section():
self['Extended Summary'] = self._read_to_next_section()
def _parse(self):
self._doc.reset()
self._parse_summary()
for (section, content) in self._read_sections():
if not section.startswith('..'):
section = ' '.join([s.capitalize()
for s in section.split(' ')])
if section in ('Parameters', 'Other Parameters', 'Returns',
'Raises', 'Warns', 'Attributes', 'Methods'):
self[section] = self._parse_param_list(content)
self.section_order.append(section)
elif section.startswith('.. index::'):
self['index'] = self._parse_index(section, content)
self.section_order.append('index')
elif section.lower() == 'see also':
self['See Also'] = self._parse_see_also(content)
self.section_order.append('See Also')
else:
self[section] = content
self.section_order.append(section)
# string conversion routines
def _str_header(self, name, symbol='-'):
return [name, len(name) * symbol]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
if not self['Signature']:
return []
return ["*%s*" % self['Signature'].replace('*', '\*')] + ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
for param, param_type, desc in self[name]:
out += ['%s : %s' % (param, param_type)]
out += self._str_indent(desc)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += self[name]
out += ['']
return out
def _str_see_also(self):
if not self['See Also']:
return []
out = []
out += self._str_header("See Also")
last_had_desc = True
for func, desc in self['See Also']:
if desc or last_had_desc:
out += ['']
out += ["`%s`_" % func]
else:
out[-1] += ", `%s`_" % func
if desc:
out += self._str_indent(desc)
last_had_desc = True
else:
last_had_desc = False
out += ['']
return out
def _str_index(self):
idx = self['index']
out = []
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in six.iteritems(idx):
if section == 'default':
continue
out += [' :%s: %s' % (section, ', '.join(references))]
return out
def __str__(self):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Other Parameters',
'Returns', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_see_also()
for s in ('Notes', 'References', 'Examples'):
out += self._str_section(s)
out += self._str_index()
return '\n'.join(out)
# --
def get_errors(self, check_order=True):
errors = []
self._doc.reset()
for j, line in enumerate(self._doc):
if len(line) > 75:
if hasattr(self, 'name'):
errors.append("%s: Line %d exceeds 75 chars"
": \"%s\"..." % (self.name, j + 1,
line[:30]))
else:
errors.append("Line %d exceeds 75 chars"
": \"%s\"..." % (j + 1, line[:30]))
if check_order:
canonical_order = ['Signature', 'Summary', 'Extended Summary',
'Attributes', 'Methods', 'Parameters',
'Other Parameters', 'Returns', 'Raises',
'Warns',
'See Also', 'Notes', 'References', 'Examples',
'index']
canonical_order_copy = list(canonical_order)
for s in self.section_order:
while canonical_order_copy and s != canonical_order_copy[0]:
canonical_order_copy.pop(0)
if not canonical_order_copy:
errors.append(
"Sections in wrong order (starting at %s). The"
" right order is %s" % (s, canonical_order))
return errors
def indent(str, indent=4):
indent_str = ' ' * indent
if str is None:
return indent_str
lines = str.split('\n')
return '\n'.join(indent_str + l for l in lines)
class NumpyFunctionDocString(NumpyDocString):
def __init__(self, docstring, function):
super(NumpyFunctionDocString, self).__init__(docstring)
args, varargs, keywords, defaults = inspect.getargspec(function)
if (args and args != ['self']) or varargs or keywords or defaults:
self.has_parameters = True
else:
self.has_parameters = False
def _parse(self):
self._parsed_data = {
'Signature': '',
'Summary': '',
'Extended Summary': [],
'Parameters': [],
'Other Parameters': [],
'Returns': [],
'Raises': [],
'Warns': [],
'See Also': [],
'Notes': [],
'References': '',
'Examples': '',
'index': {},
}
return NumpyDocString._parse(self)
def get_errors(self):
errors = NumpyDocString.get_errors(self)
if not self['Signature']:
# This check is currently too restrictive.
# Disabling it for now.
# errors.append("No function signature")
pass
if not self['Summary']:
errors.append("No function summary line")
if len(" ".join(self['Summary'])) > 3 * 80:
errors.append("Brief function summary is longer than 3 lines")
if not self['Parameters'] and self.has_parameters:
errors.append("No Parameters section")
return errors
class NumpyClassDocString(NumpyDocString):
def __init__(self, docstring, class_name, class_object):
super(NumpyClassDocString, self).__init__(docstring)
self.class_name = class_name
methods = dict((name, func) for name, func
in inspect.getmembers(class_object))
self.has_parameters = False
if '__init__' in methods:
# verify if __init__ is a Python function. If it isn't
# (e.g. the function is implemented in C), getargspec will fail
if not inspect.ismethod(methods['__init__']):
return
args, varargs, keywords, defaults = inspect.getargspec(
methods['__init__'])
if (args and args != ['self']) or varargs or keywords or defaults:
self.has_parameters = True
def _parse(self):
self._parsed_data = {
'Signature': '',
'Summary': '',
'Extended Summary': [],
'Parameters': [],
'Other Parameters': [],
'Raises': [],
'Warns': [],
'See Also': [],
'Notes': [],
'References': '',
'Examples': '',
'index': {},
'Attributes': [],
'Methods': [],
}
return NumpyDocString._parse(self)
def __str__(self):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Attributes', 'Methods', 'Parameters', 'Raises',
'Warns'):
out += self._str_param_list(param_list)
out += self._str_see_also()
for s in ('Notes', 'References', 'Examples'):
out += self._str_section(s)
out += self._str_index()
return '\n'.join(out)
def get_errors(self):
errors = NumpyDocString.get_errors(self)
if not self['Parameters'] and self.has_parameters:
errors.append("%s class has no Parameters section"
% self.class_name)
return errors
class NumpyModuleDocString(NumpyDocString):
"""
Module doc strings: no parsing is done.
"""
def _parse(self):
self.out = []
def __str__(self):
return "\n".join(self._doc._str)
def get_errors(self):
errors = NumpyDocString.get_errors(self, check_order=False)
return errors
def header(text, style='-'):
return text + '\n' + style * len(text) + '\n'
class SphinxDocString(NumpyDocString):
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['**' + name + '**'] + [symbol * (len(name) + 4)]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['``%s``' % self['Signature'].replace('*', '\*')] + ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param, param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = self._str_indent(self[name])
out += content
out += ['']
return out
def _str_index(self):
idx = self['index']
out = []
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in six.iteritems(idx):
if section == 'default':
continue
out += [' :%s: %s' % (section, ', '.join(references))]
return out
def __str__(self, indent=0):
out = []
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
for s in ('Notes', 'References', 'Examples'):
out += self._str_section(s)
# out += self._str_index()
out = self._str_indent(out, indent)
return '\n'.join(out)
class FunctionDoc(object):
def __init__(self, func):
self._f = func
def __str__(self):
out = ''
doclines = inspect.getdoc(self._f) or ''
try:
doc = SphinxDocString(doclines)
except Exception as e:
print('*' * 78)
print("ERROR: '%s' while parsing `%s`" % (e, self._f))
print('*' * 78)
# print "Docstring follows:"
# print doclines
# print '='*78
return out
if doc['Signature']:
out += '%s\n' % header('**%s**' %
doc['Signature'].replace('*', '\*'), '-')
else:
try:
# try to read signature
argspec = inspect.getargspec(self._f)
argspec = inspect.formatargspec(*argspec)
argspec = argspec.replace('*', '\*')
out += header('%s%s' % (self._f.__name__, argspec), '-')
except TypeError as e:
out += '%s\n' % header('**%s()**' % self._f.__name__, '-')
out += str(doc)
return out
class ClassDoc(object):
def __init__(self, cls, modulename=''):
if not inspect.isclass(cls):
raise ValueError("Initialise using an object")
self._cls = cls
if modulename and not modulename.endswith('.'):
modulename += '.'
self._mod = modulename
self._name = cls.__name__
@property
def methods(self):
return [name for name, func in inspect.getmembers(self._cls)
if not name.startswith('_') and callable(func)]
def __str__(self):
out = ''
def replace_header(match):
return '"' * (match.end() - match.start())
for m in self.methods:
print("Parsing `%s`" % m)
out += str(FunctionDoc(getattr(self._cls, m))) + '\n\n'
out += '.. index::\n single: %s; %s\n\n' % (self._name, m)
return out
def handle_function(val, name):
func_errors = []
docstring = inspect.getdoc(val)
if docstring is None:
func_errors.append((name, '**missing** function-level docstring'))
else:
func_errors = [
(name, e) for e in
NumpyFunctionDocString(docstring, val).get_errors()
]
return func_errors
def handle_module(val, name):
module_errors = []
docstring = val
if docstring is None:
module_errors.append((name, '**missing** module-level docstring'))
else:
module_errors = [
(name, e) for e in NumpyModuleDocString(docstring).get_errors()
]
return module_errors
def handle_method(method, method_name, class_name):
method_errors = []
# Skip out-of-library inherited methods
module = inspect.getmodule(method)
if module is not None:
if not module.__name__.startswith('pylearn2'):
return method_errors
docstring = inspect.getdoc(method)
if docstring is None:
method_errors.append((class_name, method_name,
'**missing** method-level docstring'))
else:
method_errors = [
(class_name, method_name, e) for e in
NumpyFunctionDocString(docstring, method).get_errors()
]
return method_errors
def handle_class(val, class_name):
cls_errors = []
docstring = inspect.getdoc(val)
if docstring is None:
cls_errors.append((class_name,
'**missing** class-level docstring'))
else:
cls_errors = [
(e,) for e in
NumpyClassDocString(docstring, class_name, val).get_errors()
]
# Get public methods and parse their docstrings
methods = dict(((name, func) for name, func in inspect.getmembers(val)
if not name.startswith('_') and callable(func) and
type(func) is not type))
for m_name, method in six.iteritems(methods):
# skip error check if the method was inherited
# from a parent class (which means it wasn't
# defined in this source file)
if inspect.getmodule(method) is not None:
continue
cls_errors.extend(handle_method(method, m_name, class_name))
return cls_errors
def docstring_errors(filename, global_dict=None):
"""
Run a Python file, parse the docstrings of all the classes
and functions it declares, and return them.
Parameters
----------
filename : str
Filename of the module to run.
global_dict : dict, optional
Globals dictionary to pass along to `execfile()`.
Returns
-------
all_errors : list
Each entry of the list is a tuple, of length 2 or 3, with
format either
(func_or_class_name, docstring_error_description)
or
(class_name, method_name, docstring_error_description)
"""
if global_dict is None:
global_dict = {}
if '__file__' not in global_dict:
global_dict['__file__'] = filename
if '__doc__' not in global_dict:
global_dict['__doc__'] = None
try:
with open(filename) as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_dict)
except SystemExit:
pass
except SkipTest:
raise AssertionError("Couldn't verify format of " + filename +
"due to SkipTest")
all_errors = []
for key, val in six.iteritems(global_dict):
if not key.startswith('_'):
module_name = ""
if hasattr(inspect.getmodule(val), '__name__'):
module_name = inspect.getmodule(val).__name__
if (inspect.isfunction(val) or inspect.isclass(val)) and\
(inspect.getmodule(val) is None
or module_name == '__builtin__'):
if inspect.isfunction(val):
all_errors.extend(handle_function(val, key))
elif inspect.isclass(val):
all_errors.extend(handle_class(val, key))
elif key == '__doc__':
all_errors.extend(handle_module(val, key))
if all_errors:
all_errors.insert(0, ("%s:" % filename,))
return all_errors
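# Illustrative sketch (not part of the original file): parsing a small NumPy-style
# docstring directly with NumpyDocString and inspecting the result. The docstring text
# below is made up for the example.
def _example_numpydocstring_usage():
    doc = NumpyDocString("Add two numbers.\n"
                         "\n"
                         "Parameters\n"
                         "----------\n"
                         "a : int\n"
                         "    First operand.\n"
                         "b : int\n"
                         "    Second operand.\n")
    # doc['Parameters'] holds (name, type, description) tuples; get_errors() reports
    # formatting problems such as over-long lines or out-of-order sections.
    return doc['Parameters'], doc.get_errors()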
if __name__ == "__main__":
all_errors = docstring_errors(sys.argv[1])
if len(all_errors) > 0:
print("*" * 30, "docstring errors", "*" * 30)
for line in all_errors:
print(':'.join(line))
sys.exit(int(len(all_errors) > 0))
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for format checking
"""
from __future__ import print_function
from nose.plugins.skip import SkipTest
import os
import modified_cleverhans
from modified_cleverhans.devtools.tests.docscrape import docstring_errors
from modified_cleverhans.devtools.list_files import list_files
from pycodestyle import StyleGuide
# Enter a manual list of files that are allowed to violate PEP8 here
whitelist_pep8 = [
]
# The NIPS 2017 competition code is allowed to violate PEP8 because it
# follows the Google style guide instead (e.g., 2 spaces instead of 4)
whitelist_pep8.extend([os.path.relpath(path, modified_cleverhans.__path__[0])
for path in list_files()
if "nips17_adversarial_competition" in path])
whitelist_docstrings = [
]
def test_format_pep8():
"""
Test if pep8 is respected.
"""
pep8_checker = StyleGuide()
files_to_check = []
for path in list_files(".py"):
rel_path = os.path.relpath(path, modified_cleverhans.__path__[0])
if rel_path in whitelist_pep8:
continue
else:
files_to_check.append(path)
report = pep8_checker.check_files(files_to_check)
if report.total_errors > 0:
raise AssertionError("PEP8 Format not respected")
def print_files_information_pep8():
"""
Print the list of files which can be removed from the whitelist and the
list of files which do not respect PEP8 formatting that aren't in the
whitelist
"""
infracting_files = []
non_infracting_files = []
pep8_checker = StyleGuide(quiet=True)
for path in list_files(".py"):
number_of_infractions = pep8_checker.input_file(path)
rel_path = os.path.relpath(path, modified_cleverhans.__path__[0])
if number_of_infractions > 0:
if rel_path not in whitelist_pep8:
infracting_files.append(path)
else:
if rel_path in whitelist_pep8:
non_infracting_files.append(path)
print("Files that must be corrected or added to whitelist:")
for file in infracting_files:
print(file)
print("Files that can be removed from whitelist:")
for file in non_infracting_files:
print(file)
def test_format_docstrings():
"""
Test if docstrings are well formatted.
"""
# Disabled for now
return True
try:
verify_format_docstrings()
except SkipTest as e:
import traceback
traceback.print_exc()
raise AssertionError(
"Some file raised SkipTest on import, and inadvertently"
" canceled the documentation testing."
)
def verify_format_docstrings():
"""
Implementation of `test_format_docstrings`. The implementation is
factored out so it can be placed inside a guard against SkipTest.
"""
format_infractions = []
for path in list_files(".py"):
rel_path = os.path.relpath(path, modified_cleverhans.__path__[0])
if rel_path in whitelist_docstrings:
continue
try:
format_infractions.extend(docstring_errors(path))
except Exception as e:
format_infractions.append(["%s failed to run so format cannot "
"be checked. Error message:\n %s" %
(rel_path, e)])
if len(format_infractions) > 0:
msg = "\n".join(':'.join(line) for line in format_infractions)
raise AssertionError("Docstring format not respected:\n%s" % msg)
if __name__ == "__main__":
print_files_information_pep8()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from six.moves import xrange
import tensorflow as tf
from tensorflow.python.platform import flags
import time
import argparse
import logging
import os
import sys
from modified_cleverhans.utils import parse_model_settings, build_model_save_path
from modified_cleverhans.utils import set_log_level, AccuracyReport
from modified_cleverhans.utils_mnist import data_mnist
from modified_cleverhans.utils_tf import model_train, model_eval, model_eval_ensemble, batch_eval, tf_model_load
from modified_cleverhans.utils_tf import model_train_teacher, model_train_student, model_train_inpgrad_reg #for training with input gradient regularization
FLAGS = flags.FLAGS
# Scaling input to softmax
INIT_T = 1.0
#ATTACK_T = 1.0
ATTACK_T = 0.25
# enum attack types
ATTACK_CARLINI_WAGNER_L2 = 0
ATTACK_JSMA = 1
ATTACK_FGSM = 2
ATTACK_MADRYETAL = 3
ATTACK_BASICITER = 4
# enum adversarial training types
ADVERSARIAL_TRAINING_MADRYETAL = 1
ADVERSARIAL_TRAINING_FGSM = 2
MAX_EPS = 0.3
MAX_BATCH_SIZE = 100
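# Note (added for clarity, not in the original script): the integer constants above are
# the values expected by the `attack` and `adv` arguments of mnist_attack below, e.g.
# attack=ATTACK_FGSM selects FastGradientMethod and adv=ADVERSARIAL_TRAINING_MADRYETAL
# enables Madry et al. adversarial training; MAX_EPS and MAX_BATCH_SIZE cap the
# training-attack strength and the attack batch size respectively.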
def mnist_attack(train_start=0, train_end=60000, test_start=0,
test_end=10000, viz_enabled=True, nb_epochs=6,
batch_size=128, nb_filters=64,
nb_samples=10, learning_rate=0.001,
eps=0.3, attack=0,
attack_iterations=100, model_path=None,
targeted=False, rand=False,
stocRound=False, lowprecision=False,
wbits=0, abits=0, wbitsList=0, abitsList=0, wbits2=0, abits2=0, wbits2List=0, abits2List=0,
ensembleThree=False, model_path1=None, model_path2=None, model_path3=None,
distill = False, inpgradreg = False, l2dbl = 0, l2cs = 0,
debug=None, test=False,
data_dir=None, delay=0, adv=0, nb_iter=40):
"""
MNIST tutorial for generic attack
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param viz_enabled: (boolean) activate plots of adversarial examples
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param nb_classes: number of output classes
:param nb_samples: number of test inputs to attack
:param learning_rate: learning rate for training
:param model_path: path to the model file
:param targeted: should we run a targeted attack? or untargeted?
:return: an AccuracyReport object
"""
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# MNIST-specific dimensions
img_rows = 28
img_cols = 28
channels = 1
nb_classes = 10
# Set TF random seed to improve reproducibility
tf.set_random_seed(1237)
# Create TF session
sess = tf.Session()
print("Created TensorFlow session.")
if debug:
set_log_level(logging.DEBUG)
else:
set_log_level(logging.WARNING) # for running on sharcnet
# Get MNIST test data
X_train, Y_train, X_test, Y_test = data_mnist(datadir=data_dir,
train_start=train_start,
train_end=train_end,
test_start=test_start,
test_end=test_end)
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, channels))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
phase = tf.placeholder(tf.bool, name='phase')
# for attempting to break unscaled network.
logits_scalar = tf.placeholder_with_default(
INIT_T, shape=(), name="logits_temperature")
save = False
train_from_scratch = False
if ensembleThree:
if (model_path1 is None or model_path2 is None or model_path3 is None):
train_from_scratch = True
else:
train_from_scratch = False
elif model_path is not None:
if os.path.exists(model_path):
# check for existing model in immediate subfolder
if any(f.endswith('.meta') for f in os.listdir(model_path)):
train_from_scratch = False
else:
model_path = build_model_save_path(
model_path, batch_size, nb_filters, learning_rate, nb_epochs, adv, delay)
print(model_path)
save = True
train_from_scratch = True
else:
train_from_scratch = True # train from scratch, but don't save since no path given
# Define TF model graph
if ensembleThree:
if (wbitsList is None) or (abitsList is None): # Layer wise separate quantization not specified for first model
if (wbits==0) or (abits==0):
print("Error: the number of bits for constant precision weights and activations across layers for the first model have to specified using wbits1 and abits1 flags")
sys.exit(1)
else:
fixedPrec1 = 1
elif (len(wbitsList) != 3) or (len(abitsList) != 3):
print("Error: Need to specify the precisions for activations and weights for the atleast the three convolutional layers of the first model")
sys.exit(1)
else:
fixedPrec1 = 0
if (wbits2List is None) or (abits2List is None): # Layer wise separate quantization not specified for second model
if (wbits2==0) or (abits2==0):
print("Error: the number of bits for constant precision weights and activations across layers for the second model have to specified using wbits1 and abits1 flags")
sys.exit(1)
else:
fixedPrec2 = 1
elif (len(wbits2List) != 3) or (len(abits2List) != 3):
print("Error: Need to specify the precisions for activations and weights for the atleast the three convolutional layers of the second model")
sys.exit(1)
else:
fixedPrec2 = 0
if (fixedPrec2 != 1) or (fixedPrec1 != 1): # At least one of the models has separate precisions per layer
fixedPrec=0
print("At least one of the models has separate per-layer precisions")
if (fixedPrec1 == 1): # the first model uses a single precision for all layers
abitsList = (abits, abits, abits)
wbitsList = (wbits, wbits, wbits)
if (fixedPrec2 == 1): # the second model uses a single precision for all layers
abits2List = (abits2, abits2, abits2)
wbits2List = (wbits2, wbits2, wbits2)
else:
fixedPrec=1
if (train_from_scratch):
print ("The ensemble model cannot be trained from scratch")
sys.exit(1)
if fixedPrec == 1:
from modified_cleverhans_tutorials.tutorial_models import make_ensemble_three_cnn
model = make_ensemble_three_cnn(
phase, logits_scalar, 'lp1_', 'lp2_', 'fp_', wbits, abits, wbits2, abits2, nb_filters=nb_filters)
else:
from modified_cleverhans_tutorials.tutorial_models import make_layerwise_three_combined_cnn
model = make_layerwise_three_combined_cnn(
phase, logits_scalar, 'lp1_', 'lp2_', 'fp_', wbitsList, abitsList, wbits2List, abits2List, nb_filters=nb_filters)
elif lowprecision: # For generic DoReFa net style low precision
if (wbitsList is None) or (abitsList is None): # Layer wise separate quantization not specified
if (wbits==0) or (abits==0):
print("Error: the number of bits for constant precision weights and activations across layers have to specified using wbits and abits flags")
sys.exit(1)
else:
fixedPrec = 1
elif (len(wbitsList) != 3) or (len(abitsList) != 3):
print("Error: Need to specify the precisions for activations and weights for the atleast the three convolutional layers")
sys.exit(1)
else:
fixedPrec = 0
if fixedPrec:
from modified_cleverhans_tutorials.tutorial_models import make_basic_lowprecision_cnn
model = make_basic_lowprecision_cnn(
phase, logits_scalar, 'lp_', wbits, abits, nb_filters=nb_filters, stocRound=stocRound)
else:
from modified_cleverhans_tutorials.tutorial_models import make_layerwise_lowprecision_cnn
model = make_layerwise_lowprecision_cnn(
phase, logits_scalar, 'lp_', wbitsList, abitsList, nb_filters=nb_filters, stocRound=stocRound)
elif distill:
from modified_cleverhans_tutorials.tutorial_models import make_distilled_cnn
model = make_distilled_cnn(phase, logits_scalar,
'teacher_fp_', 'fp_', nb_filters=nb_filters)
else:
if rand:
print('rand=True')
from modified_cleverhans_tutorials.tutorial_models import make_scaled_rand_cnn
model = make_scaled_rand_cnn(
phase, logits_scalar, 'fp_rand', nb_filters=nb_filters)
else:
from modified_cleverhans_tutorials.tutorial_models import make_basic_cnn
model = make_basic_cnn(phase, logits_scalar,
'fp_', nb_filters=nb_filters)
# separate predictions of teacher for distilled training
if distill:
teacher_preds = model.teacher_call(x, reuse=False)
teacher_logits = model.get_teacher_logits(x, reuse=False)
# separate calling function for ensemble models
if ensembleThree:
preds = model.ensemble_call(x, reuse=False)
else:
##default
preds = model(x, reuse=False) # * logits_scalar
print("Defined TensorFlow model graph.")
###########################################################################
# Training the model using TensorFlow
###########################################################################
rng = np.random.RandomState([2017, 8, 30])
# Train an MNIST model
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate,
'loss_name': 'train loss',
'filename': 'model',
'reuse_global_step': False,
'train_scope': 'train',
'is_training': True
}
if adv != 0:
if adv == ADVERSARIAL_TRAINING_MADRYETAL:
from modified_cleverhans.attacks import MadryEtAl
train_attack_params = {'eps': MAX_EPS, 'eps_iter': 0.01,
'nb_iter': nb_iter}
train_attacker = MadryEtAl(model, sess=sess)
elif adv == ADVERSARIAL_TRAINING_FGSM:
from modified_cleverhans.attacks import FastGradientMethod
stddev = int(np.ceil((MAX_EPS * 255) // 2))
train_attack_params = {'eps': tf.abs(tf.truncated_normal(
shape=(batch_size, 1, 1, 1), mean=0, stddev=stddev))}
train_attacker = FastGradientMethod(model, back='tf', sess=sess)
# create the adversarial trainer
train_attack_params.update({'clip_min': 0., 'clip_max': 1.})
adv_x_train = train_attacker.generate(x, phase, **train_attack_params)
preds_adv_train = model.get_probs(adv_x_train)
eval_attack_params = {'eps': MAX_EPS, 'clip_min': 0., 'clip_max': 1.}
adv_x_eval = train_attacker.generate(x, phase, **eval_attack_params)
preds_adv_eval = model.get_probs(adv_x_eval) # * logits_scalar
def evaluate():
# Evaluate the accuracy of the MNIST model on clean test examples
eval_params = {'batch_size': batch_size}
if ensembleThree:
acc = model_eval_ensemble(
sess, x, y, preds, X_test, Y_test, phase=phase, args=eval_params)
else:
acc = model_eval(
sess, x, y, preds, X_test, Y_test, phase=phase, args=eval_params)
report.clean_train_clean_eval = acc
assert X_test.shape[0] == test_end - test_start, X_test.shape
print('Test accuracy on legitimate examples: %0.4f' % acc)
if adv != 0:
# Accuracy of the adversarially trained model on adversarial
# examples
acc = model_eval(
sess, x, y, preds_adv_eval, X_test, Y_test, phase=phase, args=eval_params)
print('Test accuracy on adversarial examples: %0.4f' % acc)
acc = model_eval(
sess, x, y, preds_adv_eval, X_test, Y_test,
phase=phase, args=eval_params, feed={logits_scalar: ATTACK_T})
print('Test accuracy on adversarial examples (scaled): %0.4f' % acc)
if train_from_scratch:
if save:
train_params.update({'log_dir': model_path})
if adv and delay > 0:
train_params.update({'nb_epochs': delay})
# do clean training for 'nb_epochs' or 'delay' epochs
if distill:
temperature = 100 # 1 means the teacher predictions are used as-is
teacher_scaled_preds_val = model_train_teacher(sess, x, y, teacher_preds, teacher_logits,
temperature, X_train, Y_train, phase=phase, args=train_params, rng=rng)
eval_params = {'batch_size': batch_size}
teacher_acc = model_eval(
sess, x, y, teacher_preds, X_test, Y_test, phase=phase, args=eval_params)
print('Test accuracy of the teacher model on legitimate examples: %0.4f' % teacher_acc)
print('Training the student model...')
student_train_params = {
'nb_epochs': 50,
'batch_size': batch_size,
'learning_rate': learning_rate,
'loss_name': 'train loss',
'filename': 'model',
'reuse_global_step': False,
'train_scope': 'train',
'is_training': True
}
if save:
student_train_params.update({'log_dir': model_path})
y_teacher = tf.placeholder(tf.float32, shape=(None, nb_classes))
model_train_student(sess, x, y, preds, temperature, X_train, Y_train, y_teacher=y_teacher,
teacher_preds=teacher_scaled_preds_val, alpha=0.5, beta=0.5, phase=phase, evaluate=evaluate, args=student_train_params, save=save, rng=rng)
elif inpgradreg:
model_train_inpgrad_reg(sess, x, y, preds, X_train, Y_train, phase=phase,
evaluate=evaluate, l2dbl = l2dbl, l2cs = l2cs, args=train_params, save=save, rng=rng)
elif test:
model_train(sess, x, y, preds, X_train, Y_train, phase=phase,
evaluate=evaluate, args=train_params, save=save, rng=rng)
else:
model_train(sess, x, y, preds, X_train, Y_train,
phase=phase, args=train_params, save=save, rng=rng)
# optionally do additional adversarial training
if adv:
print("Adversarial training for %d epochs" % (nb_epochs - delay))
train_params.update({'nb_epochs': nb_epochs - delay})
train_params.update({'reuse_global_step': True})
if test:
model_train(sess, x, y, preds, X_train, Y_train, phase=phase,
predictions_adv=preds_adv_train, evaluate=evaluate, args=train_params,
save=save, rng=rng)
else:
model_train(sess, x, y, preds, X_train, Y_train, phase=phase,
predictions_adv=preds_adv_train, args=train_params,
save=save, rng=rng)
else:
if ensembleThree: ## Ensemble models have to be loaded from different paths
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
stored_variables = ['lp_conv1_init/k', 'lp_conv2_bin_init/k', 'lp_conv3_bin_init/k', 'lp_logits_init/W']
variable_dict = dict(zip(stored_variables, variables[:4]))
# Restore the first set of variables from model_path1
saver = tf.train.Saver(variable_dict)
saver.restore(sess, tf.train.latest_checkpoint(model_path1))
# Restore the second set of variables from model_path2
variable_dict = dict(zip(stored_variables, variables[4:8]))
saver2 = tf.train.Saver(variable_dict)
saver2.restore(sess, tf.train.latest_checkpoint(model_path2))
stored_variables = ['fp_conv1_init/k', 'fp_conv2_init/k', 'fp_conv3_init/k', 'fp_logits_init/W']
variable_dict = dict(zip(stored_variables, variables[8:]))
saver3 = tf.train.Saver(variable_dict)
saver3.restore(sess, tf.train.latest_checkpoint(model_path3))
else: #default below
tf_model_load(sess, model_path)
print('Restored model from %s' % model_path)
evaluate()
# Evaluate the accuracy of the MNIST model on legitimate test examples
eval_params = {'batch_size': batch_size}
if ensembleThree: ## Ensemble models have to be evaluated with a separate function
accuracy = model_eval_ensemble(sess, x, y, preds, X_test, Y_test, phase=phase, feed={phase: False}, args=eval_params)
else: #default below
accuracy = model_eval(sess, x, y, preds, X_test, Y_test, phase=phase,
feed={phase: False}, args=eval_params)
assert X_test.shape[0] == test_end - test_start, X_test.shape
print('Test accuracy on legitimate test examples: {0}'.format(accuracy))
report.clean_train_clean_eval = accuracy
###########################################################################
# Build dataset
###########################################################################
if viz_enabled:
assert nb_samples == nb_classes
idxs = [np.where(np.argmax(Y_test, axis=1) == i)[0][0]
for i in range(nb_classes)]
viz_rows = nb_classes if targeted else 2
# Initialize our array for grid visualization
grid_shape = (nb_classes, viz_rows, img_rows, img_cols, channels)
grid_viz_data = np.zeros(grid_shape, dtype='f')
if targeted:
from modified_cleverhans.utils import build_targeted_dataset
if viz_enabled:
from modified_cleverhans.utils import grid_visual
adv_inputs, true_labels, adv_ys = build_targeted_dataset(
X_test, Y_test, idxs, nb_classes, img_rows, img_cols, channels)
else:
adv_inputs, true_labels, adv_ys = build_targeted_dataset(
X_test, Y_test, np.arange(nb_samples), nb_classes, img_rows, img_cols, channels)
else:
if viz_enabled:
from modified_cleverhans.utils import pair_visual
adv_inputs = X_test[idxs]
else:
adv_inputs = X_test[:nb_samples]
###########################################################################
# Craft adversarial examples using generic approach
###########################################################################
if targeted:
att_batch_size = np.clip(
nb_samples * (nb_classes - 1), a_max=MAX_BATCH_SIZE, a_min=1)
nb_adv_per_sample = nb_classes - 1
yname = "y_target"
else:
att_batch_size = np.minimum(nb_samples, MAX_BATCH_SIZE)
nb_adv_per_sample = 1
adv_ys = None
yname = "y"
print('Crafting ' + str(nb_samples) + ' * ' + str(nb_adv_per_sample) +
' adversarial examples')
print("This could take some time ...")
if ensembleThree:
model_type = 'ensembleThree'
else:
model_type = 'default'
if attack == ATTACK_CARLINI_WAGNER_L2:
print('Attack: CarliniWagnerL2')
from modified_cleverhans.attacks import CarliniWagnerL2
attacker = CarliniWagnerL2(model, back='tf', model_type=model_type, num_classes=nb_classes, sess=sess)
attack_params = {'binary_search_steps': 1,
'max_iterations': attack_iterations,
'learning_rate': 0.1,
'batch_size': att_batch_size,
'initial_const': 10,
}
elif attack == ATTACK_JSMA:
print('Attack: SaliencyMapMethod')
from modified_cleverhans.attacks import SaliencyMapMethod
attacker = SaliencyMapMethod(model, back='tf', model_type=model_type, num_classes=nb_classes, sess=sess)
attack_params = {'theta': 1., 'gamma': 0.1}
elif attack == ATTACK_FGSM:
print('Attack: FastGradientMethod')
from modified_cleverhans.attacks import FastGradientMethod
attacker = FastGradientMethod(model, back='tf', model_type=model_type, num_classes=nb_classes, sess=sess)
attack_params = {'eps': eps}
elif attack == ATTACK_MADRYETAL:
print('Attack: MadryEtAl')
from modified_cleverhans.attacks import MadryEtAl
attacker = MadryEtAl(model, back='tf', model_type=model_type, num_classes=nb_classes, sess=sess)
attack_params = {'eps': eps, 'eps_iter': 0.01, 'nb_iter': nb_iter}
elif attack == ATTACK_BASICITER:
print('Attack: BasicIterativeMethod')
from modified_cleverhans.attacks import BasicIterativeMethod
attacker = BasicIterativeMethod(model, back='tf', model_type=model_type, num_classes=nb_classes, sess=sess)
attack_params = {'eps': eps, 'eps_iter': 0.01, 'nb_iter': nb_iter}
else:
print("Attack undefined")
sys.exit(1)
attack_params.update({yname: adv_ys, 'clip_min': 0., 'clip_max': 1.})
adv_np = attacker.generate_np(adv_inputs, phase, **attack_params)
'''
name = 'm_fgsm_eps%s_n%s.npy' % (eps, nb_samples)
fpath = os.path.join(
'/scratch/gallowaa/mnist/adversarial_examples/modified_cleverhans/', name)
np.savez(fpath, x=adv_np, y=Y_test[:nb_samples])
'''
'''
adv_x = attacker.generate(x, phase, **attack_params)
adv_np, = batch_eval(sess, [x], [adv_x], [adv_inputs], feed={
phase: False}, args=eval_params)
'''
eval_params = {'batch_size': att_batch_size}
if targeted:
print("Evaluating targeted results")
adv_accuracy = model_eval(sess, x, y, preds, adv_np, true_labels, phase=phase,
args=eval_params)
else:
print("Evaluating untargeted results")
if viz_enabled:
if ensembleThree:
adv_accuracy = model_eval_ensemble(sess, x, y, preds, adv_np, Y_test[idxs], phase=phase, args=eval_params)
else: #default below
adv_accuracy = model_eval(sess, x, y, preds, adv_np, Y_test[
idxs], phase=phase, args=eval_params)
else:
if ensembleThree:
adv_accuracy = model_eval_ensemble(sess, x, y, preds, adv_np, Y_test[:nb_samples], phase=phase, args=eval_params)
else: #default below
adv_accuracy = model_eval(sess, x, y, preds, adv_np, Y_test[
:nb_samples], phase=phase, args=eval_params)
if viz_enabled:
n = nb_classes - 1
for i in range(nb_classes):
if targeted:
for j in range(nb_classes):
if i != j:
if j != 0 and i != n:
grid_viz_data[i, j] = adv_np[j * n + i]
if j == 0 and i > 0 or i == n and j > 0:
grid_viz_data[i, j] = adv_np[j * n + i - 1]
else:
grid_viz_data[i, j] = adv_inputs[j * n]
else:
grid_viz_data[j, 0] = adv_inputs[j]
grid_viz_data[j, 1] = adv_np[j]
print(grid_viz_data.shape)
print('--------------------------------------')
# Compute the number of adversarial examples that were successfully found
print('Test accuracy on adversarial examples {0:.4f}'.format(adv_accuracy))
report.clean_train_adv_eval = 1. - adv_accuracy
# Compute the average distortion introduced by the algorithm
percent_perturbed = np.mean(np.sum((adv_np - adv_inputs)**2,
axis=(1, 2, 3))**.5)
print('Avg. L_2 norm of perturbations {0:.4f}'.format(percent_perturbed))
# Compute number of modified features (L_0 norm)
nb_changed = np.where(adv_np != adv_inputs)[0].shape[0]
percent_perturb = np.mean(float(nb_changed) / adv_np.reshape(-1).shape[0])
# Compute the average rate of perturbed features
print('Avg. rate of perturbed features {0:.4f}'.format(percent_perturb))
# Friendly output for pasting into spreadsheet
print('{0:.4f}'.format(accuracy))
print('{0:.4f}'.format(adv_accuracy))
print('{0:.4f}'.format(percent_perturbed))
print('{0:.4f}'.format(percent_perturb))
# Close TF session
sess.close()
# Finally, block & display a grid of all the adversarial examples
if viz_enabled:
import matplotlib.pyplot as plt
_ = grid_visual(grid_viz_data)
return report
def main(argv=None):
mnist_attack(viz_enabled=FLAGS.viz_enabled,
nb_epochs=FLAGS.nb_epochs,
batch_size=FLAGS.batch_size,
nb_samples=FLAGS.nb_samples,
nb_filters=FLAGS.nb_filters,
learning_rate=FLAGS.lr,
eps=FLAGS.eps,
attack=FLAGS.attack,
attack_iterations=FLAGS.attack_iterations,
model_path=FLAGS.model_path,
targeted=FLAGS.targeted,
rand=FLAGS.rand,
debug=FLAGS.debug,
test=FLAGS.test,
data_dir=FLAGS.data_dir,
lowprecision=FLAGS.lowprecision,
abits=FLAGS.abits,
wbits=FLAGS.wbits,
abitsList=FLAGS.abitsList,
wbitsList=FLAGS.wbitsList,
abits2=FLAGS.abits2,
wbits2=FLAGS.wbits2,
abits2List=FLAGS.abits2List,
wbits2List=FLAGS.wbits2List,
stocRound=FLAGS.stocRound,
model_path1=FLAGS.model_path1,
model_path2=FLAGS.model_path2,
model_path3=FLAGS.model_path3,
ensembleThree=FLAGS.ensembleThree,
distill=FLAGS.distill,
inpgradreg=FLAGS.inpgradreg,
l2dbl=FLAGS.l2dbl,
l2cs=FLAGS.l2cs,
delay=FLAGS.delay,
adv=FLAGS.adv,
nb_iter=FLAGS.nb_iter)
if __name__ == '__main__':
par = argparse.ArgumentParser()
# Generic flags
par.add_argument('--gpu', help='id of GPU to use')
par.add_argument('--model_path', help='Path to save or load model')
par.add_argument('--data_dir', help='Path to training data',
default='/tmp/mnist')
par.add_argument(
'--viz_enabled', help='Visualize adversarial ex.', action="store_true")
par.add_argument(
'--debug', help='Sets log level to DEBUG, otherwise INFO', action="store_true")
par.add_argument(
'--test', help='Test while training, takes longer', action="store_true")
# Architecture and training specific flags
par.add_argument('--nb_epochs', type=int, default=15,
help='Number of epochs to train model')
par.add_argument('--nb_filters', type=int, default=64,
help='Number of filters in first layer')
par.add_argument('--batch_size', type=int, default=128,
help='Size of training batches')
par.add_argument('--lr', type=float, default=0.001,
help='Learning rate')
par.add_argument('--rand', help='Stochastic weight layer?',
action="store_true")
# Attack specific flags
par.add_argument('--attack', type=int, default=0,
help='Attack type, 0=CW, 1=JSMA, 2=FGSM, 3=MadryEtAl (PGD), 4=BasicIter')
par.add_argument("--eps", type=float, default=0.3)
par.add_argument('--attack_iterations', type=int, default=50,
help='Number of iterations to run CW attack; 1000 is good')
par.add_argument('--nb_samples', type=int,
default=10000, help='Nb of inputs to attack')
par.add_argument(
'--targeted', help='Run a targeted attack?', action="store_true")
# EMPIR specific flags
par.add_argument('--lowprecision', help='Use other low precision models based on DoReFa-Net', action="store_true") # For DoReFa-Net style quantization
par.add_argument('--wbits', type=int, default=0, help='No. of bits in weight representation')
par.add_argument('--abits', type=int, default=0, help='No. of bits in activation representation')
par.add_argument('--wbitsList', type=int, nargs='+', help='List of No. of bits in weight representation for different layers')
par.add_argument('--abitsList', type=int, nargs='+', help='List of No. of bits in activation representation for different layers')
par.add_argument('--stocRound', help='Stochastic rounding for weights (only in training) and activations?',
action="store_true")
par.add_argument('--model_path1', help='Path where saved model1 is stored and can be loaded')
par.add_argument('--model_path2', help='Path where saved model2 is stored and can be loaded')
par.add_argument('--model_path3', help='Path where saved model3 is stored and can be loaded')
par.add_argument('--ensembleThree', help='Use an ensemble of full precision and two low precision models', action="store_true")
par.add_argument('--wbits2', type=int, default=0, help='No. of bits in weight representation of model2, model1 specified using wbits')
par.add_argument('--abits2', type=int, default=0, help='No. of bits in activation representation of model2, model1 specified using abits')
par.add_argument('--wbits2List', type=int, nargs='+', help='List of No. of bits in weight representation for different layers of model2')
par.add_argument('--abits2List', type=int, nargs='+', help='List of No. of bits in activation representation for different layers of model2')
# extra flags for defensive distillation
par.add_argument('--distill', help='Train the model using distillation', action="store_true")
par.add_argument('--student_epochs', type=int, default=50, help='No. of epochs for which the student model is trained')
# extra flags for input gradient regularization
par.add_argument('--inpgradreg', help='Train the model using input gradient regularization', action="store_true")
par.add_argument('--l2dbl', type=int, default=0, help='l2 double backprop penalty')
par.add_argument('--l2cs', type=int, default=0, help='l2 certainty sensitivity penalty')
# Adversarial training flags
par.add_argument(
'--adv', help='Adversarial training type?', type=int, default=0)
par.add_argument('--delay', type=int,
default=10, help='Nb of epochs to delay adv training by')
par.add_argument('--nb_iter', type=int,
default=40, help='Nb of iterations of PGD')
FLAGS = par.parse_args()
if FLAGS.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
tf.app.run()
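# Example invocation (illustrative only; the script name and checkpoint
# directories below are placeholders, not paths from this repository):
#   python <this_script>.py --attack 2 --eps 0.3 --nb_samples 1000 \
#       --ensembleThree --model_path1 /path/to/ckpt1 --model_path2 /path/to/ckpt2 \
#       --model_path3 /path/to/ckpt3 --wbits 2 --abits 2 --wbits2 4 --abits2 4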
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import keras
from keras import backend
from keras.datasets import cifar10
from keras.utils import np_utils
import os
import argparse
import logging
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import flags
import sys
# from modified_cleverhans.attacks import fgsm
from modified_cleverhans.utils import set_log_level, parse_model_settings, \
build_model_save_path
from modified_cleverhans.utils_tf import model_train, model_eval, \
model_eval_ensemble, batch_eval, tf_model_load
FLAGS = flags.FLAGS
ATTACK_CARLINI_WAGNER_L2 = 0
ATTACK_JSMA = 1
ATTACK_FGSM = 2
ATTACK_MADRYETAL = 3
ATTACK_BASICITER = 4
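# The integer codes above are the values accepted by the --attack flag and are
# dispatched in build_adversarial_attack() below: 0=CarliniWagnerL2,
# 1=SaliencyMapMethod (JSMA), 2=FastGradientMethod, 3=MadryEtAl (PGD),
# 4=BasicIterativeMethod.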
MAX_BATCH_SIZE = 100
# enum adversarial training types
ADVERSARIAL_TRAINING_MADRYETAL = 1
ADVERSARIAL_TRAINING_FGSM = 2
MAX_EPS = 0.3
# Scaling input to softmax
INIT_T = 1.0
# ATTACK_T = 1.0
ATTACK_T = 0.25
def data_cifar10():
"""
Preprocess CIFAR10 dataset
:return:
"""
# These values are specific to CIFAR10
img_rows = 32
img_cols = 32
nb_classes = 10
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
if keras.backend.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 3, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 3, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 3)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 3)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
return X_train, Y_train, X_test, Y_test
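# Illustrative sketch (not called anywhere in this script): the shapes and value
# ranges data_cifar10() is expected to return under the TensorFlow image ordering.
# The function name below is local to this example.
def _example_data_cifar10_shapes():
  X_train, Y_train, X_test, Y_test = data_cifar10()
  assert X_train.shape[1:] == (32, 32, 3)          # images scaled to [0, 1]
  assert Y_train.shape == (X_train.shape[0], 10)   # one-hot labels
  assert X_test.shape[0] == 10000
  return X_train.shape, X_test.shape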
def setup_model():
# CIFAR10-specific dimensions
img_rows = 32
img_cols = 32
channels = 3
nb_classes = 10
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
if not hasattr(backend, "tf"):
raise RuntimeError("This tutorial requires keras to be configured"
" to use the TensorFlow backend.")
# Image dimensions ordering should follow the Theano convention
if keras.backend.image_dim_ordering() != 'tf':
keras.backend.set_image_dim_ordering('tf')
print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to "
"'th', temporarily setting to 'tf'")
# Create TF session and set as Keras backend session
sess = tf.Session()
keras.backend.set_session(sess)
set_log_level(logging.WARNING)
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, channels))
y = tf.placeholder(tf.float32, shape=(None, 10))
phase = tf.placeholder(tf.bool, name="phase")
logits_scalar = tf.placeholder_with_default(
INIT_T, shape=(), name="logits_temperature")
model_path = FLAGS.model_path
nb_filters = FLAGS.nb_filters
batch_size = FLAGS.batch_size
#### EMPIR extra flags
lowprecision = FLAGS.lowprecision
abits = FLAGS.abits
wbits = FLAGS.wbits
abitsList = FLAGS.abitsList
wbitsList = FLAGS.wbitsList
stocRound = True if FLAGS.stocRound else False
model_path2 = FLAGS.model_path2
model_path1 = FLAGS.model_path1
model_path3 = FLAGS.model_path3
ensembleThree = True if FLAGS.ensembleThree else False
abits2 = FLAGS.abits2
wbits2 = FLAGS.wbits2
abits2List = FLAGS.abits2List
wbits2List = FLAGS.wbits2List
distill = True if FLAGS.distill else False
####
if ensembleThree:
if (model_path1 is None or model_path2 is None or model_path3 is None):
raise ValueError("ensembleThree requires --model_path1, --model_path2 and --model_path3 to be specified")
elif model_path is not None:
if os.path.exists(model_path):
# check for existing model in immediate subfolder
if not any(f.endswith('.meta') for f in os.listdir(model_path)):
raise ValueError("No checkpoint (.meta file) found in %s" % model_path)
else:
raise ValueError("model_path %s does not exist" % model_path)
if ensembleThree:
if (wbitsList is None) or (
abitsList is None): # Layer wise separate quantization not specified for first model
if (wbits == 0) or (abits == 0):
print(
"Error: the number of bits for constant precision weights and activations across layers for the first model have to specified using wbits1 and abits1 flags")
sys.exit(1)
else:
fixedPrec1 = 1
elif (len(wbitsList) != 3) or (len(abitsList) != 3):
print(
"Error: Need to specify the precisions for activations and weights for the atleast the three convolutional layers of the first model")
sys.exit(1)
else:
fixedPrec1 = 0
if (wbits2List is None) or (
abits2List is None): # Layer wise separate quantization not specified for second model
if (wbits2 == 0) or (abits2 == 0):
print(
"Error: the number of bits for constant precision weights and activations across layers for the second model have to specified using wbits1 and abits1 flags")
sys.exit(1)
else:
fixedPrec2 = 1
elif (len(wbits2List) != 3) or (len(abits2List) != 3):
print(
"Error: Need to specify the precisions for activations and weights for the atleast the three convolutional layers of the second model")
sys.exit(1)
else:
fixedPrec2 = 0
if (fixedPrec2 != 1) or (
fixedPrec1 != 1):  # At least one of the models has separate precisions per layer
fixedPrec = 0
print("Within atleast one model has separate precisions")
if (fixedPrec1 == 1):  # first model has fixed precision across layers
abitsList = (abits, abits, abits)
wbitsList = (wbits, wbits, wbits)
if (fixedPrec2 == 1):  # second model has fixed precision across layers
abits2List = (abits2, abits2, abits2)
wbits2List = (wbits2, wbits2, wbits2)
else:
fixedPrec = 1
if fixedPrec == 1:
from cleverhans_tutorials.tutorial_models import \
make_ensemble_three_cifar_cnn
model = make_ensemble_three_cifar_cnn(
phase, logits_scalar, 'lp1_', 'lp2_', 'fp_', wbits, abits, wbits2,
abits2, input_shape=(None, img_rows, img_cols, channels),
nb_filters=nb_filters)
else:
from cleverhans_tutorials.tutorial_models import \
make_ensemble_three_cifar_cnn_layerwise
model = make_ensemble_three_cifar_cnn_layerwise(
phase, logits_scalar, 'lp1_', 'lp2_', 'fp_', wbitsList, abitsList,
wbits2List, abits2List,
input_shape=(None, img_rows, img_cols, channels),
nb_filters=nb_filters)
elif lowprecision:
if (wbitsList is None) or (
abitsList is None): # Layer wise separate quantization not specified
if (wbits == 0) or (abits == 0):
print(
"Error: the number of bits for constant precision weights and activations across layers have to specified using wbits and abits flags")
sys.exit(1)
else:
fixedPrec = 1
elif (len(wbitsList) != 3) or (len(abitsList) != 3):
print(
"Error: Need to specify the precisions for activations and weights for the atleast the three convolutional layers")
sys.exit(1)
else:
fixedPrec = 0
if fixedPrec:
from cleverhans_tutorials.tutorial_models import \
make_basic_lowprecision_cifar_cnn
model = make_basic_lowprecision_cifar_cnn(
phase, logits_scalar, 'lp_', wbits, abits, input_shape=(
None, img_rows, img_cols, channels), nb_filters=nb_filters,
stocRound=stocRound)
else:
from cleverhans_tutorials.tutorial_models import \
make_layerwise_lowprecision_cifar_cnn
model = make_layerwise_lowprecision_cifar_cnn(
phase, logits_scalar, 'lp_', wbitsList, abitsList, input_shape=(
None, img_rows, img_cols, channels), nb_filters=nb_filters,
stocRound=stocRound)
elif distill:
from cleverhans_tutorials.tutorial_models import make_distilled_cifar_cnn
model = make_distilled_cifar_cnn(phase, logits_scalar,
'teacher_fp_', 'fp_',
nb_filters=nb_filters, input_shape=(
None, img_rows, img_cols, channels))
####
else:
from cleverhans_tutorials.tutorial_models import make_basic_cifar_cnn
model = make_basic_cifar_cnn(phase, logits_scalar, 'fp_', input_shape=(
None, img_rows, img_cols, channels), nb_filters=nb_filters)
# separate calling function for ensemble models
if ensembleThree:
preds = model.ensemble_call(x, reuse=False)
else:
##default
preds = model(x, reuse=False)
print("Defined TensorFlow model graph.")
if ensembleThree:
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
stored_variables = ['lp_conv1_init/k', 'lp_conv2_init/k', 'lp_conv3_init/k',
'lp_ip1init/W', 'lp_logits_init/W']
variable_dict = dict(zip(stored_variables, variables[:5]))
# Restore the first set of variables from model_path1
saver = tf.train.Saver(variable_dict)
saver.restore(sess, tf.train.latest_checkpoint(model_path1))
# Restore the second set of variables from model_path2
variable_dict = dict(zip(stored_variables, variables[5:10]))
saver2 = tf.train.Saver(variable_dict)
saver2.restore(sess, tf.train.latest_checkpoint(model_path2))
stored_variables = ['fp_conv1_init/k', 'fp_conv2_init/k', 'fp_conv3_init/k',
'fp_ip1init/W', 'fp_logits_init/W']
variable_dict = dict(zip(stored_variables, variables[10:]))
saver3 = tf.train.Saver(variable_dict)
saver3.restore(sess, tf.train.latest_checkpoint(model_path3))
else:
tf_model_load(sess, model_path)
print('Restored model from %s' % model_path)
return sess, model, preds, x, y, phase
def build_adversarial_attack(sess, model, attack, targeted, nb_classes,
ensembleThree,
nb_samples, nb_iter, eps, robust_attack):
adv_ys = None  # target labels, if any, must be added to attack_params by the caller
if targeted:
att_batch_size = np.clip(
nb_samples * (nb_classes - 1), a_max=MAX_BATCH_SIZE, a_min=1)
yname = "y_target"
else:
att_batch_size = np.minimum(nb_samples, MAX_BATCH_SIZE)
adv_ys = None
yname = "y"
if ensembleThree:
model_type = 'ensembleThree'
else:
model_type = 'default'
if attack == ATTACK_CARLINI_WAGNER_L2:
from modified_cleverhans.attacks import CarliniWagnerL2
attacker = CarliniWagnerL2(model, back='tf', model_type=model_type,
num_classes=nb_classes, sess=sess)
attack_params = {'binary_search_steps': 1,
'max_iterations': nb_iter,
'learning_rate': 0.1,
'batch_size': att_batch_size,
'initial_const': 10,
}
elif attack == ATTACK_JSMA:
from modified_cleverhans.attacks import SaliencyMapMethod
attacker = SaliencyMapMethod(model, back='tf', model_type=model_type,
sess=sess, num_classes=nb_classes)
attack_params = {'theta': 1., 'gamma': 0.1}
elif attack == ATTACK_FGSM:
from modified_cleverhans.attacks import FastGradientMethod
attacker = FastGradientMethod(model, back='tf', model_type=model_type,
sess=sess, num_classes=nb_classes)
attack_params = {'eps': eps}
elif attack == ATTACK_MADRYETAL:
from modified_cleverhans.attacks import MadryEtAl
attacker = MadryEtAl(model, back='tf', model_type=model_type, sess=sess,
num_classes=nb_classes, attack_type="robust" if robust_attack else "vanilla")
attack_params = {'eps': eps, 'eps_iter': 0.01, 'nb_iter': nb_iter}
elif attack == ATTACK_BASICITER:
from modified_cleverhans.attacks import BasicIterativeMethod
attacker = BasicIterativeMethod(model, back='tf', sess=sess,
model_type=model_type,
num_classes=nb_classes)
attack_params = {'eps': eps, 'eps_iter': 0.01, 'nb_iter': nb_iter}
else:
print("Attack undefined")
sys.exit(1)
attack_params.update({yname: adv_ys, 'clip_min': 0., 'clip_max': 1.})
return attacker, attack_params
def main(argv=None):
"""
CIFAR10 modified_cleverhans tutorial
:return:
"""
img_rows = 32
img_cols = 32
channels = 3
nb_classes = 10
targeted = True if FLAGS.targeted else False
batch_size = FLAGS.batch_size
nb_samples = FLAGS.nb_samples
eps = FLAGS.eps
attack = FLAGS.attack
nb_iter = FLAGS.nb_iter
ensembleThree = True if FLAGS.ensembleThree else False
sess, model, preds, x, y, phase = setup_model()
# Get CIFAR10 test data
X_train, Y_train, X_test, Y_test = data_cifar10()
def evaluate():
# Evaluate the accuracy of the CIFAR10 model on legitimate test
# examples
eval_params = {'batch_size': batch_size}
if ensembleThree:
acc = model_eval_ensemble(
sess, x, y, preds, X_test, Y_test, phase=phase, args=eval_params)
else:
acc = model_eval(
sess, x, y, preds, X_test, Y_test, phase=phase, args=eval_params)
assert X_test.shape[0] == 10000, X_test.shape
print('Test accuracy on legitimate examples: %0.4f' % acc)
evaluate()
# Evaluate the accuracy of the CIFAR10 model on legitimate test examples
eval_params = {'batch_size': batch_size}
if ensembleThree:
accuracy = model_eval_ensemble(sess, x, y, preds, X_test, Y_test,
phase=phase, feed={phase: False},
args=eval_params)
else:
accuracy = model_eval(sess, x, y, preds, X_test, Y_test, phase=phase,
feed={phase: False}, args=eval_params)
print('Test accuracy on legitimate test examples: {0}'.format(accuracy))
###########################################################################
# Build dataset
###########################################################################
if targeted:
from modified_cleverhans.utils import build_targeted_dataset
adv_inputs, true_labels, adv_ys = build_targeted_dataset(
X_test, Y_test, np.arange(nb_samples), nb_classes, img_rows, img_cols,
channels)
else:
adv_inputs = X_test[:nb_samples]
true_labels = Y_test[:nb_samples]
###########################################################################
# Craft adversarial examples using generic approach
###########################################################################
attacker, attack_params = build_adversarial_attack(sess, model, attack,
targeted, nb_classes,
ensembleThree,
nb_samples, nb_iter, eps,
robust_attack=FLAGS.robust_attack)
if FLAGS.use_labels:
attack_params['y'] = true_labels
X_test_adv = attacker.generate_np(adv_inputs, phase, **attack_params)
#x_adv = attacker.generate(x, phase, **attack_params)
if ensembleThree:
  adv_accuracy = model_eval_ensemble(sess, x, y, preds, X_test_adv, true_labels,
                                     phase=phase, args=eval_params)
else:
  adv_accuracy = model_eval(sess, x, y, preds, X_test_adv, true_labels,
                            phase=phase, args=eval_params)
# Friendly output for pasting into spreadsheet
print('Accuracy: {0:.4f},'.format(accuracy))
print('Adversarial Accuracy {0:.4f},'.format(adv_accuracy))
sess.close()
if __name__ == '__main__':
par = argparse.ArgumentParser()
# Generic flags
par.add_argument('--gpu', help='id of GPU to use')
par.add_argument('--model_path', help='Path to save or load model')
par.add_argument('--data_dir', help='Path to training data',
default='cifar10_data')
# Architecture and training specific flags
par.add_argument('--nb_epochs', type=int, default=6,
help='Number of epochs to train model')
par.add_argument('--nb_filters', type=int, default=32,
help='Number of filters in first layer')
par.add_argument('--batch_size', type=int, default=128,
help='Size of training batches')
par.add_argument('--learning_rate', type=float, default=0.001,
help='Learning rate')
par.add_argument('--rand', help='Stochastic weight layer?',
action="store_true")
# Attack specific flags
par.add_argument('--eps', type=float, default=0.1,
help='epsilon')
par.add_argument('--attack', type=int, default=0,
help='Attack type, 0=CW, 1=JSMA, 2=FGSM, 3=MadryEtAl (PGD), 4=BasicIter')
par.add_argument('--nb_samples', type=int,
default=10000, help='Nb of inputs to attack')
par.add_argument(
'--targeted', help='Run a targeted attack?', action="store_true")
# Adversarial training flags
par.add_argument(
'--adv', help='Adversarial training type?', type=int, default=0)
par.add_argument('--delay', type=int,
default=10, help='Nb of epochs to delay adv training by')
par.add_argument('--nb_iter', type=int,
default=40,
help='Nb of iterations of PGD (set to 50 for CW)')
# EMPIR specific flags
par.add_argument('--lowprecision', help='Use other low precision models',
action="store_true")
par.add_argument('--wbits', type=int, default=0,
help='No. of bits in weight representation')
par.add_argument('--abits', type=int, default=0,
help='No. of bits in activation representation')
par.add_argument('--wbitsList', type=int, nargs='+',
help='List of No. of bits in weight representation for different layers')
par.add_argument('--abitsList', type=int, nargs='+',
help='List of No. of bits in activation representation for different layers')
par.add_argument('--stocRound',
help='Stochastic rounding for weights (only in training) and activations?',
action="store_true")
par.add_argument('--model_path1',
help='Path where saved model1 is stored and can be loaded')
par.add_argument('--model_path2',
help='Path where saved model2 is stored and can be loaded')
par.add_argument('--ensembleThree',
help='Use an ensemble of full precision and two low precision models that can be attacked directly',
action="store_true")
par.add_argument('--model_path3',
help='Path where saved model3 is stored and can be loaded (used with --ensembleThree)')
par.add_argument('--wbits2', type=int, default=0,
help='No. of bits in weight representation of model2, model1 specified using wbits')
par.add_argument('--abits2', type=int, default=0,
help='No. of bits in activation representation of model2, model1 specified using abits')
par.add_argument('--wbits2List', type=int, nargs='+',
help='List of No. of bits in weight representation for different layers of model2')
par.add_argument('--abits2List', type=int, nargs='+',
help='List of No. of bits in activation representation for different layers of model2')
# extra flags for defensive distillation
par.add_argument('--distill', help='Train the model using distillation',
action="store_true")
par.add_argument('--student_epochs', type=int, default=50,
help='No. of epochs for which the student model is trained')
# extra flags for input gradient regularization
par.add_argument('--inpgradreg',
help='Train the model using input gradient regularization',
action="store_true")
par.add_argument('--l2dbl', type=int, default=0,
help='l2 double backprop penalty')
par.add_argument('--l2cs', type=int, default=0,
help='l2 certainty sensitivity penalty')
par.add_argument("--robust-attack", action="store_true")
par.add_argument("--use-labels", action="store_true")
FLAGS = par.parse_args()
if FLAGS.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
tf.app.run()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images.
Training images are sampled using the provided bounding boxes, and subsequently
cropped to the sampled bounding box. Images are additionally flipped randomly,
then resized to the target output size (without aspect-ratio preservation).
Images used during evaluation are resized (with aspect-ratio preservation) and
centrally cropped.
All images undergo mean color subtraction.
Note that these steps are colloquially referred to as "ResNet preprocessing,"
and they differ from "VGG preprocessing," which does not use bounding boxes
and instead does an aspect-preserving resize followed by random crop during
training. (These both differ from "Inception preprocessing," which introduces
color distortion steps.)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
_CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]
## Per-channel standard deviation values copied from the DoReFa-Net preprocessing
_R_STD = 0.229*255
_G_STD = 0.224*255
_B_STD = 0.225*255
_CHANNEL_STDS = [_R_STD, _G_STD, _B_STD]
#####
# The lower bound for the smallest side of the image for aspect-preserving
# resizing. For example, if an image is 500 x 1000, it will be resized to
# _RESIZE_MIN x (_RESIZE_MIN * 2).
_RESIZE_MIN = 256
def _decode_crop_and_flip(image_buffer, bbox, num_channels):
"""Crops the given image to a random part of the image, and randomly flips.
We use the fused decode_and_crop op, which performs better than the two ops
used separately in series, but note that this requires that the image be
passed in as an un-decoded string Tensor.
Args:
image_buffer: scalar string Tensor representing the raw JPEG image buffer.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
num_channels: Integer depth of the image buffer for decoding.
Returns:
3-D tensor with cropped image.
"""
# A large fraction of image datasets contain a human-annotated bounding box
# delineating the region of the image containing the object of interest. We
# choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.image.extract_jpeg_shape(image_buffer),
bounding_boxes=bbox,
min_object_covered=0.1,
aspect_ratio_range=[0.75, 1.33],
area_range=[0.05, 1.0],
max_attempts=100,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Reassemble the bounding box in the format the crop op requires.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
# Use the fused decode and crop op here, which is faster than each in series.
cropped = tf.image.decode_and_crop_jpeg(
image_buffer, crop_window, channels=num_channels)
# Flip to add a little more random distortion in.
cropped = tf.image.random_flip_left_right(cropped)
return cropped
def _central_crop(image, crop_height, crop_width):
"""Performs central crops of the given image list.
Args:
image: a 3-D image tensor
crop_height: the height of the image following the crop.
crop_width: the width of the image following the crop.
Returns:
3-D tensor with cropped image.
"""
shape = tf.shape(image)
height, width = shape[0], shape[1]
amount_to_be_cropped_h = (height - crop_height)
crop_top = amount_to_be_cropped_h // 2
amount_to_be_cropped_w = (width - crop_width)
crop_left = amount_to_be_cropped_w // 2
return tf.slice(
image, [crop_top, crop_left, 0], [crop_height, crop_width, -1])
def _mean_image_subtraction(image, means, num_channels):
"""Subtracts the given means from each image channel.
For example:
means = [123.68, 116.779, 103.939]
image = _mean_image_subtraction(image, means)
Note that the rank of `image` must be known.
Args:
image: a tensor of size [height, width, C].
means: a C-vector of values to subtract from each channel.
num_channels: number of color channels in the image that will be distorted.
Returns:
the centered image.
Raises:
ValueError: If the rank of `image` is unknown, if `image` has a rank other
than three or if the number of channels in `image` doesn't match the
number of values in `means`.
"""
if image.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
# We have a 1-D tensor of means; convert to 3-D.
means = tf.expand_dims(tf.expand_dims(means, 0), 0)
return image - means
def _smallest_size_at_least(height, width, resize_min):
"""Computes new shape with the smallest side equal to `smallest_side`.
Computes new shape with the smallest side equal to `smallest_side` while
preserving the original aspect ratio.
Args:
height: an int32 scalar tensor indicating the current height.
width: an int32 scalar tensor indicating the current width.
resize_min: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
new_height: an int32 scalar tensor indicating the new height.
new_width: an int32 scalar tensor indicating the new width.
"""
resize_min = tf.cast(resize_min, tf.float32)
# Convert to floats to make subsequent calculations go smoothly.
height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32)
smaller_dim = tf.minimum(height, width)
scale_ratio = resize_min / smaller_dim
# Convert back to ints to make heights and widths that TF ops will accept.
new_height = tf.cast(height * scale_ratio, tf.int32)
new_width = tf.cast(width * scale_ratio, tf.int32)
return new_height, new_width
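# Illustrative sketch (not used by the pipeline): checks the "500 x 1000 ->
# _RESIZE_MIN x (_RESIZE_MIN * 2)" example given above _RESIZE_MIN by evaluating
# _smallest_size_at_least in a throwaway TF 1.x graph and session.
def _example_smallest_size_at_least():
  with tf.Graph().as_default():
    new_height, new_width = _smallest_size_at_least(500, 1000, _RESIZE_MIN)
    with tf.Session() as example_sess:
      return example_sess.run([new_height, new_width])  # expected: [256, 512]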
def _aspect_preserving_resize(image, resize_min):
"""Resize images preserving the original aspect ratio.
Args:
image: A 3-D image `Tensor`.
resize_min: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
resized_image: A 3-D tensor containing the resized image.
"""
shape = tf.shape(image)
height, width = shape[0], shape[1]
new_height, new_width = _smallest_size_at_least(height, width, resize_min)
return _resize_image(image, new_height, new_width)
def _resize_image(image, height, width):
"""Simple wrapper around tf.resize_images.
This is primarily to make sure we use the same `ResizeMethod` and other
details each time.
Args:
image: A 3-D image `Tensor`.
height: The target height for the resized image.
width: The target width for the resized image.
Returns:
resized_image: A 3-D tensor containing the resized image. The first two
dimensions have the shape [height, width].
"""
return tf.image.resize_images(
image, [height, width], method=tf.image.ResizeMethod.BILINEAR,
align_corners=False)
def _lighting_noise(image):
eigval = np.asarray([0.2175, 0.0188, 0.0045][::1])*255.0
eigvec = np.asarray([[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.814],
[-0.5836, -0.6948, 0.4203]])[::-1, ::-1]
std = 0.1
v = np.random.randn(3)*std #random number
v = eigval*v
inc = np.dot(eigvec, v).reshape((3,))
inc = tf.convert_to_tensor(inc, dtype=tf.float32)
# image = np.add(image, inc)
image = tf.add(image, inc)
return image
def preprocess_image(image_buffer, bbox, output_height, output_width,
num_channels, is_training=False):
"""Preprocesses the given image.
Preprocessing includes decoding, cropping, and resizing for both training
and eval images. Training preprocessing, however, introduces some random
distortion of the image to improve accuracy.
Args:
image_buffer: scalar string Tensor representing the raw JPEG image buffer.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
num_channels: Integer depth of the image buffer for decoding.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
Returns:
A preprocessed image.
"""
if is_training:
# For training, we want to randomize some of the distortions.
image = _decode_crop_and_flip(image_buffer, bbox, num_channels)
image = _resize_image(image, output_height, output_width)
else:
# For validation, we want to decode, resize, then just crop the middle.
image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
image = _aspect_preserving_resize(image, _RESIZE_MIN)
image = _central_crop(image, output_height, output_width)
image.set_shape([output_height, output_width, num_channels])
return _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels)
def preprocess_image2(image_buffer, bbox, output_height, output_width,
num_channels, is_training=False):
"""Preprocesses the given image.
Preprocessing includes decoding, cropping, and resizing for both training
and eval images. Training preprocessing, however, introduces some random
distortion of the image to improve accuracy.
Args:
image_buffer: scalar string Tensor representing the raw JPEG image buffer.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
num_channels: Integer depth of the image buffer for decoding.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
Returns:
A preprocessed image.
"""
if is_training:
# For training, we want to randomize some of the distortions.
image = _decode_crop_and_flip(image_buffer, bbox, num_channels)
image = _resize_image(image, output_height, output_width)
else:
# For validation, we want to decode, resize, then just crop the middle.
image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
image = _aspect_preserving_resize(image, _RESIZE_MIN)
image = _central_crop(image, output_height, output_width)
image = tf.to_float(image)
image.set_shape([output_height, output_width, num_channels])
image = tf.slice(image, [0, 0, 0], [output_height, output_width, -1])
# Slice the image into different channels
image_channel1 = tf.slice(image, [0, 0, 0], [-1, -1, 1])
image_channel2 = tf.slice(image, [0, 0, 1], [-1, -1, 1])
image_channel3 = tf.slice(image, [0, 0, 2], [-1, -1, 1])
# Change RGB to BGR based on the preprocessing in myalexnet_forward_newtf.py ==> helps in increasing accuracy on the pretrained model
image = tf.concat([image_channel3, image_channel2, image_channel1], 2)
return _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels)
def preprocess_image3(image_buffer, bbox, output_height, output_width,
num_channels, is_training=False):
"""Preprocesses the given image.
Preprocessing includes decoding, cropping, and resizing for both training
and eval images. Training preprocessing, however, introduces some random
distortion of the image to improve accuracy. Also changes RGB to BGR
and divides by the standard dev
Args:
image_buffer: scalar string Tensor representing the raw JPEG image buffer.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
num_channels: Integer depth of the image buffer for decoding.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
Returns:
A preprocessed image.
"""
if is_training:
# For training, we want to randomize some of the distortions.
image = _decode_crop_and_flip(image_buffer, bbox, num_channels)
image = _resize_image(image, output_height, output_width)
else:
# For validation, we want to decode, resize, then just crop the middle.
image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
image = _aspect_preserving_resize(image, _RESIZE_MIN)
image = _central_crop(image, output_height, output_width)
image = tf.to_float(image)
image.set_shape([output_height, output_width, num_channels])
image = tf.slice(image, [0, 0, 0], [output_height, output_width, -1])
# Slice the image into different channels
image_channel1 = tf.slice(image, [0, 0, 0], [-1, -1, 1])
image_channel2 = tf.slice(image, [0, 0, 1], [-1, -1, 1])
image_channel3 = tf.slice(image, [0, 0, 2], [-1, -1, 1])
# Change RGB to BGR based on the preprocessing in myalexnet_forward_newtf.py ==> helps in increasing accuracy
image = tf.concat([image_channel3, image_channel2, image_channel1], 2)
image = _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels)
image = tf.divide(image, _CHANNEL_STDS)
return image
def preprocess_image4(image_buffer, bbox, output_height, output_width,
num_channels, is_training=False):
"""Preprocesses the given image.
Preprocessing includes decoding, cropping, and resizing for both training
and eval images. Training preprocessing, however, introduces some random
distortion of the image to improve accuracy. Also adds lighting noise,
changes RGB to BGR and divides by the standard dev
Args:
image_buffer: scalar string Tensor representing the raw JPEG image buffer.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
num_channels: Integer depth of the image buffer for decoding.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
Returns:
A preprocessed image.
"""
if is_training:
# For training, we want to randomize some of the distortions.
image = _decode_crop_and_flip(image_buffer, bbox, num_channels)
image = _resize_image(image, output_height, output_width)
else:
# For validation, we want to decode, resize, then just crop the middle.
image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
image = _aspect_preserving_resize(image, _RESIZE_MIN)
image = _central_crop(image, output_height, output_width)
image = tf.to_float(image)
image.set_shape([output_height, output_width, num_channels])
image = tf.slice(image, [0, 0, 0], [output_height, output_width, -1])
# Slice the image into different channels
image_channel1 = tf.slice(image, [0, 0, 0], [-1, -1, 1])
image_channel2 = tf.slice(image, [0, 0, 1], [-1, -1, 1])
image_channel3 = tf.slice(image, [0, 0, 2], [-1, -1, 1])
# Change RGB to BGR based on the preprocessing in myalexnet_forward_newtf.py ==> helps in increasing accuracy
image = tf.concat([image_channel3, image_channel2, image_channel1], 2)
if is_training: # add lighting noise
image = _lighting_noise(image)
image = _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels)
image = tf.divide(image, _CHANNEL_STDS)
return image
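# Summary of the preprocessing variants above (taken from their docstrings):
#   preprocess_image  : decode/crop/flip (train) or resize + center crop (eval),
#                       then per-channel mean subtraction.
#   preprocess_image2 : as preprocess_image, plus an RGB -> BGR channel swap.
#   preprocess_image3 : as preprocess_image2, plus division by per-channel std.
#   preprocess_image4 : as preprocess_image3, plus PCA lighting noise in training.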
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import keras
from keras import backend
from keras.utils import np_utils
import os
import argparse
import logging
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import flags
import sys
sys.path.insert(0, "/home/consus/a/sen9/verifiedAI/cleverhans_EMPIR")
#from modified_cleverhans.attacks import fgsm
from modified_cleverhans.utils import set_log_level, parse_model_settings, build_model_save_path
from modified_cleverhans.attacks import FastGradientMethod
from modified_cleverhans.utils_keras import cnn_model
from modified_cleverhans.utils_tf import batch_eval, tf_model_load
from modified_cleverhans.utils_tf import model_train_imagenet, model_eval_imagenet, model_eval_ensemble_imagenet, model_eval_adv_imagenet, model_eval_ensemble_adv_imagenet
from examples import imagenet_preprocessing #for imagenet preprocessing
from collections import OrderedDict
FLAGS = flags.FLAGS
ATTACK_CARLINI_WAGNER_L2 = 0
ATTACK_JSMA = 1
ATTACK_FGSM = 2
ATTACK_MADRYETAL = 3
ATTACK_BASICITER = 4
MAX_BATCH_SIZE = 100
# enum adversarial training types
ADVERSARIAL_TRAINING_MADRYETAL = 1
ADVERSARIAL_TRAINING_FGSM = 2
MAX_EPS = 0.3
# Scaling input to softmax
INIT_T = 1.0
#ATTACK_T = 1.0
ATTACK_T = 0.25
_DEFAULT_IMAGE_SIZE = 224
_NUM_CHANNELS = 3
_NUM_CLASSES = 1000
_NUM_TRAIN_FILES = 1024
_SHUFFLE_BUFFER = 10000
def get_filenames(is_training, data_dir):
"""Return filenames for dataset."""
if is_training:
return [
os.path.join(data_dir, 'Train-%05d-of-01024' % i)
for i in range(_NUM_TRAIN_FILES)]
else:
return [
os.path.join(data_dir, 'Val-%05d-of-00128' % i)
for i in range(128)]
def _parse_example_proto(example_serialized):
"""Parses an Example proto containing a training example of an image.
The output of the build_image_data.py image preprocessing script is a dataset
containing serialized Example protocol buffers. Each Example proto contains
the following fields (values are included as examples):
image/height': _int64_feature(height),
image/width': _int64_feature(width),
image/colorspace': _bytes_feature(colorspace),
image/channels': _int64_feature(channels),
image/class/label': _int64_feature(label),
image/class/synset': _bytes_feature(synset),
image/format': _bytes_feature(image_format),
image/filename': _bytes_feature(os.path.basename(filename)),
image/encoded': _bytes_feature(image_buffer)}))
Args:
example_serialized: scalar Tensor tf.string containing a serialized
Example protocol buffer.
Returns:
image_buffer: Tensor tf.string containing the contents of a JPEG file.
label: Tensor tf.int32 containing the label.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
"""
# Dense features in Example proto.
feature_map = {
'image/height': tf.FixedLenFeature([], dtype=tf.int64),
'image/width': tf.FixedLenFeature([], dtype=tf.int64),
'image/colorspace': tf.VarLenFeature(dtype=tf.string),
'image/channels': tf.FixedLenFeature([], dtype=tf.int64),
'image/class/label': tf.FixedLenFeature([], dtype=tf.int64),
'image/class/synset': tf.VarLenFeature(dtype=tf.string),
'image/format': tf.VarLenFeature(dtype=tf.string),
'image/filename': tf.VarLenFeature(dtype=tf.string),
'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
}
features = tf.parse_single_example(example_serialized, feature_map)
label = tf.cast(features['image/class/label'], dtype=tf.int32)
one_hot_label = tf.one_hot(label, _NUM_CLASSES, 1, 0) #convert it to a one_hot vector
# Directly fixing values of min and max
xmin = tf.expand_dims([0.0], 0)
ymin = tf.expand_dims([0.0], 0)
xmax = tf.expand_dims([1.0], 0)
ymax = tf.expand_dims([1.0], 0)
# Note that we impose an ordering of (y, x) just to make life difficult.
bbox = tf.concat([ymin, xmin, ymax, xmax], 0)
# Force the variable number of bounding boxes into the shape
# [1, num_boxes, coords].
bbox = tf.expand_dims(bbox, 0)
bbox = tf.transpose(bbox, [0, 2, 1])
return features['image/encoded'], one_hot_label, bbox
# variant of the above to parse training datasets which have labels from 1 to 1000 instead of 0 to 999
def _parse_train_example_proto(example_serialized):
"""Parses an Example proto containing a training example of an image.
The output of the build_image_data.py image preprocessing script is a dataset
containing serialized Example protocol buffers. Each Example proto contains
the following fields (values are included as examples):
image/height': _int64_feature(height),
image/width': _int64_feature(width),
image/colorspace': _bytes_feature(colorspace),
image/channels': _int64_feature(channels),
image/class/label': _int64_feature(label),
image/class/synset': _bytes_feature(synset),
image/format': _bytes_feature(image_format),
image/filename': _bytes_feature(os.path.basename(filename)),
image/encoded': _bytes_feature(image_buffer)}))
Args:
example_serialized: scalar Tensor tf.string containing a serialized
Example protocol buffer.
Returns:
image_buffer: Tensor tf.string containing the contents of a JPEG file.
label: Tensor tf.int32 containing the label.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
"""
# Dense features in Example proto.
feature_map = {
'image/height': tf.FixedLenFeature([], dtype=tf.int64),
'image/width': tf.FixedLenFeature([], dtype=tf.int64),
'image/colorspace': tf.VarLenFeature(dtype=tf.string),
'image/channels': tf.FixedLenFeature([], dtype=tf.int64),
'image/class/label': tf.FixedLenFeature([], dtype=tf.int64),
'image/class/synset': tf.VarLenFeature(dtype=tf.string),
'image/format': tf.VarLenFeature(dtype=tf.string),
'image/filename': tf.VarLenFeature(dtype=tf.string),
'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
}
features = tf.parse_single_example(example_serialized, feature_map)
label = tf.cast(features['image/class/label'], dtype=tf.int32) - 1  # shift labels from [1, 1000] to [0, 999]
one_hot_label = tf.one_hot(label, _NUM_CLASSES, 1, 0) #convert it to a one_hot vector
# Directly fixing values of min and max
xmin = tf.expand_dims([0.0], 0)
ymin = tf.expand_dims([0.0], 0)
xmax = tf.expand_dims([1.0], 0)
ymax = tf.expand_dims([1.0], 0)
# Note that we impose an ordering of (y, x) just to make life difficult.
bbox = tf.concat([ymin, xmin, ymax, xmax], 0)
# Force the variable number of bounding boxes into the shape
# [1, num_boxes, coords].
bbox = tf.expand_dims(bbox, 0)
bbox = tf.transpose(bbox, [0, 2, 1])
return features['image/encoded'], one_hot_label, bbox
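# Illustrative sketch (not used by the pipeline): builds a serialized tf.train.Example
# carrying the fields the two parsers above expect, which can be handy for testing
# them without real TFRecord shards. The JPEG bytes and synset below are placeholders.
def _example_serialized_example(jpeg_bytes=b'\xff\xd8\xff\xd9', label=1):
  def _bytes(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
  def _int64(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
  feature = {
      'image/height': _int64(_DEFAULT_IMAGE_SIZE),
      'image/width': _int64(_DEFAULT_IMAGE_SIZE),
      'image/colorspace': _bytes(b'RGB'),
      'image/channels': _int64(_NUM_CHANNELS),
      'image/class/label': _int64(label),
      'image/class/synset': _bytes(b'n01440764'),
      'image/format': _bytes(b'JPEG'),
      'image/filename': _bytes(b'example.JPEG'),
      'image/encoded': _bytes(jpeg_bytes),
  }
  return tf.train.Example(
      features=tf.train.Features(feature=feature)).SerializeToString()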
def parse_record(raw_record, is_training, dtype):
"""Parses a record containing a training example of an image.
The input record is parsed into a label and image, and the image is passed
through preprocessing steps (cropping, flipping, and so on).
Args:
raw_record: scalar Tensor tf.string containing a serialized
Example protocol buffer.
is_training: A boolean denoting whether the input is for training.
dtype: data type to use for images/features.
Returns:
Tuple with processed image tensor and one-hot-encoded label tensor.
"""
if is_training:
image_buffer, label, bbox = _parse_train_example_proto(raw_record)
else:
image_buffer, label, bbox = _parse_example_proto(raw_record)
image = imagenet_preprocessing.preprocess_image4( # For pretrained Dorefanet network with division by standard deviation
image_buffer=image_buffer,
bbox=bbox,
output_height=_DEFAULT_IMAGE_SIZE,
output_width=_DEFAULT_IMAGE_SIZE,
num_channels=_NUM_CHANNELS,
is_training=is_training)
image = tf.cast(image, dtype)
return image, label
def process_record_dataset(dataset,
is_training,
batch_size,
shuffle_buffer,
parse_record_fn,
num_epochs=1,
dtype=tf.float32,
datasets_num_private_threads=None,
num_parallel_batches=1):
"""Given a Dataset with raw records, return an iterator over the records.
Args:
dataset: A Dataset representing raw records
is_training: A boolean denoting whether the input is for training.
batch_size: The number of samples per batch.
shuffle_buffer: The buffer size to use when shuffling records. A larger
value results in better randomness, but smaller values reduce startup
time and use less memory.
parse_record_fn: A function that takes a raw record and returns the
corresponding (image, label) pair.
num_epochs: The number of epochs to repeat the dataset.
dtype: Data type to use for images/features.
datasets_num_private_threads: Number of threads for a private
threadpool created for all datasets computation.
num_parallel_batches: Number of parallel batches for tf.data.
Returns:
Dataset of (image, label) pairs ready for iteration.
"""
# Prefetches a batch at a time to smooth out the time taken to load input
# files for shuffling and processing.
dataset = dataset.prefetch(buffer_size=batch_size)
if is_training:
# Shuffles records before repeating to respect epoch boundaries.
dataset = dataset.shuffle(buffer_size=shuffle_buffer)
# Repeats the dataset for the number of epochs to train.
# Parses the raw records into images and labels.
dataset = dataset.apply(
tf.contrib.data.map_and_batch(
lambda value: parse_record_fn(value, is_training, dtype),
batch_size=batch_size,
num_parallel_batches=num_parallel_batches))
# Operations between the final prefetch and the get_next call to the iterator
# will happen synchronously during run time. We prefetch here again to
# background all of the above processing work and keep it out of the
# critical training path. Setting buffer_size to tf.contrib.data.AUTOTUNE
# allows DistributionStrategies to adjust how many batches to fetch based
# on how many devices are present.
# dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
dataset = dataset.prefetch(buffer_size=1)
return dataset
def data_imagenet(nb_epochs, batch_size, imagenet_path):
"""
Preprocess Imagenet dataset
:return:
"""
# Load images from dataset
test_dataset =tf.data.TFRecordDataset(get_filenames(is_training=False, data_dir=imagenet_path+'/Val'))
train_dataset = tf.data.TFRecordDataset(get_filenames(is_training=True, data_dir=imagenet_path+'/Train'))
train_processed = process_record_dataset(dataset=train_dataset, is_training=True, batch_size=batch_size, shuffle_buffer=_SHUFFLE_BUFFER, num_epochs = nb_epochs, parse_record_fn=parse_record)
test_processed = process_record_dataset(dataset=test_dataset, is_training=False, batch_size=batch_size, shuffle_buffer=_SHUFFLE_BUFFER, parse_record_fn=parse_record)
return train_processed, test_processed
def main(argv=None):
model_path = FLAGS.model_path
targeted = True if FLAGS.targeted else False
scale = True if FLAGS.scale else False
learning_rate = FLAGS.learning_rate
nb_filters = FLAGS.nb_filters
batch_size = FLAGS.batch_size
nb_epochs = FLAGS.nb_epochs
delay = FLAGS.delay
eps = FLAGS.eps
adv = FLAGS.adv
attack = FLAGS.attack
attack_iterations = FLAGS.attack_iterations
nb_iter = FLAGS.nb_iter
#### EMPIR extra flags
lowprecision=FLAGS.lowprecision
abits=FLAGS.abits
wbits=FLAGS.wbits
abitsList=FLAGS.abitsList
wbitsList=FLAGS.wbitsList
stocRound=True if FLAGS.stocRound else False
rand=FLAGS.rand
model_path2 = FLAGS.model_path2
model_path1 = FLAGS.model_path1
model_path3 = FLAGS.model_path3
ensembleThree=True if FLAGS.ensembleThree else False
abits2=FLAGS.abits2
wbits2=FLAGS.wbits2
abits2List=FLAGS.abits2List
wbits2List=FLAGS.wbits2List
####
save = False
train_from_scratch = False
#### Imagenet flags
imagenet_path = FLAGS.imagenet_path
if imagenet_path is None:
print("Error: Imagenet data path not specified")
sys.exit(1)
# Imagenet specific dimensions
img_rows = _DEFAULT_IMAGE_SIZE
img_cols = _DEFAULT_IMAGE_SIZE
channels = _NUM_CHANNELS
nb_classes = _NUM_CLASSES
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
if not hasattr(backend, "tf"):
raise RuntimeError("This tutorial requires keras to be configured"
" to use the TensorFlow backend.")
    # Image dimension ordering should follow the TensorFlow ('tf') convention
if keras.backend.image_dim_ordering() != 'tf':
keras.backend.set_image_dim_ordering('tf')
print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to "
"'th', temporarily setting to 'tf'")
# Create TF session and set as Keras backend session
sess = tf.Session()
keras.backend.set_session(sess)
set_log_level(logging.WARNING)
# Get imagenet datasets
train_dataset, test_dataset = data_imagenet(nb_epochs, batch_size, imagenet_path)
    # Create initializable iterators for the train and test datasets
train_iterator = train_dataset.make_initializable_iterator()
test_iterator = test_dataset.make_initializable_iterator()
# Getting next elements from the iterators
next_test_element = test_iterator.get_next()
next_train_element = train_iterator.get_next()
train_x, train_y = train_iterator.get_next()
test_x, test_y = test_iterator.get_next()
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, channels))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
phase = tf.placeholder(tf.bool, name="phase")
logits_scalar = tf.placeholder_with_default(
INIT_T, shape=(), name="logits_temperature")
if ensembleThree:
if (model_path1 is None or model_path2 is None or model_path3 is None):
train_from_scratch = True
else:
train_from_scratch = False
elif model_path is not None:
if os.path.exists(model_path):
# check for existing model in immediate subfolder
if any(f.endswith('.meta') for f in os.listdir(model_path)):
train_from_scratch = False
else:
model_path = build_model_save_path(
model_path, batch_size, nb_filters, learning_rate, nb_epochs, adv, delay)
print(model_path)
save = True
train_from_scratch = True
else:
train_from_scratch = True # train from scratch, but don't save since no path given
if ensembleThree:
if (wbitsList is None) or (abitsList is None): # Layer wise separate quantization not specified for first model
if (wbits==0) or (abits==0):
print("Error: the number of bits for constant precision weights and activations across layers for the first model have to specified using wbits1 and abits1 flags")
sys.exit(1)
else:
fixedPrec1 = 1
elif (len(wbitsList) != 6) or (len(abitsList) != 6):
print("Error: Need to specify the precisions for activations and weights for the atleast the four convolutional layers of alexnet excluding the first layer and 2 fully connected layers excluding the last layer of the first model")
sys.exit(1)
else:
fixedPrec1 = 0
if (wbits2List is None) or (abits2List is None): # Layer wise separate quantization not specified for second model
if (wbits2==0) or (abits2==0):
print("Error: the number of bits for constant precision weights and activations across layers for the second model have to specified using wbits1 and abits1 flags")
sys.exit(1)
else:
fixedPrec2 = 1
elif (len(wbits2List) != 6) or (len(abits2List) != 6):
print("Error: Need to specify the precisions for activations and weights for the atleast the four convolutional layers of alexnet excluding the first layer and 2 fully connected layers excluding the last layer of the second model")
sys.exit(1)
else:
fixedPrec2 = 0
        if (fixedPrec2 != 1) or (fixedPrec1 != 1):  # at least one of the models has separate precisions per layer
            fixedPrec = 0
            print("At least one model uses separate per-layer precisions")
            if (fixedPrec1 == 1):  # first model has a fixed precision across layers
abitsList = (abits, abits, abits, abits, abits, abits)
wbitsList = (wbits, wbits, wbits, wbits, wbits, wbits)
            if (fixedPrec2 == 1):  # second model has a fixed precision across layers
abits2List = (abits2, abits2, abits2, abits2, abits2, abits2)
wbits2List = (wbits2, wbits2, wbits2, wbits2, wbits2, wbits2)
else:
fixedPrec=1
if (train_from_scratch):
print ("The ensemble model cannot be trained from scratch")
sys.exit(1)
if fixedPrec == 1:
from modified_cleverhans_tutorials.tutorial_models import make_ensemble_three_alexnet
model = make_ensemble_three_alexnet(
phase, logits_scalar, 'lp1_', 'lp2_', 'fp_', wbits, abits, wbits2, abits2, input_shape=(None, img_rows, img_cols, channels), nb_filters=nb_filters, nb_classes=nb_classes)
else:
from modified_cleverhans_tutorials.tutorial_models import make_layerwise_three_combined_alexnet
model = make_layerwise_three_combined_alexnet(
phase, logits_scalar, 'lp1_', 'lp2_', 'fp_', wbitsList, abitsList, wbits2List, abits2List, input_shape=(None, img_rows, img_cols, channels), nb_filters=nb_filters, nb_classes=nb_classes)
elif lowprecision:
if (wbitsList is None) or (abitsList is None): # Layer wise separate quantization not specified
if (wbits==0) or (abits==0):
print("Error: the number of bits for constant precision weights and activations across layers have to specified using wbits and abits flags")
sys.exit(1)
else:
fixedPrec = 1
elif (len(wbitsList) != 6) or (len(abitsList) != 6):
print("Error: Need to specify the precisions for activations and weights for the atleast the four convolutional layers of alexnet excluding the first layer and 2 fully connected layers excluding the last layer")
sys.exit(1)
else:
fixedPrec = 0
if fixedPrec:
### For training from scratch
from modified_cleverhans_tutorials.tutorial_models import make_basic_lowprecision_alexnet
model = make_basic_lowprecision_alexnet(phase, logits_scalar, 'lp_', wbits, abits, input_shape=(
None, img_rows, img_cols, channels), nb_filters=nb_filters, nb_classes=nb_classes)
else:
from modified_cleverhans_tutorials.tutorial_models import make_layerwise_lowprecision_alexnet
model = make_layerwise_lowprecision_alexnet(phase, logits_scalar, 'lp_', wbitsList, abitsList,
input_shape=(None, img_rows, img_cols, channels), nb_filters=nb_filters, nb_classes=nb_classes)
else:
### For training from scratch
from modified_cleverhans_tutorials.tutorial_models import make_basic_alexnet_from_scratch
model = make_basic_alexnet_from_scratch(phase, logits_scalar, 'fp_', input_shape=(
None, img_rows, img_cols, channels), nb_filters=nb_filters, nb_classes=nb_classes)
# separate calling function for ensemble models
if ensembleThree:
preds = model.ensemble_call(x, reuse=False)
else:
##default
preds = model(x, reuse=False)
print("Defined TensorFlow model graph.")
rng = np.random.RandomState([2017, 8, 30])
def evaluate():
        # Evaluate the accuracy of the ImageNet model on legitimate test
        # examples
eval_params = {'batch_size': batch_size}
if ensembleThree:
acc = model_eval_ensemble_imagenet(
sess, x, y, preds, test_iterator, test_x, test_y, phase=phase, args=eval_params)
else: #default below
acc = model_eval_imagenet(
sess, x, y, preds, test_iterator, test_x, test_y, phase=phase, args=eval_params)
print('Test accuracy on legitimate examples: %0.4f' % acc)
# Train an Imagenet model
train_params = {
'lowprecision': lowprecision,
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate,
'loss_name': 'train loss',
'filename': 'model',
'reuse_global_step': False,
'train_scope': 'train',
'is_training': True
}
if adv != 0:
if adv == ADVERSARIAL_TRAINING_MADRYETAL:
from modified_cleverhans.attacks import MadryEtAl
train_attack_params = {'eps': MAX_EPS, 'eps_iter': 0.01,
'nb_iter': nb_iter}
train_attacker = MadryEtAl(model, sess=sess)
elif adv == ADVERSARIAL_TRAINING_FGSM:
from modified_cleverhans.attacks import FastGradientMethod
stddev = int(np.ceil((MAX_EPS * 255) // 2))
train_attack_params = {'eps': tf.abs(tf.truncated_normal(
shape=(batch_size, 1, 1, 1), mean=0, stddev=stddev))}
train_attacker = FastGradientMethod(model, back='tf', sess=sess)
# create the adversarial trainer
train_attack_params.update({'clip_min': 0., 'clip_max': 1.})
adv_x_train = train_attacker.generate(x, phase, **train_attack_params)
preds_adv_train = model.get_probs(adv_x_train)
eval_attack_params = {'eps': MAX_EPS, 'clip_min': 0., 'clip_max': 1.}
adv_x_eval = train_attacker.generate(x, phase, **eval_attack_params)
preds_adv_eval = model.get_probs(adv_x_eval) # * logits_scalar
# if adv:
# from modified_cleverhans.attacks import FastGradientMethod
# fgsm = FastGradientMethod(model, back='tf', sess=sess)
# fgsm_params = {'eps': eps, 'clip_min': 0., 'clip_max': 1.}
# adv_x_train = fgsm.generate(x, phase, **fgsm_params)
# preds_adv = model.get_probs(adv_x_train)
if train_from_scratch:
if save:
train_params.update({'log_dir': model_path})
if adv and delay > 0:
train_params.update({'nb_epochs': delay})
# do clean training for 'nb_epochs' or 'delay' epochs with learning rate reducing with time
model_train_imagenet2(sess, x, y, preds, train_iterator, train_x, train_y, phase=phase,
evaluate=evaluate, args=train_params, save=save, rng=rng)
# optionally do additional adversarial training
if adv:
print("Adversarial training for %d epochs" % (nb_epochs - delay))
train_params.update({'nb_epochs': nb_epochs - delay})
train_params.update({'reuse_global_step': True})
model_train_imagenet(sess, x, y, preds, train_iterator, train_x, train_y, phase=phase,
predictions_adv=preds_adv_train, evaluate=evaluate, args=train_params, save=save, rng=rng)
else:
if ensembleThree: ## ensembleThree models have to loaded from different paths
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
# First 11 variables from path1
stored_variables = ['lp_conv1_init/k', 'lp_conv1_init/b', 'lp_conv2_init/k', 'lp_conv3_init/k', 'lp_conv4_init/k', 'lp_conv5_init/k', 'lp_ip1init/W', 'lp_ip1init/b', 'lp_ip2init/W', 'lp_logits_init/W', 'lp_logits_init/b']
            # Build a {checkpoint_name: graph_variable} map; going through OrderedDict
            # preserves the pairing order (a plain dict scrambled it). A standalone
            # sketch of this subset-restore pattern follows main() below.
            variable_dict = dict(OrderedDict(zip(stored_variables, variables[:11])))
# Restore the first set of variables from model_path1
saver = tf.train.Saver(variable_dict)
saver.restore(sess, tf.train.latest_checkpoint(model_path1))
# Restore the second set of variables from model_path2
# Second 11 variables from path2
variable_dict = dict(OrderedDict(zip(stored_variables, variables[11:22])))
saver2 = tf.train.Saver(variable_dict)
saver2.restore(sess, tf.train.latest_checkpoint(model_path2))
# Third 11 variables from path3
stored_variables = ['fp_conv1_init/k', 'fp_conv1_init/b', 'fp_conv2_init/k', 'fp_conv3_init/k', 'fp_conv4_init/k', 'fp_conv5_init/k', 'fp_ip1init/W', 'fp_ip1init/b', 'fp_ip2init/W', 'fp_logits_init/W', 'fp_logits_init/b']
variable_dict = dict(OrderedDict(zip(stored_variables, variables[22:33])))
saver3 = tf.train.Saver(variable_dict)
saver3.restore(sess, tf.train.latest_checkpoint(model_path3))
# Next 24 batch norm variables from path1
stored_variables = ['lp__batchNorm1/batch_normalization/gamma', 'lp__batchNorm1/batch_normalization/beta', 'lp__batchNorm1/batch_normalization/moving_mean', 'lp__batchNorm1/batch_normalization/moving_variance', 'lp__batchNorm2/batch_normalization/gamma', 'lp__batchNorm2/batch_normalization/beta', 'lp__batchNorm2/batch_normalization/moving_mean', 'lp__batchNorm2/batch_normalization/moving_variance', 'lp__batchNorm3/batch_normalization/gamma', 'lp__batchNorm3/batch_normalization/beta', 'lp__batchNorm3/batch_normalization/moving_mean', 'lp__batchNorm3/batch_normalization/moving_variance', 'lp__batchNorm4/batch_normalization/gamma', 'lp__batchNorm4/batch_normalization/beta', 'lp__batchNorm4/batch_normalization/moving_mean', 'lp__batchNorm4/batch_normalization/moving_variance', 'lp__batchNorm5/batch_normalization/gamma', 'lp__batchNorm5/batch_normalization/beta', 'lp__batchNorm5/batch_normalization/moving_mean', 'lp__batchNorm5/batch_normalization/moving_variance', 'lp__batchNorm6/batch_normalization/gamma', 'lp__batchNorm6/batch_normalization/beta', 'lp__batchNorm6/batch_normalization/moving_mean', 'lp__batchNorm6/batch_normalization/moving_variance']
variable_dict = dict(OrderedDict(zip(stored_variables, variables[33:57])))
saver = tf.train.Saver(variable_dict)
saver.restore(sess, tf.train.latest_checkpoint(model_path1))
# Next 24 batch norm variables from path2
variable_dict = dict(OrderedDict(zip(stored_variables, variables[57:81])))
saver = tf.train.Saver(variable_dict)
saver.restore(sess, tf.train.latest_checkpoint(model_path2))
            # Final 24 batch norm variables from path3
stored_variables = ['fp__batchNorm1/batch_normalization/gamma', 'fp__batchNorm1/batch_normalization/beta', 'fp__batchNorm1/batch_normalization/moving_mean', 'fp__batchNorm1/batch_normalization/moving_variance', 'fp__batchNorm2/batch_normalization/gamma', 'fp__batchNorm2/batch_normalization/beta', 'fp__batchNorm2/batch_normalization/moving_mean', 'fp__batchNorm2/batch_normalization/moving_variance', 'fp__batchNorm3/batch_normalization/gamma', 'fp__batchNorm3/batch_normalization/beta', 'fp__batchNorm3/batch_normalization/moving_mean', 'fp__batchNorm3/batch_normalization/moving_variance', 'fp__batchNorm4/batch_normalization/gamma', 'fp__batchNorm4/batch_normalization/beta', 'fp__batchNorm4/batch_normalization/moving_mean', 'fp__batchNorm4/batch_normalization/moving_variance', 'fp__batchNorm5/batch_normalization/gamma', 'fp__batchNorm5/batch_normalization/beta', 'fp__batchNorm5/batch_normalization/moving_mean', 'fp__batchNorm5/batch_normalization/moving_variance', 'fp__batchNorm6/batch_normalization/gamma', 'fp__batchNorm6/batch_normalization/beta', 'fp__batchNorm6/batch_normalization/moving_mean', 'fp__batchNorm6/batch_normalization/moving_variance']
variable_dict = dict(OrderedDict(zip(stored_variables, variables[81:105])))
saver = tf.train.Saver(variable_dict)
saver.restore(sess, tf.train.latest_checkpoint(model_path3))
else: # restoring the model trained using this setup, not a downloaded one
tf_model_load(sess, model_path)
print('Restored model from %s' % model_path)
# evaluate()
# Evaluate the accuracy of the model on legitimate test examples
eval_params = {'batch_size': batch_size}
if ensembleThree:
accuracy = model_eval_ensemble_imagenet(sess, x, y, preds, test_iterator, test_x, test_y, phase=phase, feed={phase: False}, args=eval_params)
else: #default below
accuracy = model_eval_imagenet(sess, x, y, preds, test_iterator, test_x, test_y, phase=phase, feed={phase: False}, args=eval_params)
print('Test accuracy on legitimate test examples: {0}'.format(accuracy))
###########################################################################
# Build dataset
###########################################################################
adv_inputs = test_x #adversarial inputs can be generated from any of the test examples
###########################################################################
# Craft adversarial examples using generic approach
###########################################################################
nb_adv_per_sample = 1
adv_ys = None
yname = "y"
print('Crafting adversarial examples')
print("This could take some time ...")
if ensembleThree:
model_type = 'ensembleThree'
else:
model_type = 'default'
if attack == ATTACK_CARLINI_WAGNER_L2:
from modified_cleverhans.attacks import CarliniWagnerL2
attacker = CarliniWagnerL2(model, back='tf', sess=sess, model_type=model_type, num_classes=nb_classes)
attack_params = {'binary_search_steps': 1,
'max_iterations': attack_iterations,
'learning_rate': 0.1,
'batch_size': batch_size,
'initial_const': 10,
}
elif attack == ATTACK_JSMA:
from modified_cleverhans.attacks import SaliencyMapMethod
attacker = SaliencyMapMethod(model, back='tf', sess=sess, model_type=model_type, num_classes=nb_classes)
attack_params = {'theta': 1., 'gamma': 0.1}
elif attack == ATTACK_FGSM:
from modified_cleverhans.attacks import FastGradientMethod
attacker = FastGradientMethod(model, back='tf', sess=sess, model_type=model_type, num_classes=nb_classes)
attack_params = {'eps': eps}
elif attack == ATTACK_MADRYETAL:
from modified_cleverhans.attacks import MadryEtAl
attacker = MadryEtAl(model, back='tf', sess=sess, model_type=model_type, num_classes=nb_classes)
attack_params = {'eps': eps, 'eps_iter': 0.01, 'nb_iter': nb_iter}
elif attack == ATTACK_BASICITER:
print('Attack: BasicIterativeMethod')
from modified_cleverhans.attacks import BasicIterativeMethod
attacker = BasicIterativeMethod(model, back='tf', sess=sess, model_type=model_type, num_classes=nb_classes)
attack_params = {'eps': eps, 'eps_iter': 0.01, 'nb_iter': nb_iter}
else:
print("Attack undefined")
sys.exit(1)
    attack_params.update({'clip_min': -2.2, 'clip_max': 2.7})  # the min and max of preprocessed ImageNet inputs turn out to be around -2.11 and 2.12
eval_params = {'batch_size': batch_size}
'''
adv_x = attacker.generate(x, phase, **attack_params)
# Craft adversarial examples using Fast Gradient Sign Method (FGSM)
eval_params = {'batch_size': batch_size}
X_test_adv, = batch_eval(sess, [x], [adv_x], [adv_inputs], feed={
phase: False}, args=eval_params)
'''
print("Evaluating un-targeted results")
if ensembleThree:
adv_accuracy = model_eval_ensemble_adv_imagenet(sess, x, y, preds, test_iterator,
test_x, test_y, phase=phase, args=eval_params, attacker=attacker, attack_params=attack_params)
else:
adv_accuracy = model_eval_adv_imagenet(sess, x, y, preds, test_iterator,
test_x, test_y, phase=phase, args=eval_params, attacker=attacker, attack_params=attack_params)
# Compute the number of adversarial examples that were successfully found
print('Test accuracy on adversarial examples {0:.4f}'.format(adv_accuracy))
# Close TF session
sess.close()
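# ---------------------------------------------------------------------------
# Sketch (never called): the per-checkpoint restore pattern used for the
# three-model ensemble in main() above, reduced to its essentials. A
# tf.train.Saver built from a {checkpoint_variable_name: graph_variable} dict
# restores only that subset of variables; repeating this with different dicts
# and checkpoint directories stitches one graph together from several
# checkpoints. `graph_variables`, `checkpoint_names` and `ckpt_dir` are
# placeholders, not values used by this script.
def _restore_subset_sketch(sess, graph_variables, checkpoint_names, ckpt_dir):
    variable_dict = dict(zip(checkpoint_names, graph_variables[:len(checkpoint_names)]))
    saver = tf.train.Saver(variable_dict)
    saver.restore(sess, tf.train.latest_checkpoint(ckpt_dir))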
if __name__ == '__main__':
par = argparse.ArgumentParser()
# Generic flags
par.add_argument('--gpu', help='id of GPU to use')
par.add_argument('--model_path', help='Path to save or load model')
par.add_argument('--data_dir', help='Path to training data',
default='/scratch/gallowaa/cifar10/cifar10_data')
# Architecture and training specific flags
par.add_argument('--nb_epochs', type=int, default=6,
help='Number of epochs to train model')
par.add_argument('--nb_filters', type=int, default=32,
help='Number of filters in first layer')
par.add_argument('--batch_size', type=int, default=100,
help='Size of training batches')
par.add_argument('--learning_rate', type=float, default=0.001,
help='Learning rate')
par.add_argument('--scale', help='Scale activations of the binary model?',
action="store_true")
par.add_argument('--rand', help='Stochastic weight layer?',
action="store_true")
# EMPIR specific flags
par.add_argument('--lowprecision', help='Use other low precision models', action="store_true")
par.add_argument('--wbits', type=int, default=0, help='No. of bits in weight representation')
par.add_argument('--abits', type=int, default=0, help='No. of bits in activation representation')
par.add_argument('--wbitsList', type=int, nargs='+', help='List of No. of bits in weight representation for different layers')
par.add_argument('--abitsList', type=int, nargs='+', help='List of No. of bits in activation representation for different layers')
par.add_argument('--stocRound', help='Stochastic rounding for weights (only in training) and activations?', action="store_true")
par.add_argument('--model_path1', help='Path where saved model1 is stored and can be loaded')
par.add_argument('--model_path2', help='Path where saved model2 is stored and can be loaded')
par.add_argument('--ensembleThree', help='Use an ensemble of full precision and two low precision models that can be attacked directly and potentially trained', action="store_true")
par.add_argument('--model_path3', help='Path where saved model3 in case of combinedThree model is stored and can be loaded')
par.add_argument('--wbits2', type=int, default=0, help='No. of bits in weight representation of model2, model1 specified using wbits')
    par.add_argument('--abits2', type=int, default=0, help='No. of bits in activation representation of model2, model1 specified using abits')
par.add_argument('--wbits2List', type=int, nargs='+', help='List of No. of bits in weight representation for different layers of model2')
par.add_argument('--abits2List', type=int, nargs='+', help='List of No. of bits in activation representation for different layers of model2')
# Attack specific flags
par.add_argument('--eps', type=float, default=0.1,
help='epsilon')
par.add_argument('--attack', type=int, default=0,
                     help='Attack type, 0=CW, 1=JSMA, 2=FGSM, 3=PGD (MadryEtAl), 4=BasicIter')
par.add_argument('--attack_iterations', type=int, default=50,
help='Number of iterations to run CW attack; 1000 is good')
par.add_argument(
'--targeted', help='Run a targeted attack?', action="store_true")
# Adversarial training flags
par.add_argument(
'--adv', help='Adversarial training type?', type=int, default=0)
par.add_argument('--delay', type=int,
default=10, help='Nb of epochs to delay adv training by')
par.add_argument('--nb_iter', type=int,
default=40, help='Nb of iterations of PGD')
# imagenet flags
par.add_argument('--imagenet_path', help='Path where imagenet tfrecords are stored and can be loaded, both Val and Train')
FLAGS = par.parse_args()
if FLAGS.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
tf.app.run()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import torch
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from active_tests.decision_boundary_binarization import LogitRescalingType
from active_tests.decision_boundary_binarization import \
_train_logistic_regression_classifier
from active_tests.decision_boundary_binarization import \
interior_boundary_discrimination_attack, format_result
from cifar10_attack import setup_model
logging.getLogger('tensorflow').setLevel(logging.FATAL)
from functools import partial
import tensorflow as tf
from keras.utils.np_utils import to_categorical
tf.logging.set_verbosity(tf.logging.ERROR)
import os
import argparse
import numpy as np
from tensorflow.python.platform import flags
class Layer(object):
def get_output_shape(self):
return self.output_shape
class Linear(Layer):
def __init__(self, num_hid, name, useBias=False):
self.__dict__.update(locals())
# self.num_hid = num_hid
def set_input_shape(self, input_shape, reuse):
# with tf.variable_scope(self.scope_name+ 'init', reuse): # this works
# with black box, but now can't load checkpoints from wb
# this works with white-box
with tf.variable_scope(self.name + '_init', reuse):
batch_size, dim = input_shape
self.input_shape = [batch_size, dim]
self.output_shape = [batch_size, self.num_hid]
if self.useBias:
self.bias_shape = self.num_hid
init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
keep_dims=True))
self.W = tf.get_variable(
"W", initializer=init)
if self.useBias:
bias_init = tf.zeros(self.bias_shape)
self.bias = tf.get_variable("b", initializer= bias_init)
self.bias_ph = tf.placeholder(tf.float32, shape=self.bias_shape)
self.set_bias = self.bias.assign(self.bias_ph)
self.W_ph = tf.placeholder(tf.float32, shape=[dim, self.num_hid])
self.set_weight = self.W.assign(self.W_ph)
def fprop(self, x, reuse):
# with tf.variable_scope(self.scope_name + '_fprop', reuse):
# this works with white-box
with tf.variable_scope(self.name + '_fprop', reuse):
x = tf.matmul(x, self.W) # + self.b
if self.useBias:
x = tf.nn.bias_add(tf.contrib.layers.flatten(x), tf.reshape(self.bias, [-1]))
return x
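# ---------------------------------------------------------------------------
# Sketch (illustration only): the placeholder + assign pattern that
# Linear.set_weight / Linear.set_bias above rely on. Building the assign op
# once and feeding new values through a placeholder lets the readout weights
# be overwritten repeatedly at run time without growing the graph.
# All names below are hypothetical.
def _assign_via_placeholder_sketch(dim=64, num_hid=2):
    w = tf.get_variable("sketch_w", initializer=tf.zeros([dim, num_hid]))
    w_ph = tf.placeholder(tf.float32, shape=[dim, num_hid])
    set_w = w.assign(w_ph)
    # later, inside a session: sess.run(set_w, {w_ph: new_value})
    return w, w_ph, set_w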
FLAGS = flags.FLAGS
ATTACK_CARLINI_WAGNER_L2 = 0
ATTACK_JSMA = 1
ATTACK_FGSM = 2
ATTACK_MADRYETAL = 3
ATTACK_BASICITER = 4
MAX_BATCH_SIZE = 100
# Scaling input to softmax
INIT_T = 1.0
# ATTACK_T = 1.0
ATTACK_T = 0.25
from cifar10_attack import data_cifar10
from cifar10_attack import build_adversarial_attack
def main(argv=None):
"""
  Decision-boundary binarization test for the EMPIR CIFAR10 ensemble (modified_cleverhans)
:return:
"""
nb_classes = 2
targeted = True if FLAGS.targeted else False
batch_size = FLAGS.batch_size
nb_samples = FLAGS.nb_samples
eps = FLAGS.eps
attack = FLAGS.attack
nb_iter = FLAGS.nb_iter
ensembleThree = True if FLAGS.ensembleThree else False
sess, model, preds, x, y, phase = setup_model()
# Get CIFAR10 test data
X_train, Y_train, X_test, Y_test = data_cifar10()
del X_train, Y_train
X_test = np.transpose(X_test, (0, 3, 1, 2))
print(X_test.shape)
def run_attack(m, l, sess, attack):
for x_batch, y_batch in l:
assert len(x_batch) == 1
x_batch = x_batch.cpu().numpy()
y_batch = y_batch.cpu().numpy()
x_batch = x_batch.transpose(0, 2, 3, 1)
y_batch_oh = to_categorical(y_batch, num_classes=2)
x_batch_adv = attack(x_batch, y_batch_oh)
probs = m(x_batch_adv)
preds = probs.argmax(-1)
is_adv = preds != y_batch
return is_adv, (torch.tensor(x_batch_adv.transpose(0, 3, 1, 2), dtype=torch.float32),\
torch.tensor(probs, dtype=torch.float32))
def train_classifier(
n_features: int,
train_loader: DataLoader,
raw_train_loader: DataLoader,
logits: torch.Tensor,
device: str,
rescale_logits: LogitRescalingType,
binarized_ensemble,
set_weight_ops,
set_bias_ops,
sess,
weights_phs,
biases_phs
):
#del raw_train_loader
# fit a linear readout for each of the submodels of the ensemble
assert len(train_loader.dataset.tensors[0].shape) == 3
assert train_loader.dataset.tensors[0].shape[1] == len(weights_phs) == len(
biases_phs)
classifier_weights = []
classifier_biases = []
for i in range(3):
x_ = train_loader.dataset.tensors[0][:, i]
y_ = train_loader.dataset.tensors[1]
cls = _train_logistic_regression_classifier(
n_features,
DataLoader(TensorDataset(x_, y_), batch_size=train_loader.batch_size),
logits[:, i] if logits is not None else None,
"sklearn",
10000,
device,
n_classes=2,
rescale_logits=rescale_logits
)
classifier_weights.append(cls.weight.data.cpu().numpy().transpose())
classifier_biases.append(cls.bias.data.cpu().numpy())
# update weights of the binary models
for op, ph, v in zip(set_weight_ops, weights_phs, classifier_weights):
sess.run(op, {ph: v})
for op, ph, v in zip(set_bias_ops, biases_phs, classifier_biases):
sess.run(op, {ph: v})
""" n_corr1 = 0
n_corr2 = 0
n_total = 0
for x, y in raw_train_loader:
preds1 = binarized_model(x)
preds2 = binarized_model(x, averaged=False)
import pdb; pdb.set_trace()
n_corr1 += (preds1 == y).sum()
n_corr2 += (preds2 == y).sum()
n_total += len(x)
"""
return binarized_ensemble
from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper, \
PyTorchToTensorFlow1Wrapper
from utils import build_dataloader_from_arrays
test_loader = build_dataloader_from_arrays(X_test, Y_test, batch_size=32)
from modified_cleverhans.model import Model
class BinarizedEnsembleModel(Model):
def __init__(self, base_classifier, input_ph):
self.num_classes = 2
self.base_classifier = base_classifier
self.layer_names = []
self.layer_names.append('combined_features')
self.layer_names.append('combined_logits')
combined_layer_name = 'combined' ## Gives the final class prediction based on max voting
self.layer_names.append(combined_layer_name)
combinedCorrectProb_layer_name = 'combinedAvgCorrectProb' ## Gives average probability values of the models that decided the final prediction
self.layer_names.append(combinedCorrectProb_layer_name)
combinedProb_layer_name = 'combinedAvgProb' ## Gives average probability values of all the models
self.layer_names.append(combinedProb_layer_name)
self.readout_1 = Linear(2, "binarized_ensemble_readout_1", useBias=True)
self.readout_2 = Linear(2, "binarized_ensemble_readout_2", useBias=True)
self.readout_3 = Linear(2, "binarized_ensemble_readout_3", useBias=True)
self.readout_1.set_input_shape((-1, 64), True)
self.readout_2.set_input_shape((-1, 64), True)
self.readout_3.set_input_shape((-1, 64), True)
self.set_weight_ops = [
self.readout_1.set_weight,
self.readout_2.set_weight,
self.readout_3.set_weight
]
self.set_bias_ops = [
self.readout_1.set_bias,
self.readout_2.set_bias,
self.readout_3.set_bias,
]
self.weights_phs = [
self.readout_1.W_ph,
self.readout_2.W_ph,
self.readout_3.W_ph
]
self.biases_phs = [
self.readout_1.bias_ph,
self.readout_2.bias_ph,
self.readout_3.bias_ph
]
self.input_ph = input_ph
self.ensemble_op = self.get_ensemblepreds(self.input_ph)
self.averaged_op = self.get_combinedAvgCorrectProbs(self.input_ph)
def __call__(self, x_, averaged=True, *args, **kwargs):
return_torch = False
return_numpy = False
if isinstance(x_, torch.Tensor):
x_ = x_.cpu().numpy()
return_torch = True
if isinstance(x_, np.ndarray):
return_numpy = True
if x_.shape[1] == 3:
x_ = x_.transpose(0, 2, 3, 1)
x = self.input_ph
if averaged:
op = self.averaged_op
else:
op = self.ensemble_op
else:
raise NotImplementedError("Calling this with a tf tensor is not supported yet"
" (wasn't necessary).")
#if averaged:
# op = self.get_combinedAvgCorrectProbs(x_, *args, **kwargs)
#else:
# op = self.get_ensemblepreds(x_, *args, **kwargs)
if return_numpy or return_torch:
x_ = sess.run(op, {x: x_})
if return_torch:
x_ = torch.tensor(x_, dtype=torch.float32)
return x_
def fprop(self, x, reuse):
base_states = self.base_classifier.fprop(x, reuse)
features1 = base_states["Model1_HiddenLinear10"]
features2 = base_states["Model2_HiddenLinear10"]
features3 = base_states["Model3_HiddenLinear10"]
output1 = self.readout_1.fprop(features1, reuse)
output2 = self.readout_2.fprop(features2, reuse)
output3 = self.readout_3.fprop(features3, reuse)
states = []
states.append(tf.stack((features1, features2, features3), 1))
states.append(tf.stack((output1, output2, output3), 1))
# Find class predictions with each model
pred1 = tf.argmax(output1, axis=-1)
pred2 = tf.argmax(output2, axis=-1)
pred3 = tf.argmax(output3, axis=-1)
comb_pred = tf.stack([pred1, pred2, pred3], axis=1)
comb_pred = tf.cast(comb_pred, dtype=tf.int32) # converting to int32 as bincount requires int32
# Find how many times each of the classes are predicted among the three models and identify the max class
initial_imidx = 1
      binarray = tf.bincount(comb_pred[0], minlength=self.num_classes)  # initial bincount: counts occurrences of each class index (0..num_classes-1) in the 1-D vote vector
max_class = tf.argmax(binarray, axis=-1)
count_max = tf.gather(binarray, max_class) # max vote count for a class
value = tf.cond(tf.less(count_max, 2), lambda: pred3[0], lambda: max_class)
in_class_array = tf.fill([1], value)
## Added below to allow better gradient calculation for max voted model
in_avgCorrectprob = tf.cond(tf.equal(value, pred3[0]), lambda: output3[0], lambda: tf.zeros_like(output3[0])) # add pred3 if it affected the final decision
in_avgCorrectprob = tf.cond(tf.equal(value, pred2[0]), lambda: tf.add(output2[0], in_avgCorrectprob), lambda: in_avgCorrectprob) # add pred2 if it affected the final decision
      in_avgCorrectprob = tf.cond(tf.equal(value, pred1[0]), lambda: tf.add(output1[0], in_avgCorrectprob), lambda: in_avgCorrectprob) # add pred1 if it affected the final decision
in_avgCorrectprob_array = tf.expand_dims(tf.div(in_avgCorrectprob, tf.cast(count_max, dtype=tf.float32)), 0)
#condition check: when true the loop body executes
def idx_loop_condition(class_array, avgCorrectprob_array, im_idx):
return tf.less(im_idx, tf.shape(pred1)[0])
#loop body to calculate the max voted class for each image
def idx_loop_body(class_array, avgCorrectprob_array, im_idx):
        binarray_new = tf.bincount(comb_pred[im_idx], minlength=self.num_classes)  # counts occurrences of each class index (0..num_classes-1) in the 1-D vote vector
max_class = tf.argmax(binarray_new, axis=-1)
count_max = tf.gather(binarray_new, max_class) # max vote count for a class
value = tf.cond(tf.less(count_max, 2), lambda: pred3[im_idx], lambda: max_class)# If the max vote is less than 2, take the prediction of the full precision model
new_array = tf.fill([1], value)
class_array = tf.concat([class_array, new_array], 0)
## Added below to allow better gradient calculation for max voted model
avgCorrectprob = tf.cond(tf.equal(value, pred3[im_idx]), lambda: output3[im_idx], lambda: tf.zeros_like(output3[im_idx])) # add pred3 if it affected the final decision
avgCorrectprob = tf.cond(tf.equal(value, pred2[im_idx]), lambda: tf.add(output2[im_idx], avgCorrectprob), lambda: avgCorrectprob) # add pred2 if it affected the final decision
        avgCorrectprob = tf.cond(tf.equal(value, pred1[im_idx]), lambda: tf.add(output1[im_idx], avgCorrectprob), lambda: avgCorrectprob) # add pred1 if it affected the final decision
avgCorrectprob = tf.expand_dims(tf.div(avgCorrectprob, tf.cast(count_max, dtype=tf.float32)), 0)
avgCorrectprob_array = tf.concat([avgCorrectprob_array, avgCorrectprob], 0)
return (class_array, avgCorrectprob_array, im_idx+1)
res = tf.while_loop(
cond=idx_loop_condition,
body=idx_loop_body,
loop_vars=[in_class_array, in_avgCorrectprob_array, initial_imidx],
shape_invariants=[tf.TensorShape([None]), tf.TensorShape([None, self.num_classes]), tf.TensorShape([])], #add shape invariant saying that the first dimension of in_class_array changes and is thus None
)
      pred_output = tf.cast(res[0], dtype=tf.int64)  # max-voted class prediction for each image
states.append(pred_output)
      avgCorrectprob_output = res[1]  # averaged probabilities of the models that agreed with the final prediction, per image
states.append(avgCorrectprob_output)
avgprob = tf.div(tf.add_n([output2, output1, output3]), tf.cast(3, dtype=tf.float32)) # Average probability across all models
states.append(avgprob)
states = dict(zip(self.get_layer_names(), states))
return states
binarized_model = BinarizedEnsembleModel(model, x)
attacker, attack_params = build_adversarial_attack(
sess, binarized_model, attack,
targeted, nb_classes,
ensembleThree,
nb_samples, nb_iter, eps,
robust_attack=FLAGS.robust_attack)
base_model_outputs = model.fprop(x, reuse=True)
base_model_features = base_model_outputs["combined_features"]
base_model_logits = base_model_outputs["combined_logits"]
def _model_forward_pass(x_np, features_only=False, features_and_logits=False):
x_np = np.transpose(x_np, (0, 2, 3, 1))
if features_only:
return sess.run(base_model_features, {x : x_np})
elif features_and_logits:
targets = [base_model_features, base_model_logits]
return tuple(sess.run(targets, {x : x_np}))
else:
return sess.run(base_model_logits, {x : x_np})
feature_extractor = TensorFlow1ToPyTorchWrapper(
logit_forward_pass=_model_forward_pass,
logit_forward_and_backward_pass=None
)
y = tf.placeholder(tf.float32, shape=(None, 2))
if FLAGS.use_labels:
attack_params['y'] = y
else:
#del attack_params['y']
attack_params['y'] = tf.stop_gradient(tf.to_float(tf.one_hot(binarized_model.get_ensemblepreds(x, reuse=True), nb_classes)))
x_adv = attacker.generate(x, phase, **attack_params)
from argparse_utils import DecisionBoundaryBinarizationSettings
scores_logit_differences_and_validation_accuracies = \
interior_boundary_discrimination_attack(
feature_extractor,
test_loader,
attack_fn=lambda m, l, kwargs: run_attack(
m, l, sess, lambda x_, y_: sess.run(x_adv, {x: x_, y: y_})
),
linearization_settings=DecisionBoundaryBinarizationSettings(
epsilon=FLAGS.eps,
norm="linf",
lr=10000,
n_boundary_points=FLAGS.n_boundary_points,
n_inner_points=FLAGS.n_inner_points,
adversarial_attack_settings=None,
optimizer="sklearn"
),
n_samples=FLAGS.nb_samples,
device="cpu",
n_samples_evaluation=200,
n_samples_asr_evaluation=200,
train_classifier_fn=partial(train_classifier,
binarized_ensemble=binarized_model,
set_weight_ops=binarized_model.set_weight_ops,
set_bias_ops=binarized_model.set_bias_ops,
sess=sess,
weights_phs=binarized_model.weights_phs,
biases_phs=binarized_model.biases_phs,
),
fail_on_exception=True,
rescale_logits="adaptive",
decision_boundary_closeness=0.9999,
sample_training_data_from_corners=FLAGS.sample_from_corners
)
print(format_result(scores_logit_differences_and_validation_accuracies,
FLAGS.nb_samples))
if __name__ == '__main__':
par = argparse.ArgumentParser()
# Generic flags
par.add_argument('--gpu', help='id of GPU to use')
par.add_argument('--model_path', help='Path to save or load model')
par.add_argument('--data_dir', help='Path to training data',
default='cifar10_data')
# Architecture and training specific flags
par.add_argument('--nb_epochs', type=int, default=6,
help='Number of epochs to train model')
par.add_argument('--nb_filters', type=int, default=32,
help='Number of filters in first layer')
par.add_argument('--batch_size', type=int, default=128,
help='Size of training batches')
par.add_argument('--learning_rate', type=float, default=0.001,
help='Learning rate')
par.add_argument('--rand', help='Stochastic weight layer?',
action="store_true")
# Attack specific flags
par.add_argument('--eps', type=float, default=0.1,
help='epsilon')
par.add_argument('--attack', type=int, default=0,
                     help='Attack type, 0=CW, 1=JSMA, 2=FGSM, 3=PGD (MadryEtAl), 4=BasicIter')
par.add_argument('--nb_samples', type=int,
default=10000, help='Nb of inputs to attack')
par.add_argument(
'--targeted', help='Run a targeted attack?', action="store_true")
# Adversarial training flags
par.add_argument(
'--adv', help='Adversarial training type?', type=int, default=0)
par.add_argument('--delay', type=int,
default=10, help='Nb of epochs to delay adv training by')
par.add_argument('--nb_iter', type=int,
default=40,
help='Nb of iterations of PGD (set to 50 for CW)')
# EMPIR specific flags
par.add_argument('--lowprecision', help='Use other low precision models',
action="store_true")
par.add_argument('--wbits', type=int, default=0,
help='No. of bits in weight representation')
par.add_argument('--abits', type=int, default=0,
help='No. of bits in activation representation')
par.add_argument('--wbitsList', type=int, nargs='+',
help='List of No. of bits in weight representation for different layers')
par.add_argument('--abitsList', type=int, nargs='+',
help='List of No. of bits in activation representation for different layers')
par.add_argument('--stocRound',
help='Stochastic rounding for weights (only in training) and activations?',
action="store_true")
par.add_argument('--model_path1',
help='Path where saved model1 is stored and can be loaded')
par.add_argument('--model_path2',
help='Path where saved model2 is stored and can be loaded')
par.add_argument('--ensembleThree',
help='Use an ensemble of full precision and two low precision models that can be attacked directly',
action="store_true")
par.add_argument('--model_path3',
help='Path where saved model3 in case of combinedThree model is stored and can be loaded')
par.add_argument('--wbits2', type=int, default=0,
help='No. of bits in weight representation of model2, model1 specified using wbits')
par.add_argument('--abits2', type=int, default=0,
help='No. of bits in activation representation of model2, model2 specified using abits')
par.add_argument('--wbits2List', type=int, nargs='+',
help='List of No. of bits in weight representation for different layers of model2')
par.add_argument('--abits2List', type=int, nargs='+',
help='List of No. of bits in activation representation for different layers of model2')
# extra flags for defensive distillation
par.add_argument('--distill', help='Train the model using distillation',
action="store_true")
par.add_argument('--student_epochs', type=int, default=50,
help='No. of epochs for which the student model is trained')
# extra flags for input gradient regularization
par.add_argument('--inpgradreg',
help='Train the model using input gradient regularization',
action="store_true")
par.add_argument('--l2dbl', type=int, default=0,
help='l2 double backprop penalty')
par.add_argument('--l2cs', type=int, default=0,
help='l2 certainty sensitivity penalty')
par.add_argument("--n-inner-points", default=999, type=int)
par.add_argument("--n-boundary-points", default=1, type=int)
par.add_argument("--robust-attack", action="store_true")
par.add_argument("--use-labels", action="store_true")
par.add_argument("--sample-from-corners", action="store_true")
FLAGS = par.parse_args()
import cifar10_attack
cifar10_attack.FLAGS = FLAGS
if FLAGS.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
tf.app.run()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A pure TensorFlow implementation of a neural network. This can be
used as a drop-in replacement for a Keras model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
from modified_cleverhans.model import Model
BN_EPSILON = 1e-5
## For alexnet's local response normalization
RADIUS = 2
ALPHA = 2e-05
BETA = 0.75
BIAS = 1.0  # values copied from myalexnet_forward_newtf.py
@tf.RegisterGradient("QuantizeGrad")
def quantize_grad(op, grad):
return tf.clip_by_value(tf.identity(grad), -1, 1)
def hard_sigmoid(x):
return tf.cast(tf.clip_by_value((x + 1.) / 2., 0., 1.), tf.float32)
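# ---------------------------------------------------------------------------
# Sketch (not used by the layers below): the "QuantizeGrad" override above is
# a straight-through estimator -- the forward pass uses the non-differentiable
# sign(), while the backward pass treats it as the identity with gradients
# clipped to [-1, 1]. The same idea expressed with tf.custom_gradient, for
# reference only:
@tf.custom_gradient
def _sign_straight_through_sketch(x):
    def grad(dy):
        return tf.clip_by_value(dy, -1., 1.)  # clipped identity gradient
    return tf.sign(x), grad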
class MLP(Model):
"""
An example of a bare bones multilayer perceptron (MLP) class.
"""
def __init__(self, layers, input_shape):
super(MLP, self).__init__()
self.layer_names = []
self.layers = layers
self.input_shape = input_shape
if isinstance(layers[-1], Softmax):
layers[-1].name = 'probs'
layers[-2].name = 'logits'
else:
layers[-1].name = 'logits'
for i, layer in enumerate(self.layers):
if hasattr(layer, 'name'):
name = layer.name
else:
name = layer.__class__.__name__ + str(i)
self.layer_names.append(name)
layer.set_input_shape(input_shape, False)
input_shape = layer.get_output_shape()
print(self.layer_names)
def fprop(self, x, reuse, set_ref=False):
states = []
for layer in self.layers:
if set_ref:
layer.ref = x
x = layer.fprop(x, reuse)
assert x is not None
states.append(x)
states = dict(zip(self.get_layer_names(), states))
return states
# special distilled model class consisting of a teacher and a student model
class distilledModel(Model):
"""
    A distilled model consisting of a teacher network and a student network.
"""
def __init__(self, teacher_layers, student_layers, input_shape):
super(distilledModel, self).__init__()
self.layer_names = []
self.teacher_layers = teacher_layers
self.student_layers = student_layers
self.input_shape = input_shape
original_input_shape = input_shape
if isinstance(teacher_layers[-1], Softmax):
teacher_layers[-1].name = 'teacher_probs'
teacher_layers[-2].name = 'teacher_logits'
else:
            teacher_layers[-1].name = 'teacher_logits'
for i, layer in enumerate(self.teacher_layers):
if hasattr(layer, 'name'):
name = layer.name
else:
name = layer.__class__.__name__ + str(i)
self.layer_names.append(name)
layer.set_input_shape(input_shape, False)
input_shape = layer.get_output_shape()
input_shape = original_input_shape
if isinstance(student_layers[-1], Softmax):
student_layers[-1].name = 'probs'
student_layers[-2].name = 'logits'
else:
student_layers[-1].name = 'logits'
for i, layer in enumerate(self.student_layers):
if hasattr(layer, 'name'):
name = layer.name
else:
name = layer.__class__.__name__ + str(i)
self.layer_names.append(name)
layer.set_input_shape(input_shape, False)
input_shape = layer.get_output_shape()
print(self.layer_names)
def fprop(self, x, reuse, set_ref=False):
states = []
original_x = x
for layer in self.teacher_layers:
if set_ref:
layer.ref = x
x = layer.fprop(x, reuse)
assert x is not None
states.append(x)
x = original_x
num_student_layers = len(self.student_layers)
layer_count = 0
for layer in self.student_layers:
if set_ref:
layer.ref = x
x = layer.fprop(x, reuse)
assert x is not None
states.append(x)
layer_count = layer_count + 1
states = dict(zip(self.get_layer_names(), states))
return states
# ensembleThreeModel class built on the Model class; forms an ensemble of three models
class ensembleThreeModel(Model):
"""
    An ensemble of three models whose predictions are combined by majority voting.
"""
    def __init__(self, layers1, layers2, layers3, input_shape, num_classes):  # layers1/layers2/layers3: layers of the three member models
super(ensembleThreeModel, self).__init__()
self.layer_names = []
self.layers1 = layers1
self.layers2 = layers2
self.layers3 = layers3
self.input_shape = input_shape
self.num_classes = num_classes
original_input_shape = input_shape
if isinstance(layers1[-1], Softmax):
layers1[-1].name = 'probs'
layers1[-2].name = 'logits'
else:
layers1[-1].name = 'logits'
# First model
for i, layer in enumerate(self.layers1):
if hasattr(layer, 'name'):
if layer.name == 'probs' or layer.name == 'logits':
name = layer.name
else:
name = 'Model1_' + layer.name
else:
name = 'Model1_' + layer.__class__.__name__ + str(i)
self.layer_names.append(name)
layer.set_input_shape(input_shape, False)
input_shape = layer.get_output_shape()
input_shape = original_input_shape
# Second model
if isinstance(layers2[-1], Softmax):
layers2[-1].name = 'probs'
layers2[-2].name = 'logits'
else:
layers2[-1].name = 'logits'
for i, layer in enumerate(self.layers2):
if hasattr(layer, 'name'):
if layer.name == 'probs' or layer.name == 'logits':
name = layer.name
else:
name = 'Model2_' + layer.name
else:
name = 'Model2_' + layer.__class__.__name__ + str(i)
self.layer_names.append(name)
layer.set_input_shape(input_shape, False)
input_shape = layer.get_output_shape()
input_shape = original_input_shape
# Third model
if isinstance(layers3[-1], Softmax):
layers3[-1].name = 'probs'
layers3[-2].name = 'logits'
else:
layers3[-1].name = 'logits'
for i, layer in enumerate(self.layers3):
if hasattr(layer, 'name'):
if layer.name == 'probs' or layer.name == 'logits':
name = layer.name
else:
name = 'Model3_' + layer.name
else:
name = 'Model3_' + layer.__class__.__name__ + str(i)
self.layer_names.append(name)
layer.set_input_shape(input_shape, False)
input_shape = layer.get_output_shape()
self.layer_names.append('combined_features')
self.layer_names.append('combined_logits')
combined_layer_name = 'combined' ## Gives the final class prediction based on max voting
self.layer_names.append(combined_layer_name)
combinedCorrectProb_layer_name = 'combinedAvgCorrectProb' ## Gives average probability values of the models that decided the final prediction
self.layer_names.append(combinedCorrectProb_layer_name)
combinedProb_layer_name = 'combinedAvgProb' ## Gives average probability values of all the models
self.layer_names.append(combinedProb_layer_name)
print(self.layer_names)
def fprop(self, x, reuse, set_ref=False):
states = []
original_x = x
for layer in self.layers1:
if set_ref:
layer.ref = x
x = layer.fprop(x, reuse)
assert x is not None
states.append(x)
output1 = states[-1]
features1 = states[-3]
x = original_x
for layer in self.layers2:
if set_ref:
layer.ref = x
x = layer.fprop(x, reuse)
assert x is not None
states.append(x)
features2 = states[-3]
output2 = states[-1]
x = original_x
for layer in self.layers3:
if set_ref:
layer.ref = x
x = layer.fprop(x, reuse)
assert x is not None
states.append(x)
output3 = states[-1]
features3 = states[-3]
states.append(tf.stack((features1, features2, features3), 1))
states.append(tf.stack((output1, output2, output3), 1))
# Find class predictions with each model
pred1 = tf.argmax(output1, axis=-1)
pred2 = tf.argmax(output2, axis=-1)
pred3 = tf.argmax(output3, axis=-1)
comb_pred = tf.stack([pred1, pred2, pred3], axis=1)
comb_pred = tf.cast(comb_pred, dtype=tf.int32) # converting to int32 as bincount requires int32
# Find how many times each of the classes are predicted among the three models and identify the max class
initial_imidx = 1
        binarray = tf.bincount(comb_pred[0], minlength=self.num_classes)  # initial bincount: counts occurrences of each class index (0..num_classes-1) in the 1-D vote vector (see the NumPy voting sketch after this class)
max_class = tf.argmax(binarray, axis=-1)
count_max = tf.gather(binarray, max_class) # max vote count for a class
        rand_idx = np.random.random_integers(3)  # unused in this implementation
value = tf.cond(tf.less(count_max, 2), lambda: pred3[0], lambda: max_class)
in_class_array = tf.fill([1], value)
## Added below to allow better gradient calculation for max voted model
in_avgCorrectprob = tf.cond(tf.equal(value, pred3[0]), lambda: output3[0], lambda: tf.zeros_like(output3[0])) # add pred3 if it affected the final decision
in_avgCorrectprob = tf.cond(tf.equal(value, pred2[0]), lambda: tf.add(output2[0], in_avgCorrectprob), lambda: in_avgCorrectprob) # add pred2 if it affected the final decision
        in_avgCorrectprob = tf.cond(tf.equal(value, pred1[0]), lambda: tf.add(output1[0], in_avgCorrectprob), lambda: in_avgCorrectprob) # add pred1 if it affected the final decision
in_avgCorrectprob_array = tf.expand_dims(tf.div(in_avgCorrectprob, tf.cast(count_max, dtype=tf.float32)), 0)
#condition check: when true the loop body executes
def idx_loop_condition(class_array, avgCorrectprob_array, im_idx):
return tf.less(im_idx, tf.shape(pred1)[0])
#loop body to calculate the max voted class for each image
def idx_loop_body(class_array, avgCorrectprob_array, im_idx):
            binarray_new = tf.bincount(comb_pred[im_idx], minlength=self.num_classes)  # counts occurrences of each class index (0..num_classes-1) in the 1-D vote vector
max_class = tf.argmax(binarray_new, axis=-1)
count_max = tf.gather(binarray_new, max_class) # max vote count for a class
            rand_idx = np.random.random_integers(3)  # unused in this implementation
value = tf.cond(tf.less(count_max, 2), lambda: pred3[im_idx], lambda: max_class)# If the max vote is less than 2, take the prediction of the full precision model
new_array = tf.fill([1], value)
class_array = tf.concat([class_array, new_array], 0)
## Added below to allow better gradient calculation for max voted model
avgCorrectprob = tf.cond(tf.equal(value, pred3[im_idx]), lambda: output3[im_idx], lambda: tf.zeros_like(output3[im_idx])) # add pred3 if it affected the final decision
avgCorrectprob = tf.cond(tf.equal(value, pred2[im_idx]), lambda: tf.add(output2[im_idx], avgCorrectprob), lambda: avgCorrectprob) # add pred2 if it affected the final decision
            avgCorrectprob = tf.cond(tf.equal(value, pred1[im_idx]), lambda: tf.add(output1[im_idx], avgCorrectprob), lambda: avgCorrectprob) # add pred1 if it affected the final decision
avgCorrectprob = tf.expand_dims(tf.div(avgCorrectprob, tf.cast(count_max, dtype=tf.float32)), 0)
avgCorrectprob_array = tf.concat([avgCorrectprob_array, avgCorrectprob], 0)
return (class_array, avgCorrectprob_array, im_idx+1)
res = tf.while_loop(
cond=idx_loop_condition,
body=idx_loop_body,
loop_vars=[in_class_array, in_avgCorrectprob_array, initial_imidx],
shape_invariants=[tf.TensorShape([None]), tf.TensorShape([None, self.num_classes]), tf.TensorShape([])], #add shape invariant saying that the first dimension of in_class_array changes and is thus None
)
        pred_output = tf.cast(res[0], dtype=tf.int64)  # max-voted class prediction for each image
states.append(pred_output)
        avgCorrectprob_output = res[1]  # averaged probabilities of the models that agreed with the final prediction, per image
states.append(avgCorrectprob_output)
avgprob = tf.div(tf.add_n([output2, output1, output3]), tf.cast(3, dtype=tf.float32)) # Average probability across all models
states.append(avgprob)
states = dict(zip(self.get_layer_names(), states))
return states
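# ---------------------------------------------------------------------------
# Sketch (NumPy, illustration only) of the voting rule that
# ensembleThreeModel.fprop implements above with tf.bincount / tf.while_loop:
# each image takes the class predicted by at least two of the three models;
# if all three disagree, the prediction of the third (full-precision) model
# is used. Function and argument names are hypothetical.
def _majority_vote_sketch(pred1, pred2, pred3, num_classes):
    out = np.empty_like(pred3)
    for i, votes in enumerate(zip(pred1, pred2, pred3)):
        counts = np.bincount(votes, minlength=num_classes)
        max_class = counts.argmax()
        out[i] = max_class if counts[max_class] >= 2 else pred3[i]
    return out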
class Layer(object):
def get_output_shape(self):
return self.output_shape
class SimpleLinear(Layer):
def __init__(self, num_hid):
self.num_hid = num_hid
def set_input_shape(self, input_shape, reuse):
batch_size, dim = input_shape
self.input_shape = [batch_size, dim]
self.output_shape = [batch_size, self.num_hid]
init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
keep_dims=True))
self.W = tf.Variable(init)
self.b = tf.Variable(np.zeros((self.num_hid,)).astype('float32'))
def fprop(self, x, reuse):
return tf.matmul(x, self.W) + self.b
class Linear(Layer):
def __init__(self, num_hid, detail, useBias=False):
self.__dict__.update(locals())
# self.num_hid = num_hid
def set_input_shape(self, input_shape, reuse):
# with tf.variable_scope(self.scope_name+ 'init', reuse): # this works
# with black box, but now can't load checkpoints from wb
# this works with white-box
with tf.variable_scope(self.detail + self.name + '_init', reuse):
batch_size, dim = input_shape
self.input_shape = [batch_size, dim]
self.output_shape = [batch_size, self.num_hid]
if self.useBias:
self.bias_shape = self.num_hid
init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
keep_dims=True))
self.W = tf.get_variable(
"W", initializer=init)
W_summ = tf.summary.histogram('W', values=self.W)
if self.useBias:
bias_init = tf.zeros(self.bias_shape)
self.bias =tf.get_variable("b", initializer= bias_init)
def fprop(self, x, reuse):
# with tf.variable_scope(self.scope_name + '_fprop', reuse):
# this works with white-box
with tf.variable_scope(self.detail + self.name + '_fprop', reuse):
x = tf.matmul(x, self.W) # + self.b
if self.useBias:
x = tf.nn.bias_add(tf.contrib.layers.flatten(x), tf.reshape(self.bias, [-1]))
a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
a_summ = tf.summary.histogram('a', values=x)
a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
return x
class HiddenLinear(Layer):
def __init__(self, num_hid, scope_name, useBias=False):
self.__dict__.update(locals())
def set_input_shape(self, input_shape, reuse):
with tf.variable_scope(self.scope_name+ 'init', reuse):
batch_size, dim = input_shape
self.input_shape = [batch_size, dim]
self.output_shape = [batch_size, self.num_hid]
if self.useBias:
self.bias_shape = self.num_hid
init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
keep_dims=True))
self.W = tf.get_variable(
"W", initializer=init)
if self.useBias:
bias_init = tf.zeros(self.bias_shape)
self.bias =tf.get_variable("b", initializer= bias_init)
W_summ = tf.summary.histogram('W', values=self.W)
def fprop(self, x, reuse):
with tf.variable_scope(self.scope_name + '_fprop', reuse):
x = tf.matmul(x, self.W)
if self.useBias:
x = tf.nn.bias_add(tf.contrib.layers.flatten(x), tf.reshape(self.bias, [-1]))
a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
a_summ = tf.summary.histogram('a', values=x)
a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
return x
class HiddenLinear_lowprecision(Layer):
# def __init__(self, num_hid, scope_name):
def __init__(self, wbits, abits, num_hid, scope_name, useBias=False):
self.__dict__.update(locals())
def quantize(self, x, k): ## k= No. of quantized bits
n = float(2**k-1) ## Max value representable with k bits
@tf.custom_gradient ## Can be used to define a custom gradient function
def _quantize(x):
            return tf.round(x * n) / n, lambda dy: dy  # second return value is the gradient function: identity (straight-through estimator)
return _quantize(x)
def quantizeWt(self, x):
x = tf.tanh(x) ## Normalizing weights to [-1, 1]
x = x/tf.reduce_max(abs(x))*0.5 + 0.5 ## Normalizing weights to [0, 1]
        return 2 * self.quantize(x, self.wbits) - 1  ## Map back to [-1, 1] after quantizing (see the NumPy quantization sketch after this class)
def quantizeAct(self, x):
        x = tf.clip_by_value(x, 0, 1.0)  ## Clip activations to [0, 1] (as in the nonlin(x) function of alexnet-dorefa.py)
return self.quantize(x, self.abits)
def set_input_shape(self, input_shape, reuse):
with tf.variable_scope(self.scope_name+ 'init', reuse): # this works
# with black box, but now can't load checkpoints from wb
# this works with white-box
# with tf.variable_scope(self.detail + self.name + '_init', reuse):
batch_size, dim = input_shape
self.input_shape = [batch_size, dim]
self.output_shape = [batch_size, self.num_hid]
if self.useBias:
self.bias_shape = self.num_hid
init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
keep_dims=True))
self.W = tf.get_variable(
"W", initializer=init)
if (self.wbits < 32):
self.W = self.quantizeWt(self.W)
if self.useBias:
bias_init = tf.zeros(self.bias_shape)
self.bias =tf.get_variable("b", initializer= bias_init)
W_summ = tf.summary.histogram('W', values=self.W)
def fprop(self, x, reuse):
with tf.variable_scope(self.scope_name + '_fprop', reuse):
# this works with white-box
# with tf.variable_scope(self.detail + self.name + '_fprop', reuse):
if self.abits < 32:
x = self.quantizeAct(x)
x = tf.matmul(x, self.W) # + self.b
if self.useBias:
x = tf.nn.bias_add(tf.contrib.layers.flatten(x), tf.reshape(self.bias, [-1]))
a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
a_summ = tf.summary.histogram('a', values=x)
a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
return x
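# The quantizeWt/quantizeAct methods above follow the DoReFa-Net recipe:
# weights are squashed with tanh, rescaled to [0, 1], quantized to k bits and
# mapped back to [-1, 1]; activations are clipped to [0, 1] and quantized.
# The rounding step's gradient is replaced by the identity (a straight-through
# estimator). A minimal standalone sketch of that recipe, for illustration
# only (assuming `tf` is the TF1 module imported at the top of this file; the
# layer methods above are what the models actually use):
def _dorefa_quantize_sketch(x, k):
    """Quantize a tensor with values in [0, 1] to k bits, identity gradient."""
    n = float(2 ** k - 1)
    @tf.custom_gradient
    def _q(v):
        # forward: round to the nearest of 2**k - 1 levels; backward: pass dy through
        return tf.round(v * n) / n, lambda dy: dy
    return _q(x)
def _dorefa_quantize_weights_sketch(w, wbits):
    """Map real-valued weights to wbits-bit values in [-1, 1] (mirrors quantizeWt)."""
    w = tf.tanh(w)                                # squash to (-1, 1)
    w = w / tf.reduce_max(tf.abs(w)) * 0.5 + 0.5  # rescale to [0, 1]
    return 2 * _dorefa_quantize_sketch(w, wbits) - 1  # quantize, map back to [-1, 1]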
class Conv2DRand(Layer):
def __init__(self, output_channels, kernel_shape, strides, padding, phase, scope_name):
self.__dict__.update(locals())
self.G = tf.get_default_graph()
del self.self
def quantize_rand(self, x, dist):
with self.G.gradient_override_map({"Sign": "QuantizeGrad"}):
return 2 * dist(probs=hard_sigmoid(x)).sample() - 1
def quantize(self, x):
with self.G.gradient_override_map({"Sign": "QuantizeGrad"}):
return tf.sign(x)
def set_input_shape(self, input_shape, reuse):
batch_size, rows, cols, input_channels = input_shape
kernel_shape = tuple(self.kernel_shape) + (input_channels,
self.output_channels)
assert len(kernel_shape) == 4
assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
with tf.variable_scope(self.scope_name + '_init', reuse=reuse):
init = tf.truncated_normal(
kernel_shape, stddev=0.2, dtype=tf.float32)
self.kernels = tf.get_variable("k", initializer=init)
k_summ = tf.summary.histogram(
name="k", values=self.kernels)
from tensorflow.contrib.distributions import MultivariateNormalDiag
with self.G.gradient_override_map({"MultivariateNormalDiag": "QuantizeGrad"}):
self.kernels = MultivariateNormalDiag(
loc=self.kernels).sample()
k_rand_summ = tf.summary.histogram(
name="k_rand", values=self.kernels)
orig_input_batch_size = input_shape[0]
input_shape = list(input_shape)
input_shape[0] = 1
dummy_batch = tf.zeros(input_shape)
dummy_output = self.fprop(dummy_batch, False)
output_shape = [int(e) for e in dummy_output.get_shape()]
output_shape[0] = 1
self.output_shape = tuple(output_shape)
def fprop(self, x, reuse):
# need variable_scope here because the batch_norm layer creates
# variables internally
with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
x = tf.nn.conv2d(x, self.kernels, (1,) +
tuple(self.strides) + (1,), self.padding)
a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
a_summ = tf.summary.histogram('a', values=x)
a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
return x
class Conv2D(Layer):
def __init__(self, output_channels, kernel_shape, strides, padding, phase, scope_name, useBias=False):
self.__dict__.update(locals())
self.G = tf.get_default_graph()
del self.self
def quantize(self, x):
with self.G.gradient_override_map({"Sign": "QuantizeGrad"}):
return tf.sign(x)
def set_input_shape(self, input_shape, reuse):
batch_size, rows, cols, input_channels = input_shape
kernel_shape = tuple(self.kernel_shape) + (input_channels,
self.output_channels)
assert len(kernel_shape) == 4
assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
with tf.variable_scope(self.scope_name + '_init', reuse=reuse):
init = tf.truncated_normal(
kernel_shape, stddev=0.1, dtype=tf.float32)
init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init),
axis=(0, 1, 2)))
self.kernels = tf.get_variable("k", initializer=init)
k_summ = tf.summary.histogram(
name="k", values=self.kernels)
orig_input_batch_size = input_shape[0]
input_shape = list(input_shape)
input_shape[0] = 1
dummy_batch = tf.zeros(input_shape)
# Set output shape using fprop without bias if useBias set
if self.useBias:
dummy_output = self.fprop_withoutbias(dummy_batch, False)
else: #--default below
dummy_output = self.fprop(dummy_batch, False)
output_shape = [int(e) for e in dummy_output.get_shape()]
output_shape[0] = 1
self.output_shape = tuple(output_shape)
if self.useBias:
self.bias_shape = self.output_shape
bias_init = tf.zeros(self.bias_shape)
self.bias = tf.get_variable("b", initializer=bias_init)
def fprop(self, x, reuse):
# need variable_scope here because the batch_norm layer creates
# variables internally
with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
x = tf.nn.conv2d(x, self.kernels, (1,) +
tuple(self.strides) + (1,), self.padding)
if self.useBias:
# The bias here has one entry per output activation (its shape is the full
# output feature map), so the output is flattened, bias-added elementwise,
# and reshaped back to the convolution output shape.
output_shape = tf.shape(x) # Output shape before flattening for the bias add
x = tf.nn.bias_add(tf.contrib.layers.flatten(x), tf.reshape(self.bias, [-1]))
x = tf.reshape(x, output_shape)
a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
a_summ = tf.summary.histogram('a', values=x)
a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
return x
# special function without bias to get output shape
def fprop_withoutbias(self, x, reuse):
# need variable_scope here because the batch_norm layer creates
# variables internally
with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
x = tf.nn.conv2d(x, self.kernels, (1,) +
tuple(self.strides) + (1,), self.padding)
a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
a_summ = tf.summary.histogram('a', values=x)
a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
return x
class Conv2D_lowprecision(Layer):
def __init__(self, wbits, abits, output_channels, kernel_shape, strides, padding, phase, scope_name, seed=1, useBatchNorm=False, stocRound=False, useBias=False):
self.__dict__.update(locals())
self.G = tf.get_default_graph()
del self.self
def quantize(self, x, k): ## k= No. of quantized bits
n = float(2**k-1) ## Max value representable with k bits
@tf.custom_gradient ## Can be used to define a custom gradient function
def _quantize(x):
if self.stocRound: # If stochastic rounding is set
xn_int = tf.floor(x*n) # Get integer part
xn_frac = tf.subtract(x*n, xn_int) # Get fractional part
xn_frac_rand = tf.distributions.Bernoulli(probs=xn_frac, dtype=tf.float32).sample() # Get random number from bernoulli distribution with prob=fractional part value
x_q = (xn_int + xn_frac_rand)/n
return x_q, lambda dy: dy # Second part is the function evaluated during gradient, identity function
else:
return tf.round(x*n)/n, lambda dy: dy # Second part is the function evaluated during gradient, identity function
return _quantize(x)
def quantizeWt(self, x):
x = tf.tanh(x) ## Normalizing weights to [-1, 1]
x = x/tf.reduce_max(abs(x))*0.5 + 0.5 ## Normalizing weights to [0, 1]
return 2*self.quantize(x, self.wbits) - 1 ## Map the quantized [0, 1] values back to [-1, 1]
def quantizeAct(self, x):
x = tf.clip_by_value(x, 0, 1.0) ## Normalizing activations to [0, 1] --> performed in nonlin(x) function of alexnet-dorefa.py
return self.quantize(x, self.abits)
def set_input_shape(self, input_shape, reuse):
batch_size, rows, cols, input_channels = input_shape
kernel_shape = tuple(self.kernel_shape) + (input_channels,
self.output_channels)
assert len(kernel_shape) == 4
assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
with tf.variable_scope(self.scope_name + '_init', reuse=reuse):
if self.wbits < 32:
init = tf.truncated_normal(
kernel_shape, stddev=0.2, dtype=tf.float32)
init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init),
axis=(0, 1, 2)))
self.kernels = tf.get_variable("k", initializer=init)
else:
init = tf.truncated_normal(
kernel_shape, stddev=0.1, dtype=tf.float32)
init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init),
axis=(0, 1, 2)))
self.kernels = tf.get_variable("k", initializer=init)
if (self.wbits < 32): ## Quantize if no. of bits less than 32
self.kernels = self.quantizeWt(self.kernels)
k_bin_summ = tf.summary.histogram(
name="k_bin", values=self.kernels)
k_summ = tf.summary.histogram(
name="k", values=self.kernels)
orig_input_batch_size = input_shape[0]
input_shape = list(input_shape)
input_shape[0] = 1
dummy_batch = tf.zeros(input_shape)
# Set output shape using fprop without bias if useBias set
if self.useBias:
dummy_output = self.fprop_withoutbias(dummy_batch, False)
else: #--default below
dummy_output = self.fprop(dummy_batch, False)
output_shape = [int(e) for e in dummy_output.get_shape()]
output_shape[0] = 1
self.output_shape = tuple(output_shape)
# setting bias shape
if self.useBias:
self.bias_shape = self.output_shape
bias_init = tf.zeros(self.bias_shape)
self.bias = tf.get_variable("b", initializer=bias_init)
if self.wbits < 32: ## Quantize if no. of bits less than 32
self.bias = self.quantizeWt(self.bias)
def fprop(self, x, reuse):
# need variable_scope here because the batch_norm layer creates
# variables internally
with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
if self.abits < 32:
if self.useBatchNorm: ## Specifies whether we want to use Batch Normalization or not
x = tf.contrib.layers.batch_norm(
x, epsilon=BN_EPSILON, is_training=self.phase,
reuse=reuse, scope=scope)
x = self.quantizeAct(x)
x = tf.nn.conv2d(x, self.kernels, (1,) +
tuple(self.strides) + (1,), self.padding)
if self.useBias:
output_shape = tf.shape(x) # Checking output shape before bias
x = tf.nn.bias_add(tf.contrib.layers.flatten(x), tf.reshape(self.bias, [-1]))
x = tf.reshape(x, output_shape)
a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
a_summ = tf.summary.histogram('a', values=x)
a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
return x
# special function without bias to get output shape
def fprop_withoutbias(self, x, reuse):
# need variable_scope here because the batch_norm layer creates
# variables internally
with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
x = tf.nn.conv2d(x, self.kernels, (1,) +
tuple(self.strides) + (1,), self.padding)
a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
a_summ = tf.summary.histogram('a', values=x)
a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
return x
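# When stocRound is set, the quantizer above rounds stochastically: x*n is
# floored and then incremented with probability equal to its fractional part,
# so the quantized value is unbiased (its expectation equals x*n). A compact
# sketch of the same idea, assuming the TF1 `tf` module imported in this file:
def _stochastic_round_sketch(x, k):
    """Unbiased k-bit quantization of x in [0, 1] via Bernoulli rounding."""
    n = float(2 ** k - 1)
    xn = x * n
    floor_part = tf.floor(xn)            # integer part
    frac_part = xn - floor_part          # fractional part in [0, 1)
    bump = tf.distributions.Bernoulli(probs=frac_part, dtype=tf.float32).sample()
    return (floor_part + bump) / n       # E[result] == x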
class Conv2DGroup(Layer):
def __init__(self, output_channels, kernel_shape, strides, padding, phase, scope_name, useBias=False):
self.__dict__.update(locals())
self.G = tf.get_default_graph()
del self.self
def quantize(self, x):
with self.G.gradient_override_map({"Sign": "QuantizeGrad"}):
return tf.sign(x)
def set_input_shape(self, input_shape, reuse):
self.input_shape = input_shape
batch_size, rows, cols, input_channels = input_shape
self.input_channels = input_channels
kernel_shape = tuple(self.kernel_shape) + (int(input_channels/2),
self.output_channels) # as it is 2 groups, input channel dimension is halved
assert len(kernel_shape) == 4
assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
with tf.variable_scope(self.scope_name + '_init', reuse=reuse):
init = tf.variance_scaling_initializer(scale=2., dtype=tf.float32)
self.kernels = tf.get_variable("k", shape=kernel_shape, initializer=init)
k_summ = tf.summary.histogram(
name="k", values=self.kernels)
orig_input_batch_size = input_shape[0]
input_shape = list(input_shape)
input_shape[0] = 1
dummy_batch = tf.zeros(input_shape)
if self.useBias:
dummy_output = self.fprop_withoutbias(dummy_batch, False)
else: #--default below
dummy_output = self.fprop(dummy_batch, False)
output_shape = [int(e) for e in dummy_output.get_shape()]
output_shape[0] = 1
self.output_shape = tuple(output_shape)
# setting bias shape
self.bias_shape = self.output_shape
# initializing bias
if self.useBias:
bias_init = tf.zeros(self.bias_shape)
self.bias = tf.get_variable("b", initializer=bias_init)
def fprop(self, x, reuse):
# need variable_scope here because the batch_norm layer creates
# variables internally
with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
### groupwise convolution
x1 = tf.nn.conv2d(tf.slice(x, [0, 0, 0, 0], [-1, -1, -1, tf.cast(self.input_channels/2, tf.int32)]), tf.slice(self.kernels, [0, 0, 0, 0], [-1, -1, -1, tf.cast(self.output_channels/2, tf.int32)]), (1,) + tuple(self.strides) + (1,), self.padding)
x2 = tf.nn.conv2d(tf.slice(x, [0, 0, 0, tf.cast(self.input_channels/2, tf.int32)], [-1, -1, -1, -1]), tf.slice(self.kernels, [0, 0, 0, (tf.cast(self.output_channels/2, tf.int32))], [-1, -1, -1, -1]), (1,) + tuple(self.strides) + (1,), self.padding)
x = tf.concat([x1, x2], 3)
# adding bias
if self.useBias:
output_shape = tf.shape(x) # Checking output shape before bias
x = tf.nn.bias_add(tf.contrib.layers.flatten(x), tf.reshape(self.bias, [-1]))
if self.padding=="SAME": # Padding same means input and output size equal
x = tf.reshape(x, output_shape)
a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
a_summ = tf.summary.histogram('a', values=x)
a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
return x
# Special function without bias to get output shape
def fprop_withoutbias(self, x, reuse):
# need variable_scope here because the batch_norm layer creates
# variables internally
with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
### groupwise convolution
x1 = tf.nn.conv2d(tf.slice(x, [0, 0, 0, 0], [-1, -1, -1, tf.cast(self.input_channels/2, tf.int32)]), tf.slice(self.kernels, [0, 0, 0, 0], [-1, -1, -1, tf.cast(self.output_channels/2, tf.int32)]), (1,) + tuple(self.strides) + (1,), self.padding)
x2 = tf.nn.conv2d(tf.slice(x, [0, 0, 0, tf.cast(self.input_channels/2, tf.int32)], [-1, -1, -1, -1]), tf.slice(self.kernels, [0, 0, 0, (tf.cast(self.output_channels/2, tf.int32))], [-1, -1, -1, -1]), (1,) + tuple(self.strides) + (1,), self.padding)
x = tf.concat([x1, x2], 3)
a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
a_summ = tf.summary.histogram('a', values=x)
a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
return x
class Conv2DGroup_lowprecision(Layer):
def __init__(self, wbits, abits, output_channels, kernel_shape, strides, padding, phase, scope_name, seed=1, useBatchNorm=False, stocRound=False, useBias=False):
self.__dict__.update(locals())
self.G = tf.get_default_graph()
del self.self
def quantize(self, x, k): ## k= No. of quantized bits
n = float(2**k-1) ## Max value representable with k bits
@tf.custom_gradient ## Can be used to define a custom gradient function
def _quantize(x):
if self.stocRound: # If stochastic rounding is set
xn_int = tf.floor(x*n) # Get integer part
xn_frac = tf.subtract(x*n, xn_int) # Get fractional part
xn_frac_rand = tf.distributions.Bernoulli(probs=xn_frac, dtype=tf.float32).sample() # Get random number from bernoulli distribution with prob=fractional part value
x_q = (xn_int + xn_frac_rand)/n
return x_q, lambda dy: dy # Second part is the function evaluated during gradient, identity function
else:
return tf.round(x*n)/n, lambda dy: dy # Second part is the function evaluated during gradient, identity function
return _quantize(x)
def quantizeWt(self, x):
x = tf.tanh(x) ## Normalizing weights to [-1, 1]
x = x/tf.reduce_max(abs(x))*0.5 + 0.5 ## Normalizing weights to [0, 1]
return 2*self.quantize(x, self.wbits) - 1 ## Map the quantized [0, 1] values back to [-1, 1]
def quantizeAct(self, x):
x = tf.clip_by_value(x, 0, 1.0) ## Normalizing activations to [0, 1] --> performed in nonlin(x) function of alexnet-dorefa.py
return self.quantize(x, self.abits)
def set_input_shape(self, input_shape, reuse):
self.input_shape = input_shape
batch_size, rows, cols, input_channels = input_shape
self.input_channels = input_channels
kernel_shape = tuple(self.kernel_shape) + (int(input_channels/2),
self.output_channels) # as it is 2 groups, input channel dimension is halved
assert len(kernel_shape) == 4
assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
with tf.variable_scope(self.scope_name + '_init', reuse=reuse):
if self.wbits < 32:
init = tf.truncated_normal(
kernel_shape, stddev=0.2, dtype=tf.float32)
self.kernels = tf.get_variable("k", initializer=init)
else:
init = tf.truncated_normal(
kernel_shape, stddev=0.1, dtype=tf.float32)
init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init),
axis=(0, 1, 2)))
self.kernels = tf.get_variable("k", initializer=init)
if (self.wbits < 32): ## Quantize if no. of bits less than 32
self.kernels = self.quantizeWt(self.kernels)
k_bin_summ = tf.summary.histogram(
name="k_bin", values=self.kernels)
k_summ = tf.summary.histogram(
name="k", values=self.kernels)
orig_input_batch_size = input_shape[0]
input_shape = list(input_shape)
input_shape[0] = 1
dummy_batch = tf.zeros(input_shape)
# Set output shape using fprop without bias if useBias set
if self.useBias:
dummy_output = self.fprop_withoutbias(dummy_batch, False)
else: #--default below
dummy_output = self.fprop(dummy_batch, False)
output_shape = [int(e) for e in dummy_output.get_shape()]
output_shape[0] = 1
self.output_shape = tuple(output_shape)
self.bias_shape = self.output_shape
if self.useBias:
bias_init = tf.zeros(self.bias_shape)
self.bias = tf.get_variable("b", initializer=bias_init)
if self.wbits < 32: ## Quantize if no. of bits less than 32
self.bias = self.quantizeWt(self.bias)
def fprop(self, x, reuse):
with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
if self.abits < 32:
if self.useBatchNorm: ## Specifies whether we want to use Batch Normalization or not
x = tf.contrib.layers.batch_norm(
x, epsilon=BN_EPSILON, is_training=self.phase,
reuse=reuse, scope=scope)
x = self.quantizeAct(x)
### groupwise convolution
x1 = tf.nn.conv2d(tf.slice(x, [0, 0, 0, 0], [-1, -1, -1, tf.cast(self.input_channels/2, tf.int32)]), tf.slice(self.kernels, [0, 0, 0, 0], [-1, -1, -1, tf.cast(self.output_channels/2, tf.int32)]), (1,) + tuple(self.strides) + (1,), self.padding)
x2 = tf.nn.conv2d(tf.slice(x, [0, 0, 0, tf.cast(self.input_channels/2, tf.int32)], [-1, -1, -1, -1]), tf.slice(self.kernels, [0, 0, 0, (tf.cast(self.output_channels/2, tf.int32))], [-1, -1, -1, -1]), (1,) + tuple(self.strides) + (1,), self.padding)
x = tf.concat([x1, x2], 3)
if self.useBias:
output_shape = tf.shape(x) # Checking output shape before bias
x = tf.nn.bias_add(tf.contrib.layers.flatten(x), tf.reshape(self.bias, [-1]))
if self.padding=="SAME": # Padding same means input and output size equal
x = tf.reshape(x, output_shape)
a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
a_summ = tf.summary.histogram('a', values=x)
a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
return x
# Special function without bias to get output shape
def fprop_withoutbias(self, x, reuse):
with tf.variable_scope(self.scope_name + '_fprop', reuse=reuse) as scope:
if self.abits < 32:
if self.useBatchNorm: ## Specifies whether we want to use Batch Normalization or not
x = tf.contrib.layers.batch_norm(
x, epsilon=BN_EPSILON, is_training=self.phase,
reuse=reuse, scope=scope)
x = self.quantizeAct(x)
### groupwise convolution
x1 = tf.nn.conv2d(tf.slice(x, [0, 0, 0, 0], [-1, -1, -1, tf.cast(self.input_channels/2, tf.int32)]), tf.slice(self.kernels, [0, 0, 0, 0], [-1, -1, -1, tf.cast(self.output_channels/2, tf.int32)]), (1,) + tuple(self.strides) + (1,), self.padding)
x2 = tf.nn.conv2d(tf.slice(x, [0, 0, 0, tf.cast(self.input_channels/2, tf.int32)], [-1, -1, -1, -1]), tf.slice(self.kernels, [0, 0, 0, (tf.cast(self.output_channels/2, tf.int32))], [-1, -1, -1, -1]), (1,) + tuple(self.strides) + (1,), self.padding)
x = tf.concat([x1, x2], 3)
a_u, a_v = tf.nn.moments(tf.abs(x), axes=[0], keep_dims=False)
a_summ = tf.summary.histogram('a', values=x)
a_u_summ = tf.summary.scalar("a_u", tf.reduce_mean(a_u))
a_v_summ = tf.summary.scalar("a_v", tf.reduce_mean(a_v))
return x
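# Conv2DGroup and Conv2DGroup_lowprecision implement a 2-group convolution (as
# in the original AlexNet): the input channels are split in half, each half is
# convolved with half of the filters, and the two results are concatenated.
# A compact, equivalent sketch using slicing (illustrative only; shapes are
# assumed NHWC with kernels of shape [kh, kw, in_channels // 2, out_channels]):
def _group_conv2d_sketch(x, kernels, strides, padding):
    in_ch = int(x.get_shape()[-1])
    out_ch = int(kernels.get_shape()[-1])
    x1, x2 = x[..., :in_ch // 2], x[..., in_ch // 2:]
    k1, k2 = kernels[..., :out_ch // 2], kernels[..., out_ch // 2:]
    y1 = tf.nn.conv2d(x1, k1, (1,) + tuple(strides) + (1,), padding)
    y2 = tf.nn.conv2d(x2, k2, (1,) + tuple(strides) + (1,), padding)
    return tf.concat([y1, y2], axis=3)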
class MaxPool(Layer):
def __init__ (self, pool_size, strides):
self.pool_size = pool_size
self.strides = strides
def set_input_shape(self, input_shape, reuse):
self.input_shape = input_shape
orig_input_batch_size = input_shape[0]
input_shape = list(input_shape)
input_shape[0] = 1
dummy_batch = tf.zeros(input_shape)
dummy_output = self.fprop(dummy_batch, False)
output_shape = [int(e) for e in dummy_output.get_shape()]
output_shape[0] = 1
self.output_shape = tuple(output_shape)
def fprop(self, x, reuse):
return tf.layers.max_pooling2d(x, self.pool_size, self.strides)
class MaxPoolSame(Layer):
def __init__ (self, pool_size, strides):
self.pool_size = pool_size
self.strides = strides
def set_input_shape(self, input_shape, reuse):
self.input_shape = input_shape
orig_input_batch_size = input_shape[0]
input_shape = list(input_shape)
input_shape[0] = 1
dummy_batch = tf.zeros(input_shape)
dummy_output = self.fprop(dummy_batch, False)
output_shape = [int(e) for e in dummy_output.get_shape()]
output_shape[0] = 1
self.output_shape = tuple(output_shape)
def fprop(self, x, reuse):
return tf.layers.max_pooling2d(x, self.pool_size, self.strides, padding='same')
class AvgPool(Layer):
def __init__ (self, pool_size, strides):
self.pool_size = pool_size
self.strides = strides
def set_input_shape(self, input_shape, reuse):
self.input_shape = input_shape
orig_input_batch_size = input_shape[0]
input_shape = list(input_shape)
input_shape[0] = 1
dummy_batch = tf.zeros(input_shape)
dummy_output = self.fprop(dummy_batch, False)
output_shape = [int(e) for e in dummy_output.get_shape()]
output_shape[0] = 1
self.output_shape = tuple(output_shape)
def fprop(self, x, reuse):
return tf.layers.average_pooling2d(x, self.pool_size, self.strides)
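# Shape-inference note: the conv/pool layers above determine self.output_shape
# by running fprop on a single all-zeros dummy batch and reading the static
# shape of the result, instead of re-deriving the padding/stride arithmetic
# (for a VALID pool that arithmetic would be out = (in - pool) // stride + 1).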
class ReLU(Layer):
def __init__(self):
pass
def set_input_shape(self, shape, reuse):
self.input_shape = shape
self.output_shape = shape
def get_output_shape(self):
return self.output_shape
def fprop(self, x, reuse):
return tf.nn.relu(x)
class SReLU(Layer):
def __init__(self, scope_name):
self.scope_name = scope_name
def set_input_shape(self, shape, reuse):
self.input_shape = shape
self.output_shape = shape
with tf.variable_scope(self.scope_name + '_init', reuse=reuse):
self.activation_scalar = tf.get_variable(
"activation_scalar", initializer=0.05, trainable=True)
def get_output_shape(self):
return self.output_shape
def fprop(self, x, reuse):
with tf.variable_scope(self.scope_name + '_init', reuse=reuse):
return tf.nn.relu(x) * self.activation_scalar
class Softmax(Layer):
def __init__(self, temperature):
self.temperature = temperature
def set_input_shape(self, shape, reuse):
self.input_shape = shape
self.output_shape = shape
def fprop(self, x, reuse):
return tf.nn.softmax(x * self.temperature)
class SoftmaxT1(Layer):
def __init__(self):
pass
def set_input_shape(self, shape, reuse):
self.input_shape = shape
self.output_shape = shape
def fprop(self, x, reuse):
return tf.nn.softmax(x)
class Flatten(Layer):
def __init__(self):
pass
def set_input_shape(self, shape, reuse):
self.input_shape = shape
output_width = 1
for factor in shape[1:]:
output_width *= factor
self.output_width = output_width
self.output_shape = [None, output_width]
def fprop(self, x, reuse):
return tf.reshape(x, [-1, self.output_width])
# Local response Norm layer for AlexNet
class LocalNorm(Layer):
def __init__(self):
self.__dict__.update(locals())
def set_input_shape(self, shape, reuse):
self.input_shape = shape
self.output_shape = shape
def get_output_shape(self):
return self.output_shape
def fprop(self, x, reuse):
x = tf.nn.local_response_normalization(x,
depth_radius=RADIUS,
alpha=ALPHA,
beta=BETA,
bias=BIAS)
return x
# BatchNorm layer for low precision alexnet
class BatchNorm(Layer):
def __init__(self, phase, scope_name, mean=None, variance=None, scale=None, offset=None):
self.__dict__.update(locals())
def set_input_shape(self, shape, reuse):
self.input_shape = shape
self.output_shape = shape
def get_output_shape(self):
return self.output_shape
def fprop(self, x, reuse):
# Batch normalization for the training phase
if (self.mean is None) and (self.variance is None) and (self.scale is None) and (self.offset is None):
with tf.variable_scope(self.scope_name, reuse=tf.AUTO_REUSE): # Named scope (with AUTO_REUSE) so the batch-norm variables can be saved and restored by name
x = tf.layers.batch_normalization(x, training=self.phase)
else:
x = tf.nn.batch_normalization(
x, mean=self.mean, variance=self.variance,
scale=self.scale, offset=self.offset, variance_epsilon=BN_EPSILON)
return x
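# BatchNorm above has two modes: with no precomputed statistics it relies on
# tf.layers.batch_normalization (which maintains moving averages and, during
# training, requires running the ops collected in tf.GraphKeys.UPDATE_OPS);
# when mean/variance/scale/offset are supplied it applies
# tf.nn.batch_normalization directly, which is how frozen statistics can be
# plugged in at evaluation time.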
## dropout layer for alexnet
class DropOut(Layer):
def __init__(self, keep_prob, phase):
self.__dict__.update(locals())
self.G = tf.get_default_graph()
del self.self
def set_input_shape(self, shape, reuse):
self.input_shape = shape
self.output_shape = shape
def get_output_shape(self):
return self.output_shape
def fprop(self, x, reuse):
return tf.cond(self.phase, lambda: tf.nn.dropout(x, self.keep_prob), lambda: tf.identity(x)) # Dropout during training phase but not during test phase
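# All of the classes above follow the same two-method Layer protocol:
# set_input_shape() builds the variables and records output_shape, and fprop()
# applies the layer. A minimal sketch of how a container (such as the MLP
# class defined elsewhere in this file) can drive that protocol -- illustrative
# only, with a hypothetical helper name:
def _chain_layers_sketch(layers, x, input_shape, reuse=False):
    shape = input_shape
    for layer in layers:
        layer.set_input_shape(shape, reuse)   # create variables, record shapes
        shape = layer.output_shape
        x = layer.fprop(x, reuse)             # apply the layer
    return x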
######################### full-precision #########################
def make_basic_cnn(phase, temperature, detail, nb_filters=64, nb_classes=10,
input_shape=(None, 28, 28, 1)):
layers = [Conv2D(nb_filters, (8, 8), (2, 2), "SAME", phase, detail + 'conv1'),
ReLU(),
Conv2D(nb_filters * 2, (6, 6),
(2, 2), "VALID", phase, detail + 'conv2'),
ReLU(),
Conv2D(nb_filters * 2, (5, 5),
(1, 1), "VALID", phase, detail + 'conv3'),
ReLU(),
Flatten(),
Linear(nb_classes, detail),
Softmax(temperature)]
model = MLP(layers, input_shape)
print('Finished making basic cnn')
return model
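# Example of wiring up the factory above (a sketch; `get_probs` is the
# cleverhans-style accessor assumed to be provided by the MLP container):
#   phase = tf.placeholder(tf.bool, name='phase')       # train/eval switch for BatchNorm/DropOut
#   x = tf.placeholder(tf.float32, (None, 28, 28, 1))   # MNIST-shaped input
#   model = make_basic_cnn(phase, temperature=1.0, detail='fp_')
#   preds = model.get_probs(x)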
def make_scaled_rand_cnn(phase, temperature, detail, nb_filters=64, nb_classes=10,
input_shape=(None, 28, 28, 1)):
layers = [Conv2D(nb_filters, (8, 8), (2, 2), "SAME", phase, detail + 'conv1'),
ReLU(),
Conv2D(nb_filters * 2, (6, 6),
(2, 2), "VALID", phase, detail + 'conv2'),
ReLU(),
Conv2DRand(nb_filters * 2, (5, 5),
(1, 1), "VALID", phase, detail + 'conv3'),
SReLU(detail + 'srelu3_fp'),
Flatten(),
Linear(nb_classes, detail),
Softmax(temperature)]
model = MLP(layers, input_shape)
print('Finished making scaled rand cnn')
return model
# distilled model
def make_distilled_cnn(phase, temperature, detail1, detail2, nb_filters=64, nb_classes=10, input_shape=(None, 28, 28, 1)):
# make a full-precision teacher cnn
teacher_layers = [Conv2D(nb_filters, (8, 8),
(2, 2), "SAME", phase, detail1 + 'conv1'),
ReLU(),
Conv2D(nb_filters * 2, (6, 6),
(2, 2), "VALID", phase, detail1 + 'conv2_bin'),
ReLU(),
Conv2D(nb_filters * 2, (5, 5),
(1, 1), "VALID", phase, detail1 + 'conv3_bin'),
ReLU(),
Flatten(),
Linear(nb_classes, detail1),
Softmax(temperature)] # Hard probs (default)
# make a full-precision student cnn with the same architecture
student_layers = [Conv2D(nb_filters, (8, 8),
(2, 2), "SAME", phase, detail2 + 'conv1'),
ReLU(),
Conv2D(nb_filters * 2, (6, 6),
(2, 2), "VALID", phase, detail2 + 'conv2'),
ReLU(),
Conv2D(nb_filters * 2, (5, 5),
(1, 1), "VALID", phase, detail2 + 'conv3'),
ReLU(),
Flatten(),
Linear(nb_classes, detail2),
Softmax(temperature)] # Hard probs (default)
model = distilledModel(teacher_layers, student_layers, input_shape)
print('Finished making a distilled cnn')
return model
################## low precision version of mnist cnn #################
def make_basic_lowprecision_cnn(phase, temperature, detail, wbits, abits, nb_filters=64, nb_classes=10,
input_shape=(None, 28, 28, 1), useBatchNorm=False, stocRound=False):
layers = [Conv2D_lowprecision(wbits, abits, nb_filters, (8, 8),
(2, 2), "SAME", phase, detail + 'conv1', useBatchNorm=useBatchNorm, stocRound=stocRound),
ReLU(),
Conv2D_lowprecision(wbits, abits, nb_filters * 2, (6, 6),
(2, 2), "VALID", phase, detail + 'conv2_bin', useBatchNorm=useBatchNorm, stocRound=stocRound),
ReLU(),
Conv2D_lowprecision(wbits, abits, nb_filters * 2, (5, 5),
(1, 1), "VALID", phase, detail + 'conv3_bin', useBatchNorm=useBatchNorm, stocRound=stocRound),
ReLU(),
Flatten(),
Linear(nb_classes, detail),
Softmax(temperature)]
model = MLP(layers, input_shape)
print('Finished making basic low precision cnn: %d weight bits, %d activation bits' %(wbits, abits))
return model
# Variant of low precision supporting different precisions for different layers
def make_layerwise_lowprecision_cnn(phase, temperature, detail, wbits, abits, nb_filters=64,
nb_classes=10, input_shape=(None, 28, 28, 1),
useBatchNorm=False, stocRound=False):
layers = [Conv2D_lowprecision(wbits[0], abits[0], nb_filters, (8, 8),
(2, 2), "SAME", phase, detail + 'conv1', useBatchNorm=useBatchNorm, stocRound=stocRound),
ReLU(),
Conv2D_lowprecision(wbits[1], abits[1], nb_filters * 2, (6, 6),
(2, 2), "VALID", phase, detail + 'conv2_bin', useBatchNorm=useBatchNorm, stocRound=stocRound),
ReLU(),
Conv2D_lowprecision(wbits[2], abits[2], nb_filters * 2, (5, 5),
(1, 1), "VALID", phase, detail + 'conv3_bin', useBatchNorm=useBatchNorm, stocRound=stocRound),
ReLU(),
Flatten(),
Linear(nb_classes, detail),
Softmax(temperature)]
model = MLP(layers, input_shape)
print('Finished making layerwise low precision cnn: %d %d %d weight bits, %d %d %d activation bits' %(wbits[0], wbits[1], wbits[2], abits[0], abits[1], abits[2]))
return model
################## EMPIR version of mnist cnn #################
def make_ensemble_three_cnn(phase, temperature, detail1, detail2, detail3, wbits1, abits1, wbits2, abits2, nb_filters=64, nb_classes=10, input_shape=(None, 28, 28, 1), useBatchNorm=False):
# make one low precision cnn with wbits precision weights and abits activations
layers1 = [Conv2D_lowprecision(wbits1, abits1, nb_filters, (8, 8),
(2, 2), "SAME", phase, detail1 + 'conv1', useBatchNorm=useBatchNorm),
ReLU(),
Conv2D_lowprecision(wbits1, abits1, nb_filters * 2, (6, 6),
(2, 2), "VALID", phase, detail1 + 'conv2_bin', useBatchNorm=useBatchNorm),
ReLU(),
Conv2D_lowprecision(wbits1, abits1, nb_filters * 2, (5, 5),
(1, 1), "VALID", phase, detail1 + 'conv3_bin', useBatchNorm=useBatchNorm),
ReLU(),
Flatten(),
Linear(nb_classes, detail1),
Softmax(temperature)]
# make another low precision cnn with wbits precision weights and abits activations
layers2 = [Conv2D_lowprecision(wbits2, abits2, nb_filters, (8, 8),
(2, 2), "SAME", phase, detail2 + 'conv1', useBatchNorm=useBatchNorm),
ReLU(),
Conv2D_lowprecision(wbits2, abits2, nb_filters * 2, (6, 6),
(2, 2), "VALID", phase, detail2 + 'conv2_bin', useBatchNorm=useBatchNorm),
ReLU(),
Conv2D_lowprecision(wbits2, abits2, nb_filters * 2, (5, 5),
(1, 1), "VALID", phase, detail2 + 'conv3_bin', useBatchNorm=useBatchNorm),
ReLU(),
Flatten(),
Linear(nb_classes, detail2),
Softmax(temperature)]
# make a full precision cnn with full precision weights and activations
layers3 = [Conv2D(nb_filters, (8, 8), (2, 2), "SAME", phase, detail3 + 'conv1'),
ReLU(),
Conv2D(nb_filters * 2, (6, 6),
(2, 2), "VALID", phase, detail3 + 'conv2'),
ReLU(),
Conv2D(nb_filters * 2, (5, 5),
(1, 1), "VALID", phase, detail3 + 'conv3'),
ReLU(),
Flatten(),
Linear(nb_classes, detail3),
Softmax(temperature)]
model = ensembleThreeModel(layers1, layers2, layers3, input_shape, nb_classes)
print('Finished making ensemble of three cnns')
return model
def make_ensemble_three_cnn_layerwise(phase, temperature, detail1, detail2, detail3, wbits1, abits1, wbits2, abits2, nb_filters=64, nb_classes=10, input_shape=(None, 28, 28, 1), useBatchNorm=False):
# make one low precision cnn with wbits precision weights and abits activations
layers1 = [Conv2D_lowprecision(wbits1[0], abits1[0], nb_filters, (8, 8),
(2, 2), "SAME", phase, detail1 + 'conv1', useBatchNorm=useBatchNorm),
ReLU(),
Conv2D_lowprecision(wbits1[1], abits1[1], nb_filters * 2, (6, 6),
(2, 2), "VALID", phase, detail1 + 'conv2_bin', useBatchNorm=useBatchNorm),
ReLU(),
Conv2D_lowprecision(wbits1[2], abits1[2], nb_filters * 2, (5, 5),
(1, 1), "VALID", phase, detail1 + 'conv3_bin', useBatchNorm=useBatchNorm),
ReLU(),
Flatten(),
Linear(nb_classes, detail1),
Softmax(temperature)]
# make another low precision cnn with wbits precision weights and abits activations
layers2 = [Conv2D_lowprecision(wbits2[0], abits2[0], nb_filters, (8, 8),
(2, 2), "SAME", phase, detail2 + 'conv1', useBatchNorm=useBatchNorm),
ReLU(),
Conv2D_lowprecision(wbits2[1], abits2[1], nb_filters * 2, (6, 6),
(2, 2), "VALID", phase, detail2 + 'conv2_bin', useBatchNorm=useBatchNorm),
ReLU(),
Conv2D_lowprecision(wbits2[2], abits2[2], nb_filters * 2, (5, 5),
(1, 1), "VALID", phase, detail2 + 'conv3_bin', useBatchNorm=useBatchNorm),
ReLU(),
Flatten(),
Linear(nb_classes, detail2),
Softmax(temperature)]
# make a full precision cnn with full precision weights and activations
layers3 = [Conv2D(nb_filters, (8, 8), (2, 2), "SAME", phase, detail3 + 'conv1'),
ReLU(),
Conv2D(nb_filters * 2, (6, 6),
(2, 2), "VALID", phase, detail3 + 'conv2'),
ReLU(),
Conv2D(nb_filters * 2, (5, 5),
(1, 1), "VALID", phase, detail3 + 'conv3'),
ReLU(),
Flatten(),
Linear(nb_classes, detail3),
Softmax(temperature)]
model = ensembleThreeModel(layers1, layers2, layers3, input_shape, nb_classes)
print('Finished making ensemble of three cnns')
return model
################# full-precision cifar cnn ############################
def make_basic_cifar_cnn(phase, temperature, detail, nb_filters=32, nb_classes=10,
input_shape=(None, 28, 28, 1)):
layers = [Conv2D(nb_filters, (5, 5), (1, 1), "SAME", phase, detail + 'conv1'),
MaxPool((3, 3), (2, 2)),
ReLU(),
Conv2D(nb_filters, (5, 5),
(1, 1), "SAME", phase, detail + 'conv2'),
ReLU(),
AvgPool((3, 3), (2, 2)),
Conv2D(nb_filters * 2, (5, 5),
(1, 1), "SAME", phase, detail + 'conv3'),
ReLU(),
AvgPool((3, 3), (2, 2)),
Flatten(),
HiddenLinear(64, detail + 'ip1'),
Linear(nb_classes, detail),
Softmax(temperature)]
model = MLP(layers, input_shape)
print('Finished making basic cifar cnn')
return model
################## distilled version of cifar cnn #################
def make_distilled_cifar_cnn(phase, temperature, detail1, detail2, nb_filters=32, nb_classes=10,
input_shape=(None, 28, 28, 1)):
teacher_layers = [Conv2D(nb_filters, (5, 5), (1, 1), "SAME", phase, detail1 + 'conv1'),
MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
ReLU(),
Conv2D(nb_filters, (5, 5),
(1, 1), "SAME", phase, detail1 + 'conv2'),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Conv2D(nb_filters * 2, (5, 5),
(1, 1), "SAME", phase, detail1 + 'conv3'),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Flatten(),
HiddenLinear(64, detail1 + 'ip1'),
Linear(nb_classes, detail1),
Softmax(temperature)]
student_layers = [Conv2D(nb_filters, (5, 5), (1, 1), "SAME", phase, detail2 + 'conv1'),
MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
ReLU(),
Conv2D(nb_filters, (5, 5),
(1, 1), "SAME", phase, detail2 + 'conv2'),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Conv2D(nb_filters * 2, (5, 5),
(1, 1), "SAME", phase, detail2 + 'conv3'),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Flatten(),
HiddenLinear(64, detail2 + 'ip1'),
Linear(nb_classes, detail2),
Softmax(temperature)]
model = distilledModel(teacher_layers, student_layers, input_shape)
print('Finished making distilled cifar cnn')
return model
################## low precision version of cifar cnn #################
def make_basic_lowprecision_cifar_cnn(phase, temperature, detail, wbits, abits, nb_filters=64, nb_classes=10,
input_shape=(None, 28, 28, 1), stocRound=False):
layers = [Conv2D_lowprecision(wbits, abits, nb_filters, (5, 5), (1, 1), "SAME", phase, detail + 'conv1', stocRound=stocRound), # VALID padding means no padding, SAME means padding by (k-1)/2
MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
ReLU(),
Conv2D_lowprecision(wbits, abits, nb_filters, (5, 5),
(1, 1), "SAME", phase, detail + 'conv2', stocRound=stocRound),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Conv2D_lowprecision(wbits, abits, nb_filters * 2, (5, 5),
(1, 1), "SAME", phase, detail + 'conv3', stocRound=stocRound),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Flatten(),
HiddenLinear(64, detail + 'ip1'),
Linear(nb_classes, detail),
Softmax(temperature)]
model = MLP(layers, input_shape)
print('Finished making basic low precision cnn: %d weight bits, %d activation bits' %(wbits, abits))
return model
def make_layerwise_lowprecision_cifar_cnn(phase, temperature, detail, wbits, abits, nb_filters=64, nb_classes=10,
input_shape=(None, 28, 28, 1), stocRound=False):
layers = [Conv2D_lowprecision(wbits[0], abits[0], nb_filters, (5, 5), (1, 1), "SAME", phase, detail + 'conv1', stocRound=stocRound),
MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
ReLU(),
Conv2D_lowprecision(wbits[1], abits[1], nb_filters, (5, 5),
(1, 1), "SAME", phase, detail + 'conv2', stocRound=stocRound),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Conv2D_lowprecision(wbits[2], abits[2], nb_filters * 2, (5, 5),
(1, 1), "SAME", phase, detail + 'conv3', stocRound=stocRound),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Flatten(),
HiddenLinear(64, detail + 'ip1'), # first f.c. layer
Linear(nb_classes, detail),
Softmax(temperature)]
model = MLP(layers, input_shape)
print('Finished making layerwise low precision cnn %d %d %d weight bits, %d %d %d activation bits' %(wbits[0], wbits[1], wbits[2], abits[0], abits[1], abits[2]))
return model
################## EMPIR version of cifar cnn #################
def make_ensemble_three_cifar_cnn(phase, temperature, detail1, detail2, detail3, wbits1, abits1, wbits2, abits2, nb_filters=32, nb_classes=10, input_shape=(None, 28, 28, 1)):
# make a low precision cnn with wbits1-bit weights and abits1-bit activations
layers1 = [Conv2D_lowprecision(wbits1, abits1, nb_filters, (5, 5), (1, 1), "SAME", phase, detail1 + 'conv1'), # VALID padding means no padding, SAME means padding by (k-1)/2
MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
ReLU(),
Conv2D_lowprecision(wbits1, abits1, nb_filters, (5, 5),
(1, 1), "SAME", phase, detail1 + 'conv2'),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Conv2D_lowprecision(wbits1, abits1, nb_filters * 2, (5, 5),
(1, 1), "SAME", phase, detail1 + 'conv3'),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Flatten(),
HiddenLinear(64, detail1 + 'ip1'), # first f.c. layer
Linear(nb_classes, detail1),
Softmax(temperature)]
# make another low precision cnn with wbits2-bit weights and abits2-bit activations
layers2 = [Conv2D_lowprecision(wbits2, abits2, nb_filters, (5, 5), (1, 1), "SAME", phase, detail2 + 'conv1'), # VALID padding means no padding, SAME means padding by (k-1)/2
MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
ReLU(),
Conv2D_lowprecision(wbits2, abits2, nb_filters, (5, 5),
(1, 1), "SAME", phase, detail2 + 'conv2'),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Conv2D_lowprecision(wbits2, abits2, nb_filters * 2, (5, 5),
(1, 1), "SAME", phase, detail2 + 'conv3'),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Flatten(),
HiddenLinear(64, detail2 + 'ip1'), # first f.c. layer
Linear(nb_classes, detail2),
Softmax(temperature)]
# make a full precision cnn with full precision weights and activations
layers3 = [Conv2D(nb_filters, (5, 5), (1, 1), "SAME", phase, detail3 + 'conv1'), # VALID padding means no padding, SAME means padding by (k-1)/2
MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
ReLU(),
Conv2D(nb_filters, (5, 5),
(1, 1), "SAME", phase, detail3 + 'conv2'),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Conv2D(nb_filters * 2, (5, 5),
(1, 1), "SAME", phase, detail3 + 'conv3'),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Flatten(),
HiddenLinear(64, detail3 + 'ip1'), # first f.c. layer
Linear(nb_classes, detail3),
Softmax(temperature)]
model = ensembleThreeModel(layers1, layers2, layers3, input_shape, nb_classes)
print('Finished making ensemble of three cnns')
return model
def make_ensemble_three_cifar_cnn_layerwise(phase, temperature, detail1, detail2, detail3, wbits1, abits1, wbits2, abits2, nb_filters=32, nb_classes=10,
input_shape=(None, 28, 28, 1)):
# make a low precision cnn with per-layer wbits1 weights and abits1 activations
layers1 = [Conv2D_lowprecision(wbits1[0], abits1[0], nb_filters, (5, 5), (1, 1), "SAME", phase, detail1 + 'conv1'),
MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
ReLU(),
Conv2D_lowprecision(wbits1[1], abits1[1], nb_filters, (5, 5),
(1, 1), "SAME", phase, detail1 + 'conv2'),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Conv2D_lowprecision(wbits1[2], abits1[2], nb_filters * 2, (5, 5),
(1, 1), "SAME", phase, detail1 + 'conv3'),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Flatten(),
HiddenLinear(64, detail1 + 'ip1'), # first f.c. layer
Linear(nb_classes, detail1),
Softmax(temperature)]
# make another low precision cnn with per-layer wbits2 weights and abits2 activations
layers2 = [Conv2D_lowprecision(wbits2[0], abits2[0], nb_filters, (5, 5), (1, 1), "SAME", phase, detail2 + 'conv1'), # VALID padding means no padding, SAME means padding by (k-1)/2
MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
ReLU(),
Conv2D_lowprecision(wbits2[1], abits2[1], nb_filters, (5, 5),
(1, 1), "SAME", phase, detail2 + 'conv2'),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Conv2D_lowprecision(wbits2[2], abits2[2], nb_filters * 2, (5, 5),
(1, 1), "SAME", phase, detail2 + 'conv3'),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Flatten(),
HiddenLinear(64, detail2 + 'ip1'), # first f.c. layer
Linear(nb_classes, detail2),
Softmax(temperature)]
# make a full precision cnn with full precision weights and activations
layers3 = [Conv2D(nb_filters, (5, 5), (1, 1), "SAME", phase, detail3 + 'conv1'), # VALID padding means no padding, SAME means padding by (k-1)/2
MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
ReLU(),
Conv2D(nb_filters, (5, 5),
(1, 1), "SAME", phase, detail3 + 'conv2'),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Conv2D(nb_filters * 2, (5, 5),
(1, 1), "SAME", phase, detail3 + 'conv3'),
ReLU(),
AvgPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
Flatten(),
HiddenLinear(64, detail3 + 'ip1'), # first f.c. layer
Linear(nb_classes, detail3),
Softmax(temperature)]
model = ensembleThreeModel(layers1, layers2, layers3, input_shape, nb_classes)
print('Finished making ensemble of three cifar cnns')
return model
######################### full-precision alexnet for Imagenet #########################
def make_basic_alexnet_from_scratch(phase, temperature, detail, nb_filters=32, nb_classes=10,
input_shape=(None, 28, 28, 1)):
layers = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail + 'conv1', useBias=True),
ReLU(),
Conv2DGroup(8*nb_filters, (5, 5),
(1, 1), "SAME", phase, detail + 'conv2'),
BatchNorm(phase, detail + '_batchNorm1'),
MaxPoolSame((3, 3), (2, 2)),
ReLU(),
Conv2D(12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail + 'conv3'),
BatchNorm(phase, detail + '_batchNorm2'),
MaxPoolSame((3, 3), (2, 2)),
ReLU(),
Conv2DGroup(12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail + 'conv4'),
BatchNorm(phase, detail + '_batchNorm3'),
ReLU(),
Conv2DGroup(8*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail + 'conv5'),
BatchNorm(phase, detail + '_batchNorm4'),
MaxPool((3, 3), (2, 2)),
ReLU(),
Flatten(),
HiddenLinear(4096, detail + 'ip1', useBias=True), # first f.c. layer
BatchNorm(phase, detail + '_batchNorm5'),
ReLU(),
HiddenLinear(4096, detail + 'ip2', useBias=False),
BatchNorm(phase, detail + '_batchNorm6'),
ReLU(),
Linear(nb_classes, detail, useBias=True),
Softmax(temperature)]
model = MLP(layers, input_shape)
print('Finished making basic alexnet')
return model
################## low precision version of alexnet #################
def make_basic_lowprecision_alexnet(phase, temperature, detail, wbits, abits, nb_filters=32, nb_classes=10,
input_shape=(None, 28, 28, 1)):
layers = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail + 'conv1', useBias=True),
ReLU(),
Conv2DGroup_lowprecision(wbits, abits, 8*nb_filters, (5, 5),
(1, 1), "SAME", phase, detail + 'conv2'), # useBatchNorm not set here
BatchNorm(phase, detail + '_batchNorm1'),
MaxPoolSame((3, 3), (2, 2)), # pool1 (3,3) pool size and (2,2) stride
ReLU(),
Conv2D_lowprecision(wbits, abits, 12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail + 'conv3'),
BatchNorm(phase, detail + '_batchNorm2'),
MaxPoolSame((3, 3), (2, 2)), # pool2 (3,3) pool size and (2,2) stride
ReLU(),
Conv2DGroup_lowprecision(wbits, abits, 12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail + 'conv4'),
BatchNorm(phase, detail + '_batchNorm3'),
ReLU(),
Conv2DGroup_lowprecision(wbits, abits, 8*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail + 'conv5'),
BatchNorm(phase, detail + '_batchNorm4'),
MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
ReLU(),
Flatten(),
HiddenLinear_lowprecision(wbits, abits, 4096, detail + 'ip1', useBias=True), # first f.c. layer
BatchNorm(phase, detail + '_batchNorm5'),
ReLU(),
HiddenLinear_lowprecision(wbits, abits, 4096, detail + 'ip2'),
BatchNorm(phase, detail + '_batchNorm6'),
ReLU(),
Linear(nb_classes, detail, useBias=True), # Last layer is not quantized
Softmax(temperature)]
model = MLP(layers, input_shape)
print('Finished making basic alexnet of low precision')
return model
def make_layerwise_lowprecision_alexnet(phase, temperature, detail, wbits, abits, nb_filters=32, nb_classes=10,
input_shape=(None, 28, 28, 1)):
layers = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail + 'conv1', useBias=True),
ReLU(),
Conv2DGroup_lowprecision(wbits[0], abits[0], 8*nb_filters, (5, 5),
(1, 1), "SAME", phase, detail + 'conv2'), # useBatchNorm not set here
BatchNorm(phase, detail + '_batchNorm1'),
MaxPoolSame((3, 3), (2, 2)), # pool1 (3,3) pool size and (2,2) stride
ReLU(),
Conv2D_lowprecision(wbits[1], abits[1], 12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail + 'conv3'),
BatchNorm(phase, detail + '_batchNorm2'),
MaxPoolSame((3, 3), (2, 2)), # pool2 (3,3) pool size and (2,2) stride
ReLU(),
Conv2DGroup_lowprecision(wbits[2], abits[2], 12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail + 'conv4'),
BatchNorm(phase, detail + '_batchNorm3'),
ReLU(),
Conv2DGroup_lowprecision(wbits[3], abits[3], 8*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail + 'conv5'),
BatchNorm(phase, detail + '_batchNorm4'),
MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
ReLU(),
Flatten(),
HiddenLinear_lowprecision(wbits[4], abits[4], 4096, detail + 'ip1', useBias=True), # first f.c. layer
BatchNorm(phase, detail + '_batchNorm5'),
ReLU(),
HiddenLinear_lowprecision(wbits[5], abits[5], 4096, detail + 'ip2'),
BatchNorm(phase, detail + '_batchNorm6'),
ReLU(),
Linear(nb_classes, detail, useBias=True), # Last layer is not quantized
Softmax(temperature)]
model = MLP(layers, input_shape)
print('Finished making layerwise alexnet of low precision')
return model
################## EMPIR version of alexnet #################
def make_ensemble_three_alexnet(phase, temperature, detail1, detail2, detail3, wbits1, abits1, wbits2, abits2, nb_filters=32, nb_classes=10,
input_shape=(None, 28, 28, 1), useBatchNorm=False):
# make a low precision cnn
layers1 = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail1 + 'conv1', useBias=True),
ReLU(),
Conv2DGroup_lowprecision(wbits1, abits1, 8*nb_filters, (5, 5),
(1, 1), "SAME", phase, detail1 + 'conv2'),
BatchNorm(phase, detail1 + '_batchNorm1'),
MaxPoolSame((3, 3), (2, 2)),
ReLU(),
Conv2D_lowprecision(wbits1, abits1, 12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail1 + 'conv3'),
BatchNorm(phase, detail1 + '_batchNorm2'),
MaxPoolSame((3, 3), (2, 2)),
ReLU(),
Conv2DGroup_lowprecision(wbits1, abits1, 12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail1 + 'conv4'),
BatchNorm(phase, detail1 + '_batchNorm3'),
ReLU(),
Conv2DGroup_lowprecision(wbits1, abits1, 8*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail1 + 'conv5'),
BatchNorm(phase, detail1 + '_batchNorm4'),
MaxPool((3, 3), (2, 2)),
ReLU(),
Flatten(),
HiddenLinear_lowprecision(wbits1, abits1, 4096, detail1 + 'ip1', useBias=True), # first f.c. layer
BatchNorm(phase, detail1 + '_batchNorm5'),
ReLU(),
HiddenLinear_lowprecision(wbits1, abits1, 4096, detail1 + 'ip2', useBias=False),
BatchNorm(phase, detail1 + '_batchNorm6'),
ReLU(),
Linear(nb_classes, detail1, useBias=True),
Softmax(temperature)]
# make another low precision cnn
layers2 = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail2 + 'conv1', useBias=True),
ReLU(),
Conv2DGroup_lowprecision(wbits2, abits2, 8*nb_filters, (5, 5),
(1, 1), "SAME", phase, detail2 + 'conv2'),
BatchNorm(phase, detail2 + '_batchNorm1'),
MaxPoolSame((3, 3), (2, 2)),
ReLU(),
Conv2D_lowprecision(wbits2, abits2, 12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail2 + 'conv3'),
BatchNorm(phase, detail2 + '_batchNorm2'),
MaxPoolSame((3, 3), (2, 2)),
ReLU(),
Conv2DGroup_lowprecision(wbits2, abits2, 12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail2 + 'conv4'),
BatchNorm(phase, detail2 + '_batchNorm3'),
ReLU(),
Conv2DGroup_lowprecision(wbits2, abits2, 8*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail2 + 'conv5'),
BatchNorm(phase, detail2 + '_batchNorm4'),
MaxPool((3, 3), (2, 2)),
ReLU(),
Flatten(),
HiddenLinear_lowprecision(wbits2, abits2, 4096, detail2 + 'ip1', useBias=True), # first f.c. layer
BatchNorm(phase, detail2 + '_batchNorm5'),
ReLU(),
HiddenLinear_lowprecision(wbits2, abits2, 4096, detail2 + 'ip2', useBias=False),
BatchNorm(phase, detail2 + '_batchNorm6'),
ReLU(),
Linear(nb_classes, detail2, useBias=True), # Last layer is not quantized
Softmax(temperature)]
# make a full precision cnn with full precision weights and activations
layers3 = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail3 + 'conv1', useBias=True),
ReLU(),
Conv2DGroup(8*nb_filters, (5, 5),
(1, 1), "SAME", phase, detail3 + 'conv2'),
BatchNorm(phase, detail3 + '_batchNorm1'),
MaxPoolSame((3, 3), (2, 2)),
ReLU(),
Conv2D(12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail3 + 'conv3'),
BatchNorm(phase, detail3 + '_batchNorm2'),
MaxPoolSame((3, 3), (2, 2)),
ReLU(),
Conv2DGroup(12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail3 + 'conv4'),
BatchNorm(phase, detail3 + '_batchNorm3'),
ReLU(),
Conv2DGroup(8*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail3 + 'conv5'),
BatchNorm(phase, detail3 + '_batchNorm4'),
MaxPool((3, 3), (2, 2)),
ReLU(),
Flatten(),
HiddenLinear(4096, detail3 + 'ip1', useBias=True), # first f.c. layer
BatchNorm(phase, detail3 + '_batchNorm5'),
ReLU(),
HiddenLinear(4096, detail3 + 'ip2', useBias=False),
BatchNorm(phase, detail3 + '_batchNorm6'),
ReLU(),
Linear(nb_classes, detail3, useBias=True),
Softmax(temperature)]
model = ensembleThreeModel(layers1, layers2, layers3, input_shape, nb_classes)
print('Finished making ensemble of three cnns')
return model
def make_ensemble_three_alexnet_layerwise(phase, temperature, detail1, detail2, detail3, wbits1, abits1, wbits2, abits2, nb_filters=32, nb_classes=10,
input_shape=(None, 28, 28, 1)):
# make a low precision cnn
layers1 = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail1 + 'conv1', useBias=True),
ReLU(),
Conv2DGroup_lowprecision(wbits1[0], abits1[0], 8*nb_filters, (5, 5),
(1, 1), "SAME", phase, detail1 + 'conv2'),
BatchNorm(phase, detail1 + '_batchNorm1'),
MaxPoolSame((3, 3), (2, 2)), # pool1 (3,3) pool size and (2,2) stride
ReLU(),
Conv2D_lowprecision(wbits1[1], abits1[1], 12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail1 + 'conv3'),
BatchNorm(phase, detail1 + '_batchNorm2'),
MaxPoolSame((3, 3), (2, 2)), # pool2 (3,3) pool size and (2,2) stride
ReLU(),
Conv2DGroup_lowprecision(wbits1[2], abits1[2], 12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail1 + 'conv4'),
BatchNorm(phase, detail1 + '_batchNorm3'),
ReLU(),
Conv2DGroup_lowprecision(wbits1[3], abits1[3], 8*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail1 + 'conv5'),
BatchNorm(phase, detail1 + '_batchNorm4'),
MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
ReLU(),
Flatten(),
HiddenLinear_lowprecision(wbits1[4], abits1[4], 4096, detail1 + 'ip1', useBias=True), # first f.c. layer
BatchNorm(phase, detail1 + '_batchNorm5'),
ReLU(),
HiddenLinear_lowprecision(wbits1[5], abits1[5], 4096, detail1 + 'ip2'),
BatchNorm(phase, detail1 + '_batchNorm6'),
ReLU(),
Linear(nb_classes, detail1, useBias=True), # Last layer is not quantized
Softmax(temperature)]
# make another low precision cnn
layers2 = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail2 + 'conv1', useBias=True),
ReLU(),
Conv2DGroup_lowprecision(wbits2[0], abits2[0], 8*nb_filters, (5, 5),
(1, 1), "SAME", phase, detail2 + 'conv2'), # useBatchNorm not set here
BatchNorm(phase, detail2 + '_batchNorm1'),
MaxPoolSame((3, 3), (2, 2)), # pool1 (3,3) pool size and (2,2) stride
ReLU(),
Conv2D_lowprecision(wbits2[1], abits2[1], 12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail2 + 'conv3'),
BatchNorm(phase, detail2 + '_batchNorm2'),
MaxPoolSame((3, 3), (2, 2)), # pool2 (3,3) pool size and (2,2) stride
ReLU(),
Conv2DGroup_lowprecision(wbits2[2], abits2[2], 12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail2 + 'conv4'),
BatchNorm(phase, detail2 + '_batchNorm3'),
ReLU(),
Conv2DGroup_lowprecision(wbits2[3], abits2[3], 8*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail2 + 'conv5'),
BatchNorm(phase, detail2 + '_batchNorm4'),
MaxPool((3, 3), (2, 2)), # (3,3) pool size and (2,2) stride
ReLU(),
Flatten(),
HiddenLinear_lowprecision(wbits2[4], abits2[4], 4096, detail2 + 'ip1', useBias=True), # first f.c. layer
BatchNorm(phase, detail2 + '_batchNorm5'),
ReLU(),
HiddenLinear_lowprecision(wbits2[5], abits2[5], 4096, detail2 + 'ip2'),
BatchNorm(phase, detail2 + '_batchNorm6'),
ReLU(),
Linear(nb_classes, detail2, useBias=True), # Last layer is not quantized
Softmax(temperature)]
# make a full precision cnn with full precision weights and activations
layers3 = [Conv2D(3*nb_filters, (12, 12), (4, 4), "VALID", phase, detail3 + 'conv1', useBias=True),
ReLU(),
Conv2DGroup(8*nb_filters, (5, 5),
(1, 1), "SAME", phase, detail3 + 'conv2'),
BatchNorm(phase, detail3 + '_batchNorm1'),
MaxPoolSame((3, 3), (2, 2)),
ReLU(),
Conv2D(12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail3 + 'conv3'),
BatchNorm(phase, detail3 + '_batchNorm2'),
MaxPoolSame((3, 3), (2, 2)),
ReLU(),
Conv2DGroup(12*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail3 + 'conv4'),
BatchNorm(phase, detail3 + '_batchNorm3'),
ReLU(),
Conv2DGroup(8*nb_filters, (3, 3),
(1, 1), "SAME", phase, detail3 + 'conv5'),
BatchNorm(phase, detail3 + '_batchNorm4'),
MaxPool((3, 3), (2, 2)),
ReLU(),
Flatten(),
HiddenLinear(4096, detail3 + 'ip1', useBias=True), # first f.c. layer
BatchNorm(phase, detail3 + '_batchNorm5'),
ReLU(),
HiddenLinear(4096, detail3 + 'ip2', useBias=False),
BatchNorm(phase, detail3 + '_batchNorm6'),
ReLU(),
Linear(nb_classes, detail3, useBias=True),
Softmax(temperature)]
model = ensembleThreeModel(layers1, layers2, layers3, input_shape, nb_classes)
print('Finished making ensemble of three models')
return model
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configargparse
import pdb
def pair(arg):
return [float(x) for x in arg.split(',')]
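# `pair` parses a "step,value" string into a [step, value] float pair, so the
# --step_size_schedule option below can be given as space-separated pairs, e.g.
#   --step_size_schedule 0,0.1 40000,0.01 60000,0.001
# where pair("40000,0.01") == [40000.0, 0.01].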
def get_args():
parser = configargparse.ArgParser(default_config_files=[])
parser.add("--config", type=str, is_config_file=True, help="You can store all the config args in a config file and pass the path here")
parser.add("--model_dir", type=str, default="models/model", help="Path to save/load the checkpoints, default=models/model")
parser.add("--data_dir", type=str, default="datasets/", help="Path to load datasets from, default=datasets")
parser.add("--model_suffix", type=str, default="", help="Suffix to append to model name, default=''")
parser.add("--dataset", "-d", type=str, default="cifar10", choices=["cifar10", "cifar100", "svhn"], help="Path to load dataset, default=cifar10")
parser.add("--tf_seed", type=int, default=451760341, help="Random seed for initializing tensor-flow variables to rule out the effect of randomness in experiments, default=45160341")
parser.add("--np_seed", type=int, default=216105420, help="Random seed for initializing numpy variables to rule out the effect of randomness in experiments, default=216105420")
parser.add("--train_steps", type=int, default=80000, help="Maximum number of training steps, default=80000")
parser.add("--out_steps", "-o", type=int, default=100, help="Number of output steps, default=100")
parser.add("--summary_steps", type=int, default=500, help="Number of summary steps, default=500")
parser.add("--checkpoint_steps", "-c", type=int, default=1000, help="Number of checkpoint steps, default=1000")
parser.add("--train_batch_size", "-b", type=int, default=128, help="The training batch size, default=128")
parser.add("--step_size_schedule", nargs='+', type=pair, default=[[0, 0.1], [40000, 0.01], [60000, 0.001]], help="The step size scheduling, default=[[0, 0.1], [40000, 0.01], [60000, 0.001]], use like: --stepsize 0,0.1 40000,0.01 60000,0.001")
parser.add("--weight_decay", "-w", type=float, default=0.0002, help="The weight decay parameter, default=0.0002")
parser.add("--momentum", type=float, default=0.9, help="The momentum parameter, default=0.9")
parser.add("--replay_m", "-m", type=int, default=8, help="Number of steps to repeat training on the same batch, default=8")
parser.add("--eval_examples", type=int, default=10000, help="Number of evaluation examples, default=10000")
parser.add("--eval_size", type=int, default=128, help="Evaluation batch size, default=128")
parser.add("--eval_cpu", dest='eval_cpu', action='store_true', help="Set True to do evaluation on CPU instead of GPU, default=False")
parser.set_defaults(eval_cpu=False)
# attack params
parser.add("--epsilon", "-e", type=float, default=8.0, help="Epsilon (Lp Norm distance from the original image) for generating adversarial examples, default=8.0")
parser.add("--pgd_steps", "-k", type=int, default=20, help="Number of steps to PGD attack, default=20")
parser.add("--step_size", "-s", type=float, default=2.0, help="Step size in PGD attack for generating adversarial examples in each step, default=2.0")
parser.add("--loss_func", "-f", type=str, default="xent", choices=["xent", "cw"], help="Loss function for the model, choices are [xent, cw], default=xent")
parser.add("--num_restarts", type=int, default=1, help="Number of resets for the PGD attack, default=1")
parser.add("--random_start", dest="random_start", action="store_true", help="Random start for PGD attack default=True")
parser.add("--no-random_start", dest="random_start", action="store_false", help="No random start for PGD attack default=True")
parser.set_defaults(random_start=True)
# input grad generation param
parser.add("--randinit_repeat", type=int, default=1, help="Number of randinit grad to generate, default=1")
parser.add("--num_gen_grad", type=int, default=0, help="Number of input grad samples to generate, 0 means all data default=0")
parser.add("--num_gen_act", type=int, default=0, help="Number of activation samples to generate, 0 means all data default=0")
# input grad reg params
parser.add("--beta", type=float, default=1, help="Weight of input gradient regularization, default=1")
parser.add("--gamma", type=float, default=1, help="Weight of disc xent term on encoder opt, default=1")
parser.add("--alpha", type=float, default=0, help="Weight of image-input gradient l2 norm regularization, default=0")
parser.add("--disc_update_steps", type=int, default=5, help="Number of classifier opt steps between each disc opt step, default=5")
parser.add("--adv_update_steps_per_iter", type=int, default=1, help="Number of classifier adv opt steps per classification xent opt step, default=1")
parser.add("--disc_layers", type=int, default=5, help="Number of conv layers in disc model, default=5")
parser.add("--disc_base_channels", type=int, default=16, help="Number of channels in first disc conv layer, default=16")
parser.add("--steps_before_adv_opt", type=int, default=0, help="Number of training steps to wait before training on adv loss, default=0")
parser.add("--adv_encoder_type", type=str, default='simple', help="Type of input grad encoder for adv training, default=simple")
parser.add("--enc_output_activation", type=str, default='tanh', help="Activation function of encoder output default=None")
parser.add("--sep_opt_version", type=int, default=1, choices=[0, 1, 2], help="Sep opt version 0: train_jan.py, 1: train_jan_sep_opt-CD.py, 2: train_jan_sep_opt2-CD.py default=1")
parser.add("--grad_image_ratio", type=float, default=1, help="Ratio of input grad to mix with image default=1")
parser.add("--final_grad_image_ratio", type=float, default=0, help="Final ratio of input grad to mix with image, set to 0 for static ratio default=0")
parser.add("--num_grad_image_ratios", type=int, default=5, help="Number of times to adjust grad_image_ratio default=4")
parser.add("--eval_adv_attack", dest="eval_adv_attack", action="store_true", help="Evaluate trained model on adv attack after training default=True")
parser.add("--no-eval_adv_attack", dest="eval_adv_attack", action="store_false", help="Evaluate trained model on adv attack after training default=True")
parser.set_defaults(eval_adv_attack=True)
parser.add("--normalize_zero_mean", dest="normalize_zero_mean", action="store_true", help="Normalize classifier input to zero mean default=True")
parser.add("--no-normalize_zero_mean", dest="normalize_zero_mean", action="store_false", help="Normalize classifier input to zero mean default=True")
parser.set_defaults(normalize_zero_mean=True)
parser.add("--same_optimizer", dest="same_optimizer", action="store_true", help="Train classifier and disc with same optimizer configuration default=True")
parser.add("--no-same_optimizer", dest="same_optimizer", action="store_false", help="Train classifier and disc with same optimizer configuration default=True")
parser.set_defaults(same_optimizer=True)
parser.add("--only_fully_connected", dest="only_fully_connected", action="store_true", help="Fully connected disc model default=False")
parser.add("--no-only_fully_connected", dest="only_fully_connected", action="store_false", help="Fully connected disc model default=False")
parser.set_defaults(only_fully_connected=False)
parser.add("--img_random_pert", dest="img_random_pert", action="store_true", help="Random start image pertubation augmentation default=False")
parser.add("--no-img_random_pert", dest="img_random_pert", action="store_false", help="No random start image pertubation augmentation default=False")
parser.set_defaults(img_random_pert=False)
args = parser.parse_args()
return args
if __name__ == "__main__":
print(get_args())
pdb.set_trace()
# TODO Default for model_dir
# TODO Need to update the helps
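# Illustrative usage sketch (added for clarity; the file name and values below are
# assumptions, not part of the original code). With configargparse, every flag above
# can be given on the command line or collected in a config file passed via --config,
# e.g. a file "jarn_cifar10.cfg" containing lines such as:
#   dataset = cifar10
#   train_batch_size = 128
#   beta = 1.0
#   disc_update_steps = 5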
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for importing the CIFAR10 dataset.
Each image in the dataset is a numpy array of shape (32, 32, 3), with the values
being unsigned integers (i.e., in the range 0,1,...,255).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import sys
import tensorflow as tf
import numpy as np
import re
version = sys.version_info
class CIFAR10Data(object):
"""
    Unpickles the CIFAR10 dataset from a specified folder containing a pickled
    version in Alex Krizhevsky's format
    (https://www.cs.toronto.edu/~kriz/cifar.html).
    Inputs to constructor
    =====================
    - path: path to the pickled dataset. The training data must be pickled into
      five files named data_batch_i for i = 1, ..., 5, each containing 10,000
      examples; the test data must be pickled into a single file called
      test_batch containing 10,000 examples; and the 10 class names must be
      pickled into a file called batches.meta. Each pickled batch holds the raw
      image data for its 10,000 examples together with their 10,000 true labels.
"""
def __init__(self, path, init_shuffle=True, train_size_ratio=1):
num_classes = 10
path = CIFAR10Data.rec_search(path)
train_filenames = ['data_batch_{}'.format(ii + 1) for ii in range(5)]
eval_filename = 'test_batch'
metadata_filename = 'batches.meta'
train_images = np.zeros((50000, 32, 32, 3), dtype='uint8')
train_labels = np.zeros(50000, dtype='int32')
for ii, fname in enumerate(train_filenames):
cur_images, cur_labels = self._load_datafile(os.path.join(path, fname))
train_images[ii * 10000: (ii + 1) * 10000, ...] = cur_images
train_labels[ii * 10000: (ii + 1) * 10000, ...] = cur_labels
eval_images, eval_labels = self._load_datafile(
os.path.join(path, eval_filename))
with open(os.path.join(path, metadata_filename), 'rb') as fo:
if version.major == 3:
data_dict = pickle.load(fo, encoding='bytes')
else:
data_dict = pickle.load(fo)
self.label_names = data_dict[b'label_names']
for ii in range(len(self.label_names)):
self.label_names[ii] = self.label_names[ii].decode('utf-8')
if train_size_ratio < 1:
new_train_images = []
new_train_labels = []
for class_ind in range(num_classes):
current_class_train_images = train_images[train_labels == class_ind]
num_train_per_class = int(current_class_train_images.shape[0] * train_size_ratio)
new_train_images.append(current_class_train_images[:num_train_per_class])
new_train_labels.append(np.full(num_train_per_class, class_ind, dtype='int32'))
train_images = np.concatenate(new_train_images, axis=0)
train_labels = np.concatenate(new_train_labels)
self.train_data = DataSubset(train_images, train_labels, init_shuffle=init_shuffle)
self.eval_data = DataSubset(eval_images, eval_labels, init_shuffle=init_shuffle)
@staticmethod
def rec_search(original_path):
rx = re.compile(r'data_batch_[0-9]+')
r = []
for path, _, file_names in os.walk(original_path):
r.extend([os.path.join(path, x) for x in file_names if rx.search(x)])
        if len(r) == 0:  # TODO: Is this the best way?
return original_path
return os.path.dirname(r[0])
@staticmethod
def _load_datafile(filename):
with open(filename, 'rb') as fo:
if version.major == 3:
data_dict = pickle.load(fo, encoding='bytes')
else:
data_dict = pickle.load(fo)
assert data_dict[b'data'].dtype == np.uint8
image_data = data_dict[b'data']
image_data = image_data.reshape((10000, 3, 32, 32)).transpose(0, 2, 3, 1)
return image_data, np.array(data_dict[b'labels'])
class AugmentedCIFAR10Data(object):
"""
Data augmentation wrapper over a loaded dataset.
Inputs to constructor
=====================
- raw_cifar10data: the loaded CIFAR10 dataset, via the CIFAR10Data class
- sess: current tensorflow session
- model: current model (needed for input tensor)
"""
def __init__(self, raw_cifar10data, sess, model):
assert isinstance(raw_cifar10data, CIFAR10Data)
self.image_size = 32
# create augmentation computational graph
self.x_input_placeholder = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
padded = tf.map_fn(lambda img: tf.image.resize_image_with_crop_or_pad(
img, self.image_size + 4, self.image_size + 4),
self.x_input_placeholder)
cropped = tf.map_fn(lambda img: tf.random_crop(img, [self.image_size,
self.image_size,
3]), padded)
flipped = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), cropped)
self.augmented = flipped
self.train_data = AugmentedDataSubset(raw_cifar10data.train_data, sess,
self.x_input_placeholder,
self.augmented)
self.eval_data = AugmentedDataSubset(raw_cifar10data.eval_data, sess,
self.x_input_placeholder,
self.augmented)
self.label_names = raw_cifar10data.label_names
class DataSubset(object):
def __init__(self, xs, ys, init_shuffle=True):
self.xs = xs
self.n = xs.shape[0]
self.ys = ys
self.batch_start = 0
if init_shuffle:
self.cur_order = np.random.permutation(self.n)
else:
self.cur_order = np.arange(self.n)
def get_next_batch(self, batch_size, multiple_passes=False, reshuffle_after_pass=True):
if self.n < batch_size:
raise ValueError('Batch size can be at most the dataset size')
if not multiple_passes:
actual_batch_size = min(batch_size, self.n - self.batch_start)
if actual_batch_size <= 0:
raise ValueError('Pass through the dataset is complete.')
batch_end = self.batch_start + actual_batch_size
batch_xs = self.xs[self.cur_order[self.batch_start: batch_end], ...]
batch_ys = self.ys[self.cur_order[self.batch_start: batch_end], ...]
self.batch_start += actual_batch_size
if actual_batch_size < batch_size:
print('actual_batch_size < batch_size, padding with zeros')
batch_xs_pad = np.zeros(shape=(batch_size - actual_batch_size, batch_xs.shape[1], batch_xs.shape[2], batch_xs.shape[3]), dtype=batch_xs.dtype)
batch_ys_pad = np.zeros(batch_size - actual_batch_size, dtype=batch_ys.dtype)
batch_xs = np.concatenate([batch_xs, batch_xs_pad], axis=0)
batch_ys = np.concatenate([batch_ys, batch_ys_pad], axis=0)
return batch_xs, batch_ys
actual_batch_size = min(batch_size, self.n - self.batch_start)
if actual_batch_size < batch_size:
if reshuffle_after_pass:
self.cur_order = np.random.permutation(self.n)
self.batch_start = 0
batch_end = self.batch_start + batch_size
batch_xs = self.xs[self.cur_order[self.batch_start: batch_end], ...]
batch_ys = self.ys[self.cur_order[self.batch_start: batch_end], ...]
self.batch_start += actual_batch_size
return batch_xs, batch_ys
class AugmentedDataSubset(object):
def __init__(self, raw_datasubset, sess, x_input_placeholder,
augmented):
self.sess = sess
self.raw_datasubset = raw_datasubset
self.x_input_placeholder = x_input_placeholder
self.augmented = augmented
def get_next_batch(self, batch_size, multiple_passes=False, reshuffle_after_pass=True):
raw_batch = self.raw_datasubset.get_next_batch(batch_size, multiple_passes,
reshuffle_after_pass)
        images = raw_batch[0].astype(np.float32)
        return self.sess.run(self.augmented,
                             feed_dict={self.x_input_placeholder: images}), raw_batch[1]
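# Illustrative usage sketch (added for clarity): the dataset path below is an
# assumption, not part of the original code; it must point at the pickled
# Krizhevsky-format batches described in the CIFAR10Data docstring.
if __name__ == "__main__":
    cifar = CIFAR10Data("datasets/cifar10")
    xs, ys = cifar.train_data.get_next_batch(128, multiple_passes=True)
    print(xs.shape, xs.dtype, ys.shape)  # expected: (128, 32, 32, 3) uint8 (128,)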
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configargparse
import pdb
def pair(arg):
return [float(x) for x in arg.split(',')]
def get_args():
parser = configargparse.ArgParser(default_config_files=[])
parser.add("--model_dir", type=str, default="models/adv_trained", help="Path to save/load the checkpoints, default=models/model")
parser.add("--data_path", type=str, default="datasets/cifar10", help="Path to dataset, default=datasets/cifar10")
parser.add("--tf_seed", type=int, default=451760341, help="Random seed for initializing tensor-flow variables to rule out the effect of randomness in experiments, default=45160341")
parser.add("--np_seed", type=int, default=216105420, help="Random seed for initializing numpy variables to rule out the effect of randomness in experiments, default=216105420")
parser.add("--num_eval_examples", type=int, default=10000, help="Number of eval samples, default=10000")
parser.add("--eval_batch_size", type=int, default=100, help="Eval batch size, default=100")
parser.add("--epsilon", "-e", type=float, default=8.0, help="Epsilon (Lp Norm distance from the original image) for generating adversarial examples, default=8.0")
parser.add("--num_steps", type=int, default=10, help="Number of steps to PGD attack, default=10")
parser.add("--step_size", "-s", type=float, default=2.0, help="Step size in PGD attack for generating adversarial examples in each step, default=2.0")
parser.add("--random_start", dest="random_start", action="store_true", help="Random start for PGD attack default=True")
parser.add("--no-random_start", dest="random_start", action="store_false", help="No random start for PGD attack default=True")
parser.set_defaults(random_start=True)
parser.add("--loss_func", "-f", type=str, default="xent", choices=["xent", "target_task_xent", "cw"], help="Loss function for the model, choices are [xent, cw], default=xent")
parser.add("--attack_norm", type=str, default="inf", choices=["inf", "2"], help="Lp norm type for attacks, choices are [inf, 2], default=inf")
parser.add("--dataset", "-d", type=str, default="cifar10", choices=["cifar10", "cifar100", "imagenet"], help="Path to load dataset, default=cifar10")
parser.add("--store_adv_path", type=str, default=None, help="Path to save adversarial examples, default=None")
parser.add("--attack_name", type=str, default=None, help="Path to save adversarial examples, default=''")
parser.add("--save_eval_log", dest="save_eval_log", action="store_true", help="Save txt file for attack eval")
parser.add("--no-save_eval_log", dest="save_eval_log", action="store_false", help="Save txt file for attack eval")
parser.set_defaults(save_eval_log=False)
parser.add("--xfer_attack", dest="xfer_attack", action="store_true", help="Adversarial transfer attack")
parser.add("--no-xfer_attack", dest="xfer_attack", action="store_false", help="not adversarial transfer attack")
parser.set_defaults(xfer_attack=False)
parser.add("--custom_output_model_name", type=str, default=None, help="Custom model name, default=None")
parser.add("--n_inner_points", type=int, default=999, help="")
parser.add("--n_boundary_points", type=int, default=1, help="")
parser.add("--sample_from_corners", type=bool, default=False, help="")
parser.add("--attack", type=str, default="pgd", help="either pgd or apgd")
args = parser.parse_args()
return args
if __name__ == "__main__":
print(get_args())
pdb.set_trace()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Modified according to main method in pgd_attack.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import torch
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import numpy as np
import sys
import math
import cifar10_input
import config_attack
from pgd_attack import LinfPGDAttack
def main():
config = vars(config_attack.get_args())
tf.set_random_seed(config['tf_seed'])
np.random.seed(config['np_seed'])
model_file = tf.train.latest_checkpoint(config['model_dir'])
print("config['model_dir']: ", config['model_dir'])
if model_file is None:
print('No model found')
sys.exit()
print("JARN MODEL")
from model_jarn import Model
if "_zeromeaninput" in config['model_dir']:
model = Model(dataset=config['dataset'], train_batch_size=config['eval_batch_size'], normalize_zero_mean=True,
# added by AUTHOR
mode='eval')
else:
model = Model(dataset=config['dataset'], train_batch_size=config['eval_batch_size'],
# added by AUTHOR
mode='eval')
saver = tf.train.Saver()
data_path = config['data_path']
print("load cifar10 dataset")
cifar = cifar10_input.CIFAR10Data(data_path)
with tf.Session() as sess:
print("Using attack:", config['attack'])
if config['attack'] == 'pgd' or config['attack'] == 'pgd-ld':
attack = LinfPGDAttack(model,
config['epsilon'] / 255.0,
config['num_steps'],
config['step_size'],
config['random_start'],
config['loss_func'],
dataset=config['dataset'],
clip_max=1.0)
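            # Note: eval images are fed in [0, 1] (see the division by 255.0 below),
            # so epsilon is rescaled by 1/255 and clip_max is set to 1.0 to match.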
attack_fn = lambda x, y: attack.perturb(x, y, sess)
elif config['attack'] == 'apgd':
from autoattack import autopgd_base
from autoattack_adapter import ModelAdapter
autoattack_model = ModelAdapter(
model.pre_softmax, model.x_input,
model.y_input, sess, num_classes=10, device="cpu")
attack = autopgd_base.APGDAttack(
autoattack_model, n_restarts=5, n_iter=100, verbose=True,
eps=config["epsilon"] / 255.0, norm="Linf", eot_iter=1, rho=.99,
is_tf_model=True, device="cpu", loss='dlr')
attack_fn = lambda x, y: attack.perturb(
torch.tensor(x.transpose((0, 3, 1, 2)), device="cpu"),
torch.tensor(y, device="cpu")
).detach().cpu().numpy().transpose((0, 2, 3, 1))
else:
raise ValueError("invalid attack")
# Restore the checkpoint
saver.restore(sess, model_file)
# Iterate over the samples batch-by-batch
num_eval_examples = config['num_eval_examples']
eval_batch_size = config['eval_batch_size']
num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
        preds = []
adv_preds = []
ys = []
for ibatch in range(num_batches):
bstart = ibatch * eval_batch_size
bend = min(bstart + eval_batch_size, num_eval_examples)
x_batch = cifar.eval_data.xs[bstart:bend, :] / 255.0
y_batch = cifar.eval_data.ys[bstart:bend]
x_batch_adv = attack_fn(x_batch, y_batch)
logits = sess.run(model.pre_softmax, {model.x_input: x_batch})
adv_logits = sess.run(model.pre_softmax, {model.x_input: x_batch_adv})
preds.append(logits.argmax(-1))
adv_preds.append(adv_logits.argmax(-1))
ys.append(y_batch)
preds = np.concatenate(preds)
adv_preds = np.concatenate(adv_preds)
ys = np.concatenate(ys)
acc = np.mean(preds == ys)
adv_acc = np.mean(adv_preds == ys)
print("Accuracy:", acc)
print("Robust Accuracy:", adv_acc)
if __name__ == "__main__":
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# based on https://github.com/tensorflow/models/tree/master/resnet
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import json
from collections import OrderedDict
class Model(object):
"""ResNet model."""
def __init__(self, dataset, mode='train', train_batch_size=None, normalize_zero_mean=False, zero_one=False):
"""
ResNet constructor.
"""
self.neck = None
self.y_pred = None
self.mode = mode
self.num_classes = 10
self.train_batch_size = train_batch_size
self.activations = []
self.normalize_zero_mean = normalize_zero_mean
self.zero_one = zero_one
self._build_model()
def add_internal_summaries(self):
pass
def _stride_arr(self, stride):
"""Map a stride scalar to the stride array for tf.nn.conv2d."""
return [1, stride, stride, 1]
def _build_model(self):
    """Build the core model within the graph."""
    assert self.mode == 'train' or self.mode == 'eval'
with tf.variable_scope('classifier'):
with tf.variable_scope('input'):
self.x_input = tf.placeholder(
tf.float32,
shape=[None, 32, 32, 3])
self.y_input = tf.placeholder(tf.int64, shape=None)
if self.zero_one:
self.final_input = self.x_input * 255.0
else:
self.final_input = self.x_input
if self.normalize_zero_mean:
final_input_mean = tf.reduce_mean(self.final_input, axis=[1,2,3])
for i in range(3):
final_input_mean = tf.expand_dims(final_input_mean, axis=-1)
final_input_mean = tf.tile(final_input_mean, [1,32,32,3])
zero_mean_final_input = self.final_input - final_input_mean
self.input_standardized = tf.math.l2_normalize(zero_mean_final_input, axis=[1,2,3])
else:
self.input_standardized = tf.math.l2_normalize(self.final_input, axis=[1,2,3])
x = self._conv('init_conv', self.input_standardized, 3, 3, 16, self._stride_arr(1))
self.activations.append(x)
strides = [1, 2, 2]
activate_before_residual = [True, False, False]
res_func = self._residual
      # The filter widths below correspond to a w28-10 wide residual network
      # (https://arxiv.org/pdf/1605.07146v1.pdf), which is more memory-efficient
      # than a very deep residual network and reaches comparable accuracy.
      # filters = [16, 16, 32, 64]  # narrow filters, useful for debugging
      filters = [16, 160, 320, 640]
# Update hps.num_residual_units to 9
with tf.variable_scope('unit_1_0'):
x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]),
activate_before_residual[0])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_1_%d' % i):
x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_2_0'):
x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]),
activate_before_residual[1])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_2_%d' % i):
x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_3_0'):
x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]),
activate_before_residual[2])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_3_%d' % i):
x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_last'):
x = self._batch_norm('final_bn', x)
x = self._relu(x, 0.1)
x = self._global_avg_pool(x)
self.neck = x
with tf.variable_scope('logit'):
self.pre_softmax = self._fully_connected(x, self.num_classes)
self.activations.append(self.pre_softmax)
self.softmax = tf.nn.softmax(self.pre_softmax)
sample_indices = tf.range(self.train_batch_size, dtype=tf.int64)
sample_indices = tf.expand_dims(sample_indices, axis=-1)
target_indices = tf.expand_dims(self.y_input, axis=-1)
self.gather_indices = tf.concat([sample_indices, target_indices], axis=-1)
self.target_softmax = tf.gather_nd(self.softmax, self.gather_indices, name="targetsoftmax")
self.target_logit = tf.gather_nd(self.pre_softmax, self.gather_indices, name="targetlogit")
self.predictions = tf.argmax(self.pre_softmax, 1)
self.y_pred = self.predictions
self.correct_prediction = tf.equal(self.predictions, self.y_input)
self.num_correct = tf.reduce_sum(tf.cast(self.correct_prediction, tf.int64))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
with tf.variable_scope('costs'):
self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.pre_softmax, labels=self.y_input)
self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
self.loss = self.xent
self.mean_xent = tf.reduce_mean(self.y_xent)
self.y_xent_dbp = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.pre_softmax, labels=self.y_input)
self.xent_dbp = tf.reduce_sum(self.y_xent_dbp, name='y_xent_dbp')
self.mean_xent_dbp = tf.reduce_mean(self.y_xent_dbp)
self.weight_decay_loss = self._decay()
# for top-2 logit diff loss
self.label_mask = tf.one_hot(self.y_input,
self.num_classes,
on_value=1.0,
off_value=0.0,
dtype=tf.float32)
self.correct_logit = tf.reduce_sum(self.label_mask * self.pre_softmax, axis=1)
self.wrong_logit = tf.reduce_max((1-self.label_mask) * self.pre_softmax - 1e4*self.label_mask, axis=1)
self.top2_logit_diff_loss = -tf.nn.relu(self.correct_logit - self.wrong_logit + 50)
def _batch_norm(self, name, x):
"""Batch normalization."""
with tf.name_scope(name):
return tf.contrib.layers.batch_norm(inputs=x, decay=.9, center=True, scale=True, activation_fn=None,
updates_collections=None, is_training=(self.mode == 'train'))
def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
"""Residual unit with 2 sub layers."""
if activate_before_residual:
with tf.variable_scope('shared_activation'):
x = self._batch_norm('init_bn', x)
x = self._relu(x, 0.1)
orig_x = x
else:
with tf.variable_scope('residual_only_activation'):
orig_x = x
x = self._batch_norm('init_bn', x)
x = self._relu(x, 0.1)
with tf.variable_scope('sub1'):
x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
with tf.variable_scope('sub2'):
x = self._batch_norm('bn2', x)
x = self._relu(x, 0.1)
x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
with tf.variable_scope('sub_add'):
if in_filter != out_filter:
orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
orig_x = tf.pad(
orig_x, [[0, 0], [0, 0], [0, 0],
[(out_filter - in_filter) // 2, (out_filter - in_filter) // 2]])
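        # When the channel count grows, the identity shortcut is average-pooled to
        # the new spatial size and zero-padded along the channel axis so it can be
        # added to the residual branch without extra parameters.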
x += orig_x
tf.logging.debug('image after unit %s', x.get_shape())
return x
def _decay(self):
"""L2 weight decay loss."""
costs = []
for var in tf.trainable_variables():
if var.op.name.find('DW') > 0:
costs.append(tf.nn.l2_loss(var))
return tf.add_n(costs)
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
"""Convolution."""
with tf.variable_scope(name):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
'DW', [filter_size, filter_size, in_filters, out_filters],
tf.float32, initializer=tf.random_normal_initializer(
stddev=np.sqrt(2.0 / n)))
return tf.nn.conv2d(x, kernel, strides, padding='SAME')
def _relu(self, x, leakiness=0.0):
"""Relu, with optional leaky support."""
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _fully_connected(self, x, out_dim):
"""FullyConnected layer for final output."""
num_non_batch_dimensions = len(x.shape)
prod_non_batch_dimensions = 1
for ii in range(num_non_batch_dimensions - 1):
prod_non_batch_dimensions *= int(x.shape[ii + 1])
x = tf.reshape(x, [tf.shape(x)[0], -1])
w = tf.get_variable(
'DW', [prod_non_batch_dimensions, out_dim],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable('biases', [out_dim],
initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
def _global_avg_pool(self, x):
assert x.get_shape().ndims == 4
return tf.reduce_mean(x, [1, 2])
class JarnConvDiscriminatorModel(object):
"""Simple conv discriminator model."""
# based on https://github.com/tensorflow/models/blob/d361076952b73706c5c7ddf9c940bf42c27a3213/research/slim/nets/dcgan.py#L41
def __init__(self, mode, dataset, train_batch_size=None, num_conv_layers=5, base_num_channels=16, x_modelgrad_input_tensor=None,
y_modelgrad_input_tensor=None, x_image_input_tensor=None, y_image_input_tensor=None, normalize_zero_mean=False, only_fully_connected=False, num_fc_layers=3, image_size=32, num_input_channels=3):
"""
conv disc constructor.
"""
self.neck = None
self.y_pred = None
self.mode = mode
self.num_classes = 2
self.train_batch_size = train_batch_size
self.num_conv_layers = num_conv_layers
self.num_fc_layers = num_fc_layers
self.base_num_channels = base_num_channels
self.x_modelgrad_input_tensor = x_modelgrad_input_tensor
self.y_modelgrad_input_tensor = y_modelgrad_input_tensor
self.x_image_input_tensor = x_image_input_tensor
self.y_image_input_tensor = y_image_input_tensor
self.normalize_zero_mean = normalize_zero_mean
self.only_fully_connected = only_fully_connected
self.image_size = image_size
self.num_input_channels = num_input_channels
self._build_model()
def add_internal_summaries(self):
pass
def _stride_arr(self, stride):
"""Map a stride scalar to the stride array for tf.nn.conv2d."""
return [1, stride, stride, 1]
def _build_model(self):
    """Build the core model within the graph."""
    assert self.mode == 'train' or self.mode == 'eval'
with tf.variable_scope('discriminator'):
with tf.variable_scope('input'):
        if self.x_modelgrad_input_tensor is None:
self.x_modelgrad_input = tf.get_variable(name='x_modelgrad_input', initializer=tf.zeros_initializer,
shape=[self.train_batch_size, self.image_size, self.image_size, self.num_input_channels], dtype=tf.float32)
self.x_image_input = tf.placeholder(
tf.float32,
shape=[None, self.image_size, self.image_size, self.num_input_channels])
else:
self.x_modelgrad_input = self.x_modelgrad_input_tensor
self.x_image_input = self.x_image_input_tensor
self.x_input = tf.concat([self.x_modelgrad_input, self.x_image_input], axis=0)
        if self.y_modelgrad_input_tensor is None:
self.y_modelgrad_input = tf.get_variable(name='y_modelgrad_input', initializer=tf.zeros_initializer,
shape=self.train_batch_size, dtype=tf.int64)
self.y_image_input = tf.placeholder(tf.int64, shape=None)
else:
self.y_modelgrad_input = self.y_modelgrad_input_tensor
self.y_image_input = self.y_image_input_tensor
self.y_input = tf.concat([self.y_modelgrad_input, self.y_image_input], axis=0)
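        # The discriminator batch is the concatenation [input-gradient half; image half];
        # in training the gradient half is labeled 0 and the image half 1, so the
        # classifier/encoder are trained to make input gradients resemble real images.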
self.final_input = self.x_input
if self.normalize_zero_mean:
final_input_mean = tf.reduce_mean(self.final_input, axis=[1,2,3])
for i in range(3):
final_input_mean = tf.expand_dims(final_input_mean, axis=-1)
final_input_mean = tf.tile(final_input_mean, [1,self.image_size,self.image_size,self.num_input_channels])
zero_mean_final_input = self.final_input - final_input_mean
self.input_standardized = tf.math.l2_normalize(zero_mean_final_input, axis=[1,2,3])
else:
self.input_standardized = tf.math.l2_normalize(self.final_input, axis=[1,2,3])
x = self.input_standardized
base_num_channels = self.base_num_channels
if self.only_fully_connected == False:
for i in range(self.num_conv_layers):
output_num_channels = base_num_channels * 2**i
if i == 0:
x = self._conv('conv{}'.format(i), x, 4, self.num_input_channels, output_num_channels, self._stride_arr(2), bias=True)
x = self._batch_norm('bn{}'.format(i), x)
x = self._relu(x, 0.1)
else:
x = self._conv('conv{}'.format(i), x, 4, output_num_channels // 2, output_num_channels, self._stride_arr(2), bias=True)
x = self._batch_norm('bn{}'.format(i), x)
x = self._relu(x, 0.1)
else:
for i in range(self.num_fc_layers):
if i == self.num_fc_layers -1:
x = self._fully_connected(x, base_num_channels//2, name='fc{}'.format(i))
else:
x = self._fully_connected(x, base_num_channels, name='fc{}'.format(i))
x = self._batch_norm('bn{}'.format(i), x)
x = self._relu(x, 0.1)
with tf.variable_scope('logit'):
self.pre_softmax = self._fully_connected(x, self.num_classes)
self.predictions = tf.argmax(self.pre_softmax, 1)
self.y_pred = self.predictions
self.correct_prediction = tf.equal(self.predictions, self.y_input)
self.num_correct = tf.reduce_sum(tf.cast(self.correct_prediction, tf.int64))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
with tf.variable_scope('costs'):
self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.pre_softmax, labels=self.y_input)
self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
self.loss = self.xent
self.mean_xent = tf.reduce_mean(self.y_xent)
self.weight_decay_loss = self._decay()
self.input_grad_standardized = self.input_standardized[:self.train_batch_size]
self.image_standardized = self.input_standardized[self.train_batch_size:]
self.ig_img_l2_norm_diff = tf.reduce_mean(tf.reduce_sum(tf.pow(tf.subtract(self.input_grad_standardized, self.image_standardized), 2.0), keepdims=True))
def _batch_norm(self, name, x):
"""Batch normalization."""
with tf.name_scope(name):
return tf.contrib.layers.batch_norm(inputs=x, decay=.9, center=True, scale=True, activation_fn=None,
updates_collections=None, is_training=(self.mode == 'train'))
def _decay(self):
"""L2 weight decay loss."""
costs = []
for var in tf.trainable_variables():
if var.op.name.find('DW') > 0:
costs.append(tf.nn.l2_loss(var))
return tf.add_n(costs)
def _conv(self, name, x, filter_size, in_filters, out_filters, strides, bias=False, padding='SAME'):
"""Convolution."""
with tf.variable_scope(name):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
'DW', [filter_size, filter_size, in_filters, out_filters],
tf.float32, initializer=tf.random_normal_initializer(
stddev=np.sqrt(2.0 / n)))
if bias == True:
b = tf.get_variable('biases', [out_filters],
initializer=tf.constant_initializer())
conv_out = tf.nn.conv2d(x, kernel, strides, padding=padding)
conv_out_b = tf.nn.bias_add(conv_out, b)
return conv_out_b
else:
return tf.nn.conv2d(x, kernel, strides, padding=padding)
def _relu(self, x, leakiness=0.0):
"""Relu, with optional leaky support."""
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _fully_connected(self, x, out_dim, name=None):
"""FullyConnected layer for final output."""
    if name is None:
num_non_batch_dimensions = len(x.shape)
prod_non_batch_dimensions = 1
for ii in range(num_non_batch_dimensions - 1):
prod_non_batch_dimensions *= int(x.shape[ii + 1])
x = tf.reshape(x, [tf.shape(x)[0], -1])
w = tf.get_variable(
'DW', [prod_non_batch_dimensions, out_dim],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable('biases', [out_dim],
initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
else:
with tf.variable_scope(name):
num_non_batch_dimensions = len(x.shape)
prod_non_batch_dimensions = 1
for ii in range(num_non_batch_dimensions - 1):
prod_non_batch_dimensions *= int(x.shape[ii + 1])
x = tf.reshape(x, [tf.shape(x)[0], -1])
w = tf.get_variable(
'DW', [prod_non_batch_dimensions, out_dim],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable('biases', [out_dim],
initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
def _global_avg_pool(self, x):
assert x.get_shape().ndims == 4
return tf.reduce_mean(x, [1, 2])
class InputGradEncoderModel(object):
"""3x3 + 1x1 conv model."""
# based on https://github.com/tensorflow/models/blob/d361076952b73706c5c7ddf9c940bf42c27a3213/research/slim/nets/dcgan.py#L41
def __init__(self, mode, train_batch_size=None, encoder_type='simple', output_activation=None, x_modelgrad_input_tensor=None, normalize_zero_mean=False, pix2pix_layers=5, pix2pix_features_root=16, pix2pix_filter_size=4, image_size=32, num_input_channels=3, num_output_channels=None):
"""conv disc constructor.
"""
self.mode = mode
self.train_batch_size = train_batch_size
self.encoder_type = encoder_type
self.output_activation = output_activation
self.x_modelgrad_input_tensor = x_modelgrad_input_tensor
self.normalize_zero_mean = normalize_zero_mean
self.keep_prob = 1
self.layers = pix2pix_layers
self.features_root = pix2pix_features_root
self.filter_size = pix2pix_filter_size
self.image_size = image_size
self.num_input_channels = num_input_channels
    if num_output_channels is None:
      self.num_output_channels = num_input_channels
    else:
      self.num_output_channels = num_output_channels
self._build_model()
def add_internal_summaries(self):
pass
def _stride_arr(self, stride):
"""Map a stride scalar to the stride array for tf.nn.conv2d."""
return [1, stride, stride, 1]
def _build_model(self):
"""Build the core model within the graph."""
assert self.mode == 'train' or self.mode == 'eval'
with tf.variable_scope('encoder'):
with tf.variable_scope('input'):
        if self.x_modelgrad_input_tensor is None:
self.x_modelgrad_input = tf.get_variable(name='x_modelgrad_input', initializer=tf.zeros_initializer,
shape=[self.train_batch_size, self.image_size, self.image_size, self.num_input_channels], dtype=tf.float32)
else:
self.x_modelgrad_input = self.x_modelgrad_input_tensor
if self.normalize_zero_mean:
final_input_mean = tf.reduce_mean(self.x_modelgrad_input, axis=[1,2,3])
for i in range(3):
final_input_mean = tf.expand_dims(final_input_mean, axis=-1)
final_input_mean = tf.tile(final_input_mean, [1,self.image_size,self.image_size,self.num_input_channels])
zero_mean_final_input = self.x_modelgrad_input - final_input_mean
self.input_standardized = tf.math.l2_normalize(zero_mean_final_input, axis=[1,2,3])
else:
self.input_standardized = tf.math.l2_normalize(self.x_modelgrad_input, axis=[1,2,3])
if self.output_activation == 'tanh':
x_modelgrad_transformed_preact = self._conv('conv', self.input_standardized, 1, self.num_output_channels, self.num_output_channels, self._stride_arr(1), bias=True)
self.x_modelgrad_transformed = tf.tanh(x_modelgrad_transformed_preact)
else:
self.x_modelgrad_transformed = self._conv('conv', self.input_standardized, 1, self.num_output_channels, self.num_output_channels, self._stride_arr(1), bias=True)
with tf.variable_scope('costs'):
self.weight_decay_loss = self._decay()
def _batch_norm(self, name, x):
"""Batch normalization."""
with tf.name_scope(name):
return tf.contrib.layers.batch_norm(inputs=x, decay=.9, center=True, scale=True, activation_fn=None,
updates_collections=None, is_training=(self.mode == 'train'))
def _decay(self):
"""L2 weight decay loss."""
costs = []
for var in tf.trainable_variables():
if var.op.name.find('DW') > 0:
costs.append(tf.nn.l2_loss(var))
return tf.add_n(costs)
def _conv(self, name, x, filter_size, in_filters, out_filters, strides, bias=False, padding='SAME'):
"""Convolution."""
with tf.variable_scope(name):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
'DW', [filter_size, filter_size, in_filters, out_filters],
tf.float32, initializer=tf.random_normal_initializer(
stddev=np.sqrt(2.0 / n)))
if bias == True:
b = tf.get_variable('biases', [out_filters],
initializer=tf.constant_initializer())
conv_out = tf.nn.conv2d(x, kernel, strides, padding=padding)
conv_out_b = tf.nn.bias_add(conv_out, b)
return conv_out_b
else:
return tf.nn.conv2d(x, kernel, strides, padding=padding)
def _relu(self, x, leakiness=0.0):
"""Relu, with optional leaky support."""
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
class GradImageMixer(object):
""" Model to mix input grad with image."""
def __init__(self, train_batch_size=None, direct_feed_input=False, grad_input_tensor=None, image_input_tensor=None, normalize_zero_mean=False, image_size=32, num_input_channels=3):
"""GradImageMixer constructor.
Args:
"""
self.train_batch_size = train_batch_size
self.direct_feed_input = direct_feed_input
self.grad_input_tensor = grad_input_tensor
self.image_input_tensor = image_input_tensor
self.normalize_zero_mean = normalize_zero_mean
self.image_size = image_size
self.num_input_channels = num_input_channels
self._build_model()
def _build_model(self):
"""Build the core model within the graph."""
with tf.variable_scope('mixer'):
with tf.variable_scope('input'):
if self.direct_feed_input:
self.grad_input = tf.placeholder(
tf.float32,
shape=[None, self.image_size, self.image_size, self.num_input_channels])
self.image_input = tf.placeholder(
tf.float32,
shape=[None, self.image_size, self.image_size, self.num_input_channels])
else:
          if self.grad_input_tensor is None:
self.grad_input = tf.get_variable(name='grad_input', initializer=tf.zeros_initializer,
shape=[self.train_batch_size, self.image_size, self.image_size, self.num_input_channels], dtype=tf.float32)
self.image_input = tf.get_variable(name='image_input', initializer=tf.zeros_initializer,
shape=[self.train_batch_size, self.image_size, self.image_size, self.num_input_channels], dtype=tf.float32)
else:
self.grad_input = self.grad_input_tensor
self.image_input = self.image_input_tensor
self.grad_ratio = tf.placeholder(tf.float32, shape=())
if self.normalize_zero_mean:
final_input_mean = tf.reduce_mean(self.grad_input, axis=[1,2,3])
for i in range(3):
final_input_mean = tf.expand_dims(final_input_mean, axis=-1)
final_input_mean = tf.tile(final_input_mean, [1, self.image_size, self.image_size,self.num_input_channels])
zero_mean_final_input = self.grad_input - final_input_mean
self.grad_input_standardized = tf.math.l2_normalize(zero_mean_final_input, axis=[1,2,3])
else:
self.grad_input_standardized = tf.math.l2_normalize(self.grad_input, axis=[1,2,3])
self.output = self.grad_input_standardized * self.grad_ratio + self.image_input * (1 - self.grad_ratio)
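        # The output is a convex mix: grad_ratio * l2_normalize(grad) + (1 - grad_ratio) * image,
        # so grad_ratio=0 passes the image through unchanged and grad_ratio=1 keeps only the gradient.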
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains a model, saving checkpoints and tensorboard summaries along
the way."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import shutil
from timeit import default_timer as timer
import tensorflow as tf
import numpy as np
import sys
from model_jarn import Model, JarnConvDiscriminatorModel, InputGradEncoderModel, GradImageMixer
import cifar10_input
import pdb
from tqdm import tqdm
import subprocess
import time
from numba import cuda
import config
def get_path_dir(data_dir, dataset, **_):
if dataset == "cifar10":
path = "../../data/cifar10/cifar-10-batches-py/"
else:
path = os.path.join(data_dir, dataset)
if os.path.islink(path):
path = os.readlink(path)
return path
def train(tf_seed, np_seed, train_steps, out_steps, summary_steps, checkpoint_steps, step_size_schedule,
weight_decay, momentum, train_batch_size, epsilon, replay_m, model_dir, model_suffix, dataset,
beta, gamma, disc_update_steps, adv_update_steps_per_iter, disc_layers, disc_base_channels, steps_before_adv_opt, adv_encoder_type, enc_output_activation,
sep_opt_version, grad_image_ratio, final_grad_image_ratio, num_grad_image_ratios, normalize_zero_mean, eval_adv_attack, same_optimizer, only_fully_connected, img_random_pert, **kwargs):
tf.set_random_seed(tf_seed)
np.random.seed(np_seed)
model_dir = model_dir + 'JARN_%s_b%d_beta_%.3f_gamma_%.3f_disc_update_steps%d_l%dbc%d' % (dataset, train_batch_size, beta, gamma, disc_update_steps, disc_layers, disc_base_channels)
if img_random_pert:
model_dir = model_dir + '_imgpert'
if steps_before_adv_opt != 0:
model_dir = model_dir + '_advdelay%d' % (steps_before_adv_opt)
if adv_encoder_type != 'simple':
model_dir = model_dir + '_%senc' % (adv_encoder_type)
    if enc_output_activation is not None:
model_dir = model_dir + '_%sencact' % (enc_output_activation)
if grad_image_ratio != 1:
model_dir = model_dir + '_gradmixratio%.2f' % (grad_image_ratio)
if normalize_zero_mean:
model_dir = model_dir + '_zeromeaninput'
if train_steps != 80000:
model_dir = model_dir + '_%dsteps' % (train_steps)
if same_optimizer == False:
model_dir = model_dir + '_adamDopt'
if only_fully_connected:
model_dir = model_dir + '_FCdisc'
if tf_seed != 451760341:
model_dir = model_dir + '_tf_seed%d' % (tf_seed)
if np_seed != 216105420:
model_dir = model_dir + '_np_seed%d' % (np_seed)
model_dir = model_dir + model_suffix
# Setting up the data and the model
data_path = get_path_dir(dataset=dataset, **kwargs)
raw_data = cifar10_input.CIFAR10Data(data_path)
global_step = tf.train.get_or_create_global_step()
increment_global_step_op = tf.assign(global_step, global_step+1)
model = Model(mode='train', dataset=dataset, train_batch_size=train_batch_size, normalize_zero_mean=normalize_zero_mean)
# Setting up the optimizers
boundaries = [int(sss[0]) for sss in step_size_schedule][1:]
values = [sss[1] for sss in step_size_schedule]
learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32), boundaries, values)
c_optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
e_optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
if same_optimizer:
d_optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
else:
print("Using ADAM opt for DISC model")
d_optimizer = tf.train.AdamOptimizer(learning_rate = 0.001)
# Using target softmax for input grad
input_grad = tf.gradients(model.target_softmax, model.x_input, name="gradients_ig")[0]
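    # input_grad is d(softmax probability of the true class) / d(input image), i.e.
    # the per-pixel saliency map that the JARN discriminator below is trained on.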
# Setting up the gradimagemixer model
grad_image_mixer = GradImageMixer(train_batch_size=train_batch_size, grad_input_tensor=input_grad, image_input_tensor=model.input_standardized, normalize_zero_mean=normalize_zero_mean)
# Setting up the discriminator model
encoder_model = InputGradEncoderModel(mode='train', train_batch_size=train_batch_size, encoder_type=adv_encoder_type, output_activation=enc_output_activation, x_modelgrad_input_tensor=grad_image_mixer.output, normalize_zero_mean=normalize_zero_mean)
transformed_input_grad = encoder_model.x_modelgrad_transformed
    labels_input_grad = tf.zeros(tf.shape(input_grad)[0], dtype=tf.int64)
    labels_disc_image_input = tf.ones(tf.shape(input_grad)[0], dtype=tf.int64)
disc_model = JarnConvDiscriminatorModel(mode='train', dataset=dataset, train_batch_size=train_batch_size, num_conv_layers=disc_layers, base_num_channels=disc_base_channels, normalize_zero_mean=normalize_zero_mean,
x_modelgrad_input_tensor=transformed_input_grad, y_modelgrad_input_tensor=labels_input_grad, x_image_input_tensor=model.input_standardized, y_image_input_tensor=labels_disc_image_input, only_fully_connected=only_fully_connected)
t_vars = tf.trainable_variables()
C_vars = [var for var in t_vars if 'classifier' in var.name]
D_vars = [var for var in t_vars if 'discriminator' in var.name]
E_vars = [var for var in t_vars if 'encoder' in var.name]
# Classifier: Optimizing computation
# total classifier loss: Add discriminator loss into total classifier loss
total_loss = model.mean_xent + weight_decay * model.weight_decay_loss - beta * disc_model.mean_xent
classification_c_loss = model.mean_xent + weight_decay * model.weight_decay_loss
adv_c_loss = - beta * disc_model.mean_xent
# Discriminator: Optimizating computation
# discriminator loss
total_d_loss = disc_model.mean_xent + weight_decay * disc_model.weight_decay_loss
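    # JARN adversarial game: the discriminator minimizes its own cross-entropy
    # (telling input gradients from real images), while the classifier (via
    # -beta * disc xent) and the encoder (via -gamma * disc xent, see total_e_loss
    # below) are rewarded for fooling it.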
# Train classifier
# classifier opt step
# AUTHOR added the next two lines to fix batch norm
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
final_grads = c_optimizer.compute_gradients(total_loss, var_list=C_vars)
no_pert_grad = [(tf.zeros_like(v), v) if 'perturbation' in v.name else (g, v) for g, v in final_grads]
c_min_step = c_optimizer.apply_gradients(no_pert_grad)
classification_final_grads = c_optimizer.compute_gradients(classification_c_loss, var_list=C_vars)
classification_no_pert_grad = [(tf.zeros_like(v), v) if 'perturbation' in v.name else (g, v) for g, v in classification_final_grads]
c_classification_min_step = c_optimizer.apply_gradients(classification_no_pert_grad)
# Encoder: Optimizating computation
# encoder loss
total_e_loss = weight_decay * encoder_model.weight_decay_loss - gamma * disc_model.mean_xent
e_min_step = e_optimizer.minimize(total_e_loss, var_list=E_vars)
# discriminator opt step
d_min_step = d_optimizer.minimize(total_d_loss, var_list=D_vars)
# Loss gradients to the model params
logit_weights = tf.get_default_graph().get_tensor_by_name('classifier/logit/DW:0')
last_conv_weights = tf.get_default_graph().get_tensor_by_name('classifier/unit_3_4/sub2/conv2/DW:0')
first_conv_weights = tf.get_default_graph().get_tensor_by_name('classifier/input/init_conv/DW:0')
# Setting up the Tensorboard and checkpoint outputs
if not os.path.exists(model_dir):
os.makedirs(model_dir)
saver = tf.train.Saver(max_to_keep=1)
tf.summary.scalar('C accuracy', model.accuracy)
tf.summary.scalar('D accuracy', disc_model.accuracy)
tf.summary.scalar('C xent', model.xent / train_batch_size)
tf.summary.scalar('D xent', disc_model.xent / train_batch_size)
tf.summary.scalar('total C loss', total_loss / train_batch_size)
tf.summary.scalar('total D loss', total_d_loss / train_batch_size)
tf.summary.scalar('adv C loss', adv_c_loss / train_batch_size)
tf.summary.scalar('C cls xent loss', model.mean_xent)
merged_summaries = tf.summary.merge_all()
with tf.Session() as sess:
print('params >>> \n model dir: %s \n dataset: %s \n training batch size: %d \n' % (model_dir, dataset, train_batch_size))
data = cifar10_input.AugmentedCIFAR10Data(raw_data, sess, model)
# Initialize the summary writer, global variables, and our time counter.
summary_writer = tf.summary.FileWriter(model_dir + '/train', sess.graph)
eval_summary_writer = tf.summary.FileWriter(model_dir + '/eval')
sess.run(tf.global_variables_initializer())
# Main training loop
for ii in tqdm(range(train_steps)):
x_batch, y_batch = data.train_data.get_next_batch(train_batch_size, multiple_passes=True)
if img_random_pert:
x_batch = x_batch + np.random.uniform(-epsilon, epsilon, x_batch.shape)
x_batch = np.clip(x_batch, 0, 255) # ensure valid pixel range
labels_image_disc = np.ones_like( y_batch, dtype=np.int64)
nat_dict = {model.x_input: x_batch, model.y_input: y_batch,
disc_model.x_image_input: x_batch, disc_model.y_image_input: labels_image_disc, grad_image_mixer.grad_ratio: grad_image_ratio}
# Output to stdout
if ii % summary_steps == 0:
train_acc, train_disc_acc, train_c_loss, train_e_loss, train_d_loss, train_adv_c_loss, summary = sess.run([model.accuracy, disc_model.accuracy, total_loss, total_e_loss, total_d_loss, adv_c_loss, merged_summaries], feed_dict=nat_dict)
summary_writer.add_summary(summary, global_step.eval(sess))
x_eval_batch, y_eval_batch = data.eval_data.get_next_batch(train_batch_size, multiple_passes=True)
if img_random_pert:
x_eval_batch = x_eval_batch + np.random.uniform(-epsilon, epsilon, x_eval_batch.shape)
x_eval_batch = np.clip(x_eval_batch, 0, 255) # ensure valid pixel range
labels_image_disc = np.ones_like( y_eval_batch, dtype=np.int64)
eval_dict = {model.x_input: x_eval_batch, model.y_input: y_eval_batch,
disc_model.x_image_input: x_eval_batch, disc_model.y_image_input: labels_image_disc, grad_image_mixer.grad_ratio: grad_image_ratio}
val_acc, val_disc_acc, val_c_loss, val_e_loss, val_d_loss, val_adv_c_loss, summary = sess.run([model.accuracy, disc_model.accuracy, total_loss, total_e_loss, total_d_loss, adv_c_loss, merged_summaries], feed_dict=eval_dict)
eval_summary_writer.add_summary(summary, global_step.eval(sess))
print('Step {}: ({})'.format(ii, datetime.now()))
print(' training nat accuracy {:.4}% -- validation nat accuracy {:.4}%'.format(train_acc * 100,
val_acc * 100))
print(' training nat disc accuracy {:.4}% -- validation nat disc accuracy {:.4}%'.format(train_disc_acc * 100,
val_disc_acc * 100))
print(' training nat c loss: {}, e loss: {}, d loss: {}, adv c loss: {}'.format( train_c_loss, train_e_loss, train_d_loss, train_adv_c_loss))
print(' validation nat c loss: {}, e loss: {}, d loss: {}, adv c loss: {}'.format( val_c_loss, val_e_loss, val_d_loss, val_adv_c_loss))
sys.stdout.flush()
# Tensorboard summaries
elif ii % out_steps == 0:
nat_acc, nat_disc_acc, nat_c_loss, nat_e_loss, nat_d_loss, nat_adv_c_loss = sess.run([model.accuracy, disc_model.accuracy, total_loss, total_e_loss, total_d_loss, adv_c_loss], feed_dict=nat_dict)
print('Step {}: ({})'.format(ii, datetime.now()))
print(' training nat accuracy {:.4}%'.format(nat_acc * 100))
print(' training nat disc accuracy {:.4}%'.format(nat_disc_acc * 100))
print(' training nat c loss: {}, e loss: {}, d loss: {}, adv c loss: {}'.format( nat_c_loss, nat_e_loss, nat_d_loss, nat_adv_c_loss))
# Write a checkpoint
if (ii+1) % checkpoint_steps == 0:
saver.save(sess, os.path.join(model_dir, 'checkpoint'), global_step=global_step)
if ii >= steps_before_adv_opt:
# Actual training step for Classifier
sess.run([c_min_step, e_min_step], feed_dict=nat_dict)
sess.run(increment_global_step_op)
if ii % disc_update_steps == 0:
# Actual training step for Discriminator
sess.run(d_min_step, feed_dict=nat_dict)
else:
# only train on classification loss
sess.run(c_classification_min_step, feed_dict=nat_dict)
sess.run(increment_global_step_op)
# full test evaluation
raw_data = cifar10_input.CIFAR10Data(data_path)
data_size = raw_data.eval_data.n
eval_steps = data_size // train_batch_size
total_num_correct = 0
for ii in tqdm(range(eval_steps)):
x_eval_batch, y_eval_batch = raw_data.eval_data.get_next_batch(train_batch_size, multiple_passes=False)
eval_dict = {model.x_input: x_eval_batch, model.y_input: y_eval_batch}
num_correct = sess.run(model.num_correct, feed_dict=eval_dict)
total_num_correct += num_correct
eval_acc = total_num_correct / data_size
clean_eval_file_path = os.path.join(model_dir, 'full_clean_eval_acc.txt')
with open(clean_eval_file_path, "a+") as f:
f.write("Full clean eval_acc: {}%".format(eval_acc*100))
print("Full clean eval_acc: {}%".format(eval_acc*100))
devices = sess.list_devices()
        print("sess' device names:")
        for d in devices:
            print(d.name)
return model_dir
if __name__ == '__main__':
args = config.get_args()
args_dict = vars(args)
model_dir = train(**args_dict)
if args_dict['eval_adv_attack']:
cuda.select_device(0)
cuda.close()
print("{}: Evaluating on CIFAR10 fgsm and pgd attacks".format(datetime.now()))
subprocess.run("python pgd_attack.py --attack_name fgsm --save_eval_log --num_steps 1 --no-random_start --step_size 8 --model_dir {} ; python run_attack.py --attack_name fgsm --save_eval_log --model_dir {} ; python pgd_attack.py --save_eval_log --model_dir {} ; python run_attack.py --save_eval_log --model_dir {} ; python pgd_attack.py --attack_name pgds5 --save_eval_log --num_steps 5 --model_dir {} ; python run_attack.py --attack_name pgds5 --save_eval_log --num_steps 5 --model_dir {}".format(model_dir, model_dir, model_dir, model_dir, model_dir, model_dir), shell=True)
print("{}: Ended evaluation on CIFAR10 fgsm and pgd attacks".format(datetime.now()))
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implementation of attack methods. Running this file as a program will
apply the attack to the model specified by the config file and store
the examples in an .npy file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import tensorflow as tf
import numpy as np
import cifar10_input
import config_attack
class LinfPGDAttack:
def __init__(self, model, epsilon, num_steps, step_size, random_start, loss_func, dataset='cifar10',
clip_max=255.0):
"""Attack parameter initialization. The attack performs k steps of
size a, while always staying within epsilon from the initial
point."""
self.model = model
self.epsilon = epsilon
self.num_steps = num_steps
self.step_size = step_size
self.rand = random_start
self.clip_max = clip_max
self.loss = model.loss
self.logits = model.pre_softmax
self.grad = tf.gradients(self.loss, model.x_input)[0]
def perturb(self, x_nat, y, sess):
"""Given a set of examples (x_nat, y), returns a set of adversarial
examples within epsilon of x_nat in l_infinity norm."""
if self.rand:
x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
x = np.clip(x, 0, self.clip_max) # ensure valid pixel range
else:
x = np.copy(x_nat)
for i in range(self.num_steps):
loss, logits, grad = sess.run((self.loss, self.logits, self.grad), feed_dict={self.model.x_input: x,
self.model.y_input: y})
# added by AUTHOR
if np.all(logits.argmax(-1) != y):
break
print(i, loss, logits)
# x = np.add(x, self.step_size * np.sign(grad), out=x, casting='unsafe')
# changed by AUTHOR
grad = np.sign(grad)
#grad = grad / (grad.reshape(len(grad), -1)**2).sum(-1)
x = x + self.step_size * grad
x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
x = np.clip(x, 0, self.clip_max) # ensure valid pixel range
return x
def perturb_l2(self, x_nat, y, sess):
"""Given a set of examples (x_nat, y), returns a set of adversarial
examples within epsilon of x_nat in l_2 norm."""
if self.rand:
pert = np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
pert_norm = np.linalg.norm(pert)
pert = pert / max(1, pert_norm)
else:
pert = np.zeros(x_nat.shape)
for i in range(self.num_steps):
x = x_nat + pert
grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
self.model.y_input: y})
normalized_grad = grad / np.linalg.norm(grad)
pert = np.add(pert, self.step_size * normalized_grad, out=pert, casting='unsafe')
# project pert to norm ball
pert_norm = np.linalg.norm(pert)
rescale_factor = pert_norm / self.epsilon
pert = pert / max(1, rescale_factor)
x = x_nat + pert
x = np.clip(x, 0, 255)
return x
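# Illustrative sketch (added for exposition only, not called anywhere): the core of
# LinfPGDAttack.perturb above is a signed-gradient ascent step followed by a
# projection back into the epsilon-ball around the clean input and the valid
# pixel range.
def _linf_pgd_step_sketch(x, x_nat, grad, step_size, epsilon, clip_max=255.0):
  """One L-infinity PGD step on a NumPy batch: ascend along sign(grad), then project."""
  import numpy as np
  x = x + step_size * np.sign(grad)                 # signed-gradient ascent
  x = np.clip(x, x_nat - epsilon, x_nat + epsilon)  # project into the eps-ball
  return np.clip(x, 0.0, clip_max)                  # keep pixels in a valid range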
if __name__ == '__main__':
import json
import sys
import math
config = vars(config_attack.get_args())
tf.set_random_seed(config['tf_seed'])
np.random.seed(config['np_seed'])
model_file = tf.train.latest_checkpoint(config['model_dir'])
print("config['model_dir']: ", config['model_dir'])
if model_file is None:
print('No model found')
sys.exit()
print("JARN MODEL")
from model_jarn import Model
if "_zeromeaninput" in config['model_dir']:
model = Model(dataset=config['dataset'], train_batch_size=config['eval_batch_size'], normalize_zero_mean=True)
else:
model = Model(dataset=config['dataset'], train_batch_size=config['eval_batch_size'])
attack = LinfPGDAttack(model,
config['epsilon'],
config['num_steps'],
config['step_size'],
config['random_start'],
config['loss_func'],
dataset=config['dataset'])
saver = tf.train.Saver()
data_path = config['data_path']
print("load cifar10 dataset")
cifar = cifar10_input.CIFAR10Data(data_path)
with tf.Session() as sess:
# Restore the checkpoint
saver.restore(sess, model_file)
# Iterate over the samples batch-by-batch
num_eval_examples = config['num_eval_examples']
eval_batch_size = config['eval_batch_size']
num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
x_adv = [] # adv accumulator
print('Iterating over {} batches'.format(num_batches))
for ibatch in range(num_batches):
bstart = ibatch * eval_batch_size
bend = min(bstart + eval_batch_size, num_eval_examples)
print('batch size: {}'.format(bend - bstart))
x_batch = cifar.eval_data.xs[bstart:bend, :]
y_batch = cifar.eval_data.ys[bstart:bend]
x_batch_adv = attack.perturb(x_batch, y_batch, sess)
x_adv.append(x_batch_adv)
print('Storing examples')
path = config['store_adv_path']
if path == None:
model_name = config['model_dir'].split('/')[1]
if config['attack_name'] == None:
path = "attacks/{}_attack.npy".format(model_name)
else:
path = "attacks/{}_{}_attack.npy".format(model_name, config['attack_name'])
if not os.path.exists("attacks/"):
os.makedirs("attacks/")
x_adv = np.concatenate(x_adv, axis=0)
np.save(path, x_adv)
print('Examples stored in {}'.format(path))
if config['save_eval_log']:
if not os.path.exists("attack_log/"):
os.makedirs("attack_log/")
date_str = datetime.now().strftime("%d_%b")
log_dir = "attack_log/" + date_str
if not os.path.exists(log_dir):
os.makedirs(log_dir)
log_filename = path.split("/")[-1].replace('.npy', '.txt')
log_file_path = os.path.join(log_dir, log_filename)
with open(log_file_path, "w") as f:
f.write('Saved model name: {} \n'.format(model_file))
print('Model name saved at ', log_file_path)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluates a model against examples from a .npy file as specified
in attack_config.json"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import json
import math
import os
import sys
import time
import tensorflow as tf
import numpy as np
from tqdm import tqdm
# from model import Model
import cifar10_input
# import cifar100_input
import config_attack
# with open('attack_config.json') as config_file:
# config = json.load(config_file)
config = vars(config_attack.get_args())
# if config['model_dir'] in ["models/adv_trained", "models/naturally_trained"]:
# from free_model_original import Model
# elif 'DefPert2' in config['model_dir']:
# from model_jarn import ModelDefPert as Model
# elif 'JARN':
# from model_jarn import Model
# else:
# from free_model import Model
data_path = config['data_path']
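# run_attack restores the given checkpoint, rejects adversarial examples whose
# L-inf distance to the clean CIFAR-10 test images exceeds the allowed epsilon
# (up to a small numerical tolerance), and then reports adversarial accuracy on
# the stored examples followed by clean accuracy on the full test set.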
def run_attack(checkpoint, x_adv, epsilon):
# cifar = cifar10_input.CIFAR10Data(data_path)
cifar = cifar10_input.CIFAR10Data(data_path)
# if config['dataset'] == 'cifar10':
# cifar = cifar10_input.CIFAR10Data(data_path)
# else:
# cifar = cifar100_input.CIFAR100Data(data_path)
print("JARN MODEL")
from model_jarn import Model
if "_zeromeaninput" in config['model_dir']:
model = Model(dataset=config['dataset'], train_batch_size=config['eval_batch_size'], normalize_zero_mean=True)
else:
model = Model(dataset=config['dataset'], train_batch_size=config['eval_batch_size'])
saver = tf.train.Saver()
num_eval_examples = 10000
eval_batch_size = 100
num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
total_corr = 0
x_nat = cifar.eval_data.xs
l_inf = np.amax(np.abs(x_nat - x_adv))
if l_inf > epsilon + 0.0001:
print('maximum perturbation found: {}'.format(l_inf))
print('maximum perturbation allowed: {}'.format(epsilon))
return
y_pred = [] # label accumulator
with tf.Session() as sess:
# Restore the checkpoint
saver.restore(sess, checkpoint)
# if 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config['model_dir']:
# sess.run(tf.global_variables_initializer())
# source_model_file = tf.train.latest_checkpoint("models/model_AdvTrain-jrtsource-JRT-tinyimagenet_b16")
# source_model_saver.restore(sess, source_model_file)
# finetuned_source_model_file = tf.train.latest_checkpoint(config['model_dir'])
# finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
# elif 'mnist_adv_trained_finetuned_on_cifar10_bwtransform' in config['model_dir']:
# sess.run(tf.global_variables_initializer())
# source_model_file = tf.train.latest_checkpoint("models/mnist_adv_trained")
# source_model_saver.restore(sess, source_model_file)
# finetuned_source_model_file = tf.train.latest_checkpoint(config['model_dir'])
# finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
# elif 'finetuned_on_cifar100' in config['model_dir']:
# sess.run(tf.global_variables_initializer())
# source_model_file = tf.train.latest_checkpoint("models/adv_trained")
# source_model_saver.restore(sess, source_model_file)
# finetuned_source_model_file = tf.train.latest_checkpoint(config['model_dir'])
# finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
# # sess.run(tf.global_variables_initializer())
# # source_model_file = tf.train.latest_checkpoint("models/adv_trained")
# # source_model_saver.restore(sess, source_model_file)
# # finetuned_source_model_file = tf.train.latest_checkpoint("models/adv_trained_finetuned_on_cifar100_b32_20ep")
# # finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
# else:
# saver.restore(sess, checkpoint)
# Iterate over the samples batch-by-batch
for ibatch in range(num_batches):
bstart = ibatch * eval_batch_size
bend = min(bstart + eval_batch_size, num_eval_examples)
x_batch = x_adv[bstart:bend, :]
y_batch = cifar.eval_data.ys[bstart:bend]
dict_adv = {model.x_input: x_batch,
model.y_input: y_batch}
cur_corr, y_pred_batch = sess.run([model.num_correct, model.predictions],
feed_dict=dict_adv)
# if 'finetuned_on_cifar10' in config['model_dir'] or 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config['model_dir']:
# cur_corr, y_pred_batch = sess.run([model.target_task_num_correct, model.target_task_predictions],
# feed_dict=dict_adv)
# else:
# cur_corr, y_pred_batch = sess.run([model.num_correct, model.predictions],
# feed_dict=dict_adv)
total_corr += cur_corr
y_pred.append(y_pred_batch)
accuracy = total_corr / num_eval_examples
print('Adv Accuracy: {:.2f}%'.format(100.0 * accuracy))
y_pred = np.concatenate(y_pred, axis=0)
store_adv_pred_path = "preds/" + adv_examples_path.split("/")[-1]
if not os.path.exists("preds/"):
os.makedirs("preds/")
np.save(store_adv_pred_path, y_pred)
print('Output saved at ', store_adv_pred_path)
if config['save_eval_log']:
date_str = datetime.now().strftime("%d_%b")
log_dir = "attack_log/" + date_str
if not os.path.exists(log_dir):
os.makedirs(log_dir)
log_filename = adv_examples_path.split("/")[-1].replace('.npy', '.txt')
model_name = config['model_dir'].split('/')[1]
# if model_name not in log_filename or config['xfer_attack']:
# print("Transfer Attack!")
# if config['custom_output_model_name'] is not None:
# new_log_filename = config['custom_output_model_name'] +"-xferattacked_by-"+ log_filename
# else:
# new_log_filename = model_name +"-xferattacked_by-"+ log_filename
# log_filename = new_log_filename
log_file_path = os.path.join(log_dir, log_filename)
with open(log_file_path, "w") as f:
f.write('Model checkpoint: {} \n'.format(checkpoint))
f.write('Adv Accuracy: {:.2f}%'.format(100.0 * accuracy))
print('Results saved at ', log_file_path)
# full test evaluation
# raw_data = cifar10_input.CIFAR10Data(data_path)
if config['dataset'] == 'cifar10':
raw_data = cifar10_input.CIFAR10Data(data_path)
else:
raw_data = cifar100_input.CIFAR100Data(data_path)
data_size = raw_data.eval_data.n
if data_size % config['eval_batch_size'] == 0:
eval_steps = data_size // config['eval_batch_size']
else:
eval_steps = data_size // config['eval_batch_size'] + 1
total_num_correct = 0
for ii in tqdm(range(eval_steps)):
x_eval_batch, y_eval_batch = raw_data.eval_data.get_next_batch(config['eval_batch_size'], multiple_passes=False)
eval_dict = {model.x_input: x_eval_batch, model.y_input: y_eval_batch}
if 'finetuned_on_cifar10' in config['model_dir'] or 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config['model_dir']:
num_correct = sess.run(model.target_task_num_correct, feed_dict=eval_dict)
else:
num_correct = sess.run(model.num_correct, feed_dict=eval_dict)
total_num_correct += num_correct
eval_acc = total_num_correct / data_size
with open(log_file_path, "a+") as f:
f.write('\nClean Accuracy: {:.2f}%'.format(100.0 * eval_acc))
print('Clean Accuracy: {:.2f}%'.format(100.0 * eval_acc))
print('Results saved at ', log_file_path)
if __name__ == '__main__':
import json
# with open('attack_config.json') as config_file:
# config = json.load(config_file)
model_dir = config['model_dir']
checkpoint = tf.train.latest_checkpoint(model_dir)
adv_examples_path = config['store_adv_path']
if adv_examples_path == None:
model_name = config['model_dir'].split('/')[1]
if config['attack_name'] == None:
adv_examples_path = "attacks/{}_attack.npy".format(model_name)
# if config['dataset'] == 'cifar10':
# adv_examples_path = "attacks/{}_attack.npy".format(model_name)
# else:
# adv_examples_path = "attacks/{}_c100attack.npy".format(model_name)
else:
adv_examples_path = "attacks/{}_{}_attack.npy".format(model_name, config['attack_name'])
# if config['dataset'] == 'cifar10':
# adv_examples_path = "attacks/{}_{}_attack.npy".format(model_name, config['attack_name'])
# else:
# adv_examples_path = "attacks/{}_{}_c100attack.npy".format(model_name, config['attack_name'])
# if config['attack_norm'] == '2':
# adv_examples_path = adv_examples_path.replace("attack.npy", "l2attack.npy")
x_adv = np.load(adv_examples_path)
tf.set_random_seed(config['tf_seed'])
np.random.seed(config['np_seed'])
if checkpoint is None:
print('No checkpoint found')
elif x_adv.shape != (10000, 32, 32, 3):
print('Invalid shape: expected (10000, 32, 32, 3), found {}'.format(x_adv.shape))
elif np.amax(x_adv) > 255.0001 or np.amin(x_adv) < -0.0001:
print('Invalid pixel range. Expected [0, 255], found [{}, {}]'.format(
np.amin(x_adv),
np.amax(x_adv)))
else:
print("adv_examples_path: ", adv_examples_path)
run_attack(checkpoint, x_adv, config['epsilon'])
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
import torch
class ModelAdapter:
def __init__(self, logits, x, y, sess, num_classes=10, device="cuda"):
self.logits = logits
self.sess = sess
self.x_input = x
self.y_input = y
self.num_classes = num_classes
self.device = device
# gradients of logits
if num_classes <= 10:
self.grads = [None] * num_classes
for cl in range(num_classes):
self.grads[cl] = tf.gradients(self.logits[:, cl], self.x_input)[0]
# cross-entropy loss
self.xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.logits, labels=self.y_input)
self.grad_xent = tf.gradients(self.xent, self.x_input)[0]
# dlr loss
self.dlr = dlr_loss(self.logits, self.y_input, num_classes=self.num_classes)
self.grad_dlr = tf.gradients(self.dlr, self.x_input)[0]
# targeted dlr loss
self.y_target = tf.placeholder(tf.int64, shape=[None])
self.dlr_target = dlr_loss_targeted(self.logits, self.y_input, self.y_target, num_classes=self.num_classes)
self.grad_target = tf.gradients(self.dlr_target, self.x_input)[0]
self.la = tf.placeholder(tf.int64, shape=[None])
self.la_target = tf.placeholder(tf.int64, shape=[None])
la_mask = tf.one_hot(self.la, self.num_classes)
la_target_mask = tf.one_hot(self.la_target, self.num_classes)
la_logit = tf.reduce_sum(la_mask * self.logits, axis=1)
la_target_logit = tf.reduce_sum(la_target_mask * self.logits, axis=1)
self.diff_logits = la_target_logit - la_logit
self.grad_diff_logits = tf.gradients(self.diff_logits, self.x_input)[0]
def predict(self, x):
x = x.detach()
x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
y = self.sess.run(self.logits, {self.x_input: x2})
return torch.from_numpy(y).to(self.device)
def grad_logits(self, x):
x = x.detach()
x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
logits, g2 = self.sess.run([self.logits, self.grads], {self.x_input: x2})
g2 = np.moveaxis(np.array(g2), 0, 1)
g2 = np.transpose(g2, (0, 1, 4, 2, 3))
return torch.from_numpy(logits).cuda(), torch.from_numpy(g2).cuda()
def get_grad_diff_logits_target(self, x, y=None, y_target=None):
x = x.detach()
la = y.cpu().numpy()
la_target = y_target.cpu().numpy()
x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
dl, g2 = self.sess.run([self.diff_logits, self.grad_diff_logits], {self.x_input: x2, self.la: la, self.la_target: la_target})
g2 = np.transpose(np.array(g2), (0, 3, 1, 2))
return torch.from_numpy(dl).to(self.device), torch.from_numpy(g2).to(self.device)
def get_logits_loss_grad_xent(self, x, y):
x = x.detach()
x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
y2 = y.clone().cpu().numpy()
logits_val, loss_indiv_val, grad_val = self.sess.run([self.logits, self.xent, self.grad_xent], {self.x_input: x2, self.y_input: y2})
grad_val = np.moveaxis(grad_val, 3, 1)
return torch.from_numpy(logits_val).to(self.device), torch.from_numpy(loss_indiv_val).to(self.device), torch.from_numpy(grad_val).to(self.device)
def get_logits_loss_grad_dlr(self, x, y):
x = x.detach()
x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
y2 = y.clone().cpu().numpy()
logits_val, loss_indiv_val, grad_val = self.sess.run([self.logits, self.dlr, self.grad_dlr], {self.x_input: x2, self.y_input: y2})
grad_val = np.moveaxis(grad_val, 3, 1)
return torch.from_numpy(logits_val).to(self.device), torch.from_numpy(loss_indiv_val).to(self.device), torch.from_numpy(grad_val).to(self.device)
def get_logits_loss_grad_target(self, x, y, y_target):
x = x.detach()
x2 = np.moveaxis(x.cpu().numpy(), 1, 3)
y2 = y.clone().cpu().numpy()
y_targ = y_target.clone().cpu().numpy()
logits_val, loss_indiv_val, grad_val = self.sess.run([self.logits, self.dlr_target, self.grad_target], {self.x_input: x2, self.y_input: y2, self.y_target: y_targ})
grad_val = np.moveaxis(grad_val, 3, 1)
return torch.from_numpy(logits_val).to(self.device), torch.from_numpy(loss_indiv_val).to(self.device), torch.from_numpy(grad_val).to(self.device)
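# ModelAdapter above bridges AutoAttack's PyTorch interface and the TF1 graph:
# batches arrive as NCHW torch tensors, are fed to the graph as NHWC NumPy
# arrays, and gradients come back NHWC and are moved back to NCHW before being
# wrapped in torch tensors again. A minimal sketch of that axis bookkeeping
# (added for exposition only, not used by the adapter itself):
def _nchw_nhwc_roundtrip_sketch(x_torch):
  """Convert an NCHW torch batch to an NHWC NumPy array and back."""
  x_nhwc = np.moveaxis(x_torch.detach().cpu().numpy(), 1, 3)   # NCHW -> NHWC
  return torch.from_numpy(np.moveaxis(x_nhwc, 3, 1).copy())    # NHWC -> NCHW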
def dlr_loss(x, y, num_classes=10):
x_sort = tf.contrib.framework.sort(x, axis=1)
y_onehot = tf.one_hot(y, num_classes)
if num_classes > 2:
### TODO: adapt to the case when the point is already misclassified
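    # The standard untargeted DLR loss uses the true-class logit in the numerator,
    # -(z_y - max_{i != y} z_i) / (z_pi1 - z_pi3); the sorted-logit form below
    # coincides with it only while the point is still classified correctly
    # (i.e. z_pi1 == z_y), which is what the TODO above refers to.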
loss = -(x_sort[:, -1] - x_sort[:, -2]) / (x_sort[:, -1] - x_sort[:, -3] + 1e-12)
else:
loss = (tf.reduce_max(x - y_onehot * 1e9) - tf.gather(x, y, axis=-1)) / tf.reduce_max(x - y_onehot * 1e9)
return loss
def dlr_loss_targeted(x, y, y_target, num_classes=10):
x_sort = tf.contrib.framework.sort(x, axis=1)
y_onehot = tf.one_hot(y, num_classes)
y_target_onehot = tf.one_hot(y_target, num_classes)
loss = -(tf.reduce_sum(x * y_onehot, axis=1) - tf.reduce_sum(x * y_target_onehot, axis=1)) / (x_sort[:, -1] - .5 * x_sort[:, -3] - .5 * x_sort[:, -4] + 1e-12)
return loss |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Modified according to main method in pgd_attack.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from functools import partial
import torch
import utils
from argparse_utils import DecisionBoundaryBinarizationSettings
from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
from active_tests import decision_boundary_binarization as dbb
import tensorflow as tf
import numpy as np
import sys
import math
import cifar10_input
import config_attack
from pgd_attack import LinfPGDAttack
class BinarizedModel:
def __init__(self, model, logit_diff_loss=False):
self.model = model
self.x_input = model.x_input
self.y_input = model.y_input
features = model.neck
with tf.variable_scope("binarized_readout"):
# build linear readout
bro_w = tf.get_variable(
'DW', [features.shape[-1], 2],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
bro_b = tf.get_variable('biases', [2],
initializer=tf.constant_initializer())
self.bro_w_pl = tf.placeholder(tf.float32, shape=[features.shape[-1], 2])
self.bro_b_pl = tf.placeholder(tf.float32, shape=[2])
self.bro_w_set_weight = bro_w.assign(self.bro_w_pl)
self.bro_b_set_weight = bro_b.assign(self.bro_b_pl)
self.pre_softmax = tf.nn.xw_plus_b(features, bro_w, bro_b)
if logit_diff_loss:
yh = tf.one_hot(self.y_input, 2)
self.loss = tf.reduce_max(self.pre_softmax - yh * 1e9) - tf.gather(
self.pre_softmax, self.y_input, axis=-1)
else:
self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.pre_softmax, labels=self.y_input)
self.loss = tf.reduce_sum(self.y_xent, name='y_xent')
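# BinarizedModel swaps the network's classification head for a fresh 2-class
# linear readout on top of the penultimate features (model.neck). The readout is
# not trained inside the TF graph; run_attack below copies the weights of an
# externally fitted linear layer into the graph through the bro_w_pl / bro_b_pl
# placeholders, and the attack then targets this binary problem as required by
# the decision-boundary binarization test.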
def run_attack(m, l, sess, logits, x_pl, bro_w_pl, bro_b_pl, bro_w_assign,
bro_b_assign, attack):
linear_layer = m[-1]
del m
sess.run(bro_w_assign, {bro_w_pl: linear_layer.weight.data.numpy().T})
sess.run(bro_b_assign, {bro_b_pl: linear_layer.bias.data.numpy()})
for x, y in l:
x, y = x.numpy(), y.numpy()
x = x.transpose((0, 2, 3, 1))
x_adv = attack(x, y)
clean_logits = sess.run(logits, {x_pl: x})
adv_logits = sess.run(logits, {x_pl: x_adv})
is_adv = adv_logits.argmax(-1) != y
print(is_adv, clean_logits, adv_logits)
return is_adv, (torch.tensor(x_adv.transpose((0, 3, 1, 2))),
torch.tensor(adv_logits))
def main():
config = vars(config_attack.get_args())
tf.set_random_seed(config['tf_seed'])
np.random.seed(config['np_seed'])
model_file = tf.train.latest_checkpoint(config['model_dir'])
print("config['model_dir']: ", config['model_dir'])
if model_file is None:
print('No model found')
sys.exit()
print("JARN MODEL")
from model_jarn import Model
if "_zeromeaninput" in config['model_dir']:
model = Model(dataset=config['dataset'],
train_batch_size=config['eval_batch_size'],
normalize_zero_mean=True,
zero_one=True,
# added by AUTHOR
mode='eval')
else:
model = Model(dataset=config['dataset'],
train_batch_size=config['eval_batch_size'],
zero_one=True,
# added by AUTHOR
mode='eval')
print("model eval mode:", model.mode)
sess = tf.Session()
saver = tf.train.Saver()
# Restore the checkpoint
saver.restore(sess, model_file)
binarized_model = BinarizedModel(model,
logit_diff_loss=config['attack'] == 'pgd-ld')
print("Using attack:", config['attack'])
if config['attack'] == 'pgd' or config['attack'] == 'pgd-ld':
attack = LinfPGDAttack(binarized_model,
config['epsilon'] / 255.0,
config['num_steps'],
config['step_size'] / 255.0,
config['random_start'],
config['loss_func'],
dataset=config['dataset'],
clip_max=1.0)
attack_fn = lambda x, y: attack.perturb(x, y, sess)
elif config['attack'] == 'apgd':
from autoattack import autopgd_base
from autoattack_adapter import ModelAdapter
autoattack_model = ModelAdapter(
binarized_model.pre_softmax, binarized_model.x_input,
binarized_model.y_input, sess, num_classes=2, device="cpu")
attack = autopgd_base.APGDAttack(
autoattack_model, n_restarts=5, n_iter=100, verbose=True,
eps=config["epsilon"] / 255.0, norm="Linf", eot_iter=1, rho=.99,
is_tf_model=True, device="cpu", loss='dlr')
attack_fn = lambda x, y: attack.perturb(
torch.tensor(x.transpose((0, 3, 1, 2)), device="cpu"),
torch.tensor(y, device="cpu")
).detach().cpu().numpy().transpose((0, 2, 3, 1))
else:
raise ValueError("invalid attack")
data_path = config['data_path']
print("load cifar10 dataset")
cifar = cifar10_input.CIFAR10Data(data_path)
# Iterate over the samples batch-by-batch
num_eval_examples = config['num_eval_examples']
eval_batch_size = config['eval_batch_size']
x_data = cifar.eval_data.xs[:num_eval_examples]
y_data = cifar.eval_data.ys[:num_eval_examples]
x_data = x_data.transpose((0, 3, 1, 2)) / 255.0
assert x_data.max() <= 1 and x_data.min() >= 0, (x_data.min(), x_data.max())
test_loader = utils.build_dataloader_from_arrays(x_data, y_data,
eval_batch_size)
def feature_extractor_forward_pass(x, features_and_logits: bool = False,
features_only: bool = False):
if features_and_logits:
assert not features_only, "Only one of the flags must be set."
if features_and_logits:
return sess.run(
(model.neck, model.pre_softmax),
feed_dict={model.x_input: x.transpose(0, 2, 3, 1)})
elif features_only:
return sess.run(
model.neck,
feed_dict={model.x_input: x.transpose(0, 2, 3, 1)})
else:
return sess.run(
model.pre_softmax,
feed_dict={model.x_input: x.transpose(0, 2, 3, 1)})
feature_extractor = TensorFlow1ToPyTorchWrapper(
logit_forward_pass=feature_extractor_forward_pass,
logit_forward_and_backward_pass=None,
)
attack_fn_partial = partial(
run_attack,
sess=sess, logits=binarized_model.pre_softmax,
x_pl=model.x_input,
bro_w_pl=binarized_model.bro_w_pl, bro_b_pl=binarized_model.bro_b_pl,
bro_w_assign=binarized_model.bro_w_set_weight,
bro_b_assign=binarized_model.bro_b_set_weight,
attack=attack_fn)
scores_logit_differences_and_validation_accuracies = \
dbb.interior_boundary_discrimination_attack(
feature_extractor,
test_loader,
# m, l, sess, logits, x_pl, is_train, bro_w_pl, bro_b_pl,
# bro_w_assign, bro_b_assign, attack_fn)
attack_fn=lambda m, l, kwargs: attack_fn_partial(m, l),
linearization_settings=DecisionBoundaryBinarizationSettings(
epsilon=config["epsilon"] / 255.0,
norm="linf",
lr=10000,
n_boundary_points=config["n_boundary_points"],
n_inner_points=config["n_inner_points"],
adversarial_attack_settings=None,
optimizer="sklearn"
),
n_samples=config["num_eval_examples"],
device="cpu",
n_samples_evaluation=200,
n_samples_asr_evaluation=200,
#rescale_logits="adaptive",
sample_training_data_from_corners=config["sample_from_corners"],
#decision_boundary_closeness=0.9999,
fail_on_exception=False
# args.num_samples_test * 10
)
print(dbb.format_result(scores_logit_differences_and_validation_accuracies,
config["num_eval_examples"]))
if __name__ == "__main__":
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' Adversarial Interpolation '''
from it_utils import cos_dist
import copy
import pickle
import torch
from torch.autograd.gradcheck import zero_gradients
from torch.autograd import Variable
def adv_interp(inputs,
y,
base_net,
num_classes,
epsilon=8,
epsilon_y=0.5,
v_min=0,
v_max=255):
# x: image batch with shape [batch_size, c, h, w]
# y: one-hot label batch with shape [batch_size, num_classes]
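  # Adversarial interpolation: each example is paired with its reversed-batch
  # partner (x_prime, y_prime). The image is perturbed within an L-inf budget
  # `epsilon` so that its features move toward the partner's features (one signed
  # gradient step on the cosine feature distance), while the label is softened
  # toward the complement of the partner's label with mixing weight `epsilon_y`.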
net = copy.deepcopy(base_net)
x = inputs.clone()
inv_index = torch.arange(x.size(0) - 1, -1, -1).long()
x_prime = x[inv_index, :, :, :].detach()
y_prime = y[inv_index, :]
x_init = x.detach() + torch.zeros_like(x).uniform_(-epsilon, epsilon)
x_init.requires_grad_()
zero_gradients(x_init)
if x_init.grad is not None:
x_init.grad.data.fill_(0)
net.eval()
fea_b = net(x_init, mode='feature')
fea_t = net(x_prime, mode='feature')
loss_adv = cos_dist(fea_b, fea_t)
net.zero_grad()
loss_adv = loss_adv.mean()
loss_adv.backward(retain_graph=True)
x_tilde = x_init.data - epsilon * torch.sign(x_init.grad.data)
x_tilde = torch.min(torch.max(x_tilde, inputs - epsilon), inputs + epsilon)
x_tilde = torch.clamp(x_tilde, v_min, v_max)
y_bar_prime = (1 - y_prime) / (num_classes - 1.0)
y_tilde = (1 - epsilon_y) * y + epsilon_y * y_bar_prime
return x_tilde.detach(), y_tilde.detach() |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Utility functions, PGD attacks and Loss functions
'''
import math
import numpy as np
import random
import scipy.io
import copy
from torch.autograd import Variable
from attacks import autopgd
from attacks import pgd
from networks import *
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class Attack_AutoPGD(nn.Module):
    # Back-propagate
def __init__(self, basic_net, config, attack_net=None):
super(Attack_AutoPGD, self).__init__()
self.basic_net = basic_net
self.attack_net = attack_net
self.epsilon = config['epsilon']
self.n_restarts = 0 if "num_restarts" not in config else \
config["num_restarts"]
self.num_steps = config['num_steps']
self.loss_func = "ce" if 'loss_func' not in config.keys(
) else config['loss_func']
self.train_flag = True if 'train' not in config.keys(
) else config['train']
self.box_type = 'white' if 'box_type' not in config.keys(
) else config['box_type']
self.targeted = False if 'targeted' not in config.keys(
) else config['targeted']
self.n_classes = 10 if 'n_classes' not in config.keys(
) else config['n_classes']
def forward(self,
inputs,
targets,
attack=True,
targeted_label=-1,
batch_idx=0):
assert targeted_label == -1
def net(x):
output = self.basic_net(x)
if isinstance(output, tuple):
return output[0]
else:
return output
if attack:
temp = autopgd.auto_pgd(
model=net,
x=inputs, y=targets, n_steps=self.num_steps,
loss=self.loss_func,
epsilon=self.epsilon,
norm="linf",
n_restarts=self.n_restarts,
targeted=self.targeted,
n_averaging_steps=1,
n_classes=self.n_classes
)
x_adv = temp[0]
else:
x_adv = inputs
logits_pert = net(x_adv)
targets_prob = torch.softmax(logits_pert, -1)
return logits_pert, targets_prob.detach(), x_adv.detach()
class Attack_PGD(nn.Module):
def __init__(self, basic_net, config):
super(Attack_PGD, self).__init__()
self.basic_net = basic_net
self.train_flag = True if 'train' not in config.keys(
) else config['train']
self.attack = True if 'attack' not in config.keys(
) else config['attack']
if self.attack:
self.rand = config['random_start']
self.step_size = config['step_size']
self.v_min = config['v_min']
self.v_max = config['v_max']
self.epsilon = config['epsilon']
self.num_steps = config['num_steps']
self.loss_func = torch.nn.CrossEntropyLoss(
reduction='none') if 'loss_func' not in config.keys(
) else config['loss_func']
# print(config)
def forward(self, inputs, targets):
if not self.attack:
if self.train_flag:
self.basic_net.train()
else:
self.basic_net.eval()
outputs = self.basic_net(inputs, mode="logits")
return outputs, None
#aux_net = pickle.loads(pickle.dumps(self.basic_net))
aux_net = copy.deepcopy(self.basic_net)
aux_net.eval()
logits_pred_nat = aux_net(inputs, mode="logits")
targets_prob = F.softmax(logits_pred_nat.float(), dim=1)
num_classes = targets_prob.size(1)
outputs = aux_net(inputs, mode="logits")
targets_prob = F.softmax(outputs.float(), dim=1)
y_tensor_adv = targets
x = inputs.detach()
if self.rand:
x = x + torch.zeros_like(x).uniform_(-self.epsilon, self.epsilon)
x_org = x.detach()
loss_array = np.zeros((inputs.size(0), self.num_steps))
for i in range(self.num_steps):
x.requires_grad_()
if x.grad is not None and x.grad.data is not None:
x.grad.data.zero_()
if x.grad is not None:
x.grad.data.fill_(0)
aux_net.eval()
logits = aux_net(x, mode="logits")
loss = self.loss_func(logits, y_tensor_adv)
loss = loss.mean()
aux_net.zero_grad()
loss.backward()
x_adv = x.data + self.step_size * torch.sign(x.grad.data)
x_adv = torch.min(torch.max(x_adv, inputs - self.epsilon),
inputs + self.epsilon)
x_adv = torch.clamp(x_adv, self.v_min, self.v_max)
x = Variable(x_adv)
if self.train_flag:
self.basic_net.train()
else:
self.basic_net.eval()
logits_pert = self.basic_net(x.detach(), mode="logits")
return logits_pert, targets_prob.detach(), x.detach()
class Attack_BetterPGD(nn.Module):
def __init__(self, basic_net, config):
super(Attack_BetterPGD, self).__init__()
self.basic_net = basic_net
self.train_flag = True if 'train' not in config.keys(
) else config['train']
self.attack = True if 'attack' not in config.keys(
) else config['attack']
if self.attack:
self.rand = config['random_start']
self.step_size = config['step_size']
self.v_min = config['v_min']
self.v_max = config['v_max']
self.epsilon = config['epsilon']
self.num_steps = config['num_steps']
self.loss_func = torch.nn.CrossEntropyLoss(
reduction='none') if 'loss_func' not in config.keys(
) else config['loss_func']
# print(config)
def forward(self, inputs, targets):
if not self.attack:
if self.train_flag:
self.basic_net.train()
else:
self.basic_net.eval()
outputs = self.basic_net(inputs, mode="logits")
return outputs, None
#aux_net = pickle.loads(pickle.dumps(self.basic_net))
aux_net = copy.deepcopy(self.basic_net)
def net(x):
return aux_net(x, mode="logits")
aux_net.eval()
outputs = aux_net(inputs, mode="logits")
targets_prob = F.softmax(outputs.float(), dim=1)
sign = 1.0
x_adv = pgd.general_pgd(
loss_fn=lambda x, y: sign * self.loss_func(net(x), y),
is_adversarial_fn=lambda x, y: net(x).argmax(-1) == y
if targets != -1 else net(x).argmax(-1) != y,
x=inputs, y=targets, n_steps=self.num_steps,
step_size=self.step_size,
epsilon=self.epsilon,
norm="linf",
random_start=self.rand
)[0]
if self.train_flag:
self.basic_net.train()
else:
self.basic_net.eval()
logits_pert = self.basic_net(x_adv.detach(), mode="logits")
return logits_pert, targets_prob.detach(), x_adv.detach()
class softCrossEntropy(nn.Module):
def __init__(self, reduce=True):
super(softCrossEntropy, self).__init__()
self.reduce = reduce
return
def forward(self, inputs, target):
"""
:param inputs: predictions
:param target: target labels in vector form
:return: loss
"""
log_likelihood = -F.log_softmax(inputs, dim=1)
sample_num, class_num = target.shape
if self.reduce:
loss = torch.sum(torch.mul(log_likelihood, target)) / sample_num
else:
loss = torch.sum(torch.mul(log_likelihood, target), 1)
return loss
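# softCrossEntropy above computes the soft-label cross entropy
#   L = -(1/N) * sum_i sum_c t_{i,c} * log softmax(z_i)_c
# (per-sample when reduce=False); the adversarial-interpolation training uses it
# with the interpolated labels y_tilde produced by adv_interp.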
class CWLoss(nn.Module):
def __init__(self, num_classes, margin=50, reduce=True):
super(CWLoss, self).__init__()
self.num_classes = num_classes
self.margin = margin
self.reduce = reduce
return
def forward(self, logits, targets):
"""
        :param logits: logit predictions
:param targets: target labels
:return: loss
"""
onehot_targets = one_hot_tensor(targets, self.num_classes,
targets.device)
self_loss = torch.sum(onehot_targets * logits, dim=1)
other_loss = torch.max(
(1 - onehot_targets) * logits - onehot_targets * 1000, dim=1)[0]
loss = -torch.sum(torch.clamp(self_loss - other_loss + self.margin, 0))
if self.reduce:
sample_num = onehot_targets.shape[0]
loss = loss / sample_num
return loss
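# CWLoss above is the Carlini-Wagner margin loss on logits,
#   L(z, y) = -sum_i max(z_{i, y_i} - max_{j != y_i} z_{i, j} + margin, 0),
# averaged over the batch when reduce=True; the "- onehot_targets * 1000" term
# simply masks the true class out of the max over the remaining logits.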
def cos_dist(x, y):
cos = nn.CosineSimilarity(dim=1, eps=1e-6)
batch_size = x.size(0)
c = torch.clamp(1 - cos(x.view(batch_size, -1), y.view(batch_size, -1)),
min=0)
return c.mean()
def one_hot_tensor(y_batch_tensor, num_classes, device):
y_tensor = torch.cuda.FloatTensor(y_batch_tensor.size(0),
num_classes).fill_(0)
y_tensor[np.arange(len(y_batch_tensor)), y_batch_tensor] = 1.0
return y_tensor
def get_acc(outputs, targets):
_, predicted = outputs.max(1)
total = targets.size(0)
correct = predicted.eq(targets).sum().item()
acc = 1.0 * correct / total
return acc
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1") |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Adversarial Interpolation Training'''
from __future__ import print_function
import time
import numpy as np
import random
import copy
import os
import argparse
import datetime
import pickle
import it_utils
from it_utils import softCrossEntropy
from it_utils import one_hot_tensor
from adv_interp import adv_interp
from tqdm import tqdm
from PIL import Image
from networks import *
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
from torch.autograd.gradcheck import zero_gradients
from torch.autograd import Variable
parser = argparse.ArgumentParser(
    description='Adversarial Interpolation Training')
parser.register('type', 'bool', it_utils.str2bool)
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--label_adv_delta',
default=0.5,
type=float,
help='label_adv_delta')
parser.add_argument('--resume',
'-r',
action='store_true',
help='resume from checkpoint')
parser.add_argument('--model_dir', type=str, help='model path')
parser.add_argument('--init_model_pass',
default='-1',
type=str,
help='init model pass')
parser.add_argument('--save_epochs', default=10, type=int, help='save period')
parser.add_argument('--max_epoch', default=200, type=int, help='maximum number of training epochs')
parser.add_argument('--decay_epoch1', default=60, type=int, help='first learning rate decay epoch')
parser.add_argument('--decay_epoch2', default=90, type=int, help='second learning rate decay epoch')
parser.add_argument('--decay_rate',
default=0.1,
type=float,
                    help='learning rate decay factor')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--weight_decay',
default=2e-4,
type=float,
help='weight decay factor')
parser.add_argument('--log_step', default=10, type=int, help='log_step')
parser.add_argument('--num_classes', default=10, type=int, help='num classes')
parser.add_argument('--image_size', default=32, type=int, help='image size')
parser.add_argument('--batch_size_train',
default=128,
type=int,
help='batch size for training')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
start_epoch = 1
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
trainset = torchvision.datasets.CIFAR10(root='./data',
train=True,
download=True,
transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=args.batch_size_train,
shuffle=True,
num_workers=2)
print('======= WideResNet 28-10 ========')
net = WideResNet(depth=28, num_classes=args.num_classes, widen_factor=10)
net = net.to(device)
config_adv_interp = {
'v_min': -1.0,
'v_max': 1.0,
'epsilon': 8.0 / 255 * 2,
'num_steps': 1,
'step_size': 8.0 / 255 * 2,
'label_adv_delta': args.label_adv_delta,
}
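# Note on scales: transform_train normalizes images to [-1, 1], so the usual
# 8/255 pixel budget becomes 2 * 8/255 in input space; epsilon, step_size and
# the v_min/v_max clipping range above are all expressed in that [-1, 1] scale.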
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
optimizer = optim.SGD(net.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
if args.resume and args.init_model_pass != '-1':
print('Resume training from checkpoint..')
f_path_latest = os.path.join(args.model_dir, 'latest')
f_path = os.path.join(args.model_dir,
('checkpoint-%s' % args.init_model_pass))
if not os.path.isdir(args.model_dir):
print('train from scratch: no checkpoint directory or file found')
elif args.init_model_pass == 'latest' and os.path.isfile(f_path_latest):
checkpoint = torch.load(f_path_latest)
pretrained_dict = checkpoint['net']
model_dict = net.state_dict()
model_dict.update(pretrained_dict)
net.load_state_dict(model_dict, strict=False)
#optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch'] + 1
print('resuming training from epoch %s in latest' % start_epoch)
elif os.path.isfile(f_path):
checkpoint = torch.load(f_path)
net.load_state_dict(checkpoint['net'])
#optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch'] + 1
print('resuming training from epoch %s' % (start_epoch - 1))
elif not os.path.isfile(f_path) or not os.path.isfile(f_path_latest):
print('train from scratch: no checkpoint directory or file found')
soft_xent_loss = softCrossEntropy()
def train_one_epoch(epoch, net):
print('\n Training for Epoch: %d' % epoch)
net.train()
# learning rate schedule
if epoch < args.decay_epoch1:
lr = args.lr
elif epoch < args.decay_epoch2:
lr = args.lr * args.decay_rate
else:
lr = args.lr * args.decay_rate * args.decay_rate
for param_group in optimizer.param_groups:
param_group['lr'] = lr
iterator = tqdm(trainloader, ncols=0, leave=False)
for batch_idx, (inputs, targets) in enumerate(iterator):
start_time = time.time()
inputs, targets = inputs.to(device), targets.to(device)
targets_onehot = one_hot_tensor(targets, args.num_classes, device)
x_tilde, y_tilde = adv_interp(inputs, targets_onehot, net,
args.num_classes,
config_adv_interp['epsilon'],
config_adv_interp['label_adv_delta'],
config_adv_interp['v_min'],
config_adv_interp['v_max'])
outputs = net(x_tilde, mode='logits')
loss = soft_xent_loss(outputs, y_tilde)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss = loss.detach().item()
duration = time.time() - start_time
if batch_idx % args.log_step == 0:
adv_acc = it_utils.get_acc(outputs, targets)
# natural
net_cp = copy.deepcopy(net)
nat_outputs = net_cp(inputs, mode='logits')
nat_acc = it_utils.get_acc(nat_outputs, targets)
print(
"Epoch %d, Step %d, lr %.4f, Duration %.2f, Training nat acc %.2f, Training adv acc %.2f, Training adv loss %.4f"
% (epoch, batch_idx, lr, duration, 100 * nat_acc,
100 * adv_acc, train_loss))
if epoch % args.save_epochs == 0 or epoch >= args.max_epoch - 2:
print('Saving..')
f_path = os.path.join(args.model_dir, ('checkpoint-%s' % epoch))
state = {
'net': net.state_dict(),
'epoch': epoch,
#'optimizer': optimizer.state_dict()
}
if not os.path.isdir(args.model_dir):
os.makedirs(args.model_dir)
torch.save(state, f_path)
if epoch >= 1:
print('Saving latest model for epoch %s..' % (epoch))
f_path = os.path.join(args.model_dir, 'latest')
state = {
'net': net.state_dict(),
'epoch': epoch,
#'optimizer': optimizer.state_dict()
}
if not os.path.isdir(args.model_dir):
os.mkdir(args.model_dir)
torch.save(state, f_path)
for epoch in range(start_epoch, args.max_epoch + 1):
train_one_epoch(epoch, net)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import inspect
import os
import sys
import time
import warnings
import numpy as np
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
from tqdm import tqdm
import active_tests.decision_boundary_binarization
import it_utils
from it_utils import Attack_PGD, Attack_AutoPGD
from it_utils import CWLoss
from models import *
warnings.simplefilter('once', RuntimeWarning)
currentdir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
grandparentdir = os.path.dirname(os.path.dirname(currentdir))
sys.path.insert(0, grandparentdir)
parser = argparse.ArgumentParser(
description='Adversarial Interpolation Training')
parser.register('type', 'bool', it_utils.str2bool)
parser.add_argument('--resume',
'-r',
action='store_true',
help='resume from checkpoint')
parser.add_argument('--attack', default=True, type='bool', help='attack')
parser.add_argument('--model_dir', type=str, help='model path')
parser.add_argument('--init_model_pass',
default='-1',
type=str,
help='init model pass')
parser.add_argument('--attack_method',
default='pgd',
type=str,
                    help='attack method (natural, fgsm, pgd, cw, autopgd variants)')
parser.add_argument('--attack_method_list', type=str)
parser.add_argument('--log_step', default=7, type=int, help='log_step')
parser.add_argument('--num_classes', default=10, type=int, help='num classes')
parser.add_argument('--batch_size_test',
default=100,
type=int,
help='batch size for testing')
parser.add_argument('--image_size', default=32, type=int, help='image size')
parser.add_argument('--binarization-test', action="store_true")
parser.add_argument('--model-path', type=str, help='model path', default=None)
parser.add_argument('--num_samples_test',
default=-1,
type=int)
parser.add_argument('--n-inner-points',
default=50,
type=int)
parser.add_argument('--n-boundary-points',
default=10,
type=int)
parser.add_argument("--epsilon", default=8, type=int)
parser.add_argument("--more-steps", action="store_true")
parser.add_argument("--more-more-steps", action="store_true")
parser.add_argument("--sample-from-corners", action="store_true")
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
transform_test = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
testset = torchvision.datasets.CIFAR10(root='./data',
train=False,
download=True,
transform=transform_test)
testloader = torch.utils.data.DataLoader(testset,
batch_size=args.batch_size_test,
shuffle=False,
num_workers=2)
print('======= WideResNet 28-10 ========')
basic_net = WideResNet(depth=28, num_classes=args.num_classes, widen_factor=10)
basic_net = basic_net.to(device)
class ZeroOneOneOneNetwork(nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, x, *args, **kwargs):
return self.model((x - 0.5) / 0.5, *args, **kwargs)
if device == 'cuda':
basic_net = torch.nn.DataParallel(basic_net)
cudnn.benchmark = True
if args.binarization_test:
args.num_classes = 2
if args.num_samples_test == -1:
    args.num_samples_test = len(testset)
# load parameters
if args.resume and args.init_model_pass != '-1':
# Load checkpoint.
print('==> Resuming from saved checkpoint..')
if args.model_dir is not None:
f_path_latest = os.path.join(args.model_dir, 'latest')
f_path = os.path.join(args.model_dir,
('checkpoint-%s' % args.init_model_pass))
elif args.model_path is not None:
f_path = args.model_path
f_path_latest = args.model_path
if not os.path.isfile(f_path):
print('train from scratch: no checkpoint directory or file found')
elif args.init_model_pass == 'latest' and os.path.isfile(f_path_latest):
checkpoint = torch.load(f_path_latest)
basic_net.load_state_dict(checkpoint['net'])
start_epoch = checkpoint['epoch']
print('resuming from epoch %s in latest' % start_epoch)
elif os.path.isfile(f_path):
checkpoint = torch.load(f_path)
basic_net.load_state_dict(checkpoint['net'])
start_epoch = checkpoint['epoch']
print('resuming from epoch %s' % start_epoch)
elif not os.path.isfile(f_path) or not os.path.isfile(f_path_latest):
print('train from scratch: no checkpoint directory or file found')
# configs
config_natural = {'train': False, 'attack': False}
config_fgsm = {
'train': False,
'v_min': -1.0,
'v_max': 1.0,
'epsilon': args.epsilon / 255.0,
'num_steps': 1,
'step_size': args.epsilon / 255.0,
'random_start': True
}
config_pgd = {
'train': False,
'v_min': -1.0,
'v_max': 1.0,
'epsilon': args.epsilon / 255.0,
'num_steps': 20,
'step_size': args.epsilon / 4.0 / 255.0,
'random_start': True,
'loss_func': torch.nn.CrossEntropyLoss(reduction='none')
}
config_cw = {
'train': False,
'v_min': -1.0,
'v_max': 1.0,
'epsilon': args.epsilon / 255,
'num_steps': 20 * 10,
'step_size': args.epsilon / 4.0 / 255 / 5,
'random_start': True,
'loss_func': CWLoss(args.num_classes)
}
config_auto_pgd_ce = {
'train': False,
'targeted': False,
'epsilon': args.epsilon / 255.0,
'num_steps': 20,
'loss_func': "ce"
}
config_auto_pgd_dlr = {
'train': False,
'targeted': False,
'epsilon': args.epsilon / 255.0,
'num_steps': 20,
'loss_func': "logit-diff"
}
config_auto_pgd_dlr_t = {
**config_auto_pgd_dlr,
"targeted": True,
"n_classes": 10,
}
config_auto_pgd_ce_plus = {
**config_auto_pgd_ce,
"n_restarts": 4
}
config_auto_pgd_dlr_plus = {
**config_auto_pgd_dlr,
"n_restarts": 4
}
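# Scale handling: the test loader yields images in [0, 1] (the Normalize
# transform is disabled above), while the trained network expects inputs in
# [-1, 1]. When the network is attacked directly, epsilon and step sizes are
# therefore doubled below; in the binarization test the ZeroOneOneOneNetwork
# wrapper rescales [0, 1] inputs to [-1, 1] instead, so the budgets stay in
# [0, 1] pixel scale (and the targeted DLR attack is restricted to 2 classes).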
if not args.binarization_test:
config_fgsm["epsilon"] *= 2.0
config_pgd["epsilon"] *= 2.0
config_cw["epsilon"] *= 2.0
config_fgsm["step_size"] *= 2.0
config_pgd["step_size"] *= 2.0
config_cw["step_size"] *= 2.0
else:
config_auto_pgd_dlr_t["n_classes"] = 2
if args.more_steps:
config_pgd["step_size"] /= 5.0
config_cw["step_size"] /= 5.0
config_pgd["num_steps"] *= 10
config_cw["num_steps"] *= 10
config_auto_pgd_ce["num_steps"] *= 10
config_auto_pgd_dlr["num_steps"] *= 10
print("More & finer steps")
if args.more_more_steps:
config_pgd["step_size"] /= 5.0
config_cw["step_size"] /= 5.0
config_pgd["num_steps"] *= 20
config_cw["num_steps"] *= 20
config_auto_pgd_ce["num_steps"] *= 20
config_auto_pgd_dlr["num_steps"] *= 20
print("More & finer steps")
def test_test(net, feature_extractor, config):
from argparse_utils import DecisionBoundaryBinarizationSettings
class DummyModel(nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, x, mode=None):
del mode
return self.model(x)
scores_logit_differences_and_validation_accuracies = active_tests.decision_boundary_binarization.interior_boundary_discrimination_attack(
feature_extractor,
testloader,
attack_fn=lambda m, l, kwargs: test_main(0, create_attack(DummyModel(m)), l, verbose=False,
inverse_acc=True, return_advs=True),
linearization_settings=DecisionBoundaryBinarizationSettings(
epsilon=config["epsilon"],
norm="linf",
lr=10000,
n_boundary_points=args.n_boundary_points,
n_inner_points=args.n_inner_points,
adversarial_attack_settings=None,
optimizer="sklearn"
),
n_samples=args.num_samples_test,
device=device,
n_samples_evaluation=200,#args.num_samples_test * 10,
n_samples_asr_evaluation=200,
batch_size=args.num_samples_test * 5,
rescale_logits="adaptive",
decision_boundary_closeness=0.999,
sample_training_data_from_corners=args.sample_from_corners
)
print(active_tests.decision_boundary_binarization.format_result(
scores_logit_differences_and_validation_accuracies,
args.num_samples_test))
def test_main(epoch, net, loader, verbose=False, inverse_acc=False,
return_advs=False):
net.eval()
test_loss = 0.0
correct = 0.0
total = 0.0
if verbose:
iterator = tqdm(loader, ncols=0, leave=False)
else:
iterator = loader
x_adv = []
logits_adv = []
for batch_idx, (inputs, targets) in enumerate(iterator):
start_time = time.time()
inputs, targets = inputs.to(device), targets.to(device)
pert_inputs = inputs.detach()
res = net(pert_inputs, targets)
if isinstance(res, tuple):
outputs, _, x_adv_it = res
else:
outputs = res
if return_advs:
x_adv.append(x_adv_it)
else:
del x_adv_it
logits_adv.append(outputs.detach().cpu())
duration = time.time() - start_time
_, predicted = outputs.max(1)
batch_size = targets.size(0)
total += batch_size
correct_num = predicted.eq(targets).sum().item()
correct += correct_num
if verbose:
iterator.set_description(
str(predicted.eq(targets).sum().item() / targets.size(0)))
if batch_idx % args.log_step == 0:
print(
"Step %d, Duration %.2f, Current-batch-acc %.2f, Avg-acc %.2f"
% (batch_idx, duration, 100. * correct_num / batch_size,
100. * correct / total))
if return_advs:
x_adv = torch.cat(x_adv, 0)
logits_adv = torch.cat(logits_adv, 0)
acc = 100. * correct / total
if verbose:
print('Test accuracy: %.2f' % (acc))
if inverse_acc:
acc = (100 - acc) / 100.0
return acc, (x_adv, logits_adv)
attack_list = args.attack_method_list.split('-')
attack_num = len(attack_list)
print(f"Epsilon: {args.epsilon}")
for attack_idx in range(attack_num):
args.attack_method = attack_list[attack_idx]
if args.attack_method == 'natural':
print('======Evaluation using Natural Images======')
create_attack = lambda n: Attack_PGD(n, config_natural)
elif args.attack_method.upper() == 'FGSM':
print('======Evaluation under FGSM Attack======')
create_attack = lambda n: Attack_PGD(n, config_fgsm)
elif args.attack_method.upper() == 'PGD':
print('======Evaluation under PGD Attack======')
create_attack = lambda n: Attack_PGD(n, config_pgd)
elif args.attack_method.upper() == 'CW':
print('======Evaluation under CW Attack======')
create_attack = lambda n: Attack_PGD(n, config_cw)
elif args.attack_method.upper() == 'AUTOPGDCE':
print()
print('-----Auto PGD (CE) adv mode -----')
create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_ce)
elif args.attack_method.upper() == 'AUTOPGDDLR':
print()
print('-----Auto PGD (DLR) adv mode -----')
create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_dlr)
elif args.attack_method.upper() == 'AUTOPGDDLRT':
print()
print('-----Auto PGD (DLR, targeted) adv mode -----')
create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_dlr_t)
elif args.attack_method.upper() == 'AUTOPGDCE+':
print()
print('-----Auto PGD+ (CE) adv mode -----')
create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_ce_plus)
elif args.attack_method.upper() == 'AUTOPGDDLR+':
print()
print('-----Auto PGD+ (DLR) adv mode -----')
create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_dlr_plus)
else:
raise Exception(
'Should be a valid attack method. The specified attack method is: {}'
.format(args.attack_method))
if args.binarization_test:
specific_net = ZeroOneOneOneNetwork(basic_net)
net = create_attack(specific_net)
test_test(net, basic_net, config_pgd)
else:
net = create_attack(basic_net)
test_main(0, net, testloader, verbose=True)
|