# ---------------------------------------------------------------
# Taken as-is from the following link:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/logger.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
"""
Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
"""
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
import warnings
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, "wt")
self.own_file = True
else:
            assert hasattr(filename_or_file, "write"), (
                "expected file or str, got %s" % filename_or_file
            )
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if hasattr(val, "__float__"):
valstr = "%-8.3g" % val
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print("WARNING: tried to write empty key-value dict")
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = "-" * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
lines.append(
"| %s%s | %s%s |"
% (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
)
lines.append(dashes)
self.file.write("\n".join(lines) + "\n")
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
maxlen = 30
return s[: maxlen - 3] + "..." if len(s) > maxlen else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(" ")
self.file.write("\n")
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "wt")
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, "dtype"):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + "\n")
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "w+t")
self.keys = []
self.sep = ","
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = list(kvs.keys() - self.keys)
extra_keys.sort()
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
self.file.write(k)
self.file.write("\n")
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write("\n")
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write("\n")
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = "events"
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {"tag": k, "simple_value": float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = (
self.step
) # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=""):
os.makedirs(ev_dir, exist_ok=True)
if format == "stdout":
return HumanOutputFormat(sys.stdout)
elif format == "log":
return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
elif format == "json":
return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
elif format == "csv":
return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
elif format == "tensorboard":
return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
else:
raise ValueError("Unknown format specified: %s" % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
get_current().logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
"""
get_current().logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
return get_current().dumpkvs()
def getkvs():
return get_current().name2val
def log(*args, level=INFO):
"""
    Write the sequence of args, separated by spaces, to the console and output files (if you've configured an output file).
"""
get_current().log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
get_current().set_level(level)
def set_comm(comm):
get_current().set_comm(comm)
def get_dir():
"""
Get directory that log files are being written to.
    will be None if there is no output directory (i.e., if you didn't call configure)
"""
return get_current().get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
logkey = "wait_" + scopename
tstart = time.time()
try:
yield
finally:
get_current().name2val[logkey] += time.time() - tstart
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
if Logger.CURRENT is None:
_configure_default_logger()
return Logger.CURRENT
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats, comm=None):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
self.comm = comm
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
if self.comm is None:
d = self.name2val
else:
d = mpi_weighted_mean(
self.comm,
{
name: (val, self.name2cnt.get(name, 1))
for (name, val) in self.name2val.items()
},
)
if self.comm.rank != 0:
d["dummy"] = 1 # so we don't get a warning about empty dict
out = d.copy() # Return the dict for unit testing purposes
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(d)
self.name2val.clear()
self.name2cnt.clear()
return out
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def set_comm(self, comm):
self.comm = comm
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
# check environment variables here instead of importing mpi4py
# to avoid calling MPI_Init() when this module is imported
for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
if varname in os.environ:
return int(os.environ[varname])
return 0
def mpi_weighted_mean(comm, local_name2valcount):
"""
Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for (name, (val, count)) in n2vc.items():
try:
val = float(val)
except ValueError:
if comm.rank == 0:
warnings.warn(
"WARNING: tried to compute mean on non-float {}={}".format(
name, val
)
)
else:
name2sum[name] += val * count
name2count[name] += count
return {name: name2sum[name] / name2count[name] for name in name2sum}
else:
return {}
def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
"""
If comm is provided, average all numerical stats across that comm
"""
if dir is None:
dir = os.getenv("OPENAI_LOGDIR")
if dir is None:
dir = osp.join(
tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"),
)
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
else:
format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log("Logging to %s" % dir)
def _configure_default_logger():
configure()
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log("Reset logger")
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
prevlogger = Logger.CURRENT
configure(dir=dir, format_strs=format_strs, comm=comm)
try:
yield
finally:
Logger.CURRENT.close()
Logger.CURRENT = prevlogger
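# ---------------------------------------------------------------
# Illustrative usage (a minimal sketch, not part of the original
# file): log a few key/value pairs to stdout and a CSV file. The
# directory name and the logged keys here are hypothetical.
# ---------------------------------------------------------------
if __name__ == "__main__":
    configure(dir="/tmp/logger_demo", format_strs=["stdout", "csv"])
    for step in range(3):
        logkv("step", step)
        logkv_mean("loss", 1.0 / (step + 1))
        dumpkvs()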
# ---------------------------------------------------------------
# Taken as-is from the following link:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/train_util.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
import copy
import functools
import os
import blobfile as bf
import torch as th
import torch.distributed as dist
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.optim import AdamW
from . import dist_util, logger
from .fp16_util import MixedPrecisionTrainer
from .nn import update_ema
from .resample import LossAwareSampler, UniformSampler
# For ImageNet experiments, this was a good default value.
# We found that the lg_loss_scale quickly climbed to
# 20-21 within the first ~1K steps of training.
INITIAL_LOG_LOSS_SCALE = 20.0
class TrainLoop:
def __init__(
self,
*,
model,
diffusion,
data,
batch_size,
microbatch,
lr,
ema_rate,
log_interval,
save_interval,
resume_checkpoint,
use_fp16=False,
fp16_scale_growth=1e-3,
schedule_sampler=None,
weight_decay=0.0,
lr_anneal_steps=0,
):
self.model = model
self.diffusion = diffusion
self.data = data
self.batch_size = batch_size
self.microbatch = microbatch if microbatch > 0 else batch_size
self.lr = lr
self.ema_rate = (
[ema_rate]
if isinstance(ema_rate, float)
else [float(x) for x in ema_rate.split(",")]
)
self.log_interval = log_interval
self.save_interval = save_interval
self.resume_checkpoint = resume_checkpoint
self.use_fp16 = use_fp16
self.fp16_scale_growth = fp16_scale_growth
self.schedule_sampler = schedule_sampler or UniformSampler(diffusion)
self.weight_decay = weight_decay
self.lr_anneal_steps = lr_anneal_steps
self.step = 0
self.resume_step = 0
self.global_batch = self.batch_size * dist.get_world_size()
self.sync_cuda = th.cuda.is_available()
self._load_and_sync_parameters()
self.mp_trainer = MixedPrecisionTrainer(
model=self.model,
use_fp16=self.use_fp16,
fp16_scale_growth=fp16_scale_growth,
)
self.opt = AdamW(
self.mp_trainer.master_params, lr=self.lr, weight_decay=self.weight_decay
)
if self.resume_step:
self._load_optimizer_state()
# Model was resumed, either due to a restart or a checkpoint
# being specified at the command line.
self.ema_params = [
self._load_ema_parameters(rate) for rate in self.ema_rate
]
else:
self.ema_params = [
copy.deepcopy(self.mp_trainer.master_params)
for _ in range(len(self.ema_rate))
]
if th.cuda.is_available():
self.use_ddp = True
self.ddp_model = DDP(
self.model,
device_ids=[dist_util.dev()],
output_device=dist_util.dev(),
broadcast_buffers=False,
bucket_cap_mb=128,
find_unused_parameters=False,
)
else:
if dist.get_world_size() > 1:
logger.warn(
"Distributed training requires CUDA. "
"Gradients will not be synchronized properly!"
)
self.use_ddp = False
self.ddp_model = self.model
def _load_and_sync_parameters(self):
resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
if resume_checkpoint:
self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
if dist.get_rank() == 0:
logger.log(f"loading model from checkpoint: {resume_checkpoint}...")
self.model.load_state_dict(
dist_util.load_state_dict(
resume_checkpoint, map_location=dist_util.dev()
)
)
dist_util.sync_params(self.model.parameters())
def _load_ema_parameters(self, rate):
ema_params = copy.deepcopy(self.mp_trainer.master_params)
main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
if ema_checkpoint:
if dist.get_rank() == 0:
logger.log(f"loading EMA from checkpoint: {ema_checkpoint}...")
state_dict = dist_util.load_state_dict(
ema_checkpoint, map_location=dist_util.dev()
)
ema_params = self.mp_trainer.state_dict_to_master_params(state_dict)
dist_util.sync_params(ema_params)
return ema_params
def _load_optimizer_state(self):
main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
opt_checkpoint = bf.join(
bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt"
)
if bf.exists(opt_checkpoint):
logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
state_dict = dist_util.load_state_dict(
opt_checkpoint, map_location=dist_util.dev()
)
self.opt.load_state_dict(state_dict)
def run_loop(self):
while (
not self.lr_anneal_steps
or self.step + self.resume_step < self.lr_anneal_steps
):
batch, cond = next(self.data)
self.run_step(batch, cond)
if self.step % self.log_interval == 0:
logger.dumpkvs()
if self.step % self.save_interval == 0:
self.save()
# Run for a finite amount of time in integration tests.
if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
return
self.step += 1
# Save the last checkpoint if it wasn't already saved.
if (self.step - 1) % self.save_interval != 0:
self.save()
def run_step(self, batch, cond):
self.forward_backward(batch, cond)
took_step = self.mp_trainer.optimize(self.opt)
if took_step:
self._update_ema()
self._anneal_lr()
self.log_step()
def forward_backward(self, batch, cond):
self.mp_trainer.zero_grad()
for i in range(0, batch.shape[0], self.microbatch):
micro = batch[i : i + self.microbatch].to(dist_util.dev())
micro_cond = {
k: v[i : i + self.microbatch].to(dist_util.dev())
for k, v in cond.items()
}
last_batch = (i + self.microbatch) >= batch.shape[0]
t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
compute_losses = functools.partial(
self.diffusion.training_losses,
self.ddp_model,
micro,
t,
model_kwargs=micro_cond,
)
if last_batch or not self.use_ddp:
losses = compute_losses()
else:
with self.ddp_model.no_sync():
losses = compute_losses()
if isinstance(self.schedule_sampler, LossAwareSampler):
self.schedule_sampler.update_with_local_losses(
t, losses["loss"].detach()
)
loss = (losses["loss"] * weights).mean()
log_loss_dict(
self.diffusion, t, {k: v * weights for k, v in losses.items()}
)
self.mp_trainer.backward(loss)
def _update_ema(self):
for rate, params in zip(self.ema_rate, self.ema_params):
update_ema(params, self.mp_trainer.master_params, rate=rate)
def _anneal_lr(self):
if not self.lr_anneal_steps:
return
frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
lr = self.lr * (1 - frac_done)
for param_group in self.opt.param_groups:
param_group["lr"] = lr
def log_step(self):
logger.logkv("step", self.step + self.resume_step)
logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch)
def save(self):
def save_checkpoint(rate, params):
state_dict = self.mp_trainer.master_params_to_state_dict(params)
if dist.get_rank() == 0:
logger.log(f"saving model {rate}...")
if not rate:
filename = f"model{(self.step+self.resume_step):06d}.pt"
else:
filename = f"ema_{rate}_{(self.step+self.resume_step):06d}.pt"
with bf.BlobFile(bf.join(get_blob_logdir(), filename), "wb") as f:
th.save(state_dict, f)
save_checkpoint(0, self.mp_trainer.master_params)
for rate, params in zip(self.ema_rate, self.ema_params):
save_checkpoint(rate, params)
if dist.get_rank() == 0:
with bf.BlobFile(
bf.join(get_blob_logdir(), f"opt{(self.step+self.resume_step):06d}.pt"),
"wb",
) as f:
th.save(self.opt.state_dict(), f)
dist.barrier()
def parse_resume_step_from_filename(filename):
"""
Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
checkpoint's number of steps.
"""
split = filename.split("model")
if len(split) < 2:
return 0
split1 = split[-1].split(".")[0]
try:
return int(split1)
except ValueError:
return 0
def get_blob_logdir():
# You can change this to be a separate path to save checkpoints to
# a blobstore or some external drive.
return logger.get_dir()
def find_resume_checkpoint():
# On your infrastructure, you may want to override this to automatically
# discover the latest checkpoint on your blob storage, etc.
return None
def find_ema_checkpoint(main_checkpoint, step, rate):
if main_checkpoint is None:
return None
filename = f"ema_{rate}_{(step):06d}.pt"
path = bf.join(bf.dirname(main_checkpoint), filename)
if bf.exists(path):
return path
return None
def log_loss_dict(diffusion, ts, losses):
for key, values in losses.items():
logger.logkv_mean(key, values.mean().item())
# Log the quantiles (four quartiles, in particular).
for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
quartile = int(4 * sub_t / diffusion.num_timesteps)
logger.logkv_mean(f"{key}_q{quartile}", sub_loss)
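# ---------------------------------------------------------------
# Illustrative check (a sketch, not part of the original file): how
# parse_resume_step_from_filename() recovers the step count from a
# checkpoint path. The paths below are hypothetical.
# ---------------------------------------------------------------
if __name__ == "__main__":
    assert parse_resume_step_from_filename("ckpts/model010000.pt") == 10000
    assert parse_resume_step_from_filename("ckpts/opt010000.pt") == 0  # no "model" in the name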
# ---------------------------------------------------------------
# Taken as-is from the following link:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/losses.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
"""
Helpers for various likelihood-based losses. These are ported from the original
Ho et al. diffusion models codebase:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
"""
import numpy as np
import torch as th
def normal_kl(mean1, logvar1, mean2, logvar2):
"""
Compute the KL divergence between two Gaussians.
Shapes are automatically broadcasted, so batches can be compared to
scalars, among other use cases.
"""
tensor = None
for obj in (mean1, logvar1, mean2, logvar2):
if isinstance(obj, th.Tensor):
tensor = obj
break
assert tensor is not None, "at least one argument must be a Tensor"
# Force variances to be Tensors. Broadcasting helps convert scalars to
# Tensors, but it does not work for th.exp().
logvar1, logvar2 = [
x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
for x in (logvar1, logvar2)
]
return 0.5 * (
-1.0
+ logvar2
- logvar1
+ th.exp(logvar1 - logvar2)
+ ((mean1 - mean2) ** 2) * th.exp(-logvar2)
)
def approx_standard_normal_cdf(x):
"""
A fast approximation of the cumulative distribution function of the
standard normal.
"""
return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))))
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
"""
Compute the log-likelihood of a Gaussian distribution discretizing to a
given image.
:param x: the target images. It is assumed that these were uint8 values,
          rescaled to the range [-1, 1].
:param means: the Gaussian mean Tensor.
:param log_scales: the Gaussian log stddev Tensor.
:return: a tensor like x of log probabilities (in nats).
"""
assert x.shape == means.shape == log_scales.shape
centered_x = x - means
inv_stdv = th.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
cdf_plus = approx_standard_normal_cdf(plus_in)
min_in = inv_stdv * (centered_x - 1.0 / 255.0)
cdf_min = approx_standard_normal_cdf(min_in)
log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
cdf_delta = cdf_plus - cdf_min
log_probs = th.where(
x < -0.999,
log_cdf_plus,
th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
)
assert log_probs.shape == x.shape
return log_probs
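# ---------------------------------------------------------------
# Sanity sketch (not part of the original file): normal_kl() of a
# Gaussian with itself is zero, and broadcasting lets a batch be
# compared against scalar parameters.
# ---------------------------------------------------------------
if __name__ == "__main__":
    mean = th.zeros(4)
    logvar = th.zeros(4)
    assert th.allclose(normal_kl(mean, logvar, mean, logvar), th.zeros(4))
    # KL(N(0,1) || N(1,1)) = 0.5 nats per element.
    assert th.allclose(normal_kl(mean, logvar, 1.0, 0.0), th.full((4,), 0.5))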
# ---------------------------------------------------------------
# Taken as-is from the following link:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/fp16_util.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
"""
Helpers to train with 16-bit precision.
"""
import numpy as np
import torch as th
import torch.nn as nn
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from . import logger
INITIAL_LOG_LOSS_SCALE = 20.0
def convert_module_to_f16(l):
"""
Convert primitive modules to float16.
"""
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
def convert_module_to_f32(l):
"""
Convert primitive modules to float32, undoing convert_module_to_f16().
"""
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
l.weight.data = l.weight.data.float()
if l.bias is not None:
l.bias.data = l.bias.data.float()
def make_master_params(param_groups_and_shapes):
"""
Copy model parameters into a (differently-shaped) list of full-precision
parameters.
"""
master_params = []
for param_group, shape in param_groups_and_shapes:
master_param = nn.Parameter(
_flatten_dense_tensors(
[param.detach().float() for (_, param) in param_group]
).view(shape)
)
master_param.requires_grad = True
master_params.append(master_param)
return master_params
def model_grads_to_master_grads(param_groups_and_shapes, master_params):
"""
Copy the gradients from the model parameters into the master parameters
from make_master_params().
"""
for master_param, (param_group, shape) in zip(
master_params, param_groups_and_shapes
):
master_param.grad = _flatten_dense_tensors(
[param_grad_or_zeros(param) for (_, param) in param_group]
).view(shape)
def master_params_to_model_params(param_groups_and_shapes, master_params):
"""
Copy the master parameter data back into the model parameters.
"""
# Without copying to a list, if a generator is passed, this will
# silently not copy any parameters.
for master_param, (param_group, _) in zip(master_params, param_groups_and_shapes):
for (_, param), unflat_master_param in zip(
param_group, unflatten_master_params(param_group, master_param.view(-1))
):
param.detach().copy_(unflat_master_param)
def unflatten_master_params(param_group, master_param):
return _unflatten_dense_tensors(master_param, [param for (_, param) in param_group])
def get_param_groups_and_shapes(named_model_params):
named_model_params = list(named_model_params)
scalar_vector_named_params = (
[(n, p) for (n, p) in named_model_params if p.ndim <= 1],
(-1),
)
matrix_named_params = (
[(n, p) for (n, p) in named_model_params if p.ndim > 1],
(1, -1),
)
return [scalar_vector_named_params, matrix_named_params]
def master_params_to_state_dict(
model, param_groups_and_shapes, master_params, use_fp16
):
if use_fp16:
state_dict = model.state_dict()
for master_param, (param_group, _) in zip(
master_params, param_groups_and_shapes
):
for (name, _), unflat_master_param in zip(
param_group, unflatten_master_params(param_group, master_param.view(-1))
):
assert name in state_dict
state_dict[name] = unflat_master_param
else:
state_dict = model.state_dict()
for i, (name, _value) in enumerate(model.named_parameters()):
assert name in state_dict
state_dict[name] = master_params[i]
return state_dict
def state_dict_to_master_params(model, state_dict, use_fp16):
if use_fp16:
named_model_params = [
(name, state_dict[name]) for name, _ in model.named_parameters()
]
param_groups_and_shapes = get_param_groups_and_shapes(named_model_params)
master_params = make_master_params(param_groups_and_shapes)
else:
master_params = [state_dict[name] for name, _ in model.named_parameters()]
return master_params
def zero_master_grads(master_params):
for param in master_params:
param.grad = None
def zero_grad(model_params):
for param in model_params:
# Taken from https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.add_param_group
if param.grad is not None:
param.grad.detach_()
param.grad.zero_()
def param_grad_or_zeros(param):
if param.grad is not None:
return param.grad.data.detach()
else:
return th.zeros_like(param)
class MixedPrecisionTrainer:
def __init__(
self,
*,
model,
use_fp16=False,
fp16_scale_growth=1e-3,
initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE,
):
self.model = model
self.use_fp16 = use_fp16
self.fp16_scale_growth = fp16_scale_growth
self.model_params = list(self.model.parameters())
self.master_params = self.model_params
self.param_groups_and_shapes = None
self.lg_loss_scale = initial_lg_loss_scale
if self.use_fp16:
self.param_groups_and_shapes = get_param_groups_and_shapes(
self.model.named_parameters()
)
self.master_params = make_master_params(self.param_groups_and_shapes)
self.model.convert_to_fp16()
def zero_grad(self):
zero_grad(self.model_params)
def backward(self, loss: th.Tensor):
if self.use_fp16:
loss_scale = 2 ** self.lg_loss_scale
(loss * loss_scale).backward()
else:
loss.backward()
def optimize(self, opt: th.optim.Optimizer):
if self.use_fp16:
return self._optimize_fp16(opt)
else:
return self._optimize_normal(opt)
def _optimize_fp16(self, opt: th.optim.Optimizer):
logger.logkv_mean("lg_loss_scale", self.lg_loss_scale)
model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params)
grad_norm, param_norm = self._compute_norms(grad_scale=2 ** self.lg_loss_scale)
if check_overflow(grad_norm):
self.lg_loss_scale -= 1
logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}")
zero_master_grads(self.master_params)
return False
logger.logkv_mean("grad_norm", grad_norm)
logger.logkv_mean("param_norm", param_norm)
self.master_params[0].grad.mul_(1.0 / (2 ** self.lg_loss_scale))
opt.step()
zero_master_grads(self.master_params)
master_params_to_model_params(self.param_groups_and_shapes, self.master_params)
self.lg_loss_scale += self.fp16_scale_growth
return True
def _optimize_normal(self, opt: th.optim.Optimizer):
grad_norm, param_norm = self._compute_norms()
logger.logkv_mean("grad_norm", grad_norm)
logger.logkv_mean("param_norm", param_norm)
opt.step()
return True
def _compute_norms(self, grad_scale=1.0):
grad_norm = 0.0
param_norm = 0.0
for p in self.master_params:
with th.no_grad():
param_norm += th.norm(p, p=2, dtype=th.float32).item() ** 2
if p.grad is not None:
grad_norm += th.norm(p.grad, p=2, dtype=th.float32).item() ** 2
return np.sqrt(grad_norm) / grad_scale, np.sqrt(param_norm)
def master_params_to_state_dict(self, master_params):
return master_params_to_state_dict(
self.model, self.param_groups_and_shapes, master_params, self.use_fp16
)
def state_dict_to_master_params(self, state_dict):
return state_dict_to_master_params(self.model, state_dict, self.use_fp16)
def check_overflow(value):
return (value == float("inf")) or (value == -float("inf")) or (value != value)
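# ---------------------------------------------------------------
# Sanity sketch (not part of the original file): check_overflow()
# flags inf, -inf, and NaN (the latter via the value != value
# self-comparison, which only NaN satisfies).
# ---------------------------------------------------------------
if __name__ == "__main__":
    assert check_overflow(float("inf"))
    assert check_overflow(-float("inf"))
    assert check_overflow(float("nan"))
    assert not check_overflow(1.0)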
# ---------------------------------------------------------------
# Taken as-is from the following link:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/gaussian_diffusion.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
"""
This code started out as a PyTorch port of Ho et al's diffusion models:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
"""
import enum
import math
import numpy as np
import torch as th
from .nn import mean_flat
from .losses import normal_kl, discretized_gaussian_log_likelihood
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
"""
Get a pre-defined beta schedule for the given name.
The beta schedule library consists of beta schedules which remain similar
in the limit of num_diffusion_timesteps.
Beta schedules may be added, but should not be removed or changed once
they are committed to maintain backwards compatibility.
"""
if schedule_name == "linear":
# Linear schedule from Ho et al, extended to work for any number of
# diffusion steps.
scale = 1000 / num_diffusion_timesteps
beta_start = scale * 0.0001
beta_end = scale * 0.02
return np.linspace(
beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64
)
elif schedule_name == "cosine":
return betas_for_alpha_bar(
num_diffusion_timesteps,
lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
)
else:
raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].
:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
produces the cumulative product of (1-beta) up to that
part of the diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
prevent singularities.
"""
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return np.array(betas)
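# ---------------------------------------------------------------
# Worked example (not part of the original file): for the "cosine"
# schedule, beta_i = 1 - alpha_bar(t2) / alpha_bar(t1), capped at
# max_beta, so betas start tiny and grow toward the 0.999 cap.
# ---------------------------------------------------------------
if __name__ == "__main__":
    betas = get_named_beta_schedule("cosine", 1000)
    assert betas.shape == (1000,)
    assert 0.0 < betas[0] < betas[-1] <= 0.999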
class ModelMeanType(enum.Enum):
"""
Which type of output the model predicts.
"""
PREVIOUS_X = enum.auto() # the model predicts x_{t-1}
START_X = enum.auto() # the model predicts x_0
EPSILON = enum.auto() # the model predicts epsilon
class ModelVarType(enum.Enum):
"""
What is used as the model's output variance.
The LEARNED_RANGE option has been added to allow the model to predict
values between FIXED_SMALL and FIXED_LARGE, making its job easier.
"""
LEARNED = enum.auto()
FIXED_SMALL = enum.auto()
FIXED_LARGE = enum.auto()
LEARNED_RANGE = enum.auto()
class LossType(enum.Enum):
MSE = enum.auto() # use raw MSE loss (and KL when learning variances)
RESCALED_MSE = (
enum.auto()
) # use raw MSE loss (with RESCALED_KL when learning variances)
KL = enum.auto() # use the variational lower-bound
RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB
def is_vb(self):
return self == LossType.KL or self == LossType.RESCALED_KL
class GaussianDiffusion:
"""
Utilities for training and sampling diffusion models.
Ported directly from here, and then adapted over time for further experimentation.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
:param betas: a 1-D numpy array of betas for each diffusion timestep,
starting at T and going to 1.
:param model_mean_type: a ModelMeanType determining what the model outputs.
:param model_var_type: a ModelVarType determining how variance is output.
:param loss_type: a LossType determining the loss function to use.
:param rescale_timesteps: if True, pass floating point timesteps into the
model so that they are always scaled like in the
original paper (0 to 1000).
"""
def __init__(
self,
*,
betas,
model_mean_type,
model_var_type,
loss_type,
rescale_timesteps=False,
):
self.model_mean_type = model_mean_type
self.model_var_type = model_var_type
self.loss_type = loss_type
self.rescale_timesteps = rescale_timesteps
# Use float64 for accuracy.
betas = np.array(betas, dtype=np.float64)
self.betas = betas
assert len(betas.shape) == 1, "betas must be 1-D"
assert (betas > 0).all() and (betas <= 1).all()
self.num_timesteps = int(betas.shape[0])
alphas = 1.0 - betas
self.alphas_cumprod = np.cumprod(alphas, axis=0)
self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
# calculations for posterior q(x_{t-1} | x_t, x_0)
self.posterior_variance = (
betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
# log calculation clipped because the posterior variance is 0 at the
# beginning of the diffusion chain.
self.posterior_log_variance_clipped = np.log(
np.append(self.posterior_variance[1], self.posterior_variance[1:])
)
self.posterior_mean_coef1 = (
betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
self.posterior_mean_coef2 = (
(1.0 - self.alphas_cumprod_prev)
* np.sqrt(alphas)
/ (1.0 - self.alphas_cumprod)
)
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
)
variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = _extract_into_tensor(
self.log_one_minus_alphas_cumprod, t, x_start.shape
)
return mean, variance, log_variance
def q_sample(self, x_start, t, noise=None):
"""
Diffuse the data for a given number of diffusion steps.
In other words, sample from q(x_t | x_0).
:param x_start: the initial data batch.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:param noise: if specified, the split-out normal noise.
:return: A noisy version of x_start.
"""
if noise is None:
noise = th.randn_like(x_start)
assert noise.shape == x_start.shape
return (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+ _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
* noise
)
def q_posterior_mean_variance(self, x_start, x_t, t):
"""
Compute the mean and variance of the diffusion posterior:
q(x_{t-1} | x_t, x_0)
"""
assert x_start.shape == x_t.shape
posterior_mean = (
_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
+ _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x_t.shape
)
assert (
posterior_mean.shape[0]
== posterior_variance.shape[0]
== posterior_log_variance_clipped.shape[0]
== x_start.shape[0]
)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(
self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None
):
"""
Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
the initial x, x_0.
:param model: the model, which takes a signal and a batch of timesteps
as input.
:param x: the [N x C x ...] tensor at time t.
:param t: a 1-D Tensor of timesteps.
:param clip_denoised: if True, clip the denoised signal into [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample. Applies before
clip_denoised.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict with the following keys:
- 'mean': the model mean output.
- 'variance': the model variance output.
- 'log_variance': the log of 'variance'.
- 'pred_xstart': the prediction for x_0.
"""
if model_kwargs is None:
model_kwargs = {}
B, C = x.shape[:2]
assert t.shape == (B,)
model_output = model(x, self._scale_timesteps(t), **model_kwargs)
if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
assert model_output.shape == (B, C * 2, *x.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
if self.model_var_type == ModelVarType.LEARNED:
model_log_variance = model_var_values
model_variance = th.exp(model_log_variance)
else:
min_log = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x.shape
)
max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
                # model_var_values is in [-1, 1] for [min_var, max_var].
frac = (model_var_values + 1) / 2
model_log_variance = frac * max_log + (1 - frac) * min_log
model_variance = th.exp(model_log_variance)
else:
model_variance, model_log_variance = {
# for fixedlarge, we set the initial (log-)variance like so
# to get a better decoder log likelihood.
ModelVarType.FIXED_LARGE: (
np.append(self.posterior_variance[1], self.betas[1:]),
np.log(np.append(self.posterior_variance[1], self.betas[1:])),
),
ModelVarType.FIXED_SMALL: (
self.posterior_variance,
self.posterior_log_variance_clipped,
),
}[self.model_var_type]
model_variance = _extract_into_tensor(model_variance, t, x.shape)
model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
def process_xstart(x):
if denoised_fn is not None:
x = denoised_fn(x)
if clip_denoised:
return x.clamp(-1, 1)
return x
if self.model_mean_type == ModelMeanType.PREVIOUS_X:
pred_xstart = process_xstart(
self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
)
model_mean = model_output
elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:
if self.model_mean_type == ModelMeanType.START_X:
pred_xstart = process_xstart(model_output)
else:
pred_xstart = process_xstart(
self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
)
model_mean, _, _ = self.q_posterior_mean_variance(
x_start=pred_xstart, x_t=x, t=t
)
else:
raise NotImplementedError(self.model_mean_type)
assert (
model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
)
return {
"mean": model_mean,
"variance": model_variance,
"log_variance": model_log_variance,
"pred_xstart": pred_xstart,
}
def _predict_xstart_from_eps(self, x_t, t, eps):
assert x_t.shape == eps.shape
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
)
def _predict_xstart_from_xprev(self, x_t, t, xprev):
assert x_t.shape == xprev.shape
return ( # (xprev - coef2*x_t) / coef1
_extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
- _extract_into_tensor(
self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
)
* x_t
)
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- pred_xstart
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
def _scale_timesteps(self, t):
if self.rescale_timesteps:
return t.float() * (1000.0 / self.num_timesteps)
return t
def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
        gradient = cond_fn(x, self._scale_timesteps(t), **(model_kwargs or {}))
new_mean = (
p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
)
return new_mean
def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
        eps = eps - (1 - alpha_bar).sqrt() * cond_fn(
            x, self._scale_timesteps(t), **(model_kwargs or {})
        )
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(
x_start=out["pred_xstart"], x_t=x, t=t
)
return out
def p_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
        :param x: the current tensor at x_t.
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
noise = th.randn_like(x)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean(
cond_fn, out, x, t, model_kwargs=model_kwargs
)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def p_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
):
"""
Generate samples from the model.
:param model: the model module.
:param shape: the shape of the samples, (N, C, H, W).
:param noise: if specified, the noise from the encoder to sample.
Should be of the same shape as `shape`.
:param clip_denoised: if True, clip x_start predictions to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param device: if specified, the device to create the samples on.
If not specified, use a model parameter's device.
:param progress: if True, show a tqdm progress bar.
:return: a non-differentiable batch of samples.
"""
final = None
for sample in self.p_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
):
final = sample
return final["sample"]
def p_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
):
"""
Generate samples from the model and yield intermediate samples from
each timestep of diffusion.
Arguments are the same as p_sample_loop().
Returns a generator over dicts, where each dict is the return value of
p_sample().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::-1]
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
out = self.p_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
)
yield out
img = out["sample"]
def ddim_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t-1} from the model using DDIM.
Same usage as p_sample().
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = (
eta
* th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
* th.sqrt(1 - alpha_bar / alpha_bar_prev)
)
# Equation 12.
noise = th.randn_like(x)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev)
+ th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
sample = mean_pred + nonzero_mask * sigma * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def ddim_reverse_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t+1} from the model using DDIM reverse ODE.
"""
assert eta == 0.0, "Reverse ODE only for deterministic path"
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
- out["pred_xstart"]
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
        # Equation 12, reversed.
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_next)
+ th.sqrt(1 - alpha_bar_next) * eps
)
return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
def ddim_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
):
"""
Generate samples from the model using DDIM.
Same usage as p_sample_loop().
"""
final = None
for sample in self.ddim_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
eta=eta,
):
final = sample
return final["sample"]
def ddim_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
):
"""
Use DDIM to sample from the model and yield intermediate samples from
each timestep of DDIM.
Same usage as p_sample_loop_progressive().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::-1]
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
out = self.ddim_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
eta=eta,
)
yield out
img = out["sample"]
def _vb_terms_bpd(
self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
):
"""
Get a term for the variational lower-bound.
The resulting units are bits (rather than nats, as one might expect).
This allows for comparison to other papers.
:return: a dict with the following keys:
- 'output': a shape [N] tensor of NLLs or KLs.
- 'pred_xstart': the x_0 predictions.
"""
true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)
out = self.p_mean_variance(
model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
)
kl = normal_kl(
true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
)
kl = mean_flat(kl) / np.log(2.0)
decoder_nll = -discretized_gaussian_log_likelihood(
x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
)
assert decoder_nll.shape == x_start.shape
decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
# At the first timestep return the decoder NLL,
# otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
output = th.where((t == 0), decoder_nll, kl)
return {"output": output, "pred_xstart": out["pred_xstart"]}
def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
"""
Compute training losses for a single timestep.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param t: a batch of timestep indices.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param noise: if specified, the specific Gaussian noise to try to remove.
:return: a dict with the key "loss" containing a tensor of shape [N].
Some mean or variance settings may also have other keys.
"""
if model_kwargs is None:
model_kwargs = {}
if noise is None:
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start, t, noise=noise)
terms = {}
if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
terms["loss"] = self._vb_terms_bpd(
model=model,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
model_kwargs=model_kwargs,
)["output"]
if self.loss_type == LossType.RESCALED_KL:
terms["loss"] *= self.num_timesteps
elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)
if self.model_var_type in [
ModelVarType.LEARNED,
ModelVarType.LEARNED_RANGE,
]:
B, C = x_t.shape[:2]
assert model_output.shape == (B, C * 2, *x_t.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
# Learn the variance using the variational bound, but don't let
# it affect our mean prediction.
frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
terms["vb"] = self._vb_terms_bpd(
model=lambda *args, r=frozen_out: r,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
)["output"]
if self.loss_type == LossType.RESCALED_MSE:
# Divide by 1000 for equivalence with initial implementation.
# Without a factor of 1/1000, the VB term hurts the MSE term.
terms["vb"] *= self.num_timesteps / 1000.0
target = {
ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)[0],
ModelMeanType.START_X: x_start,
ModelMeanType.EPSILON: noise,
}[self.model_mean_type]
assert model_output.shape == target.shape == x_start.shape
terms["mse"] = mean_flat((target - model_output) ** 2)
if "vb" in terms:
terms["loss"] = terms["mse"] + terms["vb"]
else:
terms["loss"] = terms["mse"]
else:
raise NotImplementedError(self.loss_type)
return terms
def _prior_bpd(self, x_start):
"""
Get the prior KL term for the variational lower-bound, measured in
bits-per-dim.
This term can't be optimized, as it only depends on the encoder.
:param x_start: the [N x C x ...] tensor of inputs.
:return: a batch of [N] KL values (in bits), one per batch element.
"""
batch_size = x_start.shape[0]
t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(
mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
)
return mean_flat(kl_prior) / np.log(2.0)
def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
"""
Compute the entire variational lower-bound, measured in bits-per-dim,
as well as other related quantities.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param clip_denoised: if True, clip denoised samples.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- total_bpd: the total variational lower-bound, per batch element.
- prior_bpd: the prior term in the lower-bound.
- vb: an [N x T] tensor of terms in the lower-bound.
- xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
- mse: an [N x T] tensor of epsilon MSEs for each timestep.
"""
device = x_start.device
batch_size = x_start.shape[0]
vb = []
xstart_mse = []
mse = []
for t in list(range(self.num_timesteps))[::-1]:
t_batch = th.tensor([t] * batch_size, device=device)
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
# Calculate VLB term at the current timestep
with th.no_grad():
out = self._vb_terms_bpd(
model,
x_start=x_start,
x_t=x_t,
t=t_batch,
clip_denoised=clip_denoised,
model_kwargs=model_kwargs,
)
vb.append(out["output"])
xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
mse.append(mean_flat((eps - noise) ** 2))
vb = th.stack(vb, dim=1)
xstart_mse = th.stack(xstart_mse, dim=1)
mse = th.stack(mse, dim=1)
prior_bpd = self._prior_bpd(x_start)
total_bpd = vb.sum(dim=1) + prior_bpd
return {
"total_bpd": total_bpd,
"prior_bpd": prior_bpd,
"vb": vb,
"xstart_mse": xstart_mse,
"mse": mse,
}
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
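# ---------------------------------------------------------------
# Shape sketch (not part of the original file): _extract_into_tensor
# indexes a length-T schedule with a batch of timesteps and
# broadcasts the result against an image-shaped batch.
# ---------------------------------------------------------------
if __name__ == "__main__":
    arr = np.linspace(0.0, 1.0, 10)  # stands in for a beta schedule
    t = th.tensor([0, 9])            # one timestep per batch element
    out = _extract_into_tensor(arr, t, (2, 3, 4, 4))
    assert out.shape == (2, 3, 4, 4)
    assert out[0].max().item() == 0.0 and out[1].min().item() == 1.0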
# ---------------------------------------------------------------
# Taken as-is from the following link:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/dist_util.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
"""
Helpers for distributed training.
"""
import io
import os
import socket
import blobfile as bf
from mpi4py import MPI
import torch as th
import torch.distributed as dist
# Change this to reflect your cluster layout.
# The GPU for a given rank is (rank % GPUS_PER_NODE).
GPUS_PER_NODE = 8
SETUP_RETRY_COUNT = 3
def setup_dist():
"""
Setup a distributed process group.
"""
if dist.is_initialized():
return
os.environ["CUDA_VISIBLE_DEVICES"] = f"{MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE}"
comm = MPI.COMM_WORLD
backend = "gloo" if not th.cuda.is_available() else "nccl"
if backend == "gloo":
hostname = "localhost"
else:
hostname = socket.gethostbyname(socket.getfqdn())
os.environ["MASTER_ADDR"] = comm.bcast(hostname, root=0)
os.environ["RANK"] = str(comm.rank)
os.environ["WORLD_SIZE"] = str(comm.size)
port = comm.bcast(_find_free_port(), root=0)
os.environ["MASTER_PORT"] = str(port)
dist.init_process_group(backend=backend, init_method="env://")
def dev():
"""
Get the device to use for torch.distributed.
"""
if th.cuda.is_available():
        return th.device("cuda")
return th.device("cpu")
def load_state_dict(path, **kwargs):
"""
Load a PyTorch file without redundant fetches across MPI ranks.
"""
chunk_size = 2 ** 30 # MPI has a relatively small size limit
if MPI.COMM_WORLD.Get_rank() == 0:
with bf.BlobFile(path, "rb") as f:
data = f.read()
num_chunks = len(data) // chunk_size
if len(data) % chunk_size:
num_chunks += 1
MPI.COMM_WORLD.bcast(num_chunks)
for i in range(0, len(data), chunk_size):
MPI.COMM_WORLD.bcast(data[i : i + chunk_size])
else:
num_chunks = MPI.COMM_WORLD.bcast(None)
data = bytes()
for _ in range(num_chunks):
data += MPI.COMM_WORLD.bcast(None)
return th.load(io.BytesIO(data), **kwargs)
def sync_params(params):
"""
Synchronize a sequence of Tensors across ranks from rank 0.
"""
for p in params:
with th.no_grad():
dist.broadcast(p, 0)
def _find_free_port():
    # Create the socket before entering the try block; otherwise the finally
    # clause would raise NameError if socket creation itself failed.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
finally:
s.close()
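# A minimal usage sketch, assuming an MPI launch such as
# `mpiexec -n 2 python dist_util.py`: rank 0 broadcasts a hostname and a free
# port, every rank joins the same process group, and sync_params then copies
# rank 0's tensors to all other ranks.
if __name__ == "__main__":
    setup_dist()
    tensor = th.zeros(1, device=dev())
    if dist.get_rank() == 0:
        tensor += 42.0
    sync_params([tensor])
    print(f"rank {dist.get_rank()}: {tensor.item()}")  # every rank prints 42.0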
|
# ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/yang-song/score_sde_pytorch/blob/main/sde_lib.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_SCORE_SDE).
# ---------------------------------------------------------------
"""Abstract SDE classes, Reverse SDE, and VE/VP SDEs."""
import abc
import torch
import numpy as np
class SDE(abc.ABC):
"""SDE abstract class. Functions are designed for a mini-batch of inputs."""
def __init__(self, N):
"""Construct an SDE.
Args:
N: number of discretization time steps.
"""
super().__init__()
self.N = N
@property
@abc.abstractmethod
def T(self):
"""End time of the SDE."""
pass
@abc.abstractmethod
def sde(self, x, t):
pass
@abc.abstractmethod
def marginal_prob(self, x, t):
"""Parameters to determine the marginal distribution of the SDE, $p_t(x)$."""
pass
@abc.abstractmethod
def prior_sampling(self, shape):
"""Generate one sample from the prior distribution, $p_T(x)$."""
pass
@abc.abstractmethod
def prior_logp(self, z):
"""Compute log-density of the prior distribution.
Useful for computing the log-likelihood via probability flow ODE.
Args:
z: latent code
Returns:
log probability density
"""
pass
def discretize(self, x, t):
"""Discretize the SDE in the form: x_{i+1} = x_i + f_i(x_i) + G_i z_i.
    Useful for reverse diffusion sampling and probability flow sampling.
Defaults to Euler-Maruyama discretization.
Args:
x: a torch tensor
t: a torch float representing the time step (from 0 to `self.T`)
Returns:
f, G
"""
dt = 1 / self.N
drift, diffusion = self.sde(x, t)
f = drift * dt
G = diffusion * torch.sqrt(torch.tensor(dt, device=t.device))
return f, G
def reverse(self, score_fn, probability_flow=False):
"""Create the reverse-time SDE/ODE.
Args:
score_fn: A time-dependent score-based model that takes x and t and returns the score.
probability_flow: If `True`, create the reverse-time ODE used for probability flow sampling.
"""
N = self.N
T = self.T
sde_fn = self.sde
discretize_fn = self.discretize
# Build the class for reverse-time SDE.
class RSDE(self.__class__):
def __init__(self):
self.N = N
self.probability_flow = probability_flow
@property
def T(self):
return T
def sde(self, x, t):
"""Create the drift and diffusion functions for the reverse SDE/ODE."""
drift, diffusion = sde_fn(x, t)
score = score_fn(x, t)
drift = drift - diffusion[:, None, None, None] ** 2 * score * (0.5 if self.probability_flow else 1.)
# Set the diffusion function to zero for ODEs.
diffusion = 0. if self.probability_flow else diffusion
return drift, diffusion
def discretize(self, x, t):
"""Create discretized iteration rules for the reverse diffusion sampler."""
f, G = discretize_fn(x, t)
rev_f = f - G[:, None, None, None] ** 2 * score_fn(x, t) * (0.5 if self.probability_flow else 1.)
rev_G = torch.zeros_like(G) if self.probability_flow else G
return rev_f, rev_G
return RSDE()
class VPSDE(SDE):
def __init__(self, beta_min=0.1, beta_max=20, N=1000):
"""Construct a Variance Preserving SDE.
Args:
beta_min: value of beta(0)
beta_max: value of beta(1)
N: number of discretization steps
"""
super().__init__(N)
self.beta_0 = beta_min
self.beta_1 = beta_max
self.N = N
self.discrete_betas = torch.linspace(beta_min / N, beta_max / N, N)
self.alphas = 1. - self.discrete_betas
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)
self.sqrt_1m_alphas_cumprod = torch.sqrt(1. - self.alphas_cumprod)
@property
def T(self):
return 1
def sde(self, x, t):
beta_t = self.beta_0 + t * (self.beta_1 - self.beta_0)
drift = -0.5 * beta_t[:, None, None, None] * x
diffusion = torch.sqrt(beta_t)
return drift, diffusion
def marginal_prob(self, x, t):
log_mean_coeff = -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
mean = torch.exp(log_mean_coeff[:, None, None, None]) * x
std = torch.sqrt(1. - torch.exp(2. * log_mean_coeff))
return mean, std
def prior_sampling(self, shape):
return torch.randn(*shape)
def prior_logp(self, z):
shape = z.shape
N = np.prod(shape[1:])
logps = -N / 2. * np.log(2 * np.pi) - torch.sum(z ** 2, dim=(1, 2, 3)) / 2.
return logps
def discretize(self, x, t):
"""DDPM discretization."""
timestep = (t * (self.N - 1) / self.T).long()
beta = self.discrete_betas.to(x.device)[timestep]
alpha = self.alphas.to(x.device)[timestep]
sqrt_beta = torch.sqrt(beta)
f = torch.sqrt(alpha)[:, None, None, None] * x - x
G = sqrt_beta
return f, G
class subVPSDE(SDE):
def __init__(self, beta_min=0.1, beta_max=20, N=1000):
"""Construct the sub-VP SDE that excels at likelihoods.
Args:
beta_min: value of beta(0)
beta_max: value of beta(1)
N: number of discretization steps
"""
super().__init__(N)
self.beta_0 = beta_min
self.beta_1 = beta_max
self.N = N
@property
def T(self):
return 1
def sde(self, x, t):
beta_t = self.beta_0 + t * (self.beta_1 - self.beta_0)
drift = -0.5 * beta_t[:, None, None, None] * x
discount = 1. - torch.exp(-2 * self.beta_0 * t - (self.beta_1 - self.beta_0) * t ** 2)
diffusion = torch.sqrt(beta_t * discount)
return drift, diffusion
def marginal_prob(self, x, t):
log_mean_coeff = -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
mean = torch.exp(log_mean_coeff)[:, None, None, None] * x
std = 1 - torch.exp(2. * log_mean_coeff)
return mean, std
def prior_sampling(self, shape):
return torch.randn(*shape)
def prior_logp(self, z):
shape = z.shape
N = np.prod(shape[1:])
return -N / 2. * np.log(2 * np.pi) - torch.sum(z ** 2, dim=(1, 2, 3)) / 2.
class VESDE(SDE):
def __init__(self, sigma_min=0.01, sigma_max=50, N=1000):
"""Construct a Variance Exploding SDE.
Args:
sigma_min: smallest sigma.
sigma_max: largest sigma.
N: number of discretization steps
"""
super().__init__(N)
self.sigma_min = sigma_min
self.sigma_max = sigma_max
self.discrete_sigmas = torch.exp(torch.linspace(np.log(self.sigma_min), np.log(self.sigma_max), N))
self.N = N
@property
def T(self):
return 1
def sde(self, x, t):
sigma = self.sigma_min * (self.sigma_max / self.sigma_min) ** t
drift = torch.zeros_like(x)
diffusion = sigma * torch.sqrt(torch.tensor(2 * (np.log(self.sigma_max) - np.log(self.sigma_min)),
device=t.device))
return drift, diffusion
def marginal_prob(self, x, t):
std = self.sigma_min * (self.sigma_max / self.sigma_min) ** t
mean = x
return mean, std
def prior_sampling(self, shape):
return torch.randn(*shape) * self.sigma_max
def prior_logp(self, z):
shape = z.shape
N = np.prod(shape[1:])
return -N / 2. * np.log(2 * np.pi * self.sigma_max ** 2) - torch.sum(z ** 2, dim=(1, 2, 3)) / (2 * self.sigma_max ** 2)
def discretize(self, x, t):
"""SMLD(NCSN) discretization."""
timestep = (t * (self.N - 1) / self.T).long()
sigma = self.discrete_sigmas.to(t.device)[timestep]
    # Move the sigma table to t's device before indexing, so a (possibly CUDA)
    # timestep tensor never indexes a CPU buffer.
    adjacent_sigma = torch.where(timestep == 0, torch.zeros_like(t),
                                 self.discrete_sigmas.to(t.device)[timestep - 1])
f = torch.zeros_like(x)
G = torch.sqrt(sigma ** 2 - adjacent_sigma ** 2)
    return f, G
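# A minimal usage sketch (illustrative shapes only; the lambda below is a
# placeholder for a trained score model, not a meaningful one): `reverse`
# builds the SDE whose drift subtracts g(t)^2 * score(x, t) from the forward
# drift.
if __name__ == "__main__":
  sde = VPSDE(N=1000)
  x = torch.randn(4, 3, 32, 32)
  t = torch.rand(4) * sde.T
  mean, std = sde.marginal_prob(x, t)  # closed-form perturbation kernel p_t(x | x_0)
  rsde = sde.reverse(lambda x, t: -x, probability_flow=False)
  drift, diffusion = rsde.sde(x, t)
  print(mean.shape, std.shape, drift.shape)  # (4, 3, 32, 32), (4,), (4, 3, 32, 32)
|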
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
# pytype: skip-file
"""Various sampling methods."""
import functools
import torch
import numpy as np
import abc
from .models.utils import from_flattened_numpy, to_flattened_numpy, get_score_fn
from scipy import integrate
from . import sde_lib
from .models import utils as mutils
_CORRECTORS = {}
_PREDICTORS = {}
def register_predictor(cls=None, *, name=None):
"""A decorator for registering predictor classes."""
def _register(cls):
if name is None:
local_name = cls.__name__
else:
local_name = name
if local_name in _PREDICTORS:
      raise ValueError(f'Already registered predictor with name: {local_name}')
_PREDICTORS[local_name] = cls
return cls
if cls is None:
return _register
else:
return _register(cls)
def register_corrector(cls=None, *, name=None):
"""A decorator for registering corrector classes."""
def _register(cls):
if name is None:
local_name = cls.__name__
else:
local_name = name
if local_name in _CORRECTORS:
      raise ValueError(f'Already registered corrector with name: {local_name}')
_CORRECTORS[local_name] = cls
return cls
if cls is None:
return _register
else:
return _register(cls)
def get_predictor(name):
return _PREDICTORS[name]
def get_corrector(name):
return _CORRECTORS[name]
def get_sampling_fn(config, sde, shape, inverse_scaler, eps):
"""Create a sampling function.
Args:
config: A `ml_collections.ConfigDict` object that contains all configuration information.
sde: A `sde_lib.SDE` object that represents the forward SDE.
shape: A sequence of integers representing the expected shape of a single sample.
inverse_scaler: The inverse data normalizer function.
eps: A `float` number. The reverse-time SDE is only integrated to `eps` for numerical stability.
Returns:
A function that takes random states and a replicated training state and outputs samples with the
trailing dimensions matching `shape`.
"""
sampler_name = config.sampling.method
# Probability flow ODE sampling with black-box ODE solvers
if sampler_name.lower() == 'ode':
sampling_fn = get_ode_sampler(sde=sde,
shape=shape,
inverse_scaler=inverse_scaler,
denoise=config.sampling.noise_removal,
eps=eps,
device=config.device)
# Predictor-Corrector sampling. Predictor-only and Corrector-only samplers are special cases.
elif sampler_name.lower() == 'pc':
predictor = get_predictor(config.sampling.predictor.lower())
corrector = get_corrector(config.sampling.corrector.lower())
sampling_fn = get_pc_sampler(sde=sde,
shape=shape,
predictor=predictor,
corrector=corrector,
inverse_scaler=inverse_scaler,
snr=config.sampling.snr,
n_steps=config.sampling.n_steps_each,
probability_flow=config.sampling.probability_flow,
continuous=config.training.continuous,
denoise=config.sampling.noise_removal,
eps=eps,
device=config.device)
else:
raise ValueError(f"Sampler name {sampler_name} unknown.")
return sampling_fn
class Predictor(abc.ABC):
"""The abstract class for a predictor algorithm."""
def __init__(self, sde, score_fn, probability_flow=False):
super().__init__()
self.sde = sde
# Compute the reverse SDE/ODE
self.rsde = sde.reverse(score_fn, probability_flow)
self.score_fn = score_fn
@abc.abstractmethod
def update_fn(self, x, t):
"""One update of the predictor.
Args:
x: A PyTorch tensor representing the current state
      t: A PyTorch tensor representing the current time step.
Returns:
x: A PyTorch tensor of the next state.
x_mean: A PyTorch tensor. The next state without random noise. Useful for denoising.
"""
pass
class Corrector(abc.ABC):
"""The abstract class for a corrector algorithm."""
def __init__(self, sde, score_fn, snr, n_steps):
super().__init__()
self.sde = sde
self.score_fn = score_fn
self.snr = snr
self.n_steps = n_steps
@abc.abstractmethod
def update_fn(self, x, t):
"""One update of the corrector.
Args:
x: A PyTorch tensor representing the current state
t: A PyTorch tensor representing the current time step.
Returns:
x: A PyTorch tensor of the next state.
x_mean: A PyTorch tensor. The next state without random noise. Useful for denoising.
"""
pass
@register_predictor(name='euler_maruyama')
class EulerMaruyamaPredictor(Predictor):
def __init__(self, sde, score_fn, probability_flow=False):
super().__init__(sde, score_fn, probability_flow)
def update_fn(self, x, t):
dt = -1. / self.rsde.N
z = torch.randn_like(x)
drift, diffusion = self.rsde.sde(x, t)
x_mean = x + drift * dt
x = x_mean + diffusion[:, None, None, None] * np.sqrt(-dt) * z
return x, x_mean
@register_predictor(name='reverse_diffusion')
class ReverseDiffusionPredictor(Predictor):
def __init__(self, sde, score_fn, probability_flow=False):
super().__init__(sde, score_fn, probability_flow)
def update_fn(self, x, t):
f, G = self.rsde.discretize(x, t)
z = torch.randn_like(x)
x_mean = x - f
x = x_mean + G[:, None, None, None] * z
return x, x_mean
@register_predictor(name='ancestral_sampling')
class AncestralSamplingPredictor(Predictor):
"""The ancestral sampling predictor. Currently only supports VE/VP SDEs."""
def __init__(self, sde, score_fn, probability_flow=False):
super().__init__(sde, score_fn, probability_flow)
if not isinstance(sde, sde_lib.VPSDE) and not isinstance(sde, sde_lib.VESDE):
raise NotImplementedError(f"SDE class {sde.__class__.__name__} not yet supported.")
assert not probability_flow, "Probability flow not supported by ancestral sampling"
def vesde_update_fn(self, x, t):
sde = self.sde
timestep = (t * (sde.N - 1) / sde.T).long()
    sigma = sde.discrete_sigmas.to(t.device)[timestep]
adjacent_sigma = torch.where(timestep == 0, torch.zeros_like(t), sde.discrete_sigmas.to(t.device)[timestep - 1])
score = self.score_fn(x, t)
x_mean = x + score * (sigma ** 2 - adjacent_sigma ** 2)[:, None, None, None]
std = torch.sqrt((adjacent_sigma ** 2 * (sigma ** 2 - adjacent_sigma ** 2)) / (sigma ** 2))
noise = torch.randn_like(x)
x = x_mean + std[:, None, None, None] * noise
return x, x_mean
def vpsde_update_fn(self, x, t):
sde = self.sde
timestep = (t * (sde.N - 1) / sde.T).long()
beta = sde.discrete_betas.to(t.device)[timestep]
score = self.score_fn(x, t)
x_mean = (x + beta[:, None, None, None] * score) / torch.sqrt(1. - beta)[:, None, None, None]
noise = torch.randn_like(x)
x = x_mean + torch.sqrt(beta)[:, None, None, None] * noise
return x, x_mean
def update_fn(self, x, t):
if isinstance(self.sde, sde_lib.VESDE):
return self.vesde_update_fn(x, t)
elif isinstance(self.sde, sde_lib.VPSDE):
return self.vpsde_update_fn(x, t)
@register_predictor(name='none')
class NonePredictor(Predictor):
"""An empty predictor that does nothing."""
def __init__(self, sde, score_fn, probability_flow=False):
pass
def update_fn(self, x, t):
return x, x
@register_corrector(name='langevin')
class LangevinCorrector(Corrector):
def __init__(self, sde, score_fn, snr, n_steps):
super().__init__(sde, score_fn, snr, n_steps)
if not isinstance(sde, sde_lib.VPSDE) \
and not isinstance(sde, sde_lib.VESDE) \
and not isinstance(sde, sde_lib.subVPSDE):
raise NotImplementedError(f"SDE class {sde.__class__.__name__} not yet supported.")
def update_fn(self, x, t):
sde = self.sde
score_fn = self.score_fn
n_steps = self.n_steps
target_snr = self.snr
if isinstance(sde, sde_lib.VPSDE) or isinstance(sde, sde_lib.subVPSDE):
timestep = (t * (sde.N - 1) / sde.T).long()
alpha = sde.alphas.to(t.device)[timestep]
else:
alpha = torch.ones_like(t)
for i in range(n_steps):
grad = score_fn(x, t)
noise = torch.randn_like(x)
grad_norm = torch.norm(grad.reshape(grad.shape[0], -1), dim=-1).mean()
noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
step_size = (target_snr * noise_norm / grad_norm) ** 2 * 2 * alpha
x_mean = x + step_size[:, None, None, None] * grad
x = x_mean + torch.sqrt(step_size * 2)[:, None, None, None] * noise
return x, x_mean
@register_corrector(name='ald')
class AnnealedLangevinDynamics(Corrector):
"""The original annealed Langevin dynamics predictor in NCSN/NCSNv2.
We include this corrector only for completeness. It was not directly used in our paper.
"""
def __init__(self, sde, score_fn, snr, n_steps):
super().__init__(sde, score_fn, snr, n_steps)
if not isinstance(sde, sde_lib.VPSDE) \
and not isinstance(sde, sde_lib.VESDE) \
and not isinstance(sde, sde_lib.subVPSDE):
raise NotImplementedError(f"SDE class {sde.__class__.__name__} not yet supported.")
def update_fn(self, x, t):
sde = self.sde
score_fn = self.score_fn
n_steps = self.n_steps
target_snr = self.snr
if isinstance(sde, sde_lib.VPSDE) or isinstance(sde, sde_lib.subVPSDE):
timestep = (t * (sde.N - 1) / sde.T).long()
alpha = sde.alphas.to(t.device)[timestep]
else:
alpha = torch.ones_like(t)
std = self.sde.marginal_prob(x, t)[1]
for i in range(n_steps):
grad = score_fn(x, t)
noise = torch.randn_like(x)
step_size = (target_snr * std) ** 2 * 2 * alpha
x_mean = x + step_size[:, None, None, None] * grad
x = x_mean + noise * torch.sqrt(step_size * 2)[:, None, None, None]
return x, x_mean
@register_corrector(name='none')
class NoneCorrector(Corrector):
"""An empty corrector that does nothing."""
def __init__(self, sde, score_fn, snr, n_steps):
pass
def update_fn(self, x, t):
return x, x
def shared_predictor_update_fn(x, t, sde, model, predictor, probability_flow, continuous):
"""A wrapper that configures and returns the update function of predictors."""
score_fn = mutils.get_score_fn(sde, model, train=False, continuous=continuous)
if predictor is None:
# Corrector-only sampler
predictor_obj = NonePredictor(sde, score_fn, probability_flow)
else:
predictor_obj = predictor(sde, score_fn, probability_flow)
return predictor_obj.update_fn(x, t)
def shared_corrector_update_fn(x, t, sde, model, corrector, continuous, snr, n_steps):
"""A wrapper tha configures and returns the update function of correctors."""
score_fn = mutils.get_score_fn(sde, model, train=False, continuous=continuous)
if corrector is None:
# Predictor-only sampler
corrector_obj = NoneCorrector(sde, score_fn, snr, n_steps)
else:
corrector_obj = corrector(sde, score_fn, snr, n_steps)
return corrector_obj.update_fn(x, t)
def get_pc_sampler(sde, shape, predictor, corrector, inverse_scaler, snr,
n_steps=1, probability_flow=False, continuous=False,
denoise=True, eps=1e-3, device='cuda'):
"""Create a Predictor-Corrector (PC) sampler.
Args:
sde: An `sde_lib.SDE` object representing the forward SDE.
shape: A sequence of integers. The expected shape of a single sample.
predictor: A subclass of `sampling.Predictor` representing the predictor algorithm.
corrector: A subclass of `sampling.Corrector` representing the corrector algorithm.
inverse_scaler: The inverse data normalizer.
snr: A `float` number. The signal-to-noise ratio for configuring correctors.
n_steps: An integer. The number of corrector steps per predictor update.
probability_flow: If `True`, solve the reverse-time probability flow ODE when running the predictor.
continuous: `True` indicates that the score model was continuously trained.
denoise: If `True`, add one-step denoising to the final samples.
    eps: A `float` number. The reverse-time SDE and ODE are integrated to `eps` to avoid numerical issues.
device: PyTorch device.
Returns:
A sampling function that returns samples and the number of function evaluations during sampling.
"""
# Create predictor & corrector update functions
predictor_update_fn = functools.partial(shared_predictor_update_fn,
sde=sde,
predictor=predictor,
probability_flow=probability_flow,
continuous=continuous)
corrector_update_fn = functools.partial(shared_corrector_update_fn,
sde=sde,
corrector=corrector,
continuous=continuous,
snr=snr,
n_steps=n_steps)
def pc_sampler(model):
""" The PC sampler funciton.
Args:
model: A score model.
Returns:
Samples, number of function evaluations.
"""
with torch.no_grad():
# Initial sample
x = sde.prior_sampling(shape).to(device)
timesteps = torch.linspace(sde.T, eps, sde.N, device=device)
for i in range(sde.N):
t = timesteps[i]
vec_t = torch.ones(shape[0], device=t.device) * t
x, x_mean = corrector_update_fn(x, vec_t, model=model)
x, x_mean = predictor_update_fn(x, vec_t, model=model)
return inverse_scaler(x_mean if denoise else x), sde.N * (n_steps + 1)
return pc_sampler
def get_ode_sampler(sde, shape, inverse_scaler,
denoise=False, rtol=1e-5, atol=1e-5,
method='RK45', eps=1e-3, device='cuda'):
"""Probability flow ODE sampler with the black-box ODE solver.
Args:
sde: An `sde_lib.SDE` object that represents the forward SDE.
shape: A sequence of integers. The expected shape of a single sample.
inverse_scaler: The inverse data normalizer.
denoise: If `True`, add one-step denoising to final samples.
rtol: A `float` number. The relative tolerance level of the ODE solver.
atol: A `float` number. The absolute tolerance level of the ODE solver.
method: A `str`. The algorithm used for the black-box ODE solver.
See the documentation of `scipy.integrate.solve_ivp`.
eps: A `float` number. The reverse-time SDE/ODE will be integrated to `eps` for numerical stability.
device: PyTorch device.
Returns:
A sampling function that returns samples and the number of function evaluations during sampling.
"""
def denoise_update_fn(model, x):
score_fn = get_score_fn(sde, model, train=False, continuous=True)
# Reverse diffusion predictor for denoising
predictor_obj = ReverseDiffusionPredictor(sde, score_fn, probability_flow=False)
vec_eps = torch.ones(x.shape[0], device=x.device) * eps
_, x = predictor_obj.update_fn(x, vec_eps)
return x
def drift_fn(model, x, t):
"""Get the drift function of the reverse-time SDE."""
score_fn = get_score_fn(sde, model, train=False, continuous=True)
rsde = sde.reverse(score_fn, probability_flow=True)
return rsde.sde(x, t)[0]
def ode_sampler(model, z=None):
"""The probability flow ODE sampler with black-box ODE solver.
Args:
model: A score model.
z: If present, generate samples from latent code `z`.
Returns:
samples, number of function evaluations.
"""
with torch.no_grad():
# Initial sample
if z is None:
        # If not present, sample the latent code from the prior distribution of the SDE.
x = sde.prior_sampling(shape).to(device)
else:
x = z
def ode_func(t, x):
x = from_flattened_numpy(x, shape).to(device).type(torch.float32)
vec_t = torch.ones(shape[0], device=x.device) * t
drift = drift_fn(model, x, vec_t)
return to_flattened_numpy(drift)
# Black-box ODE solver for the probability flow ODE
solution = integrate.solve_ivp(ode_func, (sde.T, eps), to_flattened_numpy(x),
rtol=rtol, atol=atol, method=method)
nfe = solution.nfev
x = torch.tensor(solution.y[:, -1]).reshape(shape).to(device).type(torch.float32)
# Denoising is equivalent to running one predictor step without adding noise
if denoise:
x = denoise_update_fn(model, x)
x = inverse_scaler(x)
return x, nfe
return ode_sampler
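# A minimal usage sketch. get_sampling_fn normally drives this module with a
# full config; the crude stand-in score function below is a placeholder for a
# trained model, so one predictor/corrector round can be exercised directly.
# Because of the relative imports, treat this as a sketch, not a script.
if __name__ == "__main__":
  sde = sde_lib.VESDE(N=100)
  def score_fn(x, t):
    std = sde.marginal_prob(x, t)[1]
    return -x / std[:, None, None, None] ** 2
  x = sde.prior_sampling((4, 3, 32, 32))
  t = torch.ones(4) * sde.T
  predictor = ReverseDiffusionPredictor(sde, score_fn)
  corrector = LangevinCorrector(sde, score_fn, snr=0.16, n_steps=1)
  x, x_mean = predictor.update_fn(x, t)
  x, x_mean = corrector.update_fn(x, t)
  print(x.shape)  # -> torch.Size([4, 3, 32, 32])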
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All functions related to loss computation and optimization.
"""
import torch
import torch.optim as optim
import numpy as np
from .models import utils as mutils
from .sde_lib import VESDE, VPSDE
def get_optimizer(config, params):
"""Returns a flax optimizer object based on `config`."""
if config.optim.optimizer == 'Adam':
optimizer = optim.Adam(params, lr=config.optim.lr, betas=(config.optim.beta1, 0.999), eps=config.optim.eps,
weight_decay=config.optim.weight_decay)
else:
raise NotImplementedError(
f'Optimizer {config.optim.optimizer} not supported yet!')
return optimizer
def optimization_manager(config):
"""Returns an optimize_fn based on `config`."""
def optimize_fn(optimizer, params, step, lr=config.optim.lr,
warmup=config.optim.warmup,
grad_clip=config.optim.grad_clip):
"""Optimizes with warmup and gradient clipping (disabled if negative)."""
if warmup > 0:
for g in optimizer.param_groups:
g['lr'] = lr * np.minimum(step / warmup, 1.0)
if grad_clip >= 0:
torch.nn.utils.clip_grad_norm_(params, max_norm=grad_clip)
optimizer.step()
return optimize_fn
def get_sde_loss_fn(sde, train, reduce_mean=True, continuous=True, likelihood_weighting=True, eps=1e-5):
"""Create a loss function for training with arbirary SDEs.
Args:
sde: An `sde_lib.SDE` object that represents the forward SDE.
train: `True` for training loss and `False` for evaluation loss.
reduce_mean: If `True`, average the loss across data dimensions. Otherwise sum the loss across data dimensions.
continuous: `True` indicates that the model is defined to take continuous time steps. Otherwise it requires
ad-hoc interpolation to take continuous time steps.
likelihood_weighting: If `True`, weight the mixture of score matching losses
according to https://arxiv.org/abs/2101.09258; otherwise use the weighting recommended in our paper.
eps: A `float` number. The smallest time step to sample from.
Returns:
A loss function.
"""
reduce_op = torch.mean if reduce_mean else lambda *args, **kwargs: 0.5 * torch.sum(*args, **kwargs)
def loss_fn(model, batch):
"""Compute the loss function.
Args:
model: A score model.
batch: A mini-batch of training data.
Returns:
loss: A scalar that represents the average loss value across the mini-batch.
"""
score_fn = mutils.get_score_fn(sde, model, train=train, continuous=continuous)
t = torch.rand(batch.shape[0], device=batch.device) * (sde.T - eps) + eps
z = torch.randn_like(batch)
mean, std = sde.marginal_prob(batch, t)
perturbed_data = mean + std[:, None, None, None] * z
score = score_fn(perturbed_data, t)
if not likelihood_weighting:
losses = torch.square(score * std[:, None, None, None] + z)
losses = reduce_op(losses.reshape(losses.shape[0], -1), dim=-1)
else:
g2 = sde.sde(torch.zeros_like(batch), t)[1] ** 2
losses = torch.square(score + z / std[:, None, None, None])
losses = reduce_op(losses.reshape(losses.shape[0], -1), dim=-1) * g2
loss = torch.mean(losses)
return loss
return loss_fn
def get_smld_loss_fn(vesde, train, reduce_mean=False):
"""Legacy code to reproduce previous results on SMLD(NCSN). Not recommended for new work."""
assert isinstance(vesde, VESDE), "SMLD training only works for VESDEs."
# Previous SMLD models assume descending sigmas
smld_sigma_array = torch.flip(vesde.discrete_sigmas, dims=(0,))
reduce_op = torch.mean if reduce_mean else lambda *args, **kwargs: 0.5 * torch.sum(*args, **kwargs)
def loss_fn(model, batch):
model_fn = mutils.get_model_fn(model, train=train)
labels = torch.randint(0, vesde.N, (batch.shape[0],), device=batch.device)
sigmas = smld_sigma_array.to(batch.device)[labels]
noise = torch.randn_like(batch) * sigmas[:, None, None, None]
perturbed_data = noise + batch
score = model_fn(perturbed_data, labels)
target = -noise / (sigmas ** 2)[:, None, None, None]
losses = torch.square(score - target)
losses = reduce_op(losses.reshape(losses.shape[0], -1), dim=-1) * sigmas ** 2
loss = torch.mean(losses)
return loss
return loss_fn
def get_ddpm_loss_fn(vpsde, train, reduce_mean=True):
"""Legacy code to reproduce previous results on DDPM. Not recommended for new work."""
assert isinstance(vpsde, VPSDE), "DDPM training only works for VPSDEs."
reduce_op = torch.mean if reduce_mean else lambda *args, **kwargs: 0.5 * torch.sum(*args, **kwargs)
def loss_fn(model, batch):
model_fn = mutils.get_model_fn(model, train=train)
labels = torch.randint(0, vpsde.N, (batch.shape[0],), device=batch.device)
sqrt_alphas_cumprod = vpsde.sqrt_alphas_cumprod.to(batch.device)
sqrt_1m_alphas_cumprod = vpsde.sqrt_1m_alphas_cumprod.to(batch.device)
noise = torch.randn_like(batch)
perturbed_data = sqrt_alphas_cumprod[labels, None, None, None] * batch + \
sqrt_1m_alphas_cumprod[labels, None, None, None] * noise
score = model_fn(perturbed_data, labels)
losses = torch.square(score - noise)
losses = reduce_op(losses.reshape(losses.shape[0], -1), dim=-1)
loss = torch.mean(losses)
return loss
return loss_fn
def get_step_fn(sde, train, optimize_fn=None, reduce_mean=False, continuous=True, likelihood_weighting=False):
"""Create a one-step training/evaluation function.
Args:
sde: An `sde_lib.SDE` object that represents the forward SDE.
optimize_fn: An optimization function.
reduce_mean: If `True`, average the loss across data dimensions. Otherwise sum the loss across data dimensions.
continuous: `True` indicates that the model is defined to take continuous time steps.
likelihood_weighting: If `True`, weight the mixture of score matching losses according to
https://arxiv.org/abs/2101.09258; otherwise use the weighting recommended by our paper.
Returns:
A one-step function for training or evaluation.
"""
if continuous:
loss_fn = get_sde_loss_fn(sde, train, reduce_mean=reduce_mean,
continuous=True, likelihood_weighting=likelihood_weighting)
else:
assert not likelihood_weighting, "Likelihood weighting is not supported for original SMLD/DDPM training."
if isinstance(sde, VESDE):
loss_fn = get_smld_loss_fn(sde, train, reduce_mean=reduce_mean)
elif isinstance(sde, VPSDE):
loss_fn = get_ddpm_loss_fn(sde, train, reduce_mean=reduce_mean)
else:
raise ValueError(f"Discrete training for {sde.__class__.__name__} is not recommended.")
def step_fn(state, batch):
"""Running one step of training or evaluation.
This function will undergo `jax.lax.scan` so that multiple steps can be pmapped and jit-compiled together
for faster execution.
Args:
state: A dictionary of training information, containing the score model, optimizer,
EMA status, and number of optimization steps.
batch: A mini-batch of training/evaluation data.
Returns:
      loss: The average loss value over this mini-batch.
"""
model = state['model']
if train:
optimizer = state['optimizer']
optimizer.zero_grad()
loss = loss_fn(model, batch)
loss.backward()
optimize_fn(optimizer, model.parameters(), step=state['step'])
state['step'] += 1
state['ema'].update(model.parameters())
else:
with torch.no_grad():
ema = state['ema']
ema.store(model.parameters())
ema.copy_to(model.parameters())
loss = loss_fn(model, batch)
ema.restore(model.parameters())
return loss
return step_fn
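# A minimal usage sketch, assuming mutils.get_model_fn simply forwards
# (x, labels) to the model, as in the upstream repo: a toy module can stand in
# for a real score network when smoke-testing the DDPM loss. The toy model is
# a placeholder, not a score estimator.
if __name__ == "__main__":
  class _ToyScoreModel(torch.nn.Module):
    def forward(self, x, labels):
      return -x

  vpsde = VPSDE(N=1000)
  loss_fn = get_ddpm_loss_fn(vpsde, train=False, reduce_mean=True)
  batch = torch.randn(8, 3, 32, 32)
  print(loss_fn(_ToyScoreModel(), batch).item())  # scalar loss value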
|
# ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/yang-song/score_sde_pytorch/blob/main/op/fused_act.py
#
# The license for the original version of this file can be
# found in the `score_sde` directory (LICENSE_SCORE_SDE).
# ---------------------------------------------------------------
import os
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Function
from torch.utils.cpp_extension import load
module_path = os.path.dirname(__file__)
fused = load(
"fused",
sources=[
os.path.join(module_path, "fused_bias_act.cpp"),
os.path.join(module_path, "fused_bias_act_kernel.cu"),
],
)
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(
grad_output, empty, out, 3, 1, negative_slope, scale
)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
grad_bias = grad_input.sum(dim).detach()
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(
gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale
)
return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
empty = input.new_empty(0)
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.negative_slope, ctx.scale
)
return grad_input, grad_bias, None, None
class FusedLeakyReLU(nn.Module):
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
super().__init__()
self.bias = nn.Parameter(torch.zeros(channel))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input):
return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
if input.device.type == "cpu":
rest_dim = [1] * (input.ndim - bias.ndim - 1)
return (
F.leaky_relu(
                input + bias.view(1, bias.shape[0], *rest_dim),
                negative_slope=negative_slope,  # honor the argument; upstream hard-coded 0.2
)
* scale
)
else:
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
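# A minimal usage sketch. Note that importing this module JIT-compiles the CUDA
# extension (a working nvcc is required even to import it); CPU tensors then
# take the pure-PyTorch fallback branch, i.e. bias add + leaky_relu, scaled by
# `scale`.
if __name__ == "__main__":
    x = torch.randn(2, 8, 4, 4)  # CPU tensor -> fallback branch
    bias = torch.zeros(8)
    y = fused_leaky_relu(x, bias)
    print(y.shape)  # -> torch.Size([2, 8, 4, 4])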
|
# ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/yang-song/score_sde_pytorch/blob/main/op/__init__.py
#
# The license for the original version of this file can be
# found in the `score_sde` directory (LICENSE_SCORE_SDE).
# ---------------------------------------------------------------
from .fused_act import FusedLeakyReLU, fused_leaky_relu
from .upfirdn2d import upfirdn2d
|
# ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/yang-song/score_sde_pytorch/blob/main/op/upfirdn2d.py
#
# The license for the original version of this file can be
# found in the `score_sde` directory (LICENSE_SCORE_SDE).
# ---------------------------------------------------------------
import os
import torch
from torch.nn import functional as F
from torch.autograd import Function
from torch.utils.cpp_extension import load
module_path = os.path.dirname(__file__)
upfirdn2d_op = load(
"upfirdn2d",
sources=[
os.path.join(module_path, "upfirdn2d.cpp"),
os.path.join(module_path, "upfirdn2d_kernel.cu"),
],
)
class UpFirDn2dBackward(Function):
@staticmethod
def forward(
ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
):
up_x, up_y = up
down_x, down_y = down
g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
grad_input = upfirdn2d_op.upfirdn2d(
grad_output,
grad_kernel,
down_x,
down_y,
up_x,
up_y,
g_pad_x0,
g_pad_x1,
g_pad_y0,
g_pad_y1,
)
grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])
ctx.save_for_backward(kernel)
pad_x0, pad_x1, pad_y0, pad_y1 = pad
ctx.up_x = up_x
ctx.up_y = up_y
ctx.down_x = down_x
ctx.down_y = down_y
ctx.pad_x0 = pad_x0
ctx.pad_x1 = pad_x1
ctx.pad_y0 = pad_y0
ctx.pad_y1 = pad_y1
ctx.in_size = in_size
ctx.out_size = out_size
return grad_input
@staticmethod
def backward(ctx, gradgrad_input):
kernel, = ctx.saved_tensors
gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)
gradgrad_out = upfirdn2d_op.upfirdn2d(
gradgrad_input,
kernel,
ctx.up_x,
ctx.up_y,
ctx.down_x,
ctx.down_y,
ctx.pad_x0,
ctx.pad_x1,
ctx.pad_y0,
ctx.pad_y1,
)
# gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
gradgrad_out = gradgrad_out.view(
ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
)
return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
@staticmethod
def forward(ctx, input, kernel, up, down, pad):
up_x, up_y = up
down_x, down_y = down
pad_x0, pad_x1, pad_y0, pad_y1 = pad
kernel_h, kernel_w = kernel.shape
batch, channel, in_h, in_w = input.shape
ctx.in_size = input.shape
input = input.reshape(-1, in_h, in_w, 1)
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
ctx.out_size = (out_h, out_w)
ctx.up = (up_x, up_y)
ctx.down = (down_x, down_y)
ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
g_pad_x0 = kernel_w - pad_x0 - 1
g_pad_y0 = kernel_h - pad_y0 - 1
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
out = upfirdn2d_op.upfirdn2d(
input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
)
# out = out.view(major, out_h, out_w, minor)
out = out.view(-1, channel, out_h, out_w)
return out
@staticmethod
def backward(ctx, grad_output):
kernel, grad_kernel = ctx.saved_tensors
grad_input = UpFirDn2dBackward.apply(
grad_output,
kernel,
grad_kernel,
ctx.up,
ctx.down,
ctx.pad,
ctx.g_pad,
ctx.in_size,
ctx.out_size,
)
return grad_input, None, None, None, None
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
if input.device.type == "cpu":
out = upfirdn2d_native(
input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]
)
else:
out = UpFirDn2d.apply(
input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])
)
return out
def upfirdn2d_native(
input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
):
_, channel, in_h, in_w = input.shape
input = input.reshape(-1, in_h, in_w, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(
out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
)
out = out[
:,
max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
:,
]
out = out.permute(0, 3, 1, 2)
out = out.reshape(
[-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
)
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(
-1,
minor,
in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
)
out = out.permute(0, 2, 3, 1)
out = out[:, ::down_y, ::down_x, :]
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
return out.view(-1, channel, out_h, out_w)
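# A minimal usage sketch calling the pure-PyTorch reference path directly:
# upsample by zero insertion, pad, convolve with the flipped FIR kernel, then
# downsample. The normalized 3x3 blur kernel below is illustrative.
if __name__ == "__main__":
    x = torch.randn(1, 3, 8, 8)
    k = torch.tensor([[1.0, 2.0, 1.0]])
    k = (k.t() @ k) / 16.0  # outer product -> 3x3 kernel summing to 1
    out = upfirdn2d_native(x, k, 2, 2, 1, 1, 1, 1, 1, 1)  # 2x upsample + blur
    print(out.shape)  # -> torch.Size([1, 3, 16, 16])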
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""The NCSNv2 model."""
import torch
import torch.nn as nn
import functools
from .utils import get_sigmas, register_model
from .layers import (CondRefineBlock, RefineBlock, ResidualBlock, ncsn_conv3x3,
ConditionalResidualBlock, get_act)
from .normalization import get_normalization
CondResidualBlock = ConditionalResidualBlock
conv3x3 = ncsn_conv3x3
def get_network(config):
if config.data.image_size < 96:
return functools.partial(NCSNv2, config=config)
elif 96 <= config.data.image_size <= 128:
return functools.partial(NCSNv2_128, config=config)
elif 128 < config.data.image_size <= 256:
return functools.partial(NCSNv2_256, config=config)
else:
raise NotImplementedError(
f'No network suitable for {config.data.image_size}px implemented yet.')
@register_model(name='ncsnv2_64')
class NCSNv2(nn.Module):
def __init__(self, config):
super().__init__()
self.centered = config.data.centered
self.norm = get_normalization(config)
self.nf = nf = config.model.nf
self.act = act = get_act(config)
self.register_buffer('sigmas', torch.tensor(get_sigmas(config)))
self.config = config
self.begin_conv = nn.Conv2d(config.data.channels, nf, 3, stride=1, padding=1)
self.normalizer = self.norm(nf, config.model.num_scales)
self.end_conv = nn.Conv2d(nf, config.data.channels, 3, stride=1, padding=1)
self.res1 = nn.ModuleList([
ResidualBlock(self.nf, self.nf, resample=None, act=act,
normalization=self.norm),
ResidualBlock(self.nf, self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res2 = nn.ModuleList([
ResidualBlock(self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res3 = nn.ModuleList([
ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm, dilation=2),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm, dilation=2)]
)
if config.data.image_size == 28:
self.res4 = nn.ModuleList([
ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm, adjust_padding=True, dilation=4),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm, dilation=4)]
)
else:
self.res4 = nn.ModuleList([
ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm, adjust_padding=False, dilation=4),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm, dilation=4)]
)
self.refine1 = RefineBlock([2 * self.nf], 2 * self.nf, act=act, start=True)
self.refine2 = RefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, act=act)
self.refine3 = RefineBlock([2 * self.nf, 2 * self.nf], self.nf, act=act)
self.refine4 = RefineBlock([self.nf, self.nf], self.nf, act=act, end=True)
def _compute_cond_module(self, module, x):
for m in module:
x = m(x)
return x
def forward(self, x, y):
if not self.centered:
h = 2 * x - 1.
else:
h = x
output = self.begin_conv(h)
layer1 = self._compute_cond_module(self.res1, output)
layer2 = self._compute_cond_module(self.res2, layer1)
layer3 = self._compute_cond_module(self.res3, layer2)
layer4 = self._compute_cond_module(self.res4, layer3)
ref1 = self.refine1([layer4], layer4.shape[2:])
ref2 = self.refine2([layer3, ref1], layer3.shape[2:])
ref3 = self.refine3([layer2, ref2], layer2.shape[2:])
output = self.refine4([layer1, ref3], layer1.shape[2:])
output = self.normalizer(output)
output = self.act(output)
output = self.end_conv(output)
used_sigmas = self.sigmas[y].view(x.shape[0], *([1] * len(x.shape[1:])))
output = output / used_sigmas
return output
@register_model(name='ncsn')
class NCSN(nn.Module):
def __init__(self, config):
super().__init__()
self.centered = config.data.centered
self.norm = get_normalization(config)
self.nf = nf = config.model.nf
self.act = act = get_act(config)
self.config = config
self.begin_conv = nn.Conv2d(config.data.channels, nf, 3, stride=1, padding=1)
self.normalizer = self.norm(nf, config.model.num_scales)
self.end_conv = nn.Conv2d(nf, config.data.channels, 3, stride=1, padding=1)
self.res1 = nn.ModuleList([
ConditionalResidualBlock(self.nf, self.nf, config.model.num_scales, resample=None, act=act,
normalization=self.norm),
ConditionalResidualBlock(self.nf, self.nf, config.model.num_scales, resample=None, act=act,
normalization=self.norm)]
)
self.res2 = nn.ModuleList([
ConditionalResidualBlock(self.nf, 2 * self.nf, config.model.num_scales, resample='down', act=act,
normalization=self.norm),
ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample=None, act=act,
normalization=self.norm)]
)
self.res3 = nn.ModuleList([
ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample='down', act=act,
normalization=self.norm, dilation=2),
ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample=None, act=act,
normalization=self.norm, dilation=2)]
)
if config.data.image_size == 28:
self.res4 = nn.ModuleList([
ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample='down', act=act,
normalization=self.norm, adjust_padding=True, dilation=4),
ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample=None, act=act,
normalization=self.norm, dilation=4)]
)
else:
self.res4 = nn.ModuleList([
ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample='down', act=act,
normalization=self.norm, adjust_padding=False, dilation=4),
ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample=None, act=act,
normalization=self.norm, dilation=4)]
)
self.refine1 = CondRefineBlock([2 * self.nf], 2 * self.nf, config.model.num_scales, self.norm, act=act, start=True)
self.refine2 = CondRefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, config.model.num_scales, self.norm, act=act)
self.refine3 = CondRefineBlock([2 * self.nf, 2 * self.nf], self.nf, config.model.num_scales, self.norm, act=act)
self.refine4 = CondRefineBlock([self.nf, self.nf], self.nf, config.model.num_scales, self.norm, act=act, end=True)
def _compute_cond_module(self, module, x, y):
for m in module:
x = m(x, y)
return x
def forward(self, x, y):
if not self.centered:
h = 2 * x - 1.
else:
h = x
output = self.begin_conv(h)
layer1 = self._compute_cond_module(self.res1, output, y)
layer2 = self._compute_cond_module(self.res2, layer1, y)
layer3 = self._compute_cond_module(self.res3, layer2, y)
layer4 = self._compute_cond_module(self.res4, layer3, y)
ref1 = self.refine1([layer4], y, layer4.shape[2:])
ref2 = self.refine2([layer3, ref1], y, layer3.shape[2:])
ref3 = self.refine3([layer2, ref2], y, layer2.shape[2:])
output = self.refine4([layer1, ref3], y, layer1.shape[2:])
output = self.normalizer(output, y)
output = self.act(output)
output = self.end_conv(output)
return output
@register_model(name='ncsnv2_128')
class NCSNv2_128(nn.Module):
"""NCSNv2 model architecture for 128px images."""
def __init__(self, config):
super().__init__()
self.centered = config.data.centered
self.norm = get_normalization(config)
self.nf = nf = config.model.nf
self.act = act = get_act(config)
self.register_buffer('sigmas', torch.tensor(get_sigmas(config)))
self.config = config
self.begin_conv = nn.Conv2d(config.data.channels, nf, 3, stride=1, padding=1)
self.normalizer = self.norm(nf, config.model.num_scales)
self.end_conv = nn.Conv2d(nf, config.data.channels, 3, stride=1, padding=1)
self.res1 = nn.ModuleList([
ResidualBlock(self.nf, self.nf, resample=None, act=act,
normalization=self.norm),
ResidualBlock(self.nf, self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res2 = nn.ModuleList([
ResidualBlock(self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res3 = nn.ModuleList([
ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res4 = nn.ModuleList([
ResidualBlock(2 * self.nf, 4 * self.nf, resample='down', act=act,
normalization=self.norm, dilation=2),
ResidualBlock(4 * self.nf, 4 * self.nf, resample=None, act=act,
normalization=self.norm, dilation=2)]
)
self.res5 = nn.ModuleList([
ResidualBlock(4 * self.nf, 4 * self.nf, resample='down', act=act,
normalization=self.norm, dilation=4),
ResidualBlock(4 * self.nf, 4 * self.nf, resample=None, act=act,
normalization=self.norm, dilation=4)]
)
self.refine1 = RefineBlock([4 * self.nf], 4 * self.nf, act=act, start=True)
self.refine2 = RefineBlock([4 * self.nf, 4 * self.nf], 2 * self.nf, act=act)
self.refine3 = RefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, act=act)
self.refine4 = RefineBlock([2 * self.nf, 2 * self.nf], self.nf, act=act)
self.refine5 = RefineBlock([self.nf, self.nf], self.nf, act=act, end=True)
def _compute_cond_module(self, module, x):
for m in module:
x = m(x)
return x
def forward(self, x, y):
if not self.centered:
h = 2 * x - 1.
else:
h = x
output = self.begin_conv(h)
layer1 = self._compute_cond_module(self.res1, output)
layer2 = self._compute_cond_module(self.res2, layer1)
layer3 = self._compute_cond_module(self.res3, layer2)
layer4 = self._compute_cond_module(self.res4, layer3)
layer5 = self._compute_cond_module(self.res5, layer4)
ref1 = self.refine1([layer5], layer5.shape[2:])
ref2 = self.refine2([layer4, ref1], layer4.shape[2:])
ref3 = self.refine3([layer3, ref2], layer3.shape[2:])
ref4 = self.refine4([layer2, ref3], layer2.shape[2:])
output = self.refine5([layer1, ref4], layer1.shape[2:])
output = self.normalizer(output)
output = self.act(output)
output = self.end_conv(output)
used_sigmas = self.sigmas[y].view(x.shape[0], *([1] * len(x.shape[1:])))
output = output / used_sigmas
return output
@register_model(name='ncsnv2_256')
class NCSNv2_256(nn.Module):
"""NCSNv2 model architecture for 256px images."""
def __init__(self, config):
super().__init__()
self.centered = config.data.centered
self.norm = get_normalization(config)
self.nf = nf = config.model.nf
self.act = act = get_act(config)
self.register_buffer('sigmas', torch.tensor(get_sigmas(config)))
self.config = config
self.begin_conv = nn.Conv2d(config.data.channels, nf, 3, stride=1, padding=1)
self.normalizer = self.norm(nf, config.model.num_scales)
self.end_conv = nn.Conv2d(nf, config.data.channels, 3, stride=1, padding=1)
self.res1 = nn.ModuleList([
ResidualBlock(self.nf, self.nf, resample=None, act=act,
normalization=self.norm),
ResidualBlock(self.nf, self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res2 = nn.ModuleList([
ResidualBlock(self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res3 = nn.ModuleList([
ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res31 = nn.ModuleList([
ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
normalization=self.norm),
ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
normalization=self.norm)]
)
self.res4 = nn.ModuleList([
ResidualBlock(2 * self.nf, 4 * self.nf, resample='down', act=act,
normalization=self.norm, dilation=2),
ResidualBlock(4 * self.nf, 4 * self.nf, resample=None, act=act,
normalization=self.norm, dilation=2)]
)
self.res5 = nn.ModuleList([
ResidualBlock(4 * self.nf, 4 * self.nf, resample='down', act=act,
normalization=self.norm, dilation=4),
ResidualBlock(4 * self.nf, 4 * self.nf, resample=None, act=act,
normalization=self.norm, dilation=4)]
)
self.refine1 = RefineBlock([4 * self.nf], 4 * self.nf, act=act, start=True)
self.refine2 = RefineBlock([4 * self.nf, 4 * self.nf], 2 * self.nf, act=act)
self.refine3 = RefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, act=act)
self.refine31 = RefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, act=act)
self.refine4 = RefineBlock([2 * self.nf, 2 * self.nf], self.nf, act=act)
self.refine5 = RefineBlock([self.nf, self.nf], self.nf, act=act, end=True)
def _compute_cond_module(self, module, x):
for m in module:
x = m(x)
return x
def forward(self, x, y):
if not self.centered:
h = 2 * x - 1.
else:
h = x
output = self.begin_conv(h)
layer1 = self._compute_cond_module(self.res1, output)
layer2 = self._compute_cond_module(self.res2, layer1)
layer3 = self._compute_cond_module(self.res3, layer2)
layer31 = self._compute_cond_module(self.res31, layer3)
layer4 = self._compute_cond_module(self.res4, layer31)
layer5 = self._compute_cond_module(self.res5, layer4)
ref1 = self.refine1([layer5], layer5.shape[2:])
ref2 = self.refine2([layer4, ref1], layer4.shape[2:])
ref31 = self.refine31([layer31, ref2], layer31.shape[2:])
ref3 = self.refine3([layer3, ref31], layer3.shape[2:])
ref4 = self.refine4([layer2, ref3], layer2.shape[2:])
output = self.refine5([layer1, ref4], layer1.shape[2:])
output = self.normalizer(output)
output = self.act(output)
output = self.end_conv(output)
used_sigmas = self.sigmas[y].view(x.shape[0], *([1] * len(x.shape[1:])))
output = output / used_sigmas
    return output
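# A minimal usage sketch: get_network dispatches on config.data.image_size.
# The SimpleNamespace below is a hypothetical stand-in for the repo's
# ml_collections config, carrying only the single field this function reads.
if __name__ == "__main__":
  from types import SimpleNamespace
  cfg = SimpleNamespace(data=SimpleNamespace(image_size=64))
  builder = get_network(cfg)  # a functools.partial over the chosen class
  print(builder.func.__name__)  # -> 'NCSNv2'
|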
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""Layers for defining NCSN++.
"""
from . import layers
from . import up_or_down_sampling
import torch.nn as nn
import torch
import torch.nn.functional as F
import numpy as np
conv1x1 = layers.ddpm_conv1x1
conv3x3 = layers.ddpm_conv3x3
NIN = layers.NIN
default_init = layers.default_init
class GaussianFourierProjection(nn.Module):
"""Gaussian Fourier embeddings for noise levels."""
def __init__(self, embedding_size=256, scale=1.0):
super().__init__()
self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)
def forward(self, x):
x_proj = x[:, None] * self.W[None, :] * 2 * np.pi
return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1)
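# A minimal usage sketch: the projection maps a batch of scalar times / noise
# levels to fixed random sinusoidal features; W is frozen
# (requires_grad=False) at construction.
if __name__ == "__main__":
  emb = GaussianFourierProjection(embedding_size=128, scale=16.0)
  t = torch.rand(4)
  print(emb(t).shape)  # -> torch.Size([4, 256]), i.e. 2 * embedding_size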
class Combine(nn.Module):
"""Combine information from skip connections."""
def __init__(self, dim1, dim2, method='cat'):
super().__init__()
self.Conv_0 = conv1x1(dim1, dim2)
self.method = method
def forward(self, x, y):
h = self.Conv_0(x)
if self.method == 'cat':
return torch.cat([h, y], dim=1)
elif self.method == 'sum':
return h + y
else:
raise ValueError(f'Method {self.method} not recognized.')
class AttnBlockpp(nn.Module):
"""Channel-wise self-attention block. Modified from DDPM."""
def __init__(self, channels, skip_rescale=False, init_scale=0.):
super().__init__()
self.GroupNorm_0 = nn.GroupNorm(num_groups=min(channels // 4, 32), num_channels=channels,
eps=1e-6)
self.NIN_0 = NIN(channels, channels)
self.NIN_1 = NIN(channels, channels)
self.NIN_2 = NIN(channels, channels)
self.NIN_3 = NIN(channels, channels, init_scale=init_scale)
self.skip_rescale = skip_rescale
def forward(self, x):
B, C, H, W = x.shape
h = self.GroupNorm_0(x)
q = self.NIN_0(h)
k = self.NIN_1(h)
v = self.NIN_2(h)
w = torch.einsum('bchw,bcij->bhwij', q, k) * (int(C) ** (-0.5))
w = torch.reshape(w, (B, H, W, H * W))
w = F.softmax(w, dim=-1)
w = torch.reshape(w, (B, H, W, H, W))
h = torch.einsum('bhwij,bcij->bchw', w, v)
h = self.NIN_3(h)
if not self.skip_rescale:
return x + h
else:
return (x + h) / np.sqrt(2.)
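# A minimal usage sketch: the attention block is shape-preserving; each pixel
# attends over all H*W positions, followed by a final NIN projection and an
# optional 1/sqrt(2) rescaling of the residual sum.
if __name__ == "__main__":
  attn = AttnBlockpp(channels=64, skip_rescale=True)
  h = torch.randn(2, 64, 8, 8)
  print(attn(h).shape)  # -> torch.Size([2, 64, 8, 8])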
class Upsample(nn.Module):
def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir=False,
fir_kernel=(1, 3, 3, 1)):
super().__init__()
out_ch = out_ch if out_ch else in_ch
if not fir:
if with_conv:
self.Conv_0 = conv3x3(in_ch, out_ch)
else:
if with_conv:
self.Conv2d_0 = up_or_down_sampling.Conv2d(in_ch, out_ch,
kernel=3, up=True,
resample_kernel=fir_kernel,
use_bias=True,
kernel_init=default_init())
self.fir = fir
self.with_conv = with_conv
self.fir_kernel = fir_kernel
self.out_ch = out_ch
def forward(self, x):
B, C, H, W = x.shape
if not self.fir:
      h = F.interpolate(x, (H * 2, W * 2), mode='nearest')
if self.with_conv:
h = self.Conv_0(h)
else:
if not self.with_conv:
h = up_or_down_sampling.upsample_2d(x, self.fir_kernel, factor=2)
else:
h = self.Conv2d_0(x)
return h
class Downsample(nn.Module):
def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir=False,
fir_kernel=(1, 3, 3, 1)):
super().__init__()
out_ch = out_ch if out_ch else in_ch
if not fir:
if with_conv:
self.Conv_0 = conv3x3(in_ch, out_ch, stride=2, padding=0)
else:
if with_conv:
self.Conv2d_0 = up_or_down_sampling.Conv2d(in_ch, out_ch,
kernel=3, down=True,
resample_kernel=fir_kernel,
use_bias=True,
kernel_init=default_init())
self.fir = fir
self.fir_kernel = fir_kernel
self.with_conv = with_conv
self.out_ch = out_ch
def forward(self, x):
B, C, H, W = x.shape
if not self.fir:
if self.with_conv:
x = F.pad(x, (0, 1, 0, 1))
x = self.Conv_0(x)
else:
x = F.avg_pool2d(x, 2, stride=2)
else:
if not self.with_conv:
x = up_or_down_sampling.downsample_2d(x, self.fir_kernel, factor=2)
else:
x = self.Conv2d_0(x)
return x
class ResnetBlockDDPMpp(nn.Module):
"""ResBlock adapted from DDPM."""
def __init__(self, act, in_ch, out_ch=None, temb_dim=None, conv_shortcut=False,
dropout=0.1, skip_rescale=False, init_scale=0.):
super().__init__()
out_ch = out_ch if out_ch else in_ch
self.GroupNorm_0 = nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)
self.Conv_0 = conv3x3(in_ch, out_ch)
if temb_dim is not None:
self.Dense_0 = nn.Linear(temb_dim, out_ch)
self.Dense_0.weight.data = default_init()(self.Dense_0.weight.data.shape)
nn.init.zeros_(self.Dense_0.bias)
self.GroupNorm_1 = nn.GroupNorm(num_groups=min(out_ch // 4, 32), num_channels=out_ch, eps=1e-6)
self.Dropout_0 = nn.Dropout(dropout)
self.Conv_1 = conv3x3(out_ch, out_ch, init_scale=init_scale)
if in_ch != out_ch:
if conv_shortcut:
self.Conv_2 = conv3x3(in_ch, out_ch)
else:
self.NIN_0 = NIN(in_ch, out_ch)
self.skip_rescale = skip_rescale
self.act = act
self.out_ch = out_ch
self.conv_shortcut = conv_shortcut
def forward(self, x, temb=None):
h = self.act(self.GroupNorm_0(x))
h = self.Conv_0(h)
if temb is not None:
h += self.Dense_0(self.act(temb))[:, :, None, None]
h = self.act(self.GroupNorm_1(h))
h = self.Dropout_0(h)
h = self.Conv_1(h)
if x.shape[1] != self.out_ch:
if self.conv_shortcut:
x = self.Conv_2(x)
else:
x = self.NIN_0(x)
if not self.skip_rescale:
return x + h
else:
return (x + h) / np.sqrt(2.)
class ResnetBlockBigGANpp(nn.Module):
def __init__(self, act, in_ch, out_ch=None, temb_dim=None, up=False, down=False,
dropout=0.1, fir=False, fir_kernel=(1, 3, 3, 1),
skip_rescale=True, init_scale=0.):
super().__init__()
out_ch = out_ch if out_ch else in_ch
self.GroupNorm_0 = nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)
self.up = up
self.down = down
self.fir = fir
self.fir_kernel = fir_kernel
self.Conv_0 = conv3x3(in_ch, out_ch)
if temb_dim is not None:
self.Dense_0 = nn.Linear(temb_dim, out_ch)
self.Dense_0.weight.data = default_init()(self.Dense_0.weight.shape)
nn.init.zeros_(self.Dense_0.bias)
self.GroupNorm_1 = nn.GroupNorm(num_groups=min(out_ch // 4, 32), num_channels=out_ch, eps=1e-6)
self.Dropout_0 = nn.Dropout(dropout)
self.Conv_1 = conv3x3(out_ch, out_ch, init_scale=init_scale)
if in_ch != out_ch or up or down:
self.Conv_2 = conv1x1(in_ch, out_ch)
self.skip_rescale = skip_rescale
self.act = act
self.in_ch = in_ch
self.out_ch = out_ch
def forward(self, x, temb=None):
h = self.act(self.GroupNorm_0(x))
if self.up:
if self.fir:
h = up_or_down_sampling.upsample_2d(h, self.fir_kernel, factor=2)
x = up_or_down_sampling.upsample_2d(x, self.fir_kernel, factor=2)
else:
h = up_or_down_sampling.naive_upsample_2d(h, factor=2)
x = up_or_down_sampling.naive_upsample_2d(x, factor=2)
elif self.down:
if self.fir:
h = up_or_down_sampling.downsample_2d(h, self.fir_kernel, factor=2)
x = up_or_down_sampling.downsample_2d(x, self.fir_kernel, factor=2)
else:
h = up_or_down_sampling.naive_downsample_2d(h, factor=2)
x = up_or_down_sampling.naive_downsample_2d(x, factor=2)
h = self.Conv_0(h)
# Add bias to each feature map conditioned on the time embedding
if temb is not None:
h += self.Dense_0(self.act(temb))[:, :, None, None]
h = self.act(self.GroupNorm_1(h))
h = self.Dropout_0(h)
h = self.Conv_1(h)
if self.in_ch != self.out_ch or self.up or self.down:
x = self.Conv_2(x)
if not self.skip_rescale:
return x + h
else:
return (x + h) / np.sqrt(2.)
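# Example (minimal sketch): a BigGAN-style block that downsamples while
# changing the channel count, conditioned on a time embedding.
#
#   block = ResnetBlockBigGANpp(act=nn.SiLU(), in_ch=64, out_ch=128,
#                               temb_dim=256, down=True)
#   x = torch.randn(2, 64, 16, 16)
#   temb = torch.randn(2, 256)
#   y = block(x, temb)             # -> shape (2, 128, 8, 8)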
|
# ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/yang-song/score_sde_pytorch/blob/main/models/ema.py
#
# The license for the original version of this file can be
# found in the `score_sde` directory (LICENSE_SCORE_SDE).
# ---------------------------------------------------------------
# Modified from https://raw.githubusercontent.com/fadel/pytorch_ema/master/torch_ema/ema.py
from __future__ import division
from __future__ import unicode_literals
import torch
# Partially based on: https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/python/training/moving_averages.py
class ExponentialMovingAverage:
"""
Maintains (exponential) moving average of a set of parameters.
"""
def __init__(self, parameters, decay, use_num_updates=True):
"""
Args:
parameters: Iterable of `torch.nn.Parameter`; usually the result of
`model.parameters()`.
decay: The exponential decay.
use_num_updates: Whether to use number of updates when computing
averages.
"""
if decay < 0.0 or decay > 1.0:
raise ValueError('Decay must be between 0 and 1')
self.decay = decay
self.num_updates = 0 if use_num_updates else None
self.shadow_params = [p.clone().detach()
for p in parameters if p.requires_grad]
self.collected_params = []
def update(self, parameters):
"""
Update currently maintained parameters.
Call this every time the parameters are updated, such as the result of
the `optimizer.step()` call.
Args:
parameters: Iterable of `torch.nn.Parameter`; usually the same set of
parameters used to initialize this object.
"""
decay = self.decay
if self.num_updates is not None:
self.num_updates += 1
decay = min(decay, (1 + self.num_updates) / (10 + self.num_updates))
one_minus_decay = 1.0 - decay
with torch.no_grad():
parameters = [p for p in parameters if p.requires_grad]
for s_param, param in zip(self.shadow_params, parameters):
s_param.sub_(one_minus_decay * (s_param - param))
def copy_to(self, parameters):
"""
Copy current parameters into given collection of parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored moving averages.
"""
parameters = [p for p in parameters if p.requires_grad]
for s_param, param in zip(self.shadow_params, parameters):
if param.requires_grad:
param.data.copy_(s_param.data)
def store(self, parameters):
"""
Save the current parameters for restoring later.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
temporarily stored.
"""
self.collected_params = [param.clone() for param in parameters]
def restore(self, parameters):
"""
        Restore the parameters stored with the `store` method.
        Useful for validating the model with EMA parameters without affecting
        the original optimization process. Call `store` before `copy_to`;
        after validation (or model saving), use this method to restore the
        former parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored parameters.
"""
for c_param, param in zip(self.collected_params, parameters):
param.data.copy_(c_param.data)
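    # Example workflow (minimal sketch; `model`, `loader`, `optimizer` and
    # `evaluate` are placeholders for the user's own training objects):
    #
    #   ema = ExponentialMovingAverage(model.parameters(), decay=0.999)
    #   for batch in loader:
    #       ...                                   # forward/backward
    #       optimizer.step()
    #       ema.update(model.parameters())
    #   ema.store(model.parameters())             # stash the live weights
    #   ema.copy_to(model.parameters())           # evaluate with EMA weights
    #   evaluate(model)
    #   ema.restore(model.parameters())           # back to the live weights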
def state_dict(self):
return dict(decay=self.decay, num_updates=self.num_updates,
shadow_params=self.shadow_params)
def load_state_dict(self, state_dict):
self.decay = state_dict['decay']
self.num_updates = state_dict['num_updates']
self.shadow_params = state_dict['shadow_params'] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import ncsnpp
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All functions and modules related to model definition.
"""
import torch
from score_sde import sde_lib
import numpy as np
_MODELS = {}
def register_model(cls=None, *, name=None):
"""A decorator for registering model classes."""
def _register(cls):
if name is None:
local_name = cls.__name__
else:
local_name = name
if local_name in _MODELS:
raise ValueError(f'Already registered model with name: {local_name}')
_MODELS[local_name] = cls
return cls
if cls is None:
return _register
else:
return _register(cls)
def get_model(name):
return _MODELS[name]
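# Example (minimal sketch): registering and retrieving a model by name.
#
#   @register_model(name='my_model')
#   class MyModel(torch.nn.Module):
#       ...
#
#   cls = get_model('my_model')    # -> MyModel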
def get_sigmas(config):
"""Get sigmas --- the set of noise levels for SMLD from config files.
Args:
config: A ConfigDict object parsed from the config file
Returns:
    sigmas: a numpy array of noise levels
"""
sigmas = np.exp(
np.linspace(np.log(config.model.sigma_max), np.log(config.model.sigma_min), config.model.num_scales))
return sigmas
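# Example (worked numbers): with sigma_max=50, sigma_min=0.01 and
# num_scales=3 this yields the geometric sequence [50.0, ~0.7071, 0.01],
# i.e. noise levels decreasing from sigma_max to sigma_min.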
def get_ddpm_params(config):
"""Get betas and alphas --- parameters used in the original DDPM paper."""
num_diffusion_timesteps = 1000
# parameters need to be adapted if number of time steps differs from 1000
beta_start = config.model.beta_min / config.model.num_scales
beta_end = config.model.beta_max / config.model.num_scales
betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64)
alphas = 1. - betas
alphas_cumprod = np.cumprod(alphas, axis=0)
sqrt_alphas_cumprod = np.sqrt(alphas_cumprod)
sqrt_1m_alphas_cumprod = np.sqrt(1. - alphas_cumprod)
return {
'betas': betas,
'alphas': alphas,
'alphas_cumprod': alphas_cumprod,
'sqrt_alphas_cumprod': sqrt_alphas_cumprod,
'sqrt_1m_alphas_cumprod': sqrt_1m_alphas_cumprod,
'beta_min': beta_start * (num_diffusion_timesteps - 1),
'beta_max': beta_end * (num_diffusion_timesteps - 1),
'num_diffusion_timesteps': num_diffusion_timesteps
}
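# Example (worked numbers): with config.model.beta_min=0.1,
# config.model.beta_max=20 and config.model.num_scales=1000, this gives
# beta_start=1e-4 and beta_end=0.02 -- the linear schedule of the original
# DDPM paper -- and returned 'beta_min'/'beta_max' of ~0.0999 and ~19.98.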
def create_model(config):
"""Create the score model."""
model_name = config.model.name
score_model = get_model(model_name)(config)
# score_model = score_model.to(config.device)
# score_model = torch.nn.DataParallel(score_model)
return score_model
def get_model_fn(model, train=False):
"""Create a function to give the output of the score-based model.
Args:
model: The score model.
train: `True` for training and `False` for evaluation.
Returns:
A model function.
"""
def model_fn(x, labels):
"""Compute the output of the score-based model.
Args:
x: A mini-batch of input data.
labels: A mini-batch of conditioning variables for time steps. Should be interpreted differently
for different models.
Returns:
      The model output (this PyTorch port does not return mutable states).
"""
if not train:
model.eval()
return model(x, labels)
else:
model.train()
return model(x, labels)
return model_fn
def get_score_fn(sde, model, train=False, continuous=False):
"""Wraps `score_fn` so that the model output corresponds to a real time-dependent score function.
Args:
sde: An `sde_lib.SDE` object that represents the forward SDE.
model: A score model.
train: `True` for training and `False` for evaluation.
continuous: If `True`, the score-based model is expected to directly take continuous time steps.
Returns:
A score function.
"""
model_fn = get_model_fn(model, train=train)
if isinstance(sde, sde_lib.VPSDE) or isinstance(sde, sde_lib.subVPSDE):
def score_fn(x, t):
# Scale neural network output by standard deviation and flip sign
if continuous or isinstance(sde, sde_lib.subVPSDE):
        # For VP-trained models, t=0 corresponds to the lowest noise level.
        # The maximum value of the time embedding is assumed to be 999 for
        # continuously-trained models.
labels = t * 999
score = model_fn(x, labels)
std = sde.marginal_prob(torch.zeros_like(x), t)[1]
else:
# For VP-trained models, t=0 corresponds to the lowest noise level
labels = t * (sde.N - 1)
score = model_fn(x, labels)
std = sde.sqrt_1m_alphas_cumprod.to(labels.device)[labels.long()]
score = -score / std[:, None, None, None]
return score
elif isinstance(sde, sde_lib.VESDE):
def score_fn(x, t):
if continuous:
labels = sde.marginal_prob(torch.zeros_like(x), t)[1]
else:
# For VE-trained models, t=0 corresponds to the highest noise level
labels = sde.T - t
labels *= sde.N - 1
labels = torch.round(labels).long()
score = model_fn(x, labels)
return score
else:
raise NotImplementedError(f"SDE class {sde.__class__.__name__} not yet supported.")
return score_fn
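# Example (minimal sketch; assumes `model` is a registered score network such
# as NCSN++ and `sde` an `sde_lib.VESDE` instance):
#
#   score_fn = get_score_fn(sde, model, train=False, continuous=True)
#   x = torch.randn(4, 3, 32, 32)
#   t = torch.rand(4)              # continuous times in [0, 1]
#   s = score_fn(x, t)             # -> same shape as x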
def to_flattened_numpy(x):
"""Flatten a torch tensor `x` and convert it to numpy."""
return x.detach().cpu().numpy().reshape((-1,))
def from_flattened_numpy(x, shape):
"""Form a torch tensor with the given `shape` from a flattened numpy array `x`."""
return torch.from_numpy(x.reshape(shape))
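# Example (round trip): the two helpers invert each other.
#
#   x = torch.randn(2, 3, 4)
#   y = from_flattened_numpy(to_flattened_numpy(x), x.shape)
#   torch.allclose(x, y)           # -> True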
|
# ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/yang-song/score_sde_pytorch/blob/main/models/up_or_down_sampling.py
#
# The license for the original version of this file can be
# found in the `score_sde` directory (LICENSE_SCORE_SDE).
# ---------------------------------------------------------------
"""Layers used for up-sampling or down-sampling images.
Many functions are ported from https://github.com/NVlabs/stylegan2.
"""
import torch.nn as nn
import torch
import torch.nn.functional as F
import numpy as np
from ..op import upfirdn2d
# Function ported from StyleGAN2
def get_weight(module,
shape,
weight_var='weight',
kernel_init=None):
"""Get/create weight tensor for a convolution or fully-connected layer."""
return module.param(weight_var, kernel_init, shape)
class Conv2d(nn.Module):
"""Conv2d layer with optimal upsampling and downsampling (StyleGAN2)."""
def __init__(self, in_ch, out_ch, kernel, up=False, down=False,
resample_kernel=(1, 3, 3, 1),
use_bias=True,
kernel_init=None):
super().__init__()
assert not (up and down)
assert kernel >= 1 and kernel % 2 == 1
self.weight = nn.Parameter(torch.zeros(out_ch, in_ch, kernel, kernel))
if kernel_init is not None:
self.weight.data = kernel_init(self.weight.data.shape)
if use_bias:
self.bias = nn.Parameter(torch.zeros(out_ch))
self.up = up
self.down = down
self.resample_kernel = resample_kernel
self.kernel = kernel
self.use_bias = use_bias
def forward(self, x):
if self.up:
x = upsample_conv_2d(x, self.weight, k=self.resample_kernel)
elif self.down:
x = conv_downsample_2d(x, self.weight, k=self.resample_kernel)
else:
x = F.conv2d(x, self.weight, stride=1, padding=self.kernel // 2)
if self.use_bias:
x = x + self.bias.reshape(1, -1, 1, 1)
return x
def naive_upsample_2d(x, factor=2):
_N, C, H, W = x.shape
x = torch.reshape(x, (-1, C, H, 1, W, 1))
x = x.repeat(1, 1, 1, factor, 1, factor)
return torch.reshape(x, (-1, C, H * factor, W * factor))
def naive_downsample_2d(x, factor=2):
_N, C, H, W = x.shape
x = torch.reshape(x, (-1, C, H // factor, factor, W // factor, factor))
return torch.mean(x, dim=(3, 5))
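# Example (round trip): naive 2x upsampling replicates each pixel and naive
# 2x downsampling averages each 2x2 block, so they invert each other:
#
#   x = torch.randn(2, 3, 8, 8)
#   y = naive_downsample_2d(naive_upsample_2d(x, factor=2), factor=2)
#   torch.allclose(x, y)           # -> True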
def upsample_conv_2d(x, w, k=None, factor=2, gain=1):
"""Fused `upsample_2d()` followed by `tf.nn.conv2d()`.
Padding is performed only once at the beginning, not between the
operations.
The fused op is considerably more efficient than performing the same
calculation
using standard TensorFlow ops. It supports gradients of arbitrary order.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
C]`.
w: Weight tensor of the shape `[filterH, filterW, inChannels,
outChannels]`. Grouped convolution can be performed by `inChannels =
x.shape[0] // numGroups`.
k: FIR filter of the shape `[firH, firW]` or `[firN]`
(separable). The default is `[1] * factor`, which corresponds to
nearest-neighbor upsampling.
factor: Integer upsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
Returns:
Tensor of the shape `[N, C, H * factor, W * factor]` or
`[N, H * factor, W * factor, C]`, and same datatype as `x`.
"""
assert isinstance(factor, int) and factor >= 1
# Check weight shape.
assert len(w.shape) == 4
convH = w.shape[2]
convW = w.shape[3]
inC = w.shape[1]
outC = w.shape[0]
assert convW == convH
# Setup filter kernel.
if k is None:
k = [1] * factor
k = _setup_kernel(k) * (gain * (factor ** 2))
p = (k.shape[0] - factor) - (convW - 1)
  # Spatial stride for conv_transpose2d (NCHW); also used to compute the
  # output padding below, so it must equal the upsampling factor.
  stride = (factor, factor)
output_shape = ((_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW)
output_padding = (output_shape[0] - (_shape(x, 2) - 1) * stride[0] - convH,
output_shape[1] - (_shape(x, 3) - 1) * stride[1] - convW)
assert output_padding[0] >= 0 and output_padding[1] >= 0
num_groups = _shape(x, 1) // inC
# Transpose weights.
w = torch.reshape(w, (num_groups, -1, inC, convH, convW))
  # PyTorch does not support negative-step slicing; flip the taps explicitly.
  w = torch.flip(w, dims=[3, 4]).permute(0, 2, 1, 3, 4)
w = torch.reshape(w, (num_groups * inC, -1, convH, convW))
x = F.conv_transpose2d(x, w, stride=stride, output_padding=output_padding, padding=0)
## Original TF code.
# x = tf.nn.conv2d_transpose(
# x,
# w,
# output_shape=output_shape,
# strides=stride,
# padding='VALID',
# data_format=data_format)
  # Finally, apply the FIR filter via `upfirdn2d`.
return upfirdn2d(x, torch.tensor(k, device=x.device),
pad=((p + 1) // 2 + factor - 1, p // 2 + 1))
def conv_downsample_2d(x, w, k=None, factor=2, gain=1):
"""Fused `tf.nn.conv2d()` followed by `downsample_2d()`.
Padding is performed only once at the beginning, not between the operations.
The fused op is considerably more efficient than performing the same
calculation
using standard TensorFlow ops. It supports gradients of arbitrary order.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
C]`.
w: Weight tensor of the shape `[filterH, filterW, inChannels,
outChannels]`. Grouped convolution can be performed by `inChannels =
x.shape[0] // numGroups`.
k: FIR filter of the shape `[firH, firW]` or `[firN]`
(separable). The default is `[1] * factor`, which corresponds to
average pooling.
factor: Integer downsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
Returns:
Tensor of the shape `[N, C, H // factor, W // factor]` or
`[N, H // factor, W // factor, C]`, and same datatype as `x`.
"""
assert isinstance(factor, int) and factor >= 1
_outC, _inC, convH, convW = w.shape
assert convW == convH
if k is None:
k = [1] * factor
k = _setup_kernel(k) * gain
p = (k.shape[0] - factor) + (convW - 1)
s = [factor, factor]
x = upfirdn2d(x, torch.tensor(k, device=x.device),
pad=((p + 1) // 2, p // 2))
return F.conv2d(x, w, stride=s, padding=0)
def _setup_kernel(k):
k = np.asarray(k, dtype=np.float32)
if k.ndim == 1:
k = np.outer(k, k)
k /= np.sum(k)
assert k.ndim == 2
assert k.shape[0] == k.shape[1]
return k
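# Example: _setup_kernel([1, 3, 3, 1]) returns the 4x4 outer product of the
# separable filter, normalized to sum to 1 (the classic binomial FIR kernel).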
def _shape(x, dim):
return x.shape[dim]
def upsample_2d(x, k=None, factor=2, gain=1):
r"""Upsample a batch of 2D images with the given filter.
Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
and upsamples each image with the given filter. The filter is normalized so
that
if the input pixels are constant, they will be scaled by the specified
`gain`.
Pixels outside the image are assumed to be zero, and the filter is padded
with
zeros so that its shape is a multiple of the upsampling factor.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
C]`.
k: FIR filter of the shape `[firH, firW]` or `[firN]`
(separable). The default is `[1] * factor`, which corresponds to
nearest-neighbor upsampling.
factor: Integer upsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
Returns:
Tensor of the shape `[N, C, H * factor, W * factor]`
"""
assert isinstance(factor, int) and factor >= 1
if k is None:
k = [1] * factor
k = _setup_kernel(k) * (gain * (factor ** 2))
p = k.shape[0] - factor
return upfirdn2d(x, torch.tensor(k, device=x.device),
up=factor, pad=((p + 1) // 2 + factor - 1, p // 2))
def downsample_2d(x, k=None, factor=2, gain=1):
r"""Downsample a batch of 2D images with the given filter.
Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
and downsamples each image with the given filter. The filter is normalized
so that
if the input pixels are constant, they will be scaled by the specified
`gain`.
Pixels outside the image are assumed to be zero, and the filter is padded
with
zeros so that its shape is a multiple of the downsampling factor.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
C]`.
k: FIR filter of the shape `[firH, firW]` or `[firN]`
(separable). The default is `[1] * factor`, which corresponds to
average pooling.
factor: Integer downsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
Returns:
Tensor of the shape `[N, C, H // factor, W // factor]`
"""
assert isinstance(factor, int) and factor >= 1
if k is None:
k = [1] * factor
k = _setup_kernel(k) * gain
p = k.shape[0] - factor
return upfirdn2d(x, torch.tensor(k, device=x.device),
down=factor, pad=((p + 1) // 2, p // 2))
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""Common layers for defining score networks.
"""
import math
import string
from functools import partial
import torch.nn as nn
import torch
import torch.nn.functional as F
import numpy as np
from .normalization import ConditionalInstanceNorm2dPlus
def get_act(config):
"""Get activation functions from the config file."""
if config.model.nonlinearity.lower() == 'elu':
return nn.ELU()
elif config.model.nonlinearity.lower() == 'relu':
return nn.ReLU()
elif config.model.nonlinearity.lower() == 'lrelu':
return nn.LeakyReLU(negative_slope=0.2)
elif config.model.nonlinearity.lower() == 'swish':
return nn.SiLU()
else:
raise NotImplementedError('activation function does not exist!')
def ncsn_conv1x1(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1., padding=0):
"""1x1 convolution. Same as NCSNv1/v2."""
conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=bias, dilation=dilation,
padding=padding)
init_scale = 1e-10 if init_scale == 0 else init_scale
conv.weight.data *= init_scale
  if conv.bias is not None:  # bias may be disabled
    conv.bias.data *= init_scale
return conv
def variance_scaling(scale, mode, distribution,
in_axis=1, out_axis=0,
dtype=torch.float32,
device='cpu'):
"""Ported from JAX. """
def _compute_fans(shape, in_axis=1, out_axis=0):
receptive_field_size = np.prod(shape) / shape[in_axis] / shape[out_axis]
fan_in = shape[in_axis] * receptive_field_size
fan_out = shape[out_axis] * receptive_field_size
return fan_in, fan_out
def init(shape, dtype=dtype, device=device):
fan_in, fan_out = _compute_fans(shape, in_axis, out_axis)
if mode == "fan_in":
denominator = fan_in
elif mode == "fan_out":
denominator = fan_out
elif mode == "fan_avg":
denominator = (fan_in + fan_out) / 2
else:
raise ValueError(
"invalid mode for variance scaling initializer: {}".format(mode))
variance = scale / denominator
if distribution == "normal":
return torch.randn(*shape, dtype=dtype, device=device) * np.sqrt(variance)
elif distribution == "uniform":
return (torch.rand(*shape, dtype=dtype, device=device) * 2. - 1.) * np.sqrt(3 * variance)
else:
raise ValueError("invalid distribution for variance scaling initializer")
return init
def default_init(scale=1.):
"""The same initialization used in DDPM."""
scale = 1e-10 if scale == 0 else scale
return variance_scaling(scale, 'fan_avg', 'uniform')
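# Example (minimal sketch): sample a conv weight the way DDPM initializes it.
#
#   init = default_init(scale=1.0)           # fan_avg / uniform
#   w = init((64, 32, 3, 3))                 # -> tensor of shape (64, 32, 3, 3)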
class Dense(nn.Module):
"""Linear layer with `default_init`."""
def __init__(self):
super().__init__()
def ddpm_conv1x1(in_planes, out_planes, stride=1, bias=True, init_scale=1., padding=0):
"""1x1 convolution with DDPM initialization."""
conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=padding, bias=bias)
conv.weight.data = default_init(init_scale)(conv.weight.data.shape)
nn.init.zeros_(conv.bias)
return conv
def ncsn_conv3x3(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1., padding=1):
"""3x3 convolution with PyTorch initialization. Same as NCSNv1/NCSNv2."""
init_scale = 1e-10 if init_scale == 0 else init_scale
conv = nn.Conv2d(in_planes, out_planes, stride=stride, bias=bias,
dilation=dilation, padding=padding, kernel_size=3)
conv.weight.data *= init_scale
  if conv.bias is not None:  # bias=False is used by the CRP/RCU blocks below
    conv.bias.data *= init_scale
return conv
def ddpm_conv3x3(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1., padding=1):
"""3x3 convolution with DDPM initialization."""
conv = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=padding,
dilation=dilation, bias=bias)
conv.weight.data = default_init(init_scale)(conv.weight.data.shape)
nn.init.zeros_(conv.bias)
return conv
###########################################################################
# Functions below are ported over from the NCSNv1/NCSNv2 codebase:
# https://github.com/ermongroup/ncsn
# https://github.com/ermongroup/ncsnv2
###########################################################################
class CRPBlock(nn.Module):
def __init__(self, features, n_stages, act=nn.ReLU(), maxpool=True):
super().__init__()
self.convs = nn.ModuleList()
for i in range(n_stages):
self.convs.append(ncsn_conv3x3(features, features, stride=1, bias=False))
self.n_stages = n_stages
if maxpool:
self.pool = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)
else:
self.pool = nn.AvgPool2d(kernel_size=5, stride=1, padding=2)
self.act = act
def forward(self, x):
x = self.act(x)
path = x
for i in range(self.n_stages):
path = self.pool(path)
path = self.convs[i](path)
x = path + x
return x
class CondCRPBlock(nn.Module):
def __init__(self, features, n_stages, num_classes, normalizer, act=nn.ReLU()):
super().__init__()
self.convs = nn.ModuleList()
self.norms = nn.ModuleList()
self.normalizer = normalizer
for i in range(n_stages):
self.norms.append(normalizer(features, num_classes, bias=True))
self.convs.append(ncsn_conv3x3(features, features, stride=1, bias=False))
self.n_stages = n_stages
self.pool = nn.AvgPool2d(kernel_size=5, stride=1, padding=2)
self.act = act
def forward(self, x, y):
x = self.act(x)
path = x
for i in range(self.n_stages):
path = self.norms[i](path, y)
path = self.pool(path)
path = self.convs[i](path)
x = path + x
return x
class RCUBlock(nn.Module):
def __init__(self, features, n_blocks, n_stages, act=nn.ReLU()):
super().__init__()
for i in range(n_blocks):
for j in range(n_stages):
setattr(self, '{}_{}_conv'.format(i + 1, j + 1), ncsn_conv3x3(features, features, stride=1, bias=False))
self.stride = 1
self.n_blocks = n_blocks
self.n_stages = n_stages
self.act = act
def forward(self, x):
for i in range(self.n_blocks):
residual = x
for j in range(self.n_stages):
x = self.act(x)
x = getattr(self, '{}_{}_conv'.format(i + 1, j + 1))(x)
x += residual
return x
class CondRCUBlock(nn.Module):
def __init__(self, features, n_blocks, n_stages, num_classes, normalizer, act=nn.ReLU()):
super().__init__()
for i in range(n_blocks):
for j in range(n_stages):
setattr(self, '{}_{}_norm'.format(i + 1, j + 1), normalizer(features, num_classes, bias=True))
setattr(self, '{}_{}_conv'.format(i + 1, j + 1), ncsn_conv3x3(features, features, stride=1, bias=False))
self.stride = 1
self.n_blocks = n_blocks
self.n_stages = n_stages
self.act = act
self.normalizer = normalizer
def forward(self, x, y):
for i in range(self.n_blocks):
residual = x
for j in range(self.n_stages):
x = getattr(self, '{}_{}_norm'.format(i + 1, j + 1))(x, y)
x = self.act(x)
x = getattr(self, '{}_{}_conv'.format(i + 1, j + 1))(x)
x += residual
return x
class MSFBlock(nn.Module):
def __init__(self, in_planes, features):
super().__init__()
assert isinstance(in_planes, list) or isinstance(in_planes, tuple)
self.convs = nn.ModuleList()
self.features = features
for i in range(len(in_planes)):
self.convs.append(ncsn_conv3x3(in_planes[i], features, stride=1, bias=True))
def forward(self, xs, shape):
sums = torch.zeros(xs[0].shape[0], self.features, *shape, device=xs[0].device)
for i in range(len(self.convs)):
h = self.convs[i](xs[i])
h = F.interpolate(h, size=shape, mode='bilinear', align_corners=True)
sums += h
return sums
class CondMSFBlock(nn.Module):
def __init__(self, in_planes, features, num_classes, normalizer):
super().__init__()
assert isinstance(in_planes, list) or isinstance(in_planes, tuple)
self.convs = nn.ModuleList()
self.norms = nn.ModuleList()
self.features = features
self.normalizer = normalizer
for i in range(len(in_planes)):
self.convs.append(ncsn_conv3x3(in_planes[i], features, stride=1, bias=True))
self.norms.append(normalizer(in_planes[i], num_classes, bias=True))
def forward(self, xs, y, shape):
sums = torch.zeros(xs[0].shape[0], self.features, *shape, device=xs[0].device)
for i in range(len(self.convs)):
h = self.norms[i](xs[i], y)
h = self.convs[i](h)
h = F.interpolate(h, size=shape, mode='bilinear', align_corners=True)
sums += h
return sums
class RefineBlock(nn.Module):
def __init__(self, in_planes, features, act=nn.ReLU(), start=False, end=False, maxpool=True):
super().__init__()
assert isinstance(in_planes, tuple) or isinstance(in_planes, list)
self.n_blocks = n_blocks = len(in_planes)
self.adapt_convs = nn.ModuleList()
for i in range(n_blocks):
self.adapt_convs.append(RCUBlock(in_planes[i], 2, 2, act))
self.output_convs = RCUBlock(features, 3 if end else 1, 2, act)
if not start:
self.msf = MSFBlock(in_planes, features)
self.crp = CRPBlock(features, 2, act, maxpool=maxpool)
def forward(self, xs, output_shape):
assert isinstance(xs, tuple) or isinstance(xs, list)
hs = []
for i in range(len(xs)):
h = self.adapt_convs[i](xs[i])
hs.append(h)
if self.n_blocks > 1:
h = self.msf(hs, output_shape)
else:
h = hs[0]
h = self.crp(h)
h = self.output_convs(h)
return h
class CondRefineBlock(nn.Module):
def __init__(self, in_planes, features, num_classes, normalizer, act=nn.ReLU(), start=False, end=False):
super().__init__()
assert isinstance(in_planes, tuple) or isinstance(in_planes, list)
self.n_blocks = n_blocks = len(in_planes)
self.adapt_convs = nn.ModuleList()
for i in range(n_blocks):
self.adapt_convs.append(
CondRCUBlock(in_planes[i], 2, 2, num_classes, normalizer, act)
)
self.output_convs = CondRCUBlock(features, 3 if end else 1, 2, num_classes, normalizer, act)
if not start:
self.msf = CondMSFBlock(in_planes, features, num_classes, normalizer)
self.crp = CondCRPBlock(features, 2, num_classes, normalizer, act)
def forward(self, xs, y, output_shape):
assert isinstance(xs, tuple) or isinstance(xs, list)
hs = []
for i in range(len(xs)):
h = self.adapt_convs[i](xs[i], y)
hs.append(h)
if self.n_blocks > 1:
h = self.msf(hs, y, output_shape)
else:
h = hs[0]
h = self.crp(h, y)
h = self.output_convs(h, y)
return h
class ConvMeanPool(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True, adjust_padding=False):
super().__init__()
if not adjust_padding:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=kernel_size // 2, bias=biases)
self.conv = conv
else:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=kernel_size // 2, bias=biases)
self.conv = nn.Sequential(
nn.ZeroPad2d((1, 0, 1, 0)),
conv
)
def forward(self, inputs):
output = self.conv(inputs)
output = sum([output[:, :, ::2, ::2], output[:, :, 1::2, ::2],
output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4.
return output
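# Example (minimal sketch): convolution at full resolution followed by 2x2
# mean pooling (the four strided slices above average each 2x2 block).
#
#   cmp = ConvMeanPool(16, 32)
#   x = torch.randn(2, 16, 8, 8)
#   y = cmp(x)                     # -> shape (2, 32, 4, 4)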
class MeanPoolConv(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True):
super().__init__()
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=kernel_size // 2, bias=biases)
def forward(self, inputs):
output = inputs
output = sum([output[:, :, ::2, ::2], output[:, :, 1::2, ::2],
output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4.
return self.conv(output)
class UpsampleConv(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True):
super().__init__()
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=kernel_size // 2, bias=biases)
self.pixelshuffle = nn.PixelShuffle(upscale_factor=2)
def forward(self, inputs):
output = inputs
output = torch.cat([output, output, output, output], dim=1)
output = self.pixelshuffle(output)
return self.conv(output)
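# Note: concatenating four copies and applying PixelShuffle(2) turns
# (B, 4C, H, W) into (B, C, 2H, 2W), so the convolution runs at the doubled
# resolution while the channel count is preserved.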
class ConditionalResidualBlock(nn.Module):
  def __init__(self, input_dim, output_dim, num_classes, resample=None, act=nn.ELU(),
               normalization=ConditionalInstanceNorm2dPlus, adjust_padding=False, dilation=1):
super().__init__()
self.non_linearity = act
self.input_dim = input_dim
self.output_dim = output_dim
self.resample = resample
self.normalization = normalization
if resample == 'down':
if dilation > 1:
self.conv1 = ncsn_conv3x3(input_dim, input_dim, dilation=dilation)
self.normalize2 = normalization(input_dim, num_classes)
self.conv2 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation)
conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
else:
self.conv1 = ncsn_conv3x3(input_dim, input_dim)
self.normalize2 = normalization(input_dim, num_classes)
self.conv2 = ConvMeanPool(input_dim, output_dim, 3, adjust_padding=adjust_padding)
conv_shortcut = partial(ConvMeanPool, kernel_size=1, adjust_padding=adjust_padding)
elif resample is None:
if dilation > 1:
conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
self.conv1 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation)
self.normalize2 = normalization(output_dim, num_classes)
self.conv2 = ncsn_conv3x3(output_dim, output_dim, dilation=dilation)
else:
        conv_shortcut = partial(ncsn_conv1x1)  # 1x1 shortcut; a bare nn.Conv2d lacks kernel_size
self.conv1 = ncsn_conv3x3(input_dim, output_dim)
self.normalize2 = normalization(output_dim, num_classes)
self.conv2 = ncsn_conv3x3(output_dim, output_dim)
else:
raise Exception('invalid resample value')
if output_dim != input_dim or resample is not None:
self.shortcut = conv_shortcut(input_dim, output_dim)
self.normalize1 = normalization(input_dim, num_classes)
def forward(self, x, y):
output = self.normalize1(x, y)
output = self.non_linearity(output)
output = self.conv1(output)
output = self.normalize2(output, y)
output = self.non_linearity(output)
output = self.conv2(output)
if self.output_dim == self.input_dim and self.resample is None:
shortcut = x
else:
shortcut = self.shortcut(x)
return shortcut + output
class ResidualBlock(nn.Module):
def __init__(self, input_dim, output_dim, resample=None, act=nn.ELU(),
normalization=nn.InstanceNorm2d, adjust_padding=False, dilation=1):
super().__init__()
self.non_linearity = act
self.input_dim = input_dim
self.output_dim = output_dim
self.resample = resample
self.normalization = normalization
if resample == 'down':
if dilation > 1:
self.conv1 = ncsn_conv3x3(input_dim, input_dim, dilation=dilation)
self.normalize2 = normalization(input_dim)
self.conv2 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation)
conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
else:
self.conv1 = ncsn_conv3x3(input_dim, input_dim)
self.normalize2 = normalization(input_dim)
self.conv2 = ConvMeanPool(input_dim, output_dim, 3, adjust_padding=adjust_padding)
conv_shortcut = partial(ConvMeanPool, kernel_size=1, adjust_padding=adjust_padding)
elif resample is None:
if dilation > 1:
conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
self.conv1 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation)
self.normalize2 = normalization(output_dim)
self.conv2 = ncsn_conv3x3(output_dim, output_dim, dilation=dilation)
else:
        # A bare nn.Conv2d would require a kernel_size argument here, so use
        # a 1x1 convolution for the shortcut instead.
conv_shortcut = partial(ncsn_conv1x1)
self.conv1 = ncsn_conv3x3(input_dim, output_dim)
self.normalize2 = normalization(output_dim)
self.conv2 = ncsn_conv3x3(output_dim, output_dim)
else:
raise Exception('invalid resample value')
if output_dim != input_dim or resample is not None:
self.shortcut = conv_shortcut(input_dim, output_dim)
self.normalize1 = normalization(input_dim)
def forward(self, x):
output = self.normalize1(x)
output = self.non_linearity(output)
output = self.conv1(output)
output = self.normalize2(output)
output = self.non_linearity(output)
output = self.conv2(output)
if self.output_dim == self.input_dim and self.resample is None:
shortcut = x
else:
shortcut = self.shortcut(x)
return shortcut + output
###########################################################################
# Functions below are ported over from the DDPM codebase:
# https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/nn.py
###########################################################################
def get_timestep_embedding(timesteps, embedding_dim, max_positions=10000):
assert len(timesteps.shape) == 1 # and timesteps.dtype == tf.int32
half_dim = embedding_dim // 2
# magic number 10000 is from transformers
emb = math.log(max_positions) / (half_dim - 1)
# emb = math.log(2.) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32, device=timesteps.device) * -emb)
# emb = tf.range(num_embeddings, dtype=jnp.float32)[:, None] * emb[None, :]
# emb = tf.cast(timesteps, dtype=jnp.float32)[:, None] * emb[None, :]
emb = timesteps.float()[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = F.pad(emb, (0, 1), mode='constant')
assert emb.shape == (timesteps.shape[0], embedding_dim)
return emb
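# Example (minimal sketch): sinusoidal embeddings as in "Attention Is All
# You Need" / DDPM.
#
#   temb = get_timestep_embedding(torch.arange(4), embedding_dim=8)
#   temb.shape                     # -> torch.Size([4, 8])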
def _einsum(a, b, c, x, y):
einsum_str = '{},{}->{}'.format(''.join(a), ''.join(b), ''.join(c))
return torch.einsum(einsum_str, x, y)
def contract_inner(x, y):
"""tensordot(x, y, 1)."""
x_chars = list(string.ascii_lowercase[:len(x.shape)])
y_chars = list(string.ascii_lowercase[len(x.shape):len(y.shape) + len(x.shape)])
y_chars[0] = x_chars[-1] # first axis of y and last of x get summed
out_chars = x_chars[:-1] + y_chars[1:]
return _einsum(x_chars, y_chars, out_chars, x, y)
class NIN(nn.Module):
def __init__(self, in_dim, num_units, init_scale=0.1):
super().__init__()
self.W = nn.Parameter(default_init(scale=init_scale)((in_dim, num_units)), requires_grad=True)
self.b = nn.Parameter(torch.zeros(num_units), requires_grad=True)
def forward(self, x):
x = x.permute(0, 2, 3, 1)
y = contract_inner(x, self.W) + self.b
return y.permute(0, 3, 1, 2)
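# Example (minimal sketch): NIN acts as a learned 1x1 convolution, i.e. a
# per-pixel linear layer over channels.
#
#   nin = NIN(in_dim=64, num_units=32)
#   x = torch.randn(2, 64, 8, 8)
#   y = nin(x)                     # -> shape (2, 32, 8, 8)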
class AttnBlock(nn.Module):
"""Channel-wise self-attention block."""
def __init__(self, channels):
super().__init__()
self.GroupNorm_0 = nn.GroupNorm(num_groups=32, num_channels=channels, eps=1e-6)
self.NIN_0 = NIN(channels, channels)
self.NIN_1 = NIN(channels, channels)
self.NIN_2 = NIN(channels, channels)
self.NIN_3 = NIN(channels, channels, init_scale=0.)
def forward(self, x):
B, C, H, W = x.shape
h = self.GroupNorm_0(x)
q = self.NIN_0(h)
k = self.NIN_1(h)
v = self.NIN_2(h)
w = torch.einsum('bchw,bcij->bhwij', q, k) * (int(C) ** (-0.5))
w = torch.reshape(w, (B, H, W, H * W))
w = F.softmax(w, dim=-1)
w = torch.reshape(w, (B, H, W, H, W))
h = torch.einsum('bhwij,bcij->bchw', w, v)
h = self.NIN_3(h)
return x + h
class Upsample(nn.Module):
def __init__(self, channels, with_conv=False):
super().__init__()
if with_conv:
self.Conv_0 = ddpm_conv3x3(channels, channels)
self.with_conv = with_conv
def forward(self, x):
B, C, H, W = x.shape
h = F.interpolate(x, (H * 2, W * 2), mode='nearest')
if self.with_conv:
h = self.Conv_0(h)
return h
class Downsample(nn.Module):
def __init__(self, channels, with_conv=False):
super().__init__()
if with_conv:
self.Conv_0 = ddpm_conv3x3(channels, channels, stride=2, padding=0)
self.with_conv = with_conv
def forward(self, x):
B, C, H, W = x.shape
# Emulate 'SAME' padding
if self.with_conv:
x = F.pad(x, (0, 1, 0, 1))
x = self.Conv_0(x)
else:
x = F.avg_pool2d(x, kernel_size=2, stride=2, padding=0)
assert x.shape == (B, C, H // 2, W // 2)
return x
class ResnetBlockDDPM(nn.Module):
"""The ResNet Blocks used in DDPM."""
def __init__(self, act, in_ch, out_ch=None, temb_dim=None, conv_shortcut=False, dropout=0.1):
super().__init__()
if out_ch is None:
out_ch = in_ch
self.GroupNorm_0 = nn.GroupNorm(num_groups=32, num_channels=in_ch, eps=1e-6)
self.act = act
self.Conv_0 = ddpm_conv3x3(in_ch, out_ch)
if temb_dim is not None:
self.Dense_0 = nn.Linear(temb_dim, out_ch)
self.Dense_0.weight.data = default_init()(self.Dense_0.weight.data.shape)
nn.init.zeros_(self.Dense_0.bias)
self.GroupNorm_1 = nn.GroupNorm(num_groups=32, num_channels=out_ch, eps=1e-6)
self.Dropout_0 = nn.Dropout(dropout)
self.Conv_1 = ddpm_conv3x3(out_ch, out_ch, init_scale=0.)
if in_ch != out_ch:
if conv_shortcut:
self.Conv_2 = ddpm_conv3x3(in_ch, out_ch)
else:
self.NIN_0 = NIN(in_ch, out_ch)
self.out_ch = out_ch
self.in_ch = in_ch
self.conv_shortcut = conv_shortcut
def forward(self, x, temb=None):
B, C, H, W = x.shape
assert C == self.in_ch
out_ch = self.out_ch if self.out_ch else self.in_ch
h = self.act(self.GroupNorm_0(x))
h = self.Conv_0(h)
# Add bias to each feature map conditioned on the time embedding
if temb is not None:
h += self.Dense_0(self.act(temb))[:, :, None, None]
h = self.act(self.GroupNorm_1(h))
h = self.Dropout_0(h)
h = self.Conv_1(h)
if C != out_ch:
if self.conv_shortcut:
x = self.Conv_2(x)
else:
x = self.NIN_0(x)
return x + h |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Normalization layers."""
import torch.nn as nn
import torch
import functools
def get_normalization(config, conditional=False):
"""Obtain normalization modules from the config file."""
norm = config.model.normalization
if conditional:
if norm == 'InstanceNorm++':
return functools.partial(ConditionalInstanceNorm2dPlus, num_classes=config.model.num_classes)
else:
raise NotImplementedError(f'{norm} not implemented yet.')
else:
if norm == 'InstanceNorm':
return nn.InstanceNorm2d
elif norm == 'InstanceNorm++':
return InstanceNorm2dPlus
elif norm == 'VarianceNorm':
return VarianceNorm2d
elif norm == 'GroupNorm':
return nn.GroupNorm
else:
raise ValueError('Unknown normalization: %s' % norm)
class ConditionalBatchNorm2d(nn.Module):
def __init__(self, num_features, num_classes, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.bn = nn.BatchNorm2d(num_features, affine=False)
if self.bias:
self.embed = nn.Embedding(num_classes, num_features * 2)
      self.embed.weight.data[:, :num_features].uniform_()  # Initialise scale uniformly in [0, 1)
      self.embed.weight.data[:, num_features:].zero_()  # Initialise bias at 0
else:
self.embed = nn.Embedding(num_classes, num_features)
self.embed.weight.data.uniform_()
def forward(self, x, y):
out = self.bn(x)
if self.bias:
gamma, beta = self.embed(y).chunk(2, dim=1)
out = gamma.view(-1, self.num_features, 1, 1) * out + beta.view(-1, self.num_features, 1, 1)
else:
gamma = self.embed(y)
out = gamma.view(-1, self.num_features, 1, 1) * out
return out
class ConditionalInstanceNorm2d(nn.Module):
def __init__(self, num_features, num_classes, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
if bias:
self.embed = nn.Embedding(num_classes, num_features * 2)
      self.embed.weight.data[:, :num_features].uniform_()  # Initialise scale uniformly in [0, 1)
      self.embed.weight.data[:, num_features:].zero_()  # Initialise bias at 0
else:
self.embed = nn.Embedding(num_classes, num_features)
self.embed.weight.data.uniform_()
def forward(self, x, y):
h = self.instance_norm(x)
if self.bias:
gamma, beta = self.embed(y).chunk(2, dim=-1)
out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
else:
gamma = self.embed(y)
out = gamma.view(-1, self.num_features, 1, 1) * h
return out
class ConditionalVarianceNorm2d(nn.Module):
def __init__(self, num_features, num_classes, bias=False):
super().__init__()
self.num_features = num_features
self.bias = bias
self.embed = nn.Embedding(num_classes, num_features)
self.embed.weight.data.normal_(1, 0.02)
def forward(self, x, y):
vars = torch.var(x, dim=(2, 3), keepdim=True)
h = x / torch.sqrt(vars + 1e-5)
gamma = self.embed(y)
out = gamma.view(-1, self.num_features, 1, 1) * h
return out
class VarianceNorm2d(nn.Module):
def __init__(self, num_features, bias=False):
super().__init__()
self.num_features = num_features
self.bias = bias
self.alpha = nn.Parameter(torch.zeros(num_features))
self.alpha.data.normal_(1, 0.02)
def forward(self, x):
vars = torch.var(x, dim=(2, 3), keepdim=True)
h = x / torch.sqrt(vars + 1e-5)
out = self.alpha.view(-1, self.num_features, 1, 1) * h
return out
class ConditionalNoneNorm2d(nn.Module):
def __init__(self, num_features, num_classes, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
if bias:
self.embed = nn.Embedding(num_classes, num_features * 2)
      self.embed.weight.data[:, :num_features].uniform_()  # Initialise scale uniformly in [0, 1)
      self.embed.weight.data[:, num_features:].zero_()  # Initialise bias at 0
else:
self.embed = nn.Embedding(num_classes, num_features)
self.embed.weight.data.uniform_()
def forward(self, x, y):
if self.bias:
gamma, beta = self.embed(y).chunk(2, dim=-1)
out = gamma.view(-1, self.num_features, 1, 1) * x + beta.view(-1, self.num_features, 1, 1)
else:
gamma = self.embed(y)
out = gamma.view(-1, self.num_features, 1, 1) * x
return out
class NoneNorm2d(nn.Module):
def __init__(self, num_features, bias=True):
super().__init__()
def forward(self, x):
return x
class InstanceNorm2dPlus(nn.Module):
def __init__(self, num_features, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
self.alpha = nn.Parameter(torch.zeros(num_features))
self.gamma = nn.Parameter(torch.zeros(num_features))
self.alpha.data.normal_(1, 0.02)
self.gamma.data.normal_(1, 0.02)
if bias:
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, x):
means = torch.mean(x, dim=(2, 3))
m = torch.mean(means, dim=-1, keepdim=True)
v = torch.var(means, dim=-1, keepdim=True)
means = (means - m) / (torch.sqrt(v + 1e-5))
h = self.instance_norm(x)
if self.bias:
h = h + means[..., None, None] * self.alpha[..., None, None]
out = self.gamma.view(-1, self.num_features, 1, 1) * h + self.beta.view(-1, self.num_features, 1, 1)
else:
h = h + means[..., None, None] * self.alpha[..., None, None]
out = self.gamma.view(-1, self.num_features, 1, 1) * h
return out
class ConditionalInstanceNorm2dPlus(nn.Module):
def __init__(self, num_features, num_classes, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
if bias:
self.embed = nn.Embedding(num_classes, num_features * 3)
self.embed.weight.data[:, :2 * num_features].normal_(1, 0.02) # Initialise scale at N(1, 0.02)
self.embed.weight.data[:, 2 * num_features:].zero_() # Initialise bias at 0
else:
self.embed = nn.Embedding(num_classes, 2 * num_features)
self.embed.weight.data.normal_(1, 0.02)
def forward(self, x, y):
means = torch.mean(x, dim=(2, 3))
m = torch.mean(means, dim=-1, keepdim=True)
v = torch.var(means, dim=-1, keepdim=True)
means = (means - m) / (torch.sqrt(v + 1e-5))
h = self.instance_norm(x)
if self.bias:
gamma, alpha, beta = self.embed(y).chunk(3, dim=-1)
h = h + means[..., None, None] * alpha[..., None, None]
out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
else:
gamma, alpha = self.embed(y).chunk(2, dim=-1)
h = h + means[..., None, None] * alpha[..., None, None]
out = gamma.view(-1, self.num_features, 1, 1) * h
return out
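# Example (minimal sketch): class-conditional InstanceNorm++ is
# shape-preserving; `y` holds one class index per example.
#
#   norm = ConditionalInstanceNorm2dPlus(num_features=64, num_classes=10)
#   x = torch.randn(4, 64, 16, 16)
#   y = torch.randint(0, 10, (4,))
#   out = norm(x, y)               # -> shape (4, 64, 16, 16)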
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
from . import utils, layers, layerspp, normalization
import torch.nn as nn
import functools
import torch
import numpy as np
ResnetBlockDDPM = layerspp.ResnetBlockDDPMpp
ResnetBlockBigGAN = layerspp.ResnetBlockBigGANpp
Combine = layerspp.Combine
conv3x3 = layerspp.conv3x3
conv1x1 = layerspp.conv1x1
get_act = layers.get_act
get_normalization = normalization.get_normalization
default_initializer = layers.default_init
@utils.register_model(name='ncsnpp')
class NCSNpp(nn.Module):
"""NCSN++ model"""
def __init__(self, config):
super().__init__()
self.config = config
self.act = act = get_act(config)
self.register_buffer('sigmas', torch.tensor(utils.get_sigmas(config)))
self.nf = nf = config.model.nf
ch_mult = config.model.ch_mult
self.num_res_blocks = num_res_blocks = config.model.num_res_blocks
self.attn_resolutions = attn_resolutions = config.model.attn_resolutions
dropout = config.model.dropout
resamp_with_conv = config.model.resamp_with_conv
self.num_resolutions = num_resolutions = len(ch_mult)
self.all_resolutions = all_resolutions = [config.data.image_size // (2 ** i) for i in range(num_resolutions)]
self.conditional = conditional = config.model.conditional # noise-conditional
fir = config.model.fir
fir_kernel = config.model.fir_kernel
self.skip_rescale = skip_rescale = config.model.skip_rescale
self.resblock_type = resblock_type = config.model.resblock_type.lower()
self.progressive = progressive = config.model.progressive.lower()
self.progressive_input = progressive_input = config.model.progressive_input.lower()
self.embedding_type = embedding_type = config.model.embedding_type.lower()
init_scale = config.model.init_scale
assert progressive in ['none', 'output_skip', 'residual']
assert progressive_input in ['none', 'input_skip', 'residual']
assert embedding_type in ['fourier', 'positional']
combine_method = config.model.progressive_combine.lower()
combiner = functools.partial(Combine, method=combine_method)
modules = []
# timestep/noise_level embedding; only for continuous training
if embedding_type == 'fourier':
# Gaussian Fourier features embeddings.
assert config.training.continuous, "Fourier features are only used for continuous training."
modules.append(layerspp.GaussianFourierProjection(
embedding_size=nf, scale=config.model.fourier_scale
))
embed_dim = 2 * nf
elif embedding_type == 'positional':
embed_dim = nf
else:
raise ValueError(f'embedding type {embedding_type} unknown.')
if conditional:
modules.append(nn.Linear(embed_dim, nf * 4))
modules[-1].weight.data = default_initializer()(modules[-1].weight.shape)
nn.init.zeros_(modules[-1].bias)
modules.append(nn.Linear(nf * 4, nf * 4))
modules[-1].weight.data = default_initializer()(modules[-1].weight.shape)
nn.init.zeros_(modules[-1].bias)
AttnBlock = functools.partial(layerspp.AttnBlockpp,
init_scale=init_scale,
skip_rescale=skip_rescale)
Upsample = functools.partial(layerspp.Upsample,
with_conv=resamp_with_conv, fir=fir, fir_kernel=fir_kernel)
if progressive == 'output_skip':
self.pyramid_upsample = layerspp.Upsample(fir=fir, fir_kernel=fir_kernel, with_conv=False)
elif progressive == 'residual':
pyramid_upsample = functools.partial(layerspp.Upsample,
fir=fir, fir_kernel=fir_kernel, with_conv=True)
Downsample = functools.partial(layerspp.Downsample,
with_conv=resamp_with_conv, fir=fir, fir_kernel=fir_kernel)
if progressive_input == 'input_skip':
self.pyramid_downsample = layerspp.Downsample(fir=fir, fir_kernel=fir_kernel, with_conv=False)
elif progressive_input == 'residual':
pyramid_downsample = functools.partial(layerspp.Downsample,
fir=fir, fir_kernel=fir_kernel, with_conv=True)
if resblock_type == 'ddpm':
ResnetBlock = functools.partial(ResnetBlockDDPM,
act=act,
dropout=dropout,
init_scale=init_scale,
skip_rescale=skip_rescale,
temb_dim=nf * 4)
elif resblock_type == 'biggan':
ResnetBlock = functools.partial(ResnetBlockBigGAN,
act=act,
dropout=dropout,
fir=fir,
fir_kernel=fir_kernel,
init_scale=init_scale,
skip_rescale=skip_rescale,
temb_dim=nf * 4)
else:
raise ValueError(f'resblock type {resblock_type} unrecognized.')
# Downsampling block
channels = config.data.num_channels
if progressive_input != 'none':
input_pyramid_ch = channels
modules.append(conv3x3(channels, nf))
hs_c = [nf]
in_ch = nf
for i_level in range(num_resolutions):
# Residual blocks for this resolution
for i_block in range(num_res_blocks):
out_ch = nf * ch_mult[i_level]
modules.append(ResnetBlock(in_ch=in_ch, out_ch=out_ch))
in_ch = out_ch
if all_resolutions[i_level] in attn_resolutions:
modules.append(AttnBlock(channels=in_ch))
hs_c.append(in_ch)
if i_level != num_resolutions - 1:
if resblock_type == 'ddpm':
modules.append(Downsample(in_ch=in_ch))
else:
modules.append(ResnetBlock(down=True, in_ch=in_ch))
if progressive_input == 'input_skip':
modules.append(combiner(dim1=input_pyramid_ch, dim2=in_ch))
if combine_method == 'cat':
in_ch *= 2
elif progressive_input == 'residual':
modules.append(pyramid_downsample(in_ch=input_pyramid_ch, out_ch=in_ch))
input_pyramid_ch = in_ch
hs_c.append(in_ch)
in_ch = hs_c[-1]
modules.append(ResnetBlock(in_ch=in_ch))
modules.append(AttnBlock(channels=in_ch))
modules.append(ResnetBlock(in_ch=in_ch))
pyramid_ch = 0
# Upsampling block
for i_level in reversed(range(num_resolutions)):
for i_block in range(num_res_blocks + 1):
out_ch = nf * ch_mult[i_level]
modules.append(ResnetBlock(in_ch=in_ch + hs_c.pop(),
out_ch=out_ch))
in_ch = out_ch
if all_resolutions[i_level] in attn_resolutions:
modules.append(AttnBlock(channels=in_ch))
if progressive != 'none':
if i_level == num_resolutions - 1:
if progressive == 'output_skip':
modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
num_channels=in_ch, eps=1e-6))
modules.append(conv3x3(in_ch, channels, init_scale=init_scale))
pyramid_ch = channels
elif progressive == 'residual':
modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
num_channels=in_ch, eps=1e-6))
modules.append(conv3x3(in_ch, in_ch, bias=True))
pyramid_ch = in_ch
else:
raise ValueError(f'{progressive} is not a valid name.')
else:
if progressive == 'output_skip':
modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
num_channels=in_ch, eps=1e-6))
modules.append(conv3x3(in_ch, channels, bias=True, init_scale=init_scale))
pyramid_ch = channels
elif progressive == 'residual':
modules.append(pyramid_upsample(in_ch=pyramid_ch, out_ch=in_ch))
pyramid_ch = in_ch
else:
raise ValueError(f'{progressive} is not a valid name')
if i_level != 0:
if resblock_type == 'ddpm':
modules.append(Upsample(in_ch=in_ch))
else:
modules.append(ResnetBlock(in_ch=in_ch, up=True))
assert not hs_c
if progressive != 'output_skip':
modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
num_channels=in_ch, eps=1e-6))
modules.append(conv3x3(in_ch, channels, init_scale=init_scale))
self.all_modules = nn.ModuleList(modules)
def forward(self, x, time_cond):
# timestep/noise_level embedding; only for continuous training
modules = self.all_modules
m_idx = 0
if self.embedding_type == 'fourier':
# Gaussian Fourier features embeddings.
used_sigmas = time_cond
temb = modules[m_idx](torch.log(used_sigmas))
m_idx += 1
elif self.embedding_type == 'positional':
# Sinusoidal positional embeddings.
timesteps = time_cond
used_sigmas = self.sigmas[time_cond.long()]
temb = layers.get_timestep_embedding(timesteps, self.nf)
else:
raise ValueError(f'embedding type {self.embedding_type} unknown.')
if self.conditional:
temb = modules[m_idx](temb)
m_idx += 1
temb = modules[m_idx](self.act(temb))
m_idx += 1
else:
temb = None
if not self.config.data.centered:
# If input data is in [0, 1]
x = 2 * x - 1.
# Downsampling block
input_pyramid = None
if self.progressive_input != 'none':
input_pyramid = x
hs = [modules[m_idx](x)]
m_idx += 1
for i_level in range(self.num_resolutions):
# Residual blocks for this resolution
for i_block in range(self.num_res_blocks):
h = modules[m_idx](hs[-1], temb)
m_idx += 1
if h.shape[-1] in self.attn_resolutions:
h = modules[m_idx](h)
m_idx += 1
hs.append(h)
if i_level != self.num_resolutions - 1:
if self.resblock_type == 'ddpm':
h = modules[m_idx](hs[-1])
m_idx += 1
else:
h = modules[m_idx](hs[-1], temb)
m_idx += 1
if self.progressive_input == 'input_skip':
input_pyramid = self.pyramid_downsample(input_pyramid)
h = modules[m_idx](input_pyramid, h)
m_idx += 1
elif self.progressive_input == 'residual':
input_pyramid = modules[m_idx](input_pyramid)
m_idx += 1
if self.skip_rescale:
input_pyramid = (input_pyramid + h) / np.sqrt(2.)
else:
input_pyramid = input_pyramid + h
h = input_pyramid
hs.append(h)
h = hs[-1]
h = modules[m_idx](h, temb)
m_idx += 1
h = modules[m_idx](h)
m_idx += 1
h = modules[m_idx](h, temb)
m_idx += 1
pyramid = None
# Upsampling block
for i_level in reversed(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks + 1):
h = modules[m_idx](torch.cat([h, hs.pop()], dim=1), temb)
m_idx += 1
if h.shape[-1] in self.attn_resolutions:
h = modules[m_idx](h)
m_idx += 1
if self.progressive != 'none':
if i_level == self.num_resolutions - 1:
if self.progressive == 'output_skip':
pyramid = self.act(modules[m_idx](h))
m_idx += 1
pyramid = modules[m_idx](pyramid)
m_idx += 1
elif self.progressive == 'residual':
pyramid = self.act(modules[m_idx](h))
m_idx += 1
pyramid = modules[m_idx](pyramid)
m_idx += 1
else:
raise ValueError(f'{self.progressive} is not a valid name.')
else:
if self.progressive == 'output_skip':
pyramid = self.pyramid_upsample(pyramid)
pyramid_h = self.act(modules[m_idx](h))
m_idx += 1
pyramid_h = modules[m_idx](pyramid_h)
m_idx += 1
pyramid = pyramid + pyramid_h
elif self.progressive == 'residual':
pyramid = modules[m_idx](pyramid)
m_idx += 1
if self.skip_rescale:
pyramid = (pyramid + h) / np.sqrt(2.)
else:
pyramid = pyramid + h
h = pyramid
else:
raise ValueError(f'{self.progressive} is not a valid name')
if i_level != 0:
if self.resblock_type == 'ddpm':
h = modules[m_idx](h)
m_idx += 1
else:
h = modules[m_idx](h, temb)
m_idx += 1
assert not hs
if self.progressive == 'output_skip':
h = pyramid
else:
h = self.act(modules[m_idx](h))
m_idx += 1
h = modules[m_idx](h)
m_idx += 1
assert m_idx == len(modules)
if self.config.model.scale_by_sigma:
used_sigmas = used_sigmas.reshape((x.shape[0], *([1] * len(x.shape[1:]))))
h = h / used_sigmas
return h
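# A hedged note on the module-walk pattern used above: the constructor appends
# submodules to `modules` in exactly the order that forward() consumes them
# through the running pointer `m_idx`, so the final `assert m_idx == len(modules)`
# checks that every registered module was used exactly once. A minimal sketch of
# the same pattern (illustrative names only):
#
#   mods = nn.ModuleList([nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2)])
#   m_idx, h = 0, x
#   for _ in range(len(mods)):
#       h = mods[m_idx](h)
#       m_idx += 1
#   assert m_idx == len(mods)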
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""DDPM model.
This code is the pytorch equivalent of:
https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/models/unet.py
"""
import torch
import torch.nn as nn
import functools
from . import utils, layers, normalization
RefineBlock = layers.RefineBlock
ResidualBlock = layers.ResidualBlock
ResnetBlockDDPM = layers.ResnetBlockDDPM
Upsample = layers.Upsample
Downsample = layers.Downsample
conv3x3 = layers.ddpm_conv3x3
get_act = layers.get_act
get_normalization = normalization.get_normalization
default_initializer = layers.default_init
@utils.register_model(name='ddpm')
class DDPM(nn.Module):
def __init__(self, config):
super().__init__()
self.act = act = get_act(config)
self.register_buffer('sigmas', torch.tensor(utils.get_sigmas(config)))
self.nf = nf = config.model.nf
ch_mult = config.model.ch_mult
self.num_res_blocks = num_res_blocks = config.model.num_res_blocks
self.attn_resolutions = attn_resolutions = config.model.attn_resolutions
dropout = config.model.dropout
resamp_with_conv = config.model.resamp_with_conv
self.num_resolutions = num_resolutions = len(ch_mult)
self.all_resolutions = all_resolutions = [config.data.image_size // (2 ** i) for i in range(num_resolutions)]
AttnBlock = functools.partial(layers.AttnBlock)
self.conditional = conditional = config.model.conditional
ResnetBlock = functools.partial(ResnetBlockDDPM, act=act, temb_dim=4 * nf, dropout=dropout)
modules = []  # defined unconditionally so the model also builds with conditional=False
if conditional:
# Condition on noise levels.
modules.append(nn.Linear(nf, nf * 4))
modules[0].weight.data = default_initializer()(modules[0].weight.data.shape)
nn.init.zeros_(modules[0].bias)
modules.append(nn.Linear(nf * 4, nf * 4))
modules[1].weight.data = default_initializer()(modules[1].weight.data.shape)
nn.init.zeros_(modules[1].bias)
self.centered = config.data.centered
channels = config.data.num_channels
# Downsampling block
modules.append(conv3x3(channels, nf))
hs_c = [nf]
in_ch = nf
for i_level in range(num_resolutions):
# Residual blocks for this resolution
for i_block in range(num_res_blocks):
out_ch = nf * ch_mult[i_level]
modules.append(ResnetBlock(in_ch=in_ch, out_ch=out_ch))
in_ch = out_ch
if all_resolutions[i_level] in attn_resolutions:
modules.append(AttnBlock(channels=in_ch))
hs_c.append(in_ch)
if i_level != num_resolutions - 1:
modules.append(Downsample(channels=in_ch, with_conv=resamp_with_conv))
hs_c.append(in_ch)
in_ch = hs_c[-1]
modules.append(ResnetBlock(in_ch=in_ch))
modules.append(AttnBlock(channels=in_ch))
modules.append(ResnetBlock(in_ch=in_ch))
# Upsampling block
for i_level in reversed(range(num_resolutions)):
for i_block in range(num_res_blocks + 1):
out_ch = nf * ch_mult[i_level]
modules.append(ResnetBlock(in_ch=in_ch + hs_c.pop(), out_ch=out_ch))
in_ch = out_ch
if all_resolutions[i_level] in attn_resolutions:
modules.append(AttnBlock(channels=in_ch))
if i_level != 0:
modules.append(Upsample(channels=in_ch, with_conv=resamp_with_conv))
assert not hs_c
modules.append(nn.GroupNorm(num_channels=in_ch, num_groups=32, eps=1e-6))
modules.append(conv3x3(in_ch, channels, init_scale=0.))
self.all_modules = nn.ModuleList(modules)
self.scale_by_sigma = config.model.scale_by_sigma
def forward(self, x, labels):
modules = self.all_modules
m_idx = 0
if self.conditional:
# timestep/scale embedding
timesteps = labels
temb = layers.get_timestep_embedding(timesteps, self.nf)
temb = modules[m_idx](temb)
m_idx += 1
temb = modules[m_idx](self.act(temb))
m_idx += 1
else:
temb = None
if self.centered:
# Input is in [-1, 1]
h = x
else:
# Input is in [0, 1]
h = 2 * x - 1.
# Downsampling block
hs = [modules[m_idx](h)]
m_idx += 1
for i_level in range(self.num_resolutions):
# Residual blocks for this resolution
for i_block in range(self.num_res_blocks):
h = modules[m_idx](hs[-1], temb)
m_idx += 1
if h.shape[-1] in self.attn_resolutions:
h = modules[m_idx](h)
m_idx += 1
hs.append(h)
if i_level != self.num_resolutions - 1:
hs.append(modules[m_idx](hs[-1]))
m_idx += 1
h = hs[-1]
h = modules[m_idx](h, temb)
m_idx += 1
h = modules[m_idx](h)
m_idx += 1
h = modules[m_idx](h, temb)
m_idx += 1
# Upsampling block
for i_level in reversed(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks + 1):
h = modules[m_idx](torch.cat([h, hs.pop()], dim=1), temb)
m_idx += 1
if h.shape[-1] in self.attn_resolutions:
h = modules[m_idx](h)
m_idx += 1
if i_level != 0:
h = modules[m_idx](h)
m_idx += 1
assert not hs
h = self.act(modules[m_idx](h))
m_idx += 1
h = modules[m_idx](h)
m_idx += 1
assert m_idx == len(modules)
if self.scale_by_sigma:
# Divide the output by sigmas. Useful for training with the NCSN loss.
# The DDPM loss scales the network output by sigma in the loss function,
# so there is no need to do it again here.
used_sigmas = self.sigmas[labels, None, None, None]
h = h / used_sigmas
return h
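# Hedged usage sketch (the concrete `config` comes from the training configs,
# which are not shown here; only fields that __init__/forward read above are
# assumed):
#
#   model = DDPM(config)   # reads config.model.{nf, ch_mult, num_res_blocks, ...}
#   x = torch.rand(8, config.data.num_channels,
#                  config.data.image_size, config.data.image_size)
#   labels = torch.randint(0, 1000, (8,))   # discrete noise-level indices (range depends on config)
#   eps = model(x, labels)                  # same shape as x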
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import os
import random
import numpy as np
import torch
import torchvision.utils as tvu
from torchdiffeq import odeint_adjoint
from guided_diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults
from score_sde.losses import get_optimizer
from score_sde.models import utils as mutils
from score_sde.models.ema import ExponentialMovingAverage
from score_sde import sde_lib
def _extract_into_tensor(arr_or_func, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array (or evaluate a callable) for a batch of indices.
:param arr_or_func: the 1-D numpy array, tensor, or a callable of timesteps.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
if callable(arr_or_func):
res = arr_or_func(timesteps).float()
else:
res = arr_or_func.to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
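def _demo_extract_into_tensor():
    # Minimal self-contained check (not part of the original module): per-sample
    # schedule values are gathered and broadcast over an image-shaped batch.
    betas = torch.linspace(1e-4, 2e-2, 1000)             # 1-D schedule
    t = torch.tensor([0, 499, 999])                      # batch of indices
    out = _extract_into_tensor(betas, t, (3, 3, 32, 32))
    assert out.shape == (3, 3, 32, 32)                   # one scalar per sample, broadcast
    return out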
def restore_checkpoint(ckpt_dir, state, device):
loaded_state = torch.load(ckpt_dir, map_location=device)
state['optimizer'].load_state_dict(loaded_state['optimizer'])
state['model'].load_state_dict(loaded_state['model'], strict=False)
state['ema'].load_state_dict(loaded_state['ema'])
state['step'] = loaded_state['step']
class VPODE(torch.nn.Module):
def __init__(self, model, score_type='guided_diffusion', beta_min=0.1, beta_max=20, N=1000,
img_shape=(3, 256, 256), model_kwargs=None):
"""Construct a Variance Preserving SDE.
Args:
model: diffusion model
score_type: [guided_diffusion, score_sde, ddpm]
beta_min: value of beta(0)
beta_max: value of beta(1)
"""
super().__init__()
self.model = model
self.score_type = score_type
self.model_kwargs = model_kwargs
self.img_shape = img_shape
self.beta_0 = beta_min
self.beta_1 = beta_max
self.N = N
self.discrete_betas = torch.linspace(beta_min / N, beta_max / N, N)
self.alphas = 1. - self.discrete_betas
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)
self.sqrt_1m_alphas_cumprod = torch.sqrt(1. - self.alphas_cumprod)
self.alphas_cumprod_cont = lambda t: torch.exp(-0.5 * (beta_max - beta_min) * t**2 - beta_min * t)
self.sqrt_1m_alphas_cumprod_neg_recip_cont = lambda t: -1. / torch.sqrt(1. - self.alphas_cumprod_cont(t))
def _scale_timesteps(self, t):
assert torch.all(t <= 1) and torch.all(t >= 0), f't has to be in [0, 1], but got {t} with shape {t.shape}'
return (t.float() * self.N).long()
def vpsde_fn(self, t, x):
beta_t = self.beta_0 + t * (self.beta_1 - self.beta_0)
drift = -0.5 * beta_t[:, None] * x
diffusion = torch.sqrt(beta_t)
return drift, diffusion
def ode_fn(self, t, x):
"""Create the drift and diffusion functions for the reverse SDE"""
drift, diffusion = self.vpsde_fn(t, x)
assert x.ndim == 2 and np.prod(self.img_shape) == x.shape[1], x.shape
x_img = x.view(-1, *self.img_shape)
if self.score_type == 'guided_diffusion':
# model output is epsilon
if self.model_kwargs is None:
self.model_kwargs = {}
disc_steps = self._scale_timesteps(t) # (batch_size, ), from float in [0,1] to int in [0, 1000]
model_output = self.model(x_img, disc_steps, **self.model_kwargs)
# trained with learned sigma, so model_output is (epsilon, model_var_values); keep epsilon
model_output, _ = torch.split(model_output, self.img_shape[0], dim=1)
assert x_img.shape == model_output.shape, f'{x_img.shape}, {model_output.shape}'
model_output = model_output.view(x.shape[0], -1)
score = _extract_into_tensor(self.sqrt_1m_alphas_cumprod_neg_recip_cont, t, x.shape) * model_output
elif self.score_type == 'score_sde':
# model output is epsilon
sde = sde_lib.VPSDE(beta_min=self.beta_0, beta_max=self.beta_1, N=self.N)
score_fn = mutils.get_score_fn(sde, self.model, train=False, continuous=True)
score = score_fn(x_img, t)
assert x_img.shape == score.shape, f'{x_img.shape}, {score.shape}'
score = score.view(x.shape[0], -1)
else:
raise NotImplementedError(f'Unknown score type in VPODE: {self.score_type}!')
ode_coef = drift - 0.5 * diffusion[:, None] ** 2 * score
return ode_coef
def forward(self, t, states):
x = states[0]
t = t.expand(x.shape[0]) # (batch_size, )
dx_dt = self.ode_fn(t, x)
assert dx_dt.shape == x.shape
return dx_dt,
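# Hedged usage sketch: integrating the probability-flow ODE above from a small
# diffusion time back to ~0 with a fixed-step Euler solver is exactly what
# image_editing_sample() below does; names here are illustrative.
#
#   vpode = VPODE(model, score_type='score_sde', img_shape=(3, 32, 32))
#   ts = torch.linspace(0.1, 1e-5, 2)                    # t0 -> ~0
#   states = odeint_adjoint(vpode, (x_flat,), ts, method='euler',
#                           options=dict(step_size=1e-3))
#   x_out = states[0][-1]                                # state at the final time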
class OdeGuidedDiffusion(torch.nn.Module):
def __init__(self, args, config, device=None):
super().__init__()
self.args = args
self.config = config
if device is None:
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.device = device
# load model
if config.data.dataset == 'ImageNet':
img_shape = (3, 256, 256)
model_dir = 'checkpoints/diffpure/guided_diffusion'
model_config = model_and_diffusion_defaults()
model_config.update(vars(self.config.model))
print(f'model_config: {model_config}')
model, _ = create_model_and_diffusion(**model_config)
model.load_state_dict(torch.load(f'{model_dir}/256x256_diffusion_uncond.pt', map_location='cpu'))
if model_config['use_fp16']:
model.convert_to_fp16()
elif config.data.dataset == 'CIFAR10':
img_shape = (3, 32, 32)
model_dir = 'checkpoints/diffpure/score_sde'
print(f'model_config: {config}')
model = mutils.create_model(config)
optimizer = get_optimizer(config, model.parameters())
ema = ExponentialMovingAverage(model.parameters(), decay=config.model.ema_rate)
state = dict(step=0, optimizer=optimizer, model=model, ema=ema)
restore_checkpoint(f'{model_dir}/checkpoint_8.pth', state, device)
ema.copy_to(model.parameters())
else:
raise NotImplementedError(f'Unknown dataset {config.data.dataset}!')
model.eval().to(self.device)
self.model = model
self.vpode = VPODE(model=model, score_type=args.score_type, img_shape=img_shape,
model_kwargs=None).to(self.device)
self.betas = self.vpode.discrete_betas.float().to(self.device)
self.atol, self.rtol = 1e-3, 1e-3
self.method = 'euler'
print(f'method: {self.method}, atol: {self.atol}, rtol: {self.rtol}, step_size: {self.args.step_size}')
def image_editing_sample(self, img, bs_id=0, tag=None):
assert isinstance(img, torch.Tensor)
batch_size = img.shape[0]
if tag is None:
tag = 'rnd' + str(random.randint(0, 10000))
out_dir = os.path.join(self.args.log_dir, 'bs' + str(bs_id) + '_' + tag)
assert img.ndim == 4, img.ndim
img = img.to(self.device)
x0 = img
if bs_id < 2:
os.makedirs(out_dir, exist_ok=True)
tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'original_input.png'))
xs = []
for it in range(self.args.sample_step):
if self.args.fix_rand:
# fix initial randomness
noise_fixed = torch.FloatTensor(1, *x0.shape[1:]).\
normal_(0, 1, generator=torch.manual_seed(self.args.seed)).to(self.device)
print(f'noise_fixed: {noise_fixed[0, 0, 0, :3]}')
e = noise_fixed.repeat(x0.shape[0], 1, 1, 1)
else:
e = torch.randn_like(x0).to(self.device)
assert e.shape == x0.shape
total_noise_levels = self.args.t
a = (1 - self.betas).cumprod(dim=0).to(self.device)
x = x0 * a[total_noise_levels - 1].sqrt() + e * (1.0 - a[total_noise_levels - 1]).sqrt()
if bs_id < 2:
tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'init_{it}.png'))
epsilon_dt0, epsilon_dt1 = 0, 1e-5
t0, t1 = self.args.t * 1. / 1000 - epsilon_dt0, epsilon_dt1
t_size = 2
ts = torch.linspace(t0, t1, t_size).to(self.device)
x_ = x.view(batch_size, -1) # (batch_size, state_size)
states = (x_, )
# ODE solver
odeint = odeint_adjoint
state_t = odeint(
self.vpode,
states,
ts,
atol=self.atol,
rtol=self.rtol,
method=self.method,
options=None if self.method != 'euler' else dict(step_size=self.args.step_size)  # step_size is only used by fixed-step solvers such as 'euler'
) # 'euler', 'dopri5'
x0_ = state_t[0][-1]
x0 = x0_.view(x.shape) # (batch_size, c, h, w)
if bs_id < 2:
torch.save(x0, os.path.join(out_dir, f'samples_{it}.pth'))
tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'samples_{it}.png'))
xs.append(x0)
return torch.cat(xs, dim=0)
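# Hedged usage sketch: OdeGuidedDiffusion reads log_dir, score_type, step_size,
# sample_step, fix_rand, seed and t from `args` (all referenced above) and
# expects the checkpoints under checkpoints/diffpure/. Inputs are in [-1, 1]:
#
#   runner = OdeGuidedDiffusion(args, config)
#   purified = runner.image_editing_sample(2 * x - 1, bs_id=0)   # x in [0, 1]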
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import os
import random
import numpy as np
import torch
import torchvision.utils as tvu
import torchsde
from guided_diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults
from score_sde.losses import get_optimizer
from score_sde.models import utils as mutils
from score_sde.models.ema import ExponentialMovingAverage
from score_sde import sde_lib
def _extract_into_tensor(arr_or_func, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array (or evaluate a callable) for a batch of indices.
:param arr_or_func: the 1-D numpy array, tensor, or a callable of timesteps.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
if callable(arr_or_func):
res = arr_or_func(timesteps).float()
else:
res = arr_or_func.to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
def restore_checkpoint(ckpt_dir, state, device):
loaded_state = torch.load(ckpt_dir, map_location=device)
state['optimizer'].load_state_dict(loaded_state['optimizer'])
state['model'].load_state_dict(loaded_state['model'], strict=False)
state['ema'].load_state_dict(loaded_state['ema'])
state['step'] = loaded_state['step']
class LDSDE(torch.nn.Module):
def __init__(self, model, x_init, score_type='guided_diffusion', beta_min=0.1, beta_max=20, N=1000,
img_shape=(3, 256, 256), sigma2=0.001, lambda_ld=0.01, eta=5, model_kwargs=None):
"""Construct a Variance Preserving SDE.
Args:
model: diffusion model
score_type: [guided_diffusion, score_sde, ddpm]
beta_min: value of beta(0)
beta_max: value of beta(1)
"""
super().__init__()
self.model = model
self.x_init = x_init
self.sigma2 = sigma2
self.eta = eta
self.lambda_ld = lambda_ld # damping coefficient
self.score_type = score_type
self.model_kwargs = model_kwargs
self.img_shape = img_shape
self.beta_0 = beta_min
self.beta_1 = beta_max
self.N = N
self.discrete_betas = torch.linspace(beta_min / N, beta_max / N, N)
self.alphas = 1. - self.discrete_betas
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)
self.sqrt_1m_alphas_cumprod = torch.sqrt(1. - self.alphas_cumprod)
self.alphas_cumprod_cont = lambda t: torch.exp(-0.5 * (beta_max - beta_min) * t**2 - beta_min * t)
self.sqrt_1m_alphas_cumprod_neg_recip_cont = lambda t: -1. / torch.sqrt(1. - self.alphas_cumprod_cont(t))
self.noise_type = "diagonal"
self.sde_type = "ito"
print(f'sigma2: {self.sigma2}, lambda_ld: {self.lambda_ld}, eta: {self.eta}')
def _scale_timesteps(self, t):
assert torch.all(t <= 1) and torch.all(t >= 0), f't has to be in [0, 1], but got {t} with shape {t.shape}'
return (t.float() * self.N).long()
def ldsde_fn(self, t, x, return_type='drift'):
"""Create the drift and diffusion functions for the reverse SDE"""
t = torch.zeros_like(t, dtype=torch.float, device=t.device) + 1e-2  # evaluate the score at a fixed small diffusion time
if return_type == 'drift':
assert x.ndim == 2 and np.prod(self.img_shape) == x.shape[1], x.shape
x_img = x.view(-1, *self.img_shape)
if self.score_type == 'guided_diffusion':
# model output is epsilon
if self.model_kwargs is None:
self.model_kwargs = {}
disc_steps = self._scale_timesteps(t) # (batch_size, ), from float in [0,1] to int in [0, 1000]
model_output = self.model(x_img, disc_steps, **self.model_kwargs)
# trained with learned sigma, so model_output is (epsilon, model_var_values); keep epsilon
model_output, _ = torch.split(model_output, self.img_shape[0], dim=1)
assert x_img.shape == model_output.shape, f'{x_img.shape}, {model_output.shape}'
model_output = model_output.view(x.shape[0], -1)
score = _extract_into_tensor(self.sqrt_1m_alphas_cumprod_neg_recip_cont, t, x.shape) * model_output
elif self.score_type == 'score_sde':
# model output is epsilon
sde = sde_lib.VPSDE(beta_min=self.beta_0, beta_max=self.beta_1, N=self.N)
score_fn = mutils.get_score_fn(sde, self.model, train=False, continuous=True)
score = score_fn(x_img, t)
assert x_img.shape == score.shape, f'{x_img.shape}, {score.shape}'
score = score.view(x.shape[0], -1)
else:
raise NotImplementedError(f'Unknown score type in LDSDE: {self.score_type}!')
drift = -0.5 * (-score + (x - self.x_init) / self.sigma2) * self.lambda_ld # TODO
return drift
else:
diffusion_coef = np.sqrt(self.lambda_ld) * self.eta
return torch.tensor([diffusion_coef], dtype=torch.float).expand(x.shape[0]).to(x.device)
def f(self, t, x):
"""Create the drift function f(x, t)
sdeint only supports 2-D state tensors (batch_size, c*h*w)
"""
t = t.expand(x.shape[0]) # (batch_size, )
drift = self.ldsde_fn(t, x, return_type='drift')
assert drift.shape == x.shape
return drift
def g(self, t, x):
"""Create the diffusion function g(t)
sdeint only supports 2-D state tensors (batch_size, c*h*w)
"""
t = t.expand(x.shape[0]) # (batch_size, )
diffusion = self.ldsde_fn(t, x, return_type='diffusion')
assert diffusion.shape == (x.shape[0], )
return diffusion[:, None].expand(x.shape)
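# Hedged restatement of the SDE implemented by f()/g() above: damped Langevin
# dynamics anchored at x_init,
#
#   dx = -0.5 * lambda_ld * ( -score(x) + (x - x_init) / sigma2 ) dt
#        + sqrt(lambda_ld) * eta * dW,
#
# evaluated at a fixed small diffusion time, so sigma2 controls how tightly
# samples stay near the input and eta scales the injected noise.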
class LDGuidedDiffusion(torch.nn.Module):
def __init__(self, args, config, device=None):
super().__init__()
self.args = args
self.config = config
if device is None:
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.device = device
# load model
if config.data.dataset == 'ImageNet':
img_shape = (3, 256, 256)
model_dir = 'checkpoints/diffpure/guided_diffusion'
model_config = model_and_diffusion_defaults()
model_config.update(vars(self.config.model))
print(f'model_config: {model_config}')
model, _ = create_model_and_diffusion(**model_config)
model.load_state_dict(torch.load(f'{model_dir}/256x256_diffusion_uncond.pt', map_location='cpu'))
if model_config['use_fp16']:
model.convert_to_fp16()
elif config.data.dataset == 'CIFAR10':
img_shape = (3, 32, 32)
model_dir = 'checkpoints/diffpure/score_sde'
print(f'model_config: {config}')
model = mutils.create_model(config)
optimizer = get_optimizer(config, model.parameters())
ema = ExponentialMovingAverage(model.parameters(), decay=config.model.ema_rate)
state = dict(step=0, optimizer=optimizer, model=model, ema=ema)
restore_checkpoint(f'{model_dir}/checkpoint_8.pth', state, device)
ema.copy_to(model.parameters())
else:
raise NotImplementedError(f'Unknown dataset {config.data.dataset}!')
model.eval().to(self.device)
self.model = model
self.img_shape = img_shape
print(f'use_bm: {args.use_bm}')
self.args_dict = {
'method': 'euler', # ["srk", "euler", None]
'adaptive': False,
'dt': 1e-2,
}
print(f'args_dict: {self.args_dict}')
def image_editing_sample(self, img, bs_id=0, tag=None):
assert isinstance(img, torch.Tensor)
batch_size = img.shape[0]
state_size = int(np.prod(img.shape[1:])) # c*h*w
if tag is None:
tag = 'rnd' + str(random.randint(0, 10000))
out_dir = os.path.join(self.args.log_dir, 'bs' + str(bs_id) + '_' + tag)
assert img.ndim == 4, img.ndim
img = img.to(self.device)
x0 = img
x0_ = x0.view(batch_size, -1) # (batch_size, state_size)
self.ldsde = LDSDE(model=self.model, x_init=x0_, score_type=self.args.score_type, img_shape=self.img_shape,
sigma2=self.args.sigma2, lambda_ld=self.args.lambda_ld, eta=self.args.eta,
model_kwargs=None).to(self.device)
self.betas = self.ldsde.discrete_betas.float().to(self.device)
if bs_id < 2:
os.makedirs(out_dir, exist_ok=True)
tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'original_input.png'))
xs = []
for it in range(self.args.sample_step):
x = x0
if bs_id < 2:
tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'init_{it}.png'))
epsilon_dt0, epsilon_dt1 = 0, 1e-5
t0, t1 = 1 - self.args.t * 1. / 1000 + epsilon_dt0, 1 - epsilon_dt1
t_size = 2
ts = torch.linspace(t0, t1, t_size).to(self.device)
x_ = x.view(batch_size, -1) # (batch_size, state_size)
if self.args.use_bm:
bm = torchsde.BrownianInterval(t0=t0, t1=t1, size=(batch_size, state_size), device=self.device)
xs_ = torchsde.sdeint_adjoint(self.ldsde, x_, ts, bm=bm, **self.args_dict)
else:
xs_ = torchsde.sdeint_adjoint(self.ldsde, x_, ts, **self.args_dict)
x0 = xs_[-1].view(x.shape) # (batch_size, c, h, w)
if bs_id < 2:
torch.save(x0, os.path.join(out_dir, f'samples_{it}.pth'))
tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'samples_{it}.png'))
xs.append(x0)
return torch.cat(xs, dim=0)
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import os
import random
import torch
import torchvision.utils as tvu
from guided_diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults
class GuidedDiffusion(torch.nn.Module):
def __init__(self, args, config, device=None, model_dir='checkpoints/diffpure/guided_diffusion'):
super().__init__()
self.args = args
self.config = config
if device is None:
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.device = device
# load model
model_config = model_and_diffusion_defaults()
model_config.update(vars(self.config.model))
print(f'model_config: {model_config}')
model, diffusion = create_model_and_diffusion(**model_config)
model.load_state_dict(torch.load(f'{model_dir}/256x256_diffusion_uncond.pt', map_location='cpu'))
model.requires_grad_(False).eval().to(self.device)
if model_config['use_fp16']:
model.convert_to_fp16()
self.model = model
self.diffusion = diffusion
self.betas = torch.from_numpy(diffusion.betas).float().to(self.device)
def image_editing_sample(self, img, bs_id=0, tag=None):
with torch.no_grad():
assert isinstance(img, torch.Tensor)
batch_size = img.shape[0]
if tag is None:
tag = 'rnd' + str(random.randint(0, 10000))
out_dir = os.path.join(self.args.log_dir, 'bs' + str(bs_id) + '_' + tag)
assert img.ndim == 4, img.ndim
img = img.to(self.device)
x0 = img
if bs_id < 2:
os.makedirs(out_dir, exist_ok=True)
tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'original_input.png'))
xs = []
for it in range(self.args.sample_step):
e = torch.randn_like(x0)
total_noise_levels = self.args.t
a = (1 - self.betas).cumprod(dim=0)
x = x0 * a[total_noise_levels - 1].sqrt() + e * (1.0 - a[total_noise_levels - 1]).sqrt()
if bs_id < 2:
tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'init_{it}.png'))
for i in reversed(range(total_noise_levels)):
t = torch.tensor([i] * batch_size, device=self.device)
x = self.diffusion.p_sample(self.model, x, t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None)["sample"]
# added intermediate step vis
if (i - 99) % 100 == 0 and bs_id < 2:
tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'noise_t_{i}_{it}.png'))
x0 = x
if bs_id < 2:
torch.save(x0, os.path.join(out_dir, f'samples_{it}.pth'))
tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'samples_{it}.png'))
xs.append(x0)
return torch.cat(xs, dim=0)
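# Hedged summary of the purification recipe above, in two closed-form steps:
#   1) diffuse: x_t = sqrt(a_t) * x0 + sqrt(1 - a_t) * e, with a_t = prod(1 - beta_i)
#   2) denoise: run diffusion.p_sample from t = args.t - 1 down to 0.
# Smaller args.t preserves more of the input; larger values denoise more aggressively.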
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import os
import random
import numpy as np
import torch
import torchvision.utils as tvu
from ddpm.unet_ddpm import Model
def get_beta_schedule(*, beta_start, beta_end, num_diffusion_timesteps):
betas = np.linspace(beta_start, beta_end,
num_diffusion_timesteps, dtype=np.float64)
assert betas.shape == (num_diffusion_timesteps,)
return betas
def extract(a, t, x_shape):
"""Extract coefficients from a based on t and reshape to make it
broadcastable with x_shape."""
bs, = t.shape
assert x_shape[0] == bs
out = torch.gather(torch.tensor(a, dtype=torch.float, device=t.device), 0, t.long())
assert out.shape == (bs,)
out = out.reshape((bs,) + (1,) * (len(x_shape) - 1))
return out
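def _demo_extract():
    # Minimal self-contained check (illustrative, not part of the original
    # module): gather per-sample betas and reshape for image broadcasting.
    betas = get_beta_schedule(beta_start=1e-4, beta_end=2e-2,
                              num_diffusion_timesteps=1000)
    t = torch.tensor([0, 999])
    out = extract(betas, t, (2, 3, 32, 32))
    assert out.shape == (2, 1, 1, 1)
    return out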
def image_editing_denoising_step_flexible_mask(x, t, *, model, logvar, betas):
"""
Sample from p(x_{t-1} | x_t)
"""
alphas = 1.0 - betas
alphas_cumprod = alphas.cumprod(dim=0)
model_output = model(x, t)
weighted_score = betas / torch.sqrt(1 - alphas_cumprod)
mean = extract(1 / torch.sqrt(alphas), t, x.shape) * (x - extract(weighted_score, t, x.shape) * model_output)
logvar = extract(logvar, t, x.shape)
noise = torch.randn_like(x)
mask = 1 - (t == 0).float()
mask = mask.reshape((x.shape[0],) + (1,) * (len(x.shape) - 1))
sample = mean + mask * torch.exp(0.5 * logvar) * noise
sample = sample.float()
return sample
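# Hedged restatement of the step above (standard DDPM ancestral sampling):
#   mu_t    = 1/sqrt(alpha_t) * ( x_t - beta_t / sqrt(1 - a_bar_t) * model(x_t, t) )
#   x_{t-1} = mu_t + 1{t > 0} * exp(0.5 * logvar_t) * z,   z ~ N(0, I)
# The mask zeroes the noise term at t = 0, so the final step is deterministic.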
class Diffusion(torch.nn.Module):
def __init__(self, args, config, device=None):
super().__init__()
self.args = args
self.config = config
if device is None:
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.device = device
print("Loading model")
if self.config.data.dataset == "CelebA_HQ":
url = "https://image-editing-test-12345.s3-us-west-2.amazonaws.com/checkpoints/celeba_hq.ckpt"
else:
raise ValueError(f'Unknown dataset {self.config.data.dataset}')
model = Model(self.config)
ckpt = torch.hub.load_state_dict_from_url(url, map_location='cpu')
model.load_state_dict(ckpt)
model.eval()
self.model = model
self.model_var_type = config.model.var_type
betas = get_beta_schedule(
beta_start=config.diffusion.beta_start,
beta_end=config.diffusion.beta_end,
num_diffusion_timesteps=config.diffusion.num_diffusion_timesteps
)
self.betas = torch.from_numpy(betas).float()
self.num_timesteps = betas.shape[0]
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas, axis=0)
alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])
posterior_variance = betas * \
(1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
if self.model_var_type == "fixedlarge":
self.logvar = np.log(np.append(posterior_variance[1], betas[1:]))
elif self.model_var_type == 'fixedsmall':
self.logvar = np.log(np.maximum(posterior_variance, 1e-20))
def image_editing_sample(self, img=None, bs_id=0, tag=None):
assert isinstance(img, torch.Tensor)
batch_size = img.shape[0]
with torch.no_grad():
if tag is None:
tag = 'rnd' + str(random.randint(0, 10000))
out_dir = os.path.join(self.args.log_dir, 'bs' + str(bs_id) + '_' + tag)
assert img.ndim == 4, img.ndim
x0 = img
if bs_id < 2:
os.makedirs(out_dir, exist_ok=True)
tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'original_input.png'))
xs = []
for it in range(self.args.sample_step):
e = torch.randn_like(x0)
total_noise_levels = self.args.t
a = (1 - self.betas).cumprod(dim=0).to(x0.device)
x = x0 * a[total_noise_levels - 1].sqrt() + e * (1.0 - a[total_noise_levels - 1]).sqrt()
if bs_id < 2:
tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'init_{it}.png'))
for i in reversed(range(total_noise_levels)):
t = torch.tensor([i] * batch_size, device=img.device)
x = image_editing_denoising_step_flexible_mask(x, t=t, model=self.model,
logvar=self.logvar,
betas=self.betas.to(img.device))
# added intermediate step vis
if (i - 49) % 50 == 0 and bs_id < 2:
tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'noise_t_{i}_{it}.png'))
x0 = x
if bs_id < 2:
torch.save(x0, os.path.join(out_dir, f'samples_{it}.pth'))
tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'samples_{it}.png'))
xs.append(x0)
return torch.cat(xs, dim=0)
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import os
import random
import numpy as np
import torch
import torchvision.utils as tvu
import torchsde
from guided_diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults
from score_sde.losses import get_optimizer
from score_sde.models import utils as mutils
from score_sde.models.ema import ExponentialMovingAverage
from score_sde import sde_lib
def _extract_into_tensor(arr_or_func, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array (or evaluate a callable) for a batch of indices.
:param arr_or_func: the 1-D numpy array, tensor, or a callable of timesteps.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
if callable(arr_or_func):
res = arr_or_func(timesteps).float()
else:
res = arr_or_func.to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
def restore_checkpoint(ckpt_dir, state, device):
loaded_state = torch.load(ckpt_dir, map_location=device)
state['optimizer'].load_state_dict(loaded_state['optimizer'])
state['model'].load_state_dict(loaded_state['model'], strict=False)
state['ema'].load_state_dict(loaded_state['ema'])
state['step'] = loaded_state['step']
class RevVPSDE(torch.nn.Module):
def __init__(self, model, score_type='guided_diffusion', beta_min=0.1, beta_max=20, N=1000,
img_shape=(3, 256, 256), model_kwargs=None):
"""Construct a Variance Preserving SDE.
Args:
model: diffusion model
score_type: [guided_diffusion, score_sde, ddpm]
beta_min: value of beta(0)
beta_max: value of beta(1)
"""
super().__init__()
self.model = model
self.score_type = score_type
self.model_kwargs = model_kwargs
self.img_shape = img_shape
self.beta_0 = beta_min
self.beta_1 = beta_max
self.N = N
self.discrete_betas = torch.linspace(beta_min / N, beta_max / N, N)
self.alphas = 1. - self.discrete_betas
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)
self.sqrt_1m_alphas_cumprod = torch.sqrt(1. - self.alphas_cumprod)
self.alphas_cumprod_cont = lambda t: torch.exp(-0.5 * (beta_max - beta_min) * t**2 - beta_min * t)
self.sqrt_1m_alphas_cumprod_neg_recip_cont = lambda t: -1. / torch.sqrt(1. - self.alphas_cumprod_cont(t))
self.noise_type = "diagonal"
self.sde_type = "ito"
def _scale_timesteps(self, t):
assert torch.all(t <= 1) and torch.all(t >= 0), f't has to be in [0, 1], but got {t} with shape {t.shape}'
return (t.float() * self.N).long()
def vpsde_fn(self, t, x):
beta_t = self.beta_0 + t * (self.beta_1 - self.beta_0)
drift = -0.5 * beta_t[:, None] * x
diffusion = torch.sqrt(beta_t)
return drift, diffusion
def rvpsde_fn(self, t, x, return_type='drift'):
"""Create the drift and diffusion functions for the reverse SDE"""
drift, diffusion = self.vpsde_fn(t, x)
if return_type == 'drift':
assert x.ndim == 2 and np.prod(self.img_shape) == x.shape[1], x.shape
x_img = x.view(-1, *self.img_shape)
if self.score_type == 'guided_diffusion':
# model output is epsilon
if self.model_kwargs is None:
self.model_kwargs = {}
disc_steps = self._scale_timesteps(t) # (batch_size, ), from float in [0,1] to int in [0, 1000]
model_output = self.model(x_img, disc_steps, **self.model_kwargs)
# trained with learned sigma, so model_output is (epsilon, model_var_values); keep epsilon
model_output, _ = torch.split(model_output, self.img_shape[0], dim=1)
assert x_img.shape == model_output.shape, f'{x_img.shape}, {model_output.shape}'
model_output = model_output.view(x.shape[0], -1)
score = _extract_into_tensor(self.sqrt_1m_alphas_cumprod_neg_recip_cont, t, x.shape) * model_output
elif self.score_type == 'score_sde':
# model output is epsilon
sde = sde_lib.VPSDE(beta_min=self.beta_0, beta_max=self.beta_1, N=self.N)
score_fn = mutils.get_score_fn(sde, self.model, train=False, continuous=True)
score = score_fn(x_img, t)
assert x_img.shape == score.shape, f'{x_img.shape}, {score.shape}'
score = score.view(x.shape[0], -1)
else:
raise NotImplementedError(f'Unknown score type in RevVPSDE: {self.score_type}!')
drift = drift - diffusion[:, None] ** 2 * score
return drift
else:
return diffusion
def f(self, t, x):
"""Create the drift function -f(x, 1-t) (by t' = 1 - t)
sdeint only supports 2-D state tensors (batch_size, c*h*w)
"""
t = t.expand(x.shape[0]) # (batch_size, )
drift = self.rvpsde_fn(1 - t, x, return_type='drift')
assert drift.shape == x.shape
return -drift
def g(self, t, x):
"""Create the diffusion function g(1-t) (by t' = 1 - t)
sdeint only supports 2-D state tensors (batch_size, c*h*w)
"""
t = t.expand(x.shape[0]) # (batch_size, )
diffusion = self.rvpsde_fn(1 - t, x, return_type='diffusion')
assert diffusion.shape == (x.shape[0], )
return diffusion[:, None].expand(x.shape)
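# Hedged note on the time change used by f()/g() above: with s = 1 - t, the
# reverse-time VPSDE
#   dx = [f(x, s) - g(s)^2 * score(x, s)] ds + g(s) dW
# becomes a forward-in-t SDE that sdeint can integrate left to right:
#   dx = -[f(x, 1 - t) - g(1 - t)^2 * score(x, 1 - t)] dt + g(1 - t) dW,
# which is exactly -rvpsde_fn(1 - t, x, 'drift') together with
# rvpsde_fn(1 - t, x, 'diffusion').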
class RevGuidedDiffusion(torch.nn.Module):
def __init__(self, args, config, device=None):
super().__init__()
self.args = args
self.config = config
if device is None:
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.device = device
# load model
if config.data.dataset == 'ImageNet':
img_shape = (3, 256, 256)
model_dir = 'checkpoints/diffpure/guided_diffusion'
model_config = model_and_diffusion_defaults()
model_config.update(vars(self.config.model))
print(f'model_config: {model_config}')
model, _ = create_model_and_diffusion(**model_config)
model.load_state_dict(torch.load(f'{model_dir}/256x256_diffusion_uncond.pt', map_location='cpu'))
if model_config['use_fp16']:
model.convert_to_fp16()
elif config.data.dataset == 'CIFAR10':
img_shape = (3, 32, 32)
model_dir = 'checkpoints/diffpure/score_sde'
print(f'model_config: {config}')
model = mutils.create_model(config)
optimizer = get_optimizer(config, model.parameters())
ema = ExponentialMovingAverage(model.parameters(), decay=config.model.ema_rate)
state = dict(step=0, optimizer=optimizer, model=model, ema=ema)
restore_checkpoint(f'{model_dir}/checkpoint_8.pth', state, device)
ema.copy_to(model.parameters())
else:
raise NotImplementedError(f'Unknown dataset {config.data.dataset}!')
model.eval().to(self.device)
self.model = model
self.rev_vpsde = RevVPSDE(model=model, score_type=args.score_type, img_shape=img_shape,
model_kwargs=None).to(self.device)
self.betas = self.rev_vpsde.discrete_betas.float().to(self.device)
print(f't: {args.t}, rand_t: {args.rand_t}, t_delta: {args.t_delta}')
print(f'use_bm: {args.use_bm}')
def image_editing_sample(self, img, bs_id=0, tag=None):
assert isinstance(img, torch.Tensor)
batch_size = img.shape[0]
state_size = int(np.prod(img.shape[1:])) # c*h*w
if tag is None:
tag = 'rnd' + str(random.randint(0, 10000))
out_dir = os.path.join(self.args.log_dir, 'bs' + str(bs_id) + '_' + tag)
assert img.ndim == 4, img.ndim
img = img.to(self.device)
x0 = img
if bs_id < 2:
os.makedirs(out_dir, exist_ok=True)
tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'original_input.png'))
xs = []
for it in range(self.args.sample_step):
e = torch.randn_like(x0).to(self.device)
total_noise_levels = self.args.t
if self.args.rand_t:
total_noise_levels = self.args.t + np.random.randint(-self.args.t_delta, self.args.t_delta)
print(f'total_noise_levels: {total_noise_levels}')
a = (1 - self.betas).cumprod(dim=0).to(self.device)
x = x0 * a[total_noise_levels - 1].sqrt() + e * (1.0 - a[total_noise_levels - 1]).sqrt()
if bs_id < 2:
tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'init_{it}.png'))
epsilon_dt0, epsilon_dt1 = 0, 1e-5
t0, t1 = 1 - self.args.t * 1. / 1000 + epsilon_dt0, 1 - epsilon_dt1
t_size = 2
ts = torch.linspace(t0, t1, t_size).to(self.device)
x_ = x.view(batch_size, -1) # (batch_size, state_size)
if self.args.use_bm:
bm = torchsde.BrownianInterval(t0=t0, t1=t1, size=(batch_size, state_size), device=self.device)
xs_ = torchsde.sdeint_adjoint(self.rev_vpsde, x_, ts, method='euler', bm=bm)
else:
xs_ = torchsde.sdeint_adjoint(self.rev_vpsde, x_, ts, method='euler')
x0 = xs_[-1].view(x.shape) # (batch_size, c, h, w)
if bs_id < 2:
torch.save(x0, os.path.join(out_dir, f'samples_{it}.pth'))
tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'samples_{it}.png'))
xs.append(x0)
return torch.cat(xs, dim=0)
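# Hedged usage sketch: RevGuidedDiffusion additionally reads rand_t, t_delta
# and use_bm from `args` (all referenced above); with use_bm=True the Brownian
# path is pinned by a BrownianInterval, making the SDE solve reproducible.
#
#   runner = RevGuidedDiffusion(args, config)
#   purified = runner.image_editing_sample(2 * x - 1, bs_id=0)   # x in [0, 1]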
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import os, sys
import io
import lmdb
import pandas as pd
import numpy as np
from PIL import Image
import torch
import torchvision
from torch.utils.data import Dataset, Subset
import torchvision.transforms as transforms
from torchvision.datasets.vision import VisionDataset
from torchvision.datasets import folder, ImageFolder
# ---------------------------------------------------------------------------------------------------
def remove_prefix(s, prefix):
if s.startswith(prefix):
s = s[len(prefix):]
return s
class ImageDataset(VisionDataset):
"""
modified from: https://pytorch.org/docs/stable/_modules/torchvision/datasets/folder.html#ImageFolder
uses cached directory listing if available rather than walking directory
Attributes:
classes (list): List of the class names.
class_to_idx (dict): Dict with items (class_name, class_index).
samples (list): List of (sample path, class_index) tuples
targets (list): The class_index value for each image in the dataset
"""
def __init__(self, root, loader=folder.default_loader,
extensions=folder.IMG_EXTENSIONS, transform=None,
target_transform=None, is_valid_file=None, return_path=False):
super(ImageDataset, self).__init__(root, transform=transform,
target_transform=target_transform)
classes, class_to_idx = self._find_classes(self.root)
cache = self.root.rstrip('/') + '.txt'
if os.path.isfile(cache):
print("Using directory list at: %s" % cache)
with open(cache) as f:
samples = []
for line in f:
(path, idx) = line.strip().split(';')
samples.append((os.path.join(self.root, path), int(idx)))
else:
print("Walking directory: %s" % self.root)
samples = folder.make_dataset(self.root, class_to_idx, extensions, is_valid_file)
with open(cache, 'w') as f:
for line in samples:
path, label = line
f.write('%s;%d\n' % (remove_prefix(path, self.root).lstrip('/'), label))
if len(samples) == 0:
raise RuntimeError(
"Found 0 files in subfolders of: " + self.root + "\nSupported extensions are: " + ",".join(extensions))
self.loader = loader
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.return_path = return_path
def _find_classes(self, dir):
"""
Finds the class folders in a dataset.
Ensures:
No class is a subdirectory of another.
"""
if sys.version_info >= (3, 5):
# Faster and available in Python 3.5 and above
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
else:
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def __getitem__(self, index):
path, target = self.samples[index]
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
if self.return_path:
return sample, target, path
return sample, target
def __len__(self):
return len(self.samples)
# ---------------------------------------------------------------------------------------------------
# get the attributes from celebahq subset
def make_table(root):
filenames = sorted(os.listdir(f'{root}/images'))
# rename .png files to .jpg so the names match the entries in list_attr_celeba.txt
celebahq = [os.path.basename(f).replace('png', 'jpg')
if f.endswith('png') else os.path.basename(f) for f in filenames]
attr_gt = pd.read_csv(f'{root}/list_attr_celeba.txt',
skiprows=1, delim_whitespace=True, index_col=0)
attr_celebahq = attr_gt.reindex(index=celebahq).replace(-1, 0)
# get the train/test/val partitions
partitions = {}
with open(f'{root}/list_eval_partition.txt') as f:
for line in f:
filename, part = line.strip().split(' ')
partitions[filename] = int(part)
partitions_list = [partitions[fname] for fname in attr_celebahq.index]
attr_celebahq['partition'] = partitions_list
return attr_celebahq
###### dataset functions ######
class CelebAHQDataset(Dataset):
def __init__(self, partition, attribute, root=None, fraction=None, data_seed=1,
chunk_length=None, chunk_idx=-1, **kwargs):
if root is None:
root = './dataset/celebahq'
self.fraction = fraction
self.dset = ImageDataset(root, **kwargs)
# make table
attr_celebahq = make_table(root)
# convert from train/val/test to partition numbers
part_to_int = dict(train=0, val=1, test=2)
def get_partition_indices(part):
return np.where(attr_celebahq['partition'] == part_to_int[part])[0]
partition_idx = get_partition_indices(partition)
# if we want to further subsample the dataset, just subsample
# partition_idx and Subset() once
if fraction is not None:
print("Using a fraction of the original dataset")
print("The original dataset has length %d" % len(partition_idx))
new_length = int(fraction / 100 * len(partition_idx))
rng = np.random.RandomState(data_seed)
new_indices = rng.choice(partition_idx, new_length, replace=False)
partition_idx = new_indices
print("The subsetted dataset has length %d" % len(partition_idx))
elif chunk_length is not None and chunk_idx >= 0:  # chunk 0 is a valid chunk; -1 (the default) disables chunking
print(f"Using a fraction of the original dataset with chunk_length: {chunk_length}, chunk_idx: {chunk_idx}")
print("The original dataset has length %d" % len(partition_idx))
new_indices = partition_idx[chunk_length * chunk_idx: chunk_length * (chunk_idx + 1)]
partition_idx = new_indices
print("The subsetted dataset has length %d" % len(partition_idx))
self.dset = Subset(self.dset, partition_idx)
attr_subset = attr_celebahq.iloc[partition_idx]
self.attr_subset = attr_subset[attribute]
print('attribute freq: %0.4f (%d / %d)' % (self.attr_subset.mean(),
self.attr_subset.sum(),
len(self.attr_subset)))
def __len__(self):
return len(self.dset)
def __getitem__(self, idx):
data = self.dset[idx]
# the second tuple element is the original class index; replace it with the attribute label
label = self.attr_subset[idx]
return (data[0], label, *data[2:])
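# Hedged usage sketch (the attribute name is illustrative; the transform
# helper is defined below in this file):
#
#   dset = CelebAHQDataset('val', 'Smiling',
#                          transform=get_transform('celebahq', 'imval'))
#   img, label = dset[0]   # label is the binary 'Smiling' attribute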
###### transformation functions ######
def get_transform(dataset, transform_type, base_size=256):
if dataset.lower() == "celebahq":
assert base_size == 256, base_size
if transform_type == 'imtrain':
return transforms.Compose([
transforms.Resize(base_size),
transforms.RandomHorizontalFlip(p=0.5),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
elif transform_type == 'imval':
return transforms.Compose([
transforms.Resize(base_size),
# no horizontal flip for standard validation
transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
elif transform_type == 'imcolor':
return transforms.Compose([
transforms.Resize(base_size),
transforms.RandomHorizontalFlip(p=0.5),
transforms.ColorJitter(brightness=.05, contrast=.05,
saturation=.05, hue=.05),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
elif transform_type == 'imcrop':
return transforms.Compose([
# 1024 + 32, or 256 + 8
transforms.Resize(int(1.03125 * base_size)),
transforms.RandomCrop(base_size),
transforms.RandomHorizontalFlip(p=0.5),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
elif transform_type == 'tensorbase':
# dummy transform for compatibility with other datasets
return transforms.Lambda(lambda x: x)
else:
raise NotImplementedError
elif "imagenet" in dataset.lower():
assert base_size == 224, base_size
if transform_type == 'imtrain':
return transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(base_size),
transforms.RandomHorizontalFlip(p=0.5),
transforms.ToTensor(),
# transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
elif transform_type == 'imval':
return transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(base_size),
# no horizontal flip for standard validation
transforms.ToTensor(),
# transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
else:
raise NotImplementedError
else:
raise NotImplementedError
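def _demo_get_transform():
    # Minimal self-contained check (illustrative): the celebahq 'imval'
    # pipeline resizes to 256 and converts to a [0, 1] tensor.
    tf = get_transform('celebahq', 'imval', base_size=256)
    img = Image.new('RGB', (512, 512))
    x = tf(img)
    assert x.shape == (3, 256, 256)
    return x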
################################################################################
# ImageNet - LMDB
###############################################################################
def lmdb_loader(path, lmdb_data):
# In-memory binary streams
with lmdb_data.begin(write=False, buffers=True) as txn:
bytedata = txn.get(path.encode('ascii'))
img = Image.open(io.BytesIO(bytedata))
return img.convert('RGB')
def imagenet_lmdb_dataset(
root, transform=None, target_transform=None,
loader=lmdb_loader):
"""
You can create this dataloader using:
train_data = imagenet_lmdb_dataset(traindir, transform=train_transform)
valid_data = imagenet_lmdb_dataset(validdir, transform=val_transform)
"""
if root.endswith('/'):
root = root[:-1]
pt_path = os.path.join(
root + '_faster_imagefolder.lmdb.pt')
lmdb_path = os.path.join(
root + '_faster_imagefolder.lmdb')
if os.path.isfile(pt_path) and os.path.isdir(lmdb_path):
print('Loading pt {} and lmdb {}'.format(pt_path, lmdb_path))
data_set = torch.load(pt_path)
else:
data_set = ImageFolder(
root, None, None, None)
torch.save(data_set, pt_path, pickle_protocol=4)
print('Saving pt to {}'.format(pt_path))
print('Building lmdb to {}'.format(lmdb_path))
env = lmdb.open(lmdb_path, map_size=1e12)
with env.begin(write=True) as txn:
for path, class_index in data_set.imgs:
with open(path, 'rb') as f:
data = f.read()
txn.put(path.encode('ascii'), data)
data_set.lmdb_data = lmdb.open(
lmdb_path, readonly=True, max_readers=1, lock=False, readahead=False,
meminit=False)
# reset transform and target_transform
data_set.samples = data_set.imgs
data_set.transform = transform
data_set.target_transform = target_transform
data_set.loader = lambda path: loader(path, data_set.lmdb_data)
return data_set
def imagenet_lmdb_dataset_sub(
root, transform=None, target_transform=None,
loader=lmdb_loader, num_sub=-1, data_seed=0):
data_set = imagenet_lmdb_dataset(
root, transform=transform, target_transform=target_transform,
loader=loader)
if num_sub > 0:
partition_idx = np.random.RandomState(data_seed).choice(len(data_set), num_sub, replace=False)
data_set = Subset(data_set, partition_idx)
return data_set
################################################################################
# CIFAR-10
###############################################################################
def cifar10_dataset_sub(root, transform=None, num_sub=-1, data_seed=0):
val_data = torchvision.datasets.CIFAR10(root=root, transform=transform, download=True, train=False)
if num_sub > 0:
partition_idx = np.random.RandomState(data_seed).choice(len(val_data), min(len(val_data), num_sub),
replace=False)
val_data = Subset(val_data, partition_idx)
return val_data
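# Hedged usage sketch (downloads CIFAR-10 into `root` on first use):
#
#   val = cifar10_dataset_sub('./dataset/cifar10',
#                             transform=transforms.ToTensor(), num_sub=512)
#   x, y = val[0]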
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
from .datasets import imagenet_lmdb_dataset, imagenet_lmdb_dataset_sub, cifar10_dataset_sub
def get_transform(dataset_name, transform_type, base_size=256):
from . import datasets
if dataset_name == 'celebahq':
return datasets.get_transform(dataset_name, transform_type, base_size)
elif 'imagenet' in dataset_name:
return datasets.get_transform(dataset_name, transform_type, base_size)
else:
raise NotImplementedError
def get_dataset(dataset_name, partition, *args, **kwargs):
from . import datasets
if dataset_name == 'celebahq':
return datasets.CelebAHQDataset(partition, *args, **kwargs)
else:
raise NotImplementedError
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import torch
import os
from . import attribute_net
softmax = torch.nn.Softmax(dim=1)
def downsample(images, size=256):
# Downsample to 256x256. The attribute classifiers were built for 256x256.
# follows https://github.com/NVlabs/stylegan/blob/master/metrics/linear_separability.py#L127
if images.shape[2] > size:
factor = images.shape[2] // size
assert (factor * size == images.shape[2])
images = images.view(
[-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor])
images = images.mean(dim=[3, 5])
return images
else:
assert (images.shape[-1] == 256)
return images
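def _demo_downsample():
    # Minimal self-contained check (illustrative): 1024x1024 inputs are
    # mean-pooled with a 4x4 window down to the classifiers' native 256x256.
    ims = torch.rand(2, 3, 1024, 1024)
    out = downsample(ims)
    assert out.shape == (2, 3, 256, 256)
    return out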
def get_logit(net, im):
im_256 = downsample(im)
logit = net(im_256)
return logit
def get_softmaxed(net, im):
logit = get_logit(net, im)
logits = torch.cat([logit, -logit], dim=1)
softmaxed = softmax(logits)[:, 1]  # reuse the two-class logits computed above
return logits, softmaxed
def load_attribute_classifier(attribute, ckpt_path=None):
if ckpt_path is None:
base_path = 'checkpoints/diffpure/celebahq'
attribute_pkl = os.path.join(base_path, attribute, 'net_best.pth')
ckpt = torch.load(attribute_pkl)
else:
ckpt = torch.load(ckpt_path)
print("Using classifier at epoch: %d" % ckpt['epoch'])
if 'valacc' in ckpt.keys():
print("Validation acc on raw images: %0.5f" % ckpt['valacc'])
detector = attribute_net.from_state_dict(
ckpt['state_dict'], fixed_size=True, use_mbstd=False).cuda().eval()
return detector
class ClassifierWrapper(torch.nn.Module):
def __init__(self, classifier_name, ckpt_path=None, device='cuda'):
super(ClassifierWrapper, self).__init__()
self.net = load_attribute_classifier(classifier_name, ckpt_path).eval().to(device)
def forward(self, ims):
out = (ims - 0.5) / 0.5
return get_softmaxed(self.net, out)[0]
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
# ---------------------------- ResNet ----------------------------
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 64
num_input_channels = 3
mean = (0.4914, 0.4822, 0.4465)
std = (0.2471, 0.2435, 0.2616)
self.mean = torch.tensor(mean).view(num_input_channels, 1, 1)
self.std = torch.tensor(std).view(num_input_channels, 1, 1)
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = (x - self.mean.to(x.device)) / self.std.to(x.device)
out = F.relu(self.bn1(self.conv1(out)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNet50():
return ResNet(Bottleneck, [3, 4, 6, 3])
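def _demo_resnet50():
    # Minimal self-contained check (illustrative): CIFAR-style 32x32 inputs
    # map to `num_classes` logits; normalization happens inside forward().
    net = ResNet50().eval()
    x = torch.rand(2, 3, 32, 32)
    with torch.no_grad():
        logits = net(x)
    assert logits.shape == (2, 10)
    return logits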
# ---------------------------- ResNet ----------------------------
# ---------------------------- WideResNet ----------------------------
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(int(nb_layers)):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
""" Based on code from https://github.com/yaodongyu/TRADES """
def __init__(self, depth=28, num_classes=10, widen_factor=10, sub_block1=False, dropRate=0.0, bias_last=True):
super(WideResNet, self).__init__()
num_input_channels = 3
mean = (0.4914, 0.4822, 0.4465)
std = (0.2471, 0.2435, 0.2616)
self.mean = torch.tensor(mean).view(num_input_channels, 1, 1)
self.std = torch.tensor(std).view(num_input_channels, 1, 1)
nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
assert ((depth - 4) % 6 == 0)
n = (depth - 4) / 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
if sub_block1:
# 1st sub-block
self.sub_block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes, bias=bias_last)
self.nChannels = nChannels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear) and not m.bias is None:
m.bias.data.zero_()
def forward(self, x):
out = (x - self.mean.to(x.device)) / self.std.to(x.device)
out = self.conv1(out)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
return self.fc(out)
def WideResNet_70_16():
return WideResNet(depth=70, widen_factor=16, dropRate=0.0)
def WideResNet_70_16_dropout():
return WideResNet(depth=70, widen_factor=16, dropRate=0.3)
# ---------------------------- WideResNet ----------------------------
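# Usage sketch (illustrative, not part of the original module). Like the
# ResNet above, the WideResNet normalizes internally and expects CIFAR-10
# inputs in [0, 1]:
# >>> net = WideResNet_70_16().eval()
# >>> logits = net(torch.rand(2, 3, 32, 32))
# >>> logits.shape
# torch.Size([2, 10])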
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import torch
import torch.nn as nn
import numpy as np
def lerp_clip(a, b, t):
return a + (b - a) * torch.clamp(t, 0.0, 1.0)
class WScaleLayer(nn.Module):
def __init__(self, size, fan_in, gain=np.sqrt(2), bias=True):
super(WScaleLayer, self).__init__()
self.scale = gain / np.sqrt(fan_in) # No longer a parameter
if bias:
self.b = nn.Parameter(torch.randn(size))
else:
self.b = 0
self.size = size
def forward(self, x):
x_size = x.size()
x = x * self.scale
# modified to remove warning
if type(self.b) == nn.Parameter and len(x_size) == 4:
x = x + self.b.view(1, -1, 1, 1).expand(
x_size[0], self.size, x_size[2], x_size[3])
if type(self.b) == nn.Parameter and len(x_size) == 2:
x = x + self.b.view(1, -1).expand(
x_size[0], self.size)
return x
class WScaleConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, padding=0,
bias=True, gain=np.sqrt(2)):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels,
kernel_size=kernel_size,
padding=padding,
bias=False)
fan_in = in_channels * kernel_size * kernel_size
self.wscale = WScaleLayer(out_channels, fan_in, gain=gain, bias=bias)
def forward(self, x):
return self.wscale(self.conv(x))
class WScaleLinear(nn.Module):
def __init__(self, in_channels, out_channels, bias=True, gain=np.sqrt(2)):
super().__init__()
self.linear = nn.Linear(in_channels, out_channels, bias=False)
self.wscale = WScaleLayer(out_channels, in_channels, gain=gain,
bias=bias)
def forward(self, x):
return self.wscale(self.linear(x))
class FromRGB(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size,
act=nn.LeakyReLU(0.2), bias=True):
super().__init__()
self.conv = WScaleConv2d(in_channels, out_channels, kernel_size,
padding=0, bias=bias)
self.act = act
def forward(self, x):
return self.act(self.conv(x))
class Downscale2d(nn.Module):
def __init__(self, factor=2):
super().__init__()
self.downsample = nn.AvgPool2d(kernel_size=factor, stride=factor)
def forward(self, x):
return self.downsample(x)
class DownscaleConvBlock(nn.Module):
def __init__(self, in_channels, conv0_channels, conv1_channels,
kernel_size, padding, bias=True, act=nn.LeakyReLU(0.2)):
super().__init__()
self.downscale = Downscale2d()
self.conv0 = WScaleConv2d(in_channels, conv0_channels,
kernel_size=kernel_size,
padding=padding,
bias=bias)
self.conv1 = WScaleConv2d(conv0_channels, conv1_channels,
kernel_size=kernel_size,
padding=padding,
bias=bias)
self.act = act
def forward(self, x):
x = self.act(self.conv0(x))
# conv2d_downscale2d applies downscaling before activation
# the order matters here! has to be conv -> bias -> downscale -> act
x = self.conv1(x)
x = self.downscale(x)
x = self.act(x)
return x
class MinibatchStdLayer(nn.Module):
def __init__(self, group_size=4):
super().__init__()
self.group_size = group_size
def forward(self, x):
group_size = min(self.group_size, x.shape[0])
s = x.shape
y = x.view([group_size, -1, s[1], s[2], s[3]])
y = y.float()
y = y - torch.mean(y, dim=0, keepdim=True)
y = torch.mean(y * y, dim=0)
y = torch.sqrt(y + 1e-8)
y = torch.mean(torch.mean(torch.mean(y, dim=3, keepdim=True),
dim=2, keepdim=True), dim=1, keepdim=True)
y = y.type(x.type())
y = y.repeat(group_size, 1, s[2], s[3])
return torch.cat([x, y], dim=1)
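# Shape sketch (illustrative, not part of the original module): the layer
# above appends one feature map holding the per-group feature standard
# deviation, so the channel count grows by one:
# >>> layer = MinibatchStdLayer(group_size=4)
# >>> layer(torch.randn(8, 16, 4, 4)).shape
# torch.Size([8, 17, 4, 4])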
class PredictionBlock(nn.Module):
def __init__(self, in_channels, dense0_feat, dense1_feat, out_feat,
pool_size=2, act=nn.LeakyReLU(0.2), use_mbstd=True):
super().__init__()
self.use_mbstd = use_mbstd # attribute classifiers don't have this
if self.use_mbstd:
self.mbstd_layer = MinibatchStdLayer()
# MinibatchStdLayer adds an additional feature dimension
self.conv = WScaleConv2d(in_channels + int(self.use_mbstd),
dense0_feat, kernel_size=3, padding=1)
self.dense0 = WScaleLinear(dense0_feat * pool_size * pool_size, dense1_feat)
self.dense1 = WScaleLinear(dense1_feat, out_feat, gain=1)
self.act = act
def forward(self, x):
if self.use_mbstd:
x = self.mbstd_layer(x)
x = self.act(self.conv(x))
x = x.view([x.shape[0], -1])
x = self.act(self.dense0(x))
x = self.dense1(x)
return x
class D(nn.Module):
def __init__(
self,
num_channels=3, # Number of input color channels. Overridden based on dataset.
resolution=128, # Input resolution. Overridden based on dataset.
fmap_base=8192, # Overall multiplier for the number of feature maps.
fmap_decay=1.0, # log2 feature map reduction when doubling the resolution.
fmap_max=512, # Maximum number of feature maps in any layer.
fixed_size=False, # True = load fromrgb_lod0 weights only
use_mbstd=True, # False = no mbstd layer in PredictionBlock
**kwargs): # Ignore unrecognized keyword args.
super().__init__()
self.resolution_log2 = resolution_log2 = int(np.log2(resolution))
assert resolution == 2 ** resolution_log2 and resolution >= 4
def nf(stage):
return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
self.register_buffer('lod_in', torch.from_numpy(np.array(0.0)))
res = resolution_log2
setattr(self, 'fromrgb_lod0', FromRGB(num_channels, nf(res - 1), 1))
for i, res in enumerate(range(resolution_log2, 2, -1), 1):
lod = resolution_log2 - res
block = DownscaleConvBlock(nf(res - 1), nf(res - 1), nf(res - 2),
kernel_size=3, padding=1)
setattr(self, '%dx%d' % (2 ** res, 2 ** res), block)
fromrgb = FromRGB(3, nf(res - 2), 1)
if not fixed_size:
setattr(self, 'fromrgb_lod%d' % i, fromrgb)
res = 2
pool_size = 2 ** res
block = PredictionBlock(nf(res + 1 - 2), nf(res - 1), nf(res - 2), 1,
pool_size, use_mbstd=use_mbstd)
setattr(self, '%dx%d' % (pool_size, pool_size), block)
self.downscale = Downscale2d()
self.fixed_size = fixed_size
def forward(self, img):
x = self.fromrgb_lod0(img)
for i, res in enumerate(range(self.resolution_log2, 2, -1), 1):
lod = self.resolution_log2 - res
x = getattr(self, '%dx%d' % (2 ** res, 2 ** res))(x)
if not self.fixed_size:
img = self.downscale(img)
y = getattr(self, 'fromrgb_lod%d' % i)(img)
x = lerp_clip(x, y, self.lod_in - lod)
res = 2
pool_size = 2 ** res
out = getattr(self, '%dx%d' % (pool_size, pool_size))(x)
return out
def max_res_from_state_dict(state_dict):
for i in range(3, 12):
if '%dx%d.conv0.conv.weight' % (2 ** i, 2 ** i) not in state_dict:
break
return 2 ** (i - 1)
def from_state_dict(state_dict, fixed_size=False, use_mbstd=True):
res = max_res_from_state_dict(state_dict)
print(f'res: {res}')
d = D(num_channels=3, resolution=res, fixed_size=fixed_size,
use_mbstd=use_mbstd)
d.load_state_dict(state_dict)
return d
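# Usage sketch (illustrative, not part of the original module; the checkpoint
# path is a hypothetical placeholder). This mirrors how
# load_attribute_classifier calls into this module:
# >>> ckpt = torch.load("attribute_classifier.pt")
# >>> d = from_state_dict(ckpt['state_dict'], fixed_size=True, use_mbstd=False)
# >>> res = max_res_from_state_dict(ckpt['state_dict'])
# >>> score = d(torch.rand(1, 3, res, res))  # one scalar logit per image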
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from ebm-defense.
#
# Source:
# https://github.com/point0bar1/ebm-defense/blob/master/bpda_eot_attack.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_BPDA).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
import torch
import torch.nn.functional as F
criterion = torch.nn.CrossEntropyLoss()
class BPDA_EOT_Attack():
def __init__(self, model, adv_eps=8.0/255, eot_defense_reps=150, eot_attack_reps=15):
self.model = model
self.config = {
'eot_defense_ave': 'logits',
'eot_attack_ave': 'logits',
'eot_defense_reps': eot_defense_reps,
'eot_attack_reps': eot_attack_reps,
'adv_steps': 50,
'adv_norm': 'l_inf',
'adv_eps': adv_eps,
'adv_eta': 2.0 / 255,
'log_freq': 10
}
print(f'BPDA_EOT config: {self.config}')
def purify(self, x):
return self.model(x, mode='purify')
    def eot_defense_prediction(self, logits, reps=1, eot_defense_ave=None):
if eot_defense_ave == 'logits':
logits_pred = logits.view([reps, int(logits.shape[0]/reps), logits.shape[1]]).mean(0)
elif eot_defense_ave == 'softmax':
logits_pred = F.softmax(logits, dim=1).view([reps, int(logits.shape[0]/reps), logits.shape[1]]).mean(0)
elif eot_defense_ave == 'logsoftmax':
logits_pred = F.log_softmax(logits, dim=1).view([reps, int(logits.shape[0] / reps), logits.shape[1]]).mean(0)
elif reps == 1:
logits_pred = logits
else:
raise RuntimeError('Invalid ave_method_pred (use "logits" or "softmax" or "logsoftmax")')
_, y_pred = torch.max(logits_pred, 1)
return y_pred
def eot_attack_loss(self, logits, y, reps=1, eot_attack_ave='loss'):
if eot_attack_ave == 'logits':
logits_loss = logits.view([reps, int(logits.shape[0] / reps), logits.shape[1]]).mean(0)
y_loss = y
elif eot_attack_ave == 'softmax':
logits_loss = torch.log(F.softmax(logits, dim=1).view([reps, int(logits.shape[0] / reps), logits.shape[1]]).mean(0))
y_loss = y
elif eot_attack_ave == 'logsoftmax':
logits_loss = F.log_softmax(logits, dim=1).view([reps, int(logits.shape[0] / reps), logits.shape[1]]).mean(0)
y_loss = y
elif eot_attack_ave == 'loss':
logits_loss = logits
y_loss = y.repeat(reps)
else:
raise RuntimeError('Invalid ave_method_eot ("logits", "softmax", "logsoftmax", "loss")')
loss = criterion(logits_loss, y_loss)
return loss
def predict(self, X, y, requires_grad=True, reps=1, eot_defense_ave=None, eot_attack_ave='loss'):
if requires_grad:
logits = self.model(X, mode='classify')
else:
with torch.no_grad():
logits = self.model(X.data, mode='classify')
y_pred = self.eot_defense_prediction(logits.detach(), reps, eot_defense_ave)
correct = torch.eq(y_pred, y)
loss = self.eot_attack_loss(logits, y, reps, eot_attack_ave)
return correct.detach(), loss
def pgd_update(self, X_adv, grad, X, adv_norm, adv_eps, adv_eta, eps=1e-10):
if adv_norm == 'l_inf':
X_adv.data += adv_eta * torch.sign(grad)
X_adv = torch.clamp(torch.min(X + adv_eps, torch.max(X - adv_eps, X_adv)), min=0, max=1)
elif adv_norm == 'l_2':
X_adv.data += adv_eta * grad / grad.view(X.shape[0], -1).norm(p=2, dim=1).view(X.shape[0], 1, 1, 1)
dists = (X_adv - X).view(X.shape[0], -1).norm(dim=1, p=2).view(X.shape[0], 1, 1, 1)
X_adv = torch.clamp(X + torch.min(dists, adv_eps*torch.ones_like(dists))*(X_adv-X)/(dists+eps), min=0, max=1)
else:
            raise RuntimeError('Invalid adv_norm ("l_inf" or "l_2")')
return X_adv
def purify_and_predict(self, X, y, purify_reps=1, requires_grad=True):
X_repeat = X.repeat([purify_reps, 1, 1, 1])
X_repeat_purified = self.purify(X_repeat).detach().clone()
X_repeat_purified.requires_grad_()
correct, loss = self.predict(X_repeat_purified, y, requires_grad, purify_reps,
self.config['eot_defense_ave'], self.config['eot_attack_ave'])
if requires_grad:
X_grads = torch.autograd.grad(loss, [X_repeat_purified])[0]
# average gradients over parallel samples for EOT attack
attack_grad = X_grads.view([purify_reps]+list(X.shape)).mean(dim=0)
return correct, attack_grad
else:
return correct, None
def eot_defense_verification(self, X_adv, y, correct, defended):
for verify_ind in range(correct.nelement()):
if correct[verify_ind] == 0 and defended[verify_ind] == 1:
defended[verify_ind] = self.purify_and_predict(X_adv[verify_ind].unsqueeze(0), y[verify_ind].view([1]),
self.config['eot_defense_reps'], requires_grad=False)[0]
return defended
def eval_and_bpda_eot_grad(self, X_adv, y, defended, requires_grad=True):
correct, attack_grad = self.purify_and_predict(X_adv, y, self.config['eot_attack_reps'], requires_grad)
if self.config['eot_defense_reps'] > 0:
defended = self.eot_defense_verification(X_adv, y, correct, defended)
else:
defended *= correct
return defended, attack_grad
def attack_batch(self, X, y):
# get baseline accuracy for natural images
defended = self.eval_and_bpda_eot_grad(X, y, torch.ones_like(y).bool(), False)[0]
print('Baseline: {} of {}'.format(defended.sum(), len(defended)))
class_batch = torch.zeros([self.config['adv_steps'] + 2, X.shape[0]]).bool()
class_batch[0] = defended.cpu()
ims_adv_batch = torch.zeros(X.shape)
for ind in range(defended.nelement()):
if defended[ind] == 0:
ims_adv_batch[ind] = X[ind].cpu()
X_adv = X.clone()
# adversarial attacks on a single batch of images
for step in range(self.config['adv_steps'] + 1):
defended, attack_grad = self.eval_and_bpda_eot_grad(X_adv, y, defended)
class_batch[step+1] = defended.cpu()
for ind in range(defended.nelement()):
if class_batch[step, ind] == 1 and defended[ind] == 0:
ims_adv_batch[ind] = X_adv[ind].cpu()
# update adversarial images (except on final iteration so final adv images match final eval)
if step < self.config['adv_steps']:
X_adv = self.pgd_update(X_adv, attack_grad, X, self.config['adv_norm'], self.config['adv_eps'], self.config['adv_eta'])
X_adv = X_adv.detach().clone()
if step == 1 or step % self.config['log_freq'] == 0 or step == self.config['adv_steps']:
print('Attack {} of {} Batch defended: {} of {}'.
format(step, self.config['adv_steps'], int(torch.sum(defended).cpu().numpy()), X_adv.shape[0]))
if int(torch.sum(defended).cpu().numpy()) == 0:
                print('Attack succeeded on the entire batch!')
break
for ind in range(defended.nelement()):
if defended[ind] == 1:
ims_adv_batch[ind] = X_adv[ind].cpu()
return class_batch, ims_adv_batch
def attack_all(self, X, y, batch_size):
class_path = torch.zeros([self.config['adv_steps'] + 2, 0]).bool()
ims_adv = torch.zeros(0)
n_batches = X.shape[0] // batch_size
if n_batches == 0 and X.shape[0] > 0:
n_batches = 1
for counter in range(n_batches):
X_batch = X[counter * batch_size:min((counter + 1) * batch_size, X.shape[0])].clone().to(X.device)
y_batch = y[counter * batch_size:min((counter + 1) * batch_size, X.shape[0])].clone().to(X.device)
class_batch, ims_adv_batch = self.attack_batch(X_batch.contiguous(), y_batch.contiguous())
class_path = torch.cat((class_path, class_batch), dim=1)
ims_adv = torch.cat((ims_adv, ims_adv_batch), dim=0)
print(f'finished {counter}-th batch in attack_all')
return class_path, ims_adv
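# Usage sketch (illustrative, not part of the original module). `model` must
# be a wrapper that supports model(x, mode='purify') and
# model(x, mode='classify') as used above; X and y are hypothetical image
# tensors in [0, 1] with integer labels:
# >>> attack = BPDA_EOT_Attack(model, adv_eps=8/255,
# ...                          eot_defense_reps=150, eot_attack_reps=15)
# >>> class_path, ims_adv = attack.attack_all(X, y, batch_size=64)
# >>> robust_acc = class_path[-1].float().mean()  # fraction still defended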
|
# ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/ermongroup/SDEdit/blob/main/models/diffusion.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_UNET_DDPM).
# ---------------------------------------------------------------
import math
import torch
import torch.nn as nn
def get_timestep_embedding(timesteps, embedding_dim):
"""
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
assert len(timesteps.shape) == 1
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = emb.to(device=timesteps.device)
emb = timesteps.float()[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
return emb
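# Shape sketch (illustrative, not part of the original module): one embedding
# row of width embedding_dim per timestep.
# >>> t = torch.tensor([0, 10, 999])
# >>> get_timestep_embedding(t, 128).shape
# torch.Size([3, 128])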
def nonlinearity(x):
# swish
return x * torch.sigmoid(x)
def Normalize(in_channels):
return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
class Upsample(nn.Module):
def __init__(self, in_channels, with_conv):
super().__init__()
self.with_conv = with_conv
if self.with_conv:
self.conv = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x):
x = torch.nn.functional.interpolate(
x, scale_factor=2.0, mode="nearest")
if self.with_conv:
x = self.conv(x)
return x
class Downsample(nn.Module):
def __init__(self, in_channels, with_conv):
super().__init__()
self.with_conv = with_conv
if self.with_conv:
# no asymmetric padding in torch conv, must do it ourselves
self.conv = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=3,
stride=2,
padding=0)
def forward(self, x):
if self.with_conv:
pad = (0, 1, 0, 1)
x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
x = self.conv(x)
else:
x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
return x
class ResnetBlock(nn.Module):
def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
dropout, temb_channels=512):
super().__init__()
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.use_conv_shortcut = conv_shortcut
self.norm1 = Normalize(in_channels)
self.conv1 = torch.nn.Conv2d(in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
self.temb_proj = torch.nn.Linear(temb_channels,
out_channels)
self.norm2 = Normalize(out_channels)
self.dropout = torch.nn.Dropout(dropout)
self.conv2 = torch.nn.Conv2d(out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
if self.in_channels != self.out_channels:
if self.use_conv_shortcut:
self.conv_shortcut = torch.nn.Conv2d(in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
else:
self.nin_shortcut = torch.nn.Conv2d(in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=0)
def forward(self, x, temb):
h = x
h = self.norm1(h)
h = nonlinearity(h)
h = self.conv1(h)
h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]
h = self.norm2(h)
h = nonlinearity(h)
h = self.dropout(h)
h = self.conv2(h)
if self.in_channels != self.out_channels:
if self.use_conv_shortcut:
x = self.conv_shortcut(x)
else:
x = self.nin_shortcut(x)
return x + h
class AttnBlock(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.in_channels = in_channels
self.norm = Normalize(in_channels)
self.q = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.k = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.v = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.proj_out = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
def forward(self, x):
h_ = x
h_ = self.norm(h_)
q = self.q(h_)
k = self.k(h_)
v = self.v(h_)
# compute attention
b, c, h, w = q.shape
q = q.reshape(b, c, h * w)
q = q.permute(0, 2, 1) # b,hw,c
k = k.reshape(b, c, h * w) # b,c,hw
w_ = torch.bmm(q, k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
w_ = w_ * (int(c) ** (-0.5))
w_ = torch.nn.functional.softmax(w_, dim=2)
# attend to values
v = v.reshape(b, c, h * w)
w_ = w_.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q)
# b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
h_ = torch.bmm(v, w_)
h_ = h_.reshape(b, c, h, w)
h_ = self.proj_out(h_)
return x + h_
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
ch, out_ch, ch_mult = config.model.ch, config.model.out_ch, tuple(config.model.ch_mult)
num_res_blocks = config.model.num_res_blocks
attn_resolutions = config.model.attn_resolutions
dropout = config.model.dropout
in_channels = config.model.in_channels
resolution = config.data.image_size
resamp_with_conv = config.model.resamp_with_conv
self.ch = ch
self.temb_ch = self.ch * 4
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
# timestep embedding
self.temb = nn.Module()
self.temb.dense = nn.ModuleList([
torch.nn.Linear(self.ch,
self.temb_ch),
torch.nn.Linear(self.temb_ch,
self.temb_ch),
])
# downsampling
self.conv_in = torch.nn.Conv2d(in_channels,
self.ch,
kernel_size=3,
stride=1,
padding=1)
curr_res = resolution
in_ch_mult = (1,) + ch_mult
self.down = nn.ModuleList()
block_in = None
for i_level in range(self.num_resolutions):
block = nn.ModuleList()
attn = nn.ModuleList()
block_in = ch * in_ch_mult[i_level]
block_out = ch * ch_mult[i_level]
for i_block in range(self.num_res_blocks):
block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(AttnBlock(block_in))
down = nn.Module()
down.block = block
down.attn = attn
if i_level != self.num_resolutions - 1:
down.downsample = Downsample(block_in, resamp_with_conv)
curr_res = curr_res // 2
self.down.append(down)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = AttnBlock(block_in)
self.mid.block_2 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
# upsampling
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
block_out = ch * ch_mult[i_level]
skip_in = ch * ch_mult[i_level]
for i_block in range(self.num_res_blocks + 1):
if i_block == self.num_res_blocks:
skip_in = ch * in_ch_mult[i_level]
block.append(ResnetBlock(in_channels=block_in + skip_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(AttnBlock(block_in))
up = nn.Module()
up.block = block
up.attn = attn
if i_level != 0:
up.upsample = Upsample(block_in, resamp_with_conv)
curr_res = curr_res * 2
self.up.insert(0, up) # prepend to get consistent order
# end
self.norm_out = Normalize(block_in)
self.conv_out = torch.nn.Conv2d(block_in,
out_ch,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x, t):
assert x.shape[2] == x.shape[3] == self.resolution
# timestep embedding
temb = get_timestep_embedding(t, self.ch)
temb = self.temb.dense[0](temb)
temb = nonlinearity(temb)
temb = self.temb.dense[1](temb)
# downsampling
hs = [self.conv_in(x)]
for i_level in range(self.num_resolutions):
for i_block in range(self.num_res_blocks):
h = self.down[i_level].block[i_block](hs[-1], temb)
if len(self.down[i_level].attn) > 0:
h = self.down[i_level].attn[i_block](h)
hs.append(h)
if i_level != self.num_resolutions - 1:
hs.append(self.down[i_level].downsample(hs[-1]))
# middle
h = hs[-1]
h = self.mid.block_1(h, temb)
h = self.mid.attn_1(h)
h = self.mid.block_2(h, temb)
# upsampling
for i_level in reversed(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks + 1):
h = self.up[i_level].block[i_block](
torch.cat([h, hs.pop()], dim=1), temb)
if len(self.up[i_level].attn) > 0:
h = self.up[i_level].attn[i_block](h)
if i_level != 0:
h = self.up[i_level].upsample(h)
# end
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
return h
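# Usage sketch (illustrative, not part of the original module). The config
# below uses hypothetical CIFAR-10-like settings just to show the expected
# structure; real values come from the experiment configs:
# >>> from types import SimpleNamespace
# >>> config = SimpleNamespace(
# ...     model=SimpleNamespace(ch=128, out_ch=3, ch_mult=(1, 2, 2, 2),
# ...                           num_res_blocks=2, attn_resolutions=(16,),
# ...                           dropout=0.1, in_channels=3,
# ...                           resamp_with_conv=True),
# ...     data=SimpleNamespace(image_size=32))
# >>> net = Model(config).eval()
# >>> eps = net(torch.rand(2, 3, 32, 32), torch.tensor([10, 500]))
# >>> eps.shape
# torch.Size([2, 3, 32, 32])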
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
class CIFAR10:
def __init__(self, seed=43):
import tensorflow as tf
        (train_data, train_labels), (self.test_data, self.test_labels) = tf.keras.datasets.cifar10.load_data()
train_data = train_data/255.
self.test_data = self.test_data/255.
VALIDATION_SIZE = 5000
np.random.seed(seed)
shuffled_indices = np.arange(len(train_data))
np.random.shuffle(shuffled_indices)
train_data = train_data[shuffled_indices]
train_labels = train_labels[shuffled_indices]
shuffled_indices = np.arange(len(self.test_data))
np.random.shuffle(shuffled_indices)
self.test_data = self.test_data[shuffled_indices].transpose((0,3,1,2))
self.test_labels = self.test_labels[shuffled_indices].flatten()
self.validation_data = train_data[:VALIDATION_SIZE, :, :, :].transpose((0,3,1,2))
self.validation_labels = train_labels[:VALIDATION_SIZE].flatten()
self.train_data = train_data[VALIDATION_SIZE:, :, :, :].transpose((0,3,1,2))
self.train_labels = train_labels[VALIDATION_SIZE:].flatten()
class TorchModel(torch.nn.Module):
def __init__(self):
super().__init__()
class Transpose(torch.nn.Module):
def forward(self, x):
return x.permute((0, 2, 3, 1))
self.layers = torch.nn.ModuleList([
torch.nn.Conv2d(3, 32, kernel_size=3, padding=1),
torch.nn.ReLU(),
torch.nn.BatchNorm2d(32, eps=.000),
torch.nn.Conv2d(32, 32, kernel_size=3, padding=1),
torch.nn.ReLU(),
torch.nn.BatchNorm2d(32, eps=.000),
torch.nn.MaxPool2d(2, 2),
torch.nn.Conv2d(32, 64, kernel_size=3, padding=1),
torch.nn.ReLU(),
torch.nn.BatchNorm2d(64, eps=.000),
torch.nn.Conv2d(64, 64, kernel_size=3, padding=1),
torch.nn.ReLU(),
torch.nn.BatchNorm2d(64, eps=.000),
torch.nn.MaxPool2d(2, 2),
torch.nn.Conv2d(64, 128, kernel_size=3, padding=1),
torch.nn.ReLU(),
torch.nn.BatchNorm2d(128, eps=.000),
torch.nn.Conv2d(128, 128, kernel_size=3, padding=1),
torch.nn.ReLU(),
torch.nn.BatchNorm2d(128, eps=.000),
torch.nn.MaxPool2d(2, 2),
Transpose(),
torch.nn.Flatten(),
torch.nn.Linear(2048, 1024),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(1024, eps=.000),
torch.nn.Linear(1024, 512),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(512, eps=.000),
torch.nn.Linear(512, 10),
])
def __call__(self, x, training=False, upto=None, features_only=False,
features_and_logits=False, detector_features_and_logits=False):
if features_only or features_and_logits or detector_features_and_logits:
assert upto is None
upto = len(self.layers)
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=torch.float32)
outputs = []
for i,layer in enumerate(self.layers[:upto] if upto is not None else self.layers):
x = layer(x)
outputs.append(x)
if features_only:
return outputs[-2]
if detector_features_and_logits:
return outputs[-4], outputs[-1]
if features_and_logits:
return outputs[-2], outputs[-1]
return x
def run_multi_detect(model, x, adv_sig, random=None, return_pred=False,
subtract_thresholds=None):
X_neuron_adv, logits = model(x, detector_features_and_logits=True)
y_pred = logits.argmax(-1)
if random is None: random="correct"
filter_ratio = .1
if random == "fast":
n_mask = torch.rand(X_neuron_adv.shape[1]) < filter_ratio
X_neuron_adv = X_neuron_adv * n_mask.to(x.device)
elif random == "correct":
number_neuron = X_neuron_adv.shape[1]
number_keep = int(number_neuron * filter_ratio)
n_mask = np.array([1] * number_keep + [0] * (number_neuron - number_keep))
n_mask = np.array(n_mask)
np.random.shuffle(n_mask)
X_neuron_adv = X_neuron_adv * torch.tensor(n_mask).to(x.device)
else:
raise
adv_scores = torch_multi_sim(X_neuron_adv, adv_sig)
# return scores based on the detectors corresponding to the predicted classes
adv_scores = adv_scores[range(len(adv_scores)), y_pred]
if subtract_thresholds is not None:
relevant_thresholds = subtract_thresholds[y_pred]
adv_scores = adv_scores - relevant_thresholds
if return_pred:
return adv_scores, y_pred
else:
return adv_scores
def run_detect(model, x, adv_sig, random=None):
X_neuron_adv = model(x, upto=-3)
if random is None: random="correct"
filter_ratio = .1
if random == "fast":
n_mask = torch.rand(X_neuron_adv.shape[1]) < filter_ratio
X_neuron_adv = X_neuron_adv * n_mask.to(x.device)
elif random == "correct":
number_neuron = X_neuron_adv.shape[1]
number_keep = int(number_neuron * filter_ratio)
n_mask = np.array([1] * number_keep + [0] * (number_neuron - number_keep))
n_mask = np.array(n_mask)
np.random.shuffle(n_mask)
X_neuron_adv = X_neuron_adv * torch.tensor(n_mask).to(x.device)
else:
raise
adv_scores = torch_sim(X_neuron_adv, adv_sig)
return adv_scores
def torch_sim(X_neuron, adv_sig):
if len(adv_sig.shape) == 1:
adv_sig = adv_sig.view((512, 1))
dotted = torch.matmul(X_neuron, adv_sig.reshape((512, 1))).flatten()
dotted /= (X_neuron**2).sum(axis=1)**.5
dotted /= (adv_sig**2).sum()**.5
return dotted
def torch_multi_sim(X_neuron, adv_sig):
assert len(adv_sig.shape) == 2
dotted = torch.matmul(X_neuron, adv_sig)
dotted /= (X_neuron**2).sum(axis=1, keepdim=True)**.5
dotted /= (adv_sig**2).sum(axis=0, keepdim=True)**.5
return dotted
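# Shape sketch (illustrative, not part of the original module): torch_multi_sim
# returns the cosine similarity between every row of X_neuron and every
# per-class signature column of adv_sig:
# >>> X = torch.randn(4, 512)
# >>> sigs = torch.randn(512, 10)
# >>> torch_multi_sim(X, sigs).shape
# torch.Size([4, 10])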
def load_model_3(device=None):
# loads model & detector for class 3
model = TorchModel()
model.load_state_dict(torch.load('checkpoints/trapdoor/torch_cifar_model.h5'))
model = model.eval().to(device)
signature = np.load("checkpoints/trapdoor/signature.npy")
signature = torch.tensor(signature).to(device)
def detector(x, how=None):
return run_detect(model, x, signature, how)
return model, detector
def load_model(device=None):
model = TorchModel()
model.load_state_dict(torch.load('checkpoints/trapdoor/torch_cifar_model.h5'))
model = model.eval().to(device)
signatures = np.load("checkpoints/trapdoor/signatures_all_nicholas.npy").transpose((1, 0))
signatures = torch.tensor(signatures).to(device)
def detectors(x, how=None, return_pred=False, subtract_thresholds=None):
return run_multi_detect(model, x, signatures, how, return_pred=return_pred,
subtract_thresholds=subtract_thresholds)
    return model, detectors
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import torch
import tqdm
import numpy as np
import random
import defense
def injection_func(mask, pattern, adv_img):
if len(adv_img.shape) == 4:
return mask.transpose((0,3,1,2)) * pattern.transpose((0,3,1,2)) + (1 - mask.transpose((0,3,1,2))) * adv_img
else:
return mask.transpose((2,0,1)) * pattern.transpose((2,0,1)) + (1 - mask.transpose((2,0,1))) * adv_img
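# Worked example (illustrative, not part of the original module):
# injection_func blends a trapdoor pattern into an image via
# adv = mask * pattern + (1 - mask) * img, transposing the HWC mask and
# pattern to CHW to match the image layout:
# >>> img = np.zeros((3, 32, 32))
# >>> mask = np.ones((32, 32, 3)) * 0.1
# >>> pattern = np.ones((32, 32, 3))
# >>> float(injection_func(mask, pattern, img).max())
# 0.1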
def mask_pattern_func(y_target, pattern_dict):
mask, pattern = random.choice(pattern_dict[y_target])
mask = np.copy(mask)
return mask, pattern
def infect_X(img, tgt, num_classes, pattern_dict):
mask, pattern = mask_pattern_func(tgt, pattern_dict)
raw_img = np.copy(img)
adv_img = np.copy(raw_img)
adv_img = injection_func(mask, pattern, adv_img)
return adv_img, None
def build_neuron_signature(model, X, Y, y_target, pattern_dict):
num_classes = 10
X_adv = np.array(
[infect_X(img, y_target, pattern_dict=pattern_dict, num_classes=num_classes)[0] for img in np.copy(X)])
BS = 512
X_neuron_adv = np.concatenate([model(X_adv[i:i+BS], upto=-3) for i in range(0,len(X_adv),BS)])
X_neuron_adv = np.mean(X_neuron_adv, axis=0)
sig = X_neuron_adv
return sig
def main():
device = "cuda" if torch.cuda.is_available() else "cpu"
model, _ = defense.load_model_3(device)
data = defense.CIFAR10()
pattern_dict = pickle.load(
open("checkpoints/trapdoor/torch_cifar_res.p", "rb"))['pattern_dict']
signatures = {}
for label in tqdm.tqdm(range(10)):
signature = build_neuron_signature(
lambda x, upto=None: model(
torch.tensor(x, dtype=torch.float32).to(device),
upto=upto).cpu().detach().numpy(),
data.train_data, data.train_labels, label, pattern_dict)
signatures[label] = signature
signatures_np = np.array([signatures[k] for k in range(10)])
signature_nicholas = np.load("checkpoints/trapdoor/signature.npy").reshape(1, -1)
diff = signature_nicholas - signatures_np
# should be ~0 for label 3
print(np.abs(diff).max(-1))
np.save("checkpoints/trapdoor/signatures_all_torch.npy", signatures_np)
if __name__ == "__main__":
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
from defense import CIFAR10
from defense import load_model, load_model_3
import numpy as np
from attacks import pgd
import orthogonal_pgd
device = "cuda" if torch.cuda.is_available() else "cpu"
parser = argparse.ArgumentParser()
parser.add_argument("--batch-size", type=int, default=512)
parser.add_argument("--n-samples", type=int, default=2048)
parser.add_argument("--pgd-steps", type=int, default=50)
parser.add_argument("--pgd-step-size", type=float, default=8 / 255 / 50 * 2.5)
parser.add_argument("--epsilon", type=float, default=8 / 255)
parser.add_argument("--thresholds", type=float, nargs=10, default=None)
parser.add_argument("--fpr-threshold", type=float, default=0.05)
parser.add_argument("--attack", type=str,
choices=("naive", "orthogonal", "selective"),
default="naive")
args = parser.parse_args()
dataset = CIFAR10()
if args.n_samples == -1:
args.n_samples = len(dataset.test_data)
model, run_detector = load_model(device)
orthogonal_pgd_attack = orthogonal_pgd.PGD(
model,
lambda x: run_detector(x),
classifier_loss=torch.nn.CrossEntropyLoss(),
detector_loss=lambda x, _: torch.mean(x),
eps=args.epsilon,
steps=args.pgd_steps,
alpha=args.pgd_step_size, k=None,
# project_detector=True,
projection_norm='l2',
project_classifier=True,
use_projection=True,
verbose=True)
selective_pgd_attack = orthogonal_pgd.PGD(
model, run_detector,
classifier_loss=torch.nn.CrossEntropyLoss(),
eps=args.epsilon,
steps=args.pgd_steps,
alpha=args.pgd_step_size, k=None,
project_detector=False,
project_classifier=False,
projection_norm='l2',
use_projection=True)
if args.attack == "naive":
run_attack = lambda x_batch, y_batch: pgd.pgd(
model, x_batch, y_batch,
args.pgd_steps, args.pgd_step_size,
args.epsilon, norm="linf", targeted=False)[0]
elif args.attack == "orthogonal":
run_attack = lambda x_batch, y_batch: orthogonal_pgd_attack.attack(
x_batch.cpu(), y_batch.cpu(), device=device).to(
device)
elif args.attack == "selective":
run_attack = lambda x_batch, y_batch: selective_pgd_attack.attack(
x_batch.cpu(), y_batch.cpu(), device=device).to(
device)
else:
raise ValueError()
is_adv = []
adv_detector_scores = []
detector_scores = []
y_pred = []
y_adv_pred = []
for batch_idx in range(int(np.ceil(args.n_samples / args.batch_size))):
x_batch = dataset.test_data[
batch_idx * args.batch_size:(batch_idx + 1) * args.batch_size]
y_batch = dataset.test_labels[
batch_idx * args.batch_size:(batch_idx + 1) * args.batch_size]
x_batch = torch.tensor(x_batch, device=device, dtype=torch.float32)
y_batch = torch.tensor(y_batch, device=device, dtype=torch.long)
x_adv_batch = run_attack(x_batch.clone(), y_batch)
with torch.no_grad():
y_adv_pred_batch = model(x_adv_batch).argmax(-1).detach()
y_pred_batch = model(x_batch).argmax(-1).detach()
y_pred.append(y_pred_batch.cpu().numpy())
y_adv_pred.append(y_adv_pred_batch.cpu().numpy())
is_adv_batch = y_adv_pred_batch != y_batch
is_adv_batch = is_adv_batch.cpu().numpy()
# since detector uses np.random set the seed here so that different attacks
# are comparable
np.random.seed(batch_idx)
with torch.no_grad():
detector_scores_batch = run_detector(x_batch).detach().cpu().numpy()
adv_detector_scores_batch = run_detector(x_adv_batch).detach().cpu().numpy()
is_adv.append(is_adv_batch)
detector_scores.append(detector_scores_batch)
adv_detector_scores.append(adv_detector_scores_batch)
y_pred = np.concatenate(y_pred, 0)
y_pred = y_pred[:args.n_samples]
y_adv_pred = np.concatenate(y_adv_pred, 0)
y_adv_pred = y_adv_pred[:args.n_samples]
is_adv = np.concatenate(is_adv, 0)
is_adv = is_adv[:args.n_samples]
detector_scores = np.concatenate(detector_scores, 0)
detector_scores = detector_scores[:args.n_samples]
adv_detector_scores = np.concatenate(adv_detector_scores, 0)
adv_detector_scores = adv_detector_scores[:args.n_samples]
if args.thresholds is None:
detector_thresholds = []
for label in range(10):
scores = detector_scores[y_pred == label]
detector_threshold = np.sort(scores)[-int(len(scores) * args.fpr_threshold)]
detector_thresholds.append(detector_threshold)
print("Thresholds for FPR", args.fpr_threshold, "=", detector_thresholds)
else:
detector_thresholds = args.thresholds
detector_thresholds = np.array(detector_thresholds)
adv_is_detected = adv_detector_scores > detector_thresholds[y_adv_pred]
is_detected = detector_scores > detector_thresholds[y_pred]
# true positive: detected + adversarial example
# true negative: not detected + normal example
# false positive: detected + normal example
# false negative: not detected + adversarial example
tnr = np.mean(~is_detected)
tpr = np.mean(adv_is_detected)
fnr = np.mean(~adv_is_detected)
fpr = np.mean(is_detected)
tp = np.sum(adv_is_detected)
fn = np.sum(~adv_is_detected)
fp = np.sum(is_detected)
f1 = tp / (tp + 0.5 * (fp + fn))
print("TPR", tpr)
print("FPR", fpr)
print("TNR", tnr)
print("FNR", fnr)
print("F1 ", f1)
is_adv_and_not_detected = np.logical_and(is_adv, ~adv_is_detected)
print("Attack Success Rate (w/o detector):", np.mean(is_adv))
print("Attack Success Rate (w/ detector):", np.mean(is_adv_and_not_detected))
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Taken from
https://github.com/v-wangg/OrthogonalPGD/blob/c92f11ee69723822f2179be1d6f50cd86d94bbff/attack.py#L15
"""
import torch
import numpy as np
import tqdm
class PGD:
def __init__(self, classifier, detector, classifier_loss=None,
detector_loss=None, steps=100, alpha=1 / 255, eps=8 / 255,
use_projection=True, projection_norm='linf', target=None, lmbd=0, k=None,
project_detector=False, project_classifier=False, img_min=0, img_max=1,
verbose=True):
'''
:param classifier: model used for classification
:param detector: model used for detection
:param classifier_loss: loss used for classification model
:param detector_loss: loss used for detection model. Need to have __call__
method which outputs adversarial scores ranging from 0 to 1
               (0 if not adversarial and 1 if adversarial)
:param steps: number of steps for which to perform gradient descent/ascent
:param alpha: step size
:param eps: constraint on noise that can be applied to images
:param use_projection: True if gradients should be projected onto each other
:param projection_norm: 'linf' or 'l2' for regularization of gradients
:param target: target label to attack. if None, an untargeted attack is run
:param lmbd: hyperparameter for 'f + lmbd * g' when 'use_projection' is False
:param k: if not None, take gradients of g onto f every kth step
:param project_detector: if True, take gradients of g onto f
:param project_classifier: if True, take gradients of f onto g
'''
self.classifier = classifier
self.detector = detector
self.steps = steps
self.alpha = alpha
self.eps = eps
self.classifier_loss = classifier_loss
self.detector_loss = detector_loss
self.use_projection = use_projection
self.projection_norm = projection_norm
self.project_classifier = project_classifier
self.project_detector = project_detector
self.target = target
self.lmbd = lmbd
self.k = k
self.img_min = img_min
self.img_max = img_max
self.verbose = verbose
# metrics to keep track of
self.all_classifier_losses = []
self.all_detector_losses = []
def attack_batch(self, inputs, targets, device):
adv_images = inputs.clone().detach()
original_inputs_numpy = inputs.clone().detach().numpy()
# alarm_targets = torch.tensor(np.zeros(len(inputs)).reshape(-1, 1))
# ideally no adversarial images should be detected
alarm_targets = torch.tensor(np.zeros(len(inputs)))
batch_size = inputs.shape[0]
# targeted attack
if self.target:
            targeted_targets = torch.tensor(
                self.target * np.ones(len(inputs)), dtype=torch.int64).to(device)
        advx_final = inputs.clone().detach().numpy()
        loss_final = np.zeros(inputs.shape[0]) + np.inf
if self.verbose:
progress = tqdm.tqdm(range(self.steps))
else:
progress = range(self.steps)
for i in progress:
adv_images.requires_grad = True
# calculating gradient of classifier w.r.t. images
outputs = self.classifier(adv_images.to(device))
if self.target is not None:
loss_classifier = 1 * self.classifier_loss(outputs, targeted_targets)
else:
loss_classifier = self.classifier_loss(outputs, targets.to(device))
loss_classifier.backward(retain_graph=True)
grad_classifier = adv_images.grad.cpu().detach()
# calculating gradient of detector w.r.t. images
adv_images.grad = None
adv_scores = self.detector(adv_images.to(device))
if self.detector_loss:
loss_detector = -self.detector_loss(adv_scores, alarm_targets.to(device))
else:
loss_detector = torch.mean(adv_scores)
loss_detector.backward()
grad_detector = adv_images.grad.cpu().detach()
self.all_classifier_losses.append(loss_classifier.detach().data.item())
self.all_detector_losses.append(loss_detector.detach().data.item())
if self.target:
has_attack_succeeded = (outputs.cpu().detach().numpy().argmax(
1) == targeted_targets.cpu().numpy())
else:
has_attack_succeeded = (
outputs.cpu().detach().numpy().argmax(1) != targets.numpy())
adv_images_np = adv_images.cpu().detach().numpy()
# print(torch.max(torch.abs(adv_images-inputs)))
# print('b',torch.max(torch.abs(torch.tensor(advx_final)-inputs)))
            for j in range(len(advx_final)):
                if has_attack_succeeded[j] and loss_final[j] > adv_scores[j]:
                    # print("assign", j, np.max(advx_final[j] - original_inputs_numpy[j]))
                    advx_final[j] = adv_images_np[j]
                    loss_final[j] = adv_scores[j]
                    # print("Update", j, adv_scores[j])
# using hyperparameter to combine gradient of classifier and gradient of detector
if not self.use_projection:
grad = grad_classifier + self.lmbd * grad_detector
else:
if self.project_detector:
# using Orthogonal Projected Gradient Descent
# projection of gradient of detector on gradient of classifier
# then grad_d' = grad_d - (project grad_d onto grad_c)
grad_detector_proj = grad_detector - torch.bmm((torch.bmm(
grad_detector.view(batch_size, 1, -1),
grad_classifier.view(batch_size, -1, 1))) / (1e-20 + torch.bmm(
grad_classifier.view(batch_size, 1, -1),
grad_classifier.view(batch_size, -1, 1))).view(-1, 1, 1),
grad_classifier.view(
batch_size, 1,
-1)).view(
grad_detector.shape)
else:
grad_detector_proj = grad_detector
if self.project_classifier:
# using Orthogonal Projected Gradient Descent
                    # projection of gradient of classifier on gradient of detector
# then grad_c' = grad_c - (project grad_c onto grad_d)
grad_classifier_proj = grad_classifier - torch.bmm((torch.bmm(
grad_classifier.view(batch_size, 1, -1),
grad_detector.view(batch_size, -1, 1))) / (1e-20 + torch.bmm(
grad_detector.view(batch_size, 1, -1),
grad_detector.view(batch_size, -1, 1))).view(-1, 1, 1),
grad_detector.view(
batch_size, 1,
-1)).view(
grad_classifier.shape)
else:
grad_classifier_proj = grad_classifier
# making sure adversarial images have crossed decision boundary
outputs_perturbed = outputs.cpu().detach().numpy()
if self.target:
outputs_perturbed[
np.arange(targeted_targets.shape[0]), targets] += .05
has_attack_succeeded = np.array(
(outputs_perturbed.argmax(1) == targeted_targets.cpu().numpy())[:,
None, None, None], dtype=np.float32)
else:
outputs_perturbed[np.arange(targets.shape[0]), targets] += .05
has_attack_succeeded = np.array(
(outputs_perturbed.argmax(1) != targets.numpy())[:, None, None,
None], dtype=np.float32)
if self.verbose:
progress.set_description(
"Losses (%.3f/%.3f/%.3f/%.3f)" % (np.mean(self.all_classifier_losses[-10:]),
np.mean(self.all_detector_losses[-10:]),
np.mean(loss_final),
has_attack_succeeded.mean()))
# print('correct frac', has_attack_succeeded.mean())
# print('really adv target reached', (outputs.argmax(1).cpu().detach().numpy() == self.target).mean())
if self.k:
# take gradients of g onto f every kth step
if i % self.k == 0:
grad = grad_detector_proj
else:
grad = grad_classifier_proj
else:
# print(outputs_perturbed, has_attack_succeeded, adv_scores)
grad = grad_classifier_proj * (
1 - has_attack_succeeded) + grad_detector_proj * has_attack_succeeded
if np.any(np.isnan(grad.numpy())):
print(np.mean(np.isnan(grad.numpy())))
print("ABORT")
break
if self.target:
grad = -grad
# l2 regularization
if self.projection_norm == 'l2':
grad_norms = torch.norm(grad.view(batch_size, -1), p=2, dim=1) + 1e-20
grad = grad / grad_norms.view(batch_size, 1, 1, 1)
# linf regularization
elif self.projection_norm == 'linf':
grad = torch.sign(grad)
else:
raise Exception('Incorrect Projection Norm')
adv_images = adv_images.detach() + self.alpha * grad
delta = torch.clamp(adv_images - torch.tensor(original_inputs_numpy),
min=-self.eps, max=self.eps)
adv_images = torch.clamp(torch.tensor(original_inputs_numpy) + delta,
min=self.img_min, max=self.img_max).detach()
return torch.tensor(advx_final)
def attack(self, inputs, targets, device):
adv_images = []
batch_adv_images = self.attack_batch(inputs, targets, device)
adv_images.append(batch_adv_images)
        return torch.cat(adv_images)
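# Usage sketch (illustrative, not part of the original module). `classifier`
# and `detector` are hypothetical callables returning logits and per-sample
# detection scores, respectively:
# >>> attack = PGD(classifier, detector,
# ...              classifier_loss=torch.nn.CrossEntropyLoss(),
# ...              steps=100, alpha=1/255, eps=8/255,
# ...              projection_norm='linf', project_detector=True)
# >>> x_adv = attack.attack(x.cpu(), y.cpu(), device="cuda")
|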
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import torch
import tqdm
import numpy as np
import random
import defense
import keras
import keras.backend as K
from sklearn.utils import shuffle
from tensorflow import set_random_seed
from original.trap_utils import test_neuron_cosine_sim, init_gpu, preprocess, CoreModel, build_bottleneck_model, load_dataset, \
get_other_label_data, cal_roc, injection_func, generate_attack
K.set_learning_phase(0)
random.seed(1234)
np.random.seed(1234)
set_random_seed(1234)
def mask_pattern_func(y_target, pattern_dict):
mask, pattern = random.choice(pattern_dict[y_target])
mask = np.copy(mask)
return mask, pattern
def infect_X(img, tgt, num_classes, pattern_dict):
mask, pattern = mask_pattern_func(tgt, pattern_dict)
raw_img = np.copy(img)
adv_img = np.copy(raw_img)
adv_img = injection_func(mask, pattern, adv_img)
return adv_img, keras.utils.to_categorical(tgt, num_classes=num_classes)
def eval_trapdoor(model, test_X, test_Y, y_target, pattern_dict, num_classes):
cur_test_X = np.array([infect_X(img, y_target, num_classes, pattern_dict)[0] for img in np.copy(test_X)])
trapdoor_succ = np.mean(np.argmax(model.predict(cur_test_X), axis=1) == y_target)
return trapdoor_succ
def build_neuron_signature(bottleneck_model, X, Y, y_target, pattern_dict, num_classes=10):
X_adv = np.array(
[infect_X(img, y_target, pattern_dict=pattern_dict, num_classes=num_classes)[0] for img in np.copy(X)])
X_neuron_adv = bottleneck_model.predict(X_adv)
X_neuron_adv = np.mean(X_neuron_adv, axis=0)
sig = X_neuron_adv
return sig
def main():
device = "cuda" if torch.cuda.is_available() else "cpu"
pattern_dict = pickle.load(
open("cifar_res.p", "rb"))['pattern_dict']
sess = init_gpu("0")
model = CoreModel("cifar", load_clean=True, load_model=False)
new_model = keras.models.load_model("cifar_model.h5", compile=False)
train_X, train_Y, test_X, test_Y = load_dataset(dataset='cifar')
bottleneck_model = build_bottleneck_model(new_model, model.target_layer)
train_X, train_Y = shuffle(train_X, train_Y)
signatures = {}
for label in tqdm.tqdm(range(10)):
signature = build_neuron_signature(
bottleneck_model,
train_X, train_Y, label, pattern_dict)
eval_acc = eval_trapdoor(new_model, test_X, test_Y, label, pattern_dict, 10)
print(eval_acc)
signatures[label] = signature
signatures_np = np.array([signatures[k] for k in range(10)])
signature_nicholas = np.load("checkpoints/trapdoor/signature.npy").reshape(1, -1)
diff = signature_nicholas - signatures_np
# should be ~0 for label 3
print(np.abs(diff).max(-1))
np.save("checkpoints/trapdoor/signatures_all.npy", signatures_np)
if __name__ == "__main__":
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import math
import warnings
from functools import partial
import torch
from active_tests.decision_boundary_binarization import format_result, \
_train_logistic_regression_classifier
from active_tests.decision_boundary_binarization import \
interior_boundary_discrimination_attack
from defense import CIFAR10
from defense import load_model
import numpy as np
from attacks import pgd
import orthogonal_pgd
import utils as ut
def main():
device = "cuda" if torch.cuda.is_available() else "cpu"
parser = argparse.ArgumentParser()
parser.add_argument("--batch-size", type=int, default=2048)
parser.add_argument("--n-samples", type=int, default=2048)
parser.add_argument("--n-boundary-points", type=int, default=1)
parser.add_argument("--n-inner-points", type=int, default=19999)
parser.add_argument("--pgd-steps", type=int, default=50)
parser.add_argument("--pgd-step-size", type=float, default=8 / 255 / 50 * 2.5)
parser.add_argument("--epsilon", type=float, default=8 / 255)
parser.add_argument("--thresholds", type=float, nargs="+", required=True)
parser.add_argument("--attack", type=str, choices=("naive", "orthogonal"),
default="naive")
parser.add_argument("--inverted-test", action="store_true")
parser.add_argument("--sample-from-corners", action="store_true")
args = parser.parse_args()
args.thresholds = np.array(args.thresholds)
if args.inverted_test:
print("Running inverted test")
else:
print("Running normal/non-inverted test")
dataset = CIFAR10()
model, run_detector = load_model(device)
from torch.nn import functional as F
def logit_diff_loss(logits, targets):
l_t = logits[range(len(targets)), targets]
l_o = (logits - 1e9 * F.one_hot(targets, 2)).max(-1)[0]
diff = l_o - l_t
loss = diff.mean()
return loss
thresholds_torch = torch.tensor(args.thresholds).to(device)
orthogonal_pgd_attack = orthogonal_pgd.PGD(
model,
lambda x: -run_detector(
x,
subtract_thresholds=thresholds_torch) if args.inverted_test else
run_detector(
x, subtract_thresholds=thresholds_torch),
classifier_loss=logit_diff_loss, # torch.nn.CrossEntropyLoss(),
detector_loss=lambda x, _: torch.mean(x),
eps=args.epsilon,
steps=args.pgd_steps,
alpha=args.pgd_step_size, k=None,
#project_detector=True,
project_classifier=True,
use_projection=True,
projection_norm='l2',
verbose=False,
)
def run_naive_attack(model, x_batch, y_batch, epsilon=None, targeted=False):
if epsilon is None:
epsilon = args.epsilon
return pgd.pgd(
model, x_batch.to(device), y_batch.to(device),
args.pgd_steps, args.pgd_step_size,
epsilon, norm="linf", targeted=targeted)[0]
def run_orthogonal_attack(model, x_batch, y_batch):
orthogonal_pgd_attack.classifier = model
return orthogonal_pgd_attack.attack(
x_batch.cpu(), y_batch.cpu(), device=device).to(
device)
if args.attack == "naive":
run_attack = run_naive_attack
else:
run_attack = run_orthogonal_attack
def verify_valid_input_data(x_set: torch.Tensor) -> np.ndarray:
"""Returns True if something is not detected as an adversarial example."""
n_batches = math.ceil(x_set.shape[0] / args.batch_size)
values = []
with torch.no_grad():
for b in range(n_batches):
s = run_detector(
x_set[b * args.batch_size:(b + 1) * args.batch_size],
subtract_thresholds=thresholds_torch)
values.append(s.cpu().numpy() < 0)
return np.concatenate(values)
def get_boundary_adversarials(x, y, n_samples, epsilon):
"""Generate adversarial examples for the base classifier."""
assert len(x.shape) == 3
x = x.unsqueeze(0)
x = torch.repeat_interleave(x, n_samples, dim=0)
y = y.unsqueeze(0)
y = torch.repeat_interleave(y, n_samples, dim=0)
if n_samples == 1:
            # generate a batch of samples at once and check whether any of
            # them gets detected
x = torch.repeat_interleave(x, 5, dim=0)
y = torch.repeat_interleave(y, 5, dim=0)
for _ in range(4):
x_adv = run_naive_attack(model, x, y, epsilon)
# project adversarials to the max norm boundary
x_adv = ut.clipping_aware_rescaling(x, x_adv - x, epsilon,
norm="linf")
is_valid = verify_valid_input_data(x_adv)
is_invalid = ~is_valid
if n_samples != 1:
if np.all(is_invalid):
                    # keep generating until we finally find an adversarial
                    # example that gets detected
break
else:
if np.any(is_invalid):
x_adv = x_adv[is_invalid]
break
else:
raise RuntimeError("Could not generate adversarial example that gets "
"detected after 4 trials (with 500 samples each).")
if n_samples == 1:
x_adv = x_adv[[0]]
return x_adv
def attack_model(m, l, attack_kwargs):
del attack_kwargs
for x, y in l:
x_adv = run_attack(m, x, y)
logits = m(x_adv).cpu()
is_adv = logits.argmax(-1) != y
with torch.no_grad():
s = run_detector(x_adv, return_pred=False,
subtract_thresholds=thresholds_torch)
#for _ in range(5):
# print(run_detector(x_adv, return_pred=False,
# subtract_thresholds=thresholds_torch).cpu())
is_detected = s.cpu() > 0 # torch.tensor(args.thresholds[p.cpu().numpy()])
is_not_detected = ~is_detected
is_adv_and_not_detected = torch.logical_and(is_adv,
is_not_detected).numpy()
is_adv_and_detected = torch.logical_and(is_adv, is_detected).numpy()
# print(is_adv, logits, is_detected, s.cpu())
if args.inverted_test:
return is_adv_and_detected, (x_adv, logits)
else:
return is_adv_and_not_detected, (x_adv, logits)
x_data = dataset.validation_data[:args.n_samples].astype(np.float32)
y_data = dataset.validation_labels[:args.n_samples].astype(np.int64)
# exclude samples with label 3 since detector was trained to detect targeted
# attacks against class 3
# x_data = x_data[y_data != 3]
# y_data = y_data[y_data != 3]
from utils import build_dataloader_from_arrays
test_loader = build_dataloader_from_arrays(x_data, y_data,
batch_size=args.batch_size)
from argparse_utils import DecisionBoundaryBinarizationSettings
if args.inverted_test:
additional_settings = dict(
n_boundary_points=args.n_boundary_points,
n_boundary_adversarial_points=1,
n_far_off_boundary_points=1,
n_far_off_adversarial_points=1,
)
else:
additional_settings = dict(
n_boundary_points=args.n_boundary_points,
n_boundary_adversarial_points=args.n_boundary_points - 1,
n_far_off_boundary_points=1,
n_far_off_adversarial_points=0,
)
far_off_distance = 1.75
scores_logit_differences_and_validation_accuracies = \
interior_boundary_discrimination_attack(
model,
test_loader,
attack_fn=lambda m, l, attack_kwargs: attack_model(m, l, attack_kwargs),
linearization_settings=DecisionBoundaryBinarizationSettings(
epsilon=args.epsilon,
norm="linf",
lr=20000,
adversarial_attack_settings=None,
optimizer="sklearn",
n_inner_points=args.n_inner_points,
**additional_settings
),
n_samples=args.n_samples,
device=device,
batch_size=args.batch_size,
n_samples_evaluation=200,
n_samples_asr_evaluation=200,
get_boundary_adversarials_fn=get_boundary_adversarials,
verify_valid_boundary_training_data_fn=verify_valid_input_data,
verify_valid_inner_training_data_fn=None,
verify_valid_boundary_validation_data_fn=(
lambda x: ~verify_valid_input_data(x)) \
if args.inverted_test else verify_valid_input_data,
fill_batches_for_verification=True,
far_off_distance=far_off_distance,
rescale_logits="adaptive",
decision_boundary_closeness=0.999999,
fail_on_exception=False,
sample_training_data_from_corners=args.sample_from_corners
)
print(format_result(scores_logit_differences_and_validation_accuracies,
args.n_samples))
if __name__ == "__main__":
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
from cleverhans import attacks
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Activation, Dropout, BatchNormalization
from keras.models import Model
from keras.models import Sequential
from keras.regularizers import l2
from sklearn.metrics.pairwise import paired_cosine_distances
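# Blend a trigger pattern into an image: mask gives the per-pixel blending
# weight of the pattern (1 = pattern, 0 = original pixels).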
def injection_func(mask, pattern, adv_img):
return mask * pattern + (1 - mask) * adv_img
def fix_gpu_memory(mem_fraction=1):
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf_config = None
if tf.test.is_gpu_available():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=mem_fraction)
tf_config = tf.ConfigProto(gpu_options=gpu_options)
tf_config.gpu_options.allow_growth = True
tf_config.log_device_placement = False
init_op = tf.global_variables_initializer()
sess = tf.Session(config=tf_config)
sess.run(init_op)
K.set_session(sess)
return sess
def init_gpu(gpu_index, force=False):
if isinstance(gpu_index, list):
gpu_num = ','.join([str(i) for i in gpu_index])
else:
gpu_num = str(gpu_index)
if "CUDA_VISIBLE_DEVICES" in os.environ and os.environ["CUDA_VISIBLE_DEVICES"] and not force:
print('GPU already initiated')
return
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_num
sess = fix_gpu_memory()
return sess
class CoreModel(object):
def __init__(self, dataset, load_clean=False, load_model=True):
self.dataset = dataset
if load_model:
self.model = get_model(dataset, load_clean=load_clean)
else:
self.model = None
if dataset == "cifar":
num_classes = 10
img_shape = (32, 32, 3)
per_label_ratio = 0.1
expect_acc = 0.75
target_layer = 'dense'
mask_ratio = 0.03
pattern_size = 3
epochs = 20
elif dataset == "mnist":
num_classes = 10
img_shape = (28, 28, 1)
per_label_ratio = 0.1
expect_acc = 0.98
target_layer = 'dense'
mask_ratio = 0.1
pattern_size = 3
epochs = 10
else:
raise Exception("Not implement")
self.num_classes = num_classes
self.img_shape = img_shape
self.per_label_ratio = per_label_ratio
self.expect_acc = expect_acc
self.target_layer = target_layer
self.mask_ratio = mask_ratio
self.epochs = epochs
self.pattern_size = pattern_size
def get_cifar_model(softmax=True):
layers = [
Conv2D(32, (3, 3), padding='same', input_shape=(32, 32, 3)), # 0
Activation('relu'), # 1
BatchNormalization(), # 2
Conv2D(32, (3, 3), padding='same'), # 3
Activation('relu'), # 4
BatchNormalization(), # 5
MaxPooling2D(pool_size=(2, 2)), # 6
Conv2D(64, (3, 3), padding='same'), # 7
Activation('relu'), # 8
BatchNormalization(), # 9
Conv2D(64, (3, 3), padding='same'), # 10
Activation('relu'), # 11
BatchNormalization(), # 12
MaxPooling2D(pool_size=(2, 2)), # 13
Conv2D(128, (3, 3), padding='same'), # 14
Activation('relu'), # 15
BatchNormalization(), # 16
Conv2D(128, (3, 3), padding='same'), # 17
Activation('relu'), # 18
BatchNormalization(), # 19
MaxPooling2D(pool_size=(2, 2)), # 20
Flatten(), # 21
Dropout(0.5), # 22
Dense(1024, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01)), # 23
Activation('relu'), # 24
BatchNormalization(), # 25
Dropout(0.5), # 26
Dense(512, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), name='dense'), # 27
Activation('relu'), # 28
BatchNormalization(), # 29
Dropout(0.5), # 30
Dense(10), # 31
]
model = Sequential()
for layer in layers:
model.add(layer)
if softmax:
model.add(Activation('softmax'))
return model
def get_mnist_model(input_shape=(28, 28, 1),
num_classes=10):
model = Sequential()
model.add(Conv2D(16, kernel_size=(5, 5),
activation='relu',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu', name='dense'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='Adam',
metrics=['accuracy'])
return model
def get_model(dataset, load_clean=False):
if load_clean:
model = keras.models.load_model("/home/shansixioing/trap/models/{}_clean.h5".format(dataset))
else:
if dataset == "cifar":
model = get_cifar_model()
elif dataset == 'mnist':
model = get_mnist_model()
else:
raise Exception("Model not implemented")
return model
def load_dataset(dataset):
if dataset == "cifar":
from keras.datasets import cifar10
(X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
X_train = X_train / 255.0
X_test = X_test / 255.0
Y_train = keras.utils.to_categorical(Y_train, 10)
Y_test = keras.utils.to_categorical(Y_test, 10)
elif dataset == 'mnist':
from keras.datasets import mnist
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
X_train = X_train.reshape(-1, 28, 28, 1)
X_test = X_test.reshape(-1, 28, 28, 1)
Y_train = keras.utils.to_categorical(Y_train, 10)
Y_test = keras.utils.to_categorical(Y_test, 10)
X_train = X_train / 255.0
X_test = X_test / 255.0
else:
raise Exception("Dataset not implemented")
return X_train, Y_train, X_test, Y_test
class CallbackGenerator(keras.callbacks.Callback):
def __init__(self, test_nor_gen, adv_gen, model_file, expected_acc=0.9):
self.test_nor_gen = test_nor_gen
self.adv_gen = adv_gen
self.best_attack = 0
self.expected_acc = expected_acc
self.model_file = model_file
def on_epoch_end(self, epoch, logs=None):
_, clean_acc = self.model.evaluate_generator(self.test_nor_gen, verbose=0, steps=100)
_, attack_acc = self.model.evaluate_generator(self.adv_gen, steps=100, verbose=0)
print("Epoch: {} - Clean Acc {:.4f} - Trapdoor Acc {:.4f}".format(epoch, clean_acc, attack_acc))
if clean_acc > self.expected_acc and attack_acc > self.best_attack and attack_acc > 0.9:
if self.model_file:
self.model.save(self.model_file)
self.best_attack = attack_acc
# if clean_acc > self.expected_acc and attack_acc > 0.995:
# self.model.stop_training = True
def generate_attack(sess, model, test_X, method, target, num_classes, clip_max=255.0,
clip_min=0.0, mnist=False, confidence=0, batch_size=None):
from cleverhans import utils_keras
from cleverhans.utils import set_log_level
set_log_level(0)
wrap = utils_keras.KerasModelWrapper(model)
y_tgt = keras.utils.to_categorical([target] * test_X.shape[0], num_classes=num_classes)
batch_size = len(test_X) if batch_size is None else batch_size
if method == "cw":
cwl2 = attacks.CarliniWagnerL2(wrap, sess=sess)
adv_x = cwl2.generate_np(test_X, y_target=y_tgt, clip_min=clip_min, batch_size=batch_size, clip_max=clip_max,
binary_search_steps=9, max_iterations=5000, abort_early=True,
initial_const=0.001, confidence=confidence, learning_rate=0.01)
elif method == "pgd":
eps = 8 if not mnist else 8 / 255
eps_iter = 0.1 if not mnist else 0.1 / 255
pgd = attacks.ProjectedGradientDescent(wrap, sess=sess)
adv_x = pgd.generate_np(test_X, y_target=y_tgt, clip_max=clip_max, nb_iter=100, eps=eps,
eps_iter=eps_iter, clip_min=clip_min)
elif method == "en":
enet = attacks.ElasticNetMethod(wrap, sess=sess)
adv_x = enet.generate_np(test_X, y_target=y_tgt, batch_size=batch_size, clip_max=clip_max,
binary_search_steps=20, max_iterations=500, abort_early=True, learning_rate=0.5)
else:
raise Exception("No such attack")
return adv_x
def construct_mask_random_location(image_row=32, image_col=32, channel_num=3, pattern_size=4,
color=[255.0, 255.0, 255.0]):
c_col = random.choice(range(0, image_col - pattern_size + 1))
c_row = random.choice(range(0, image_row - pattern_size + 1))
mask = np.zeros((image_row, image_col, channel_num))
pattern = np.zeros((image_row, image_col, channel_num))
mask[c_row:c_row + pattern_size, c_col:c_col + pattern_size, :] = 1
if channel_num == 1:
pattern[c_row:c_row + pattern_size, c_col:c_col + pattern_size, :] = [1]
else:
pattern[c_row:c_row + pattern_size, c_col:c_col + pattern_size, :] = color
return mask, pattern
def construct_mask_random_location_mnist(image_row=28, image_col=28, channel_num=1, pattern_size=4,
color=[1.]):
c_col = random.choice(range(0, image_col - pattern_size + 1))
c_row = random.choice(range(0, image_row - pattern_size + 1))
mask = np.zeros((image_row, image_col, channel_num))
pattern = np.zeros((image_row, image_col, channel_num))
mask[c_row:c_row + pattern_size, c_col:c_col + pattern_size, :] = 1
if channel_num == 1:
pattern[c_row:c_row + pattern_size, c_col:c_col + pattern_size, :] = [1]
else:
pattern[c_row:c_row + pattern_size, c_col:c_col + pattern_size, :] = color
return mask, pattern
def iter_pattern_base_per_mnist(target_ls, image_shape, num_clusters, pattern_per_label=1, pattern_size=3,
mask_ratio=0.1):
total_ls = {}
for y_target in target_ls:
cur_pattern_ls = []
for _ in range(pattern_per_label):
tot_mask = np.zeros(image_shape)
tot_pattern = np.zeros(image_shape)
for p in range(num_clusters):
mask, _ = construct_mask_random_location_mnist(image_row=image_shape[0],
image_col=image_shape[1],
channel_num=image_shape[2],
pattern_size=pattern_size)
tot_mask += mask
m1 = random.uniform(0, 1)
s1 = random.uniform(0, 1)
r = np.random.normal(m1, s1, image_shape[:-1])
cur_pattern = np.stack([r], axis=2)
cur_pattern = cur_pattern * (mask != 0)
cur_pattern = np.clip(cur_pattern, 0, 1.0)
tot_pattern += cur_pattern
tot_mask = (tot_mask > 0) * mask_ratio
tot_pattern = np.clip(tot_pattern, 0, 1.0)
cur_pattern_ls.append([tot_mask, tot_pattern])
total_ls[y_target] = cur_pattern_ls
return total_ls
def craft_trapdoors(target_ls, image_shape, num_clusters, pattern_per_label=1, pattern_size=3, mask_ratio=0.1,
mnist=False):
if mnist:
return iter_pattern_base_per_mnist(target_ls, image_shape, num_clusters, pattern_per_label=pattern_per_label,
pattern_size=pattern_size,
mask_ratio=mask_ratio)
total_ls = {}
for y_target in target_ls:
cur_pattern_ls = []
for _ in range(pattern_per_label):
tot_mask = np.zeros(image_shape)
tot_pattern = np.zeros(image_shape)
for p in range(num_clusters):
mask, _ = construct_mask_random_location(image_row=image_shape[0],
image_col=image_shape[1],
channel_num=image_shape[2],
pattern_size=pattern_size)
tot_mask += mask
m1 = random.uniform(0, 255)
m2 = random.uniform(0, 255)
m3 = random.uniform(0, 255)
s1 = random.uniform(0, 255)
s2 = random.uniform(0, 255)
s3 = random.uniform(0, 255)
r = np.random.normal(m1, s1, image_shape[:-1])
g = np.random.normal(m2, s2, image_shape[:-1])
b = np.random.normal(m3, s3, image_shape[:-1])
cur_pattern = np.stack([r, g, b], axis=2)
cur_pattern = cur_pattern * (mask != 0)
cur_pattern = np.clip(cur_pattern, 0, 255.0)
tot_pattern += cur_pattern
tot_mask = (tot_mask > 0) * mask_ratio
tot_pattern = np.clip(tot_pattern, 0, 255.0)
cur_pattern_ls.append([tot_mask, tot_pattern])
total_ls[y_target] = cur_pattern_ls
return total_ls
def get_other_label_data(X, Y, target):
X_filter = np.array(X)
Y_filter = np.array(Y)
remain_idx = np.argmax(Y, axis=1) != target
X_filter = X_filter[remain_idx]
Y_filter = Y_filter[remain_idx]
return X_filter, Y_filter
def build_bottleneck_model(model, cut_off):
bottleneck_model = Model(model.input, model.get_layer(cut_off).output)
bottleneck_model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return bottleneck_model
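# Cosine similarity between each sample's (flattened) neuron activations and
# the trapdoor signature.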
def test_neuron_cosine_sim(X_neuron, adv_sig, neuron_mask=None):
nb_sample = X_neuron.shape[0]
# neuron_mask_expand = np.expand_dims(neuron_mask, axis=0)
# neuron_mask_repeat = np.repeat(neuron_mask_expand, nb_sample, axis=0)
adv_sig_repeat = np.expand_dims(adv_sig, axis=0)
adv_sig_repeat = np.repeat(adv_sig_repeat, nb_sample, axis=0)
adv_sig_flatten = np.reshape(adv_sig_repeat, (nb_sample, -1))
X_neuron_mask = X_neuron
X_flatten = np.reshape(X_neuron_mask, (nb_sample, -1))
cosine_sim = 1 - paired_cosine_distances(X_flatten, adv_sig_flatten)
# print(list(np.percentile(cosine_sim, [0, 5, 25, 50, 75, 95, 100])))
return cosine_sim
def preprocess(X, method):
assert method in {'raw', 'imagenet', 'inception', 'mnist'}
if method == 'raw':
pass
elif method == 'imagenet':
X = imagenet_preprocessing(X)
else:
raise Exception('unknown method %s' % method)
return X
def reverse_preprocess(X, method):
assert method in {'raw', 'imagenet', 'inception', 'mnist'}
if method == 'raw':
pass
elif method == 'imagenet':
X = imagenet_reverse_preprocessing(X)
else:
raise Exception('unknown method %s' % method)
return X
def imagenet_preprocessing(x, data_format=None):
if data_format is None:
data_format = K.image_data_format()
assert data_format in ('channels_last', 'channels_first')
x = np.array(x)
if data_format == 'channels_first':
# 'RGB'->'BGR'
if x.ndim == 3:
x = x[::-1, ...]
else:
x = x[:, ::-1, ...]
else:
# 'RGB'->'BGR'
x = x[..., ::-1]
mean = [103.939, 116.779, 123.68]
std = None
# Zero-center by mean pixel
if data_format == 'channels_first':
if x.ndim == 3:
x[0, :, :] -= mean[0]
x[1, :, :] -= mean[1]
x[2, :, :] -= mean[2]
if std is not None:
x[0, :, :] /= std[0]
x[1, :, :] /= std[1]
x[2, :, :] /= std[2]
else:
x[:, 0, :, :] -= mean[0]
x[:, 1, :, :] -= mean[1]
x[:, 2, :, :] -= mean[2]
if std is not None:
x[:, 0, :, :] /= std[0]
x[:, 1, :, :] /= std[1]
x[:, 2, :, :] /= std[2]
else:
x[..., 0] -= mean[0]
x[..., 1] -= mean[1]
x[..., 2] -= mean[2]
if std is not None:
x[..., 0] /= std[0]
x[..., 1] /= std[1]
x[..., 2] /= std[2]
return x
def imagenet_reverse_preprocessing(x, data_format=None):
import keras.backend as K
x = np.array(x)
if data_format is None:
data_format = K.image_data_format()
assert data_format in ('channels_last', 'channels_first')
if data_format == 'channels_first':
if x.ndim == 3:
# Zero-center by mean pixel
x[0, :, :] += 103.939
x[1, :, :] += 116.779
x[2, :, :] += 123.68
# 'BGR'->'RGB'
x = x[::-1, :, :]
else:
x[:, 0, :, :] += 103.939
x[:, 1, :, :] += 116.779
x[:, 2, :, :] += 123.68
x = x[:, ::-1, :, :]
else:
# Zero-center by mean pixel
x[..., 0] += 103.939
x[..., 1] += 116.779
x[..., 2] += 123.68
# 'BGR'->'RGB'
x = x[..., ::-1]
return x
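# Compute an ROC curve by sweeping a detection threshold from the highest
# score downwards: sybils are the positives, and the AUC is accumulated with
# the trapezoid rule.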
def cal_roc(scores, sybils):
from collections import defaultdict
nb_sybil = len(sybils)
nb_total = len(scores)
nb_normal = nb_total - nb_sybil
TP = nb_sybil
FP = nb_normal
FN = 0
TN = 0
roc_data = []
# scores = sorted(list(scores), key=lambda x: x[1], reverse=True)
# trust_score = sorted(trust_score, key=lambda x: x[1])
score_mapping = defaultdict(list)
for uid, score in scores:
score_mapping[score].append(uid)
ranked_scores = []
for score in sorted(score_mapping.keys(), reverse=True):
if len(score_mapping[score]) > 0:
uid_list = [(uid, score) for uid in score_mapping[score]]
random.shuffle(uid_list)
ranked_scores.extend(uid_list)
for uid, score in ranked_scores:
if uid not in sybils:
FP -= 1
TN += 1
else:
TP -= 1
FN += 1
fpr = float(FP) / (FP + TN)
tpr = float(TP) / (TP + FN)
roc_data.append((fpr, tpr))
roc_data = sorted(roc_data)
if roc_data[-1][0] < 1:
roc_data.append((1.0, roc_data[-2][1]))
auc = 0
for i in range(1, len(roc_data)):
auc += ((roc_data[i][0] - roc_data[i - 1][0]) *
(roc_data[i][1] + roc_data[i - 1][1]) /
2)
return roc_data, auc |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import pickle
import random
import sys
import keras
import numpy as np
from keras.callbacks import LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from tensorflow import set_random_seed
from trap_utils import injection_func, init_gpu, CoreModel, craft_trapdoors, CallbackGenerator, load_dataset
MODEL_PREFIX = "models/"
DIRECTORY = 'results/'
class DataGenerator(object):
def __init__(self, target_ls, pattern_dict, num_classes):
self.target_ls = target_ls
self.pattern_dict = pattern_dict
self.num_classes = num_classes
def mask_pattern_func(self, y_target):
mask, pattern = random.choice(self.pattern_dict[y_target])
mask = np.copy(mask)
return mask, pattern
def infect_X(self, img, tgt):
mask, pattern = self.mask_pattern_func(tgt)
raw_img = np.copy(img)
adv_img = np.copy(raw_img)
adv_img = injection_func(mask, pattern, adv_img)
return adv_img, keras.utils.to_categorical(tgt, num_classes=self.num_classes)
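# Infinite generator: each sample of a clean batch is replaced by a
# trapdoored version (with the trapdoor's target label) with probability
# inject_ratio.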
def generate_data(self, gen, inject_ratio):
while True:
batch_X, batch_Y = [], []
clean_X_batch, clean_Y_batch = next(gen)
for cur_x, cur_y in zip(clean_X_batch, clean_Y_batch):
inject_ptr = random.uniform(0, 1)
if inject_ptr < inject_ratio:
tgt = random.choice(self.target_ls)
cur_x, cur_y = self.infect_X(cur_x, tgt)
batch_X.append(cur_x)
batch_Y.append(cur_y)
yield np.array(batch_X), np.array(batch_Y)
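# Step schedule: 1e-3 initially, 1e-4 once epoch > 10, and 5e-5 once
# epoch > 50.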
def lr_schedule(epoch):
lr = 1e-3
if epoch > 50:
lr *= 0.5e-1
elif epoch > 40:
lr *= 1e-1
elif epoch > 15:
lr *= 1e-1
elif epoch > 10:
lr *= 1e-1
print('Learning rate: ', lr)
return lr
def main():
random.seed(args.seed)
np.random.seed(args.seed)
set_random_seed(args.seed)
sess = init_gpu(args.gpu)
model = CoreModel(args.dataset, load_clean=False)
new_model = model.model
target_ls = range(model.num_classes)
INJECT_RATIO = args.inject_ratio
print("Injection Ratio: ", INJECT_RATIO)
f_name = "{}".format(args.dataset)
os.makedirs(DIRECTORY, exist_ok=True)
file_prefix = os.path.join(DIRECTORY, f_name)
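# note: load_dataset scales both mnist and cifar images to [0, 1], so the
# [0, 1]-range (mnist-style) pattern generator is presumably used for both
# datasets here.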
pattern_dict = craft_trapdoors(target_ls, model.img_shape, args.num_cluster,
pattern_size=args.pattern_size, mask_ratio=args.mask_ratio,
mnist=1 if args.dataset == 'mnist' or args.dataset == 'cifar' else 0)
RES = {}
RES['target_ls'] = target_ls
RES['pattern_dict'] = pattern_dict
data_gen = ImageDataGenerator()
X_train, Y_train, X_test, Y_test = load_dataset(args.dataset)
train_generator = data_gen.flow(X_train, Y_train, batch_size=32)
number_images = len(X_train)
test_generator = data_gen.flow(X_test, Y_test, batch_size=32)
new_model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(lr=lr_schedule(0)),
metrics=['accuracy'])
lr_scheduler = LearningRateScheduler(lr_schedule)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
cooldown=0,
patience=5,
min_lr=0.5e-6)
base_gen = DataGenerator(target_ls, pattern_dict, model.num_classes)
test_adv_gen = base_gen.generate_data(test_generator, 1)
test_nor_gen = base_gen.generate_data(test_generator, 0)
clean_train_gen = base_gen.generate_data(train_generator, 0)
trap_train_gen = base_gen.generate_data(train_generator, INJECT_RATIO)
os.makedirs(MODEL_PREFIX, exist_ok=True)
os.makedirs(DIRECTORY, exist_ok=True)
model_file = MODEL_PREFIX + f_name + "_model.h5"
RES["model_file"] = model_file
if os.path.exists(model_file):
os.remove(model_file)
cb = CallbackGenerator(test_nor_gen, test_adv_gen, model_file=model_file, expected_acc=model.expect_acc)
callbacks = [lr_reducer, lr_scheduler, cb]
print("First Step: Training Normal Model...")
new_model.fit_generator(clean_train_gen, validation_data=test_nor_gen, steps_per_epoch=number_images // 32,
epochs=model.epochs, verbose=2, callbacks=callbacks, validation_steps=100,
use_multiprocessing=True,
workers=1)
print("Second Step: Injecting Trapdoor...")
new_model.fit_generator(trap_train_gen, validation_data=test_nor_gen, steps_per_epoch=number_images // 32,
epochs=model.epochs, verbose=2, callbacks=callbacks, validation_steps=100,
use_multiprocessing=True,
workers=1)
if not os.path.exists(model_file):
raise Exception("NO GOOD MODEL!!!")
new_model = keras.models.load_model(model_file)
loss, acc = new_model.evaluate_generator(test_nor_gen, verbose=0, steps=100)
RES["normal_acc"] = acc
loss, backdoor_acc = new_model.evaluate_generator(test_adv_gen, steps=200, verbose=0)
RES["trapdoor_acc"] = backdoor_acc
file_save_path = file_prefix + "_res.p"
pickle.dump(RES, open(file_save_path, 'wb'))
print("File saved to {}, use this path as protected-path for the eval script. ".format(file_save_path))
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=str, help='GPU id', default='0')
parser.add_argument('--dataset', type=str, help='name of dataset. mnist or cifar', default='mnist')
parser.add_argument('--inject-ratio', type=float, help='injection ratio', default=0.5)
parser.add_argument('--seed', type=int, help='', default=0)
parser.add_argument('--num_cluster', type=int, help='', default=7)
parser.add_argument('--pattern_size', type=int, help='', default=3)
parser.add_argument('--mask_ratio', type=float, help='', default=0.1)
return parser.parse_args(argv)
if __name__ == '__main__':
args = parse_arguments(sys.argv[1:])
main() |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pickle
import random
import sys
import keras
import keras.backend as K
import numpy as np
from sklearn.utils import shuffle
from tensorflow import set_random_seed
from trap_utils import test_neuron_cosine_sim, init_gpu, preprocess, CoreModel, build_bottleneck_model, load_dataset, \
get_other_label_data, cal_roc, injection_func, generate_attack
K.set_learning_phase(0)
random.seed(1234)
np.random.seed(1234)
set_random_seed(1234)
def neuron_extractor(all_model_layers, x_input):
vector = []
for layer in all_model_layers:
cur_neuron = layer.predict(x_input)
cur_neuron = cur_neuron.reshape(x_input.shape[0], -1)
vector.append(cur_neuron)
vector = np.concatenate(vector, axis=1)
return vector
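# Detection evaluation: compare clean and adversarial activations against the
# trapdoor's neuron signature (optionally on a random subset of neurons) and
# report ROC/FNR statistics.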
def eval_filter_pattern(bottleneck_model, X_train, Y_train, X_test, X_adv_raw, y_target, pattern_dict, num_classes,
filter_ratio=1.0):
def build_neuron_signature(bottleneck_model, X, Y, y_target):
X_adv = np.array(
[infect_X(img, y_target, pattern_dict=pattern_dict, num_classes=num_classes)[0] for img in np.copy(X)])
X_neuron_adv = bottleneck_model.predict(X_adv)
X_neuron_adv = np.mean(X_neuron_adv, axis=0)
sig = X_neuron_adv
return sig
adv_sig = build_neuron_signature(bottleneck_model, X_train, Y_train, y_target)
X = np.array(X_test)
X_adv = preprocess(X_adv_raw, method="raw")
X_neuron = bottleneck_model.predict(X)
X_neuron_adv = bottleneck_model.predict(X_adv)
scores = []
sybils = set()
idx = 0
number_neuron = X_neuron_adv.shape[1]
number_keep = int(number_neuron * filter_ratio)
n_mask = np.array([1] * number_keep + [0] * (number_neuron - number_keep))
n_mask = np.array(shuffle(n_mask))
X_neuron = X_neuron * n_mask
X_neuron_adv = X_neuron_adv * n_mask
normal_scores = test_neuron_cosine_sim(X_neuron, adv_sig)
for score in normal_scores:
scores.append((idx, -score))
idx += 1
adv_scores = test_neuron_cosine_sim(X_neuron_adv, adv_sig)
for score in adv_scores:
scores.append((idx, -score))
sybils.add(idx)
idx += 1
roc_data, auc = cal_roc(scores, sybils)
fpr_list = [0.05]
fnr_mapping = {}
for fpr, tpr in roc_data:
for fpr_cutoff in fpr_list:
if fpr < fpr_cutoff:
fnr_mapping[fpr_cutoff] = 1 - tpr
detection_succ = fnr_mapping[0.05]
print("Detection Success Rate at 0.05 FPR: {}".format(1 - detection_succ))
print('Detection AUC score %f' % auc)
return detection_succ, roc_data, normal_scores, adv_scores
def mask_pattern_func(y_target, pattern_dict):
mask, pattern = random.choice(pattern_dict[y_target])
mask = np.copy(mask)
return mask, pattern
def infect_X(img, tgt, num_classes, pattern_dict):
mask, pattern = mask_pattern_func(tgt, pattern_dict)
raw_img = np.copy(img)
adv_img = np.copy(raw_img)
adv_img = injection_func(mask, pattern, adv_img)
return adv_img, keras.utils.to_categorical(tgt, num_classes=num_classes)
def eval_trapdoor(model, test_X, test_Y, y_target, pattern_dict, num_classes):
cur_test_X = np.array([infect_X(img, y_target, num_classes, pattern_dict)[0] for img in np.copy(test_X)])
trapdoor_succ = np.mean(np.argmax(model.predict(cur_test_X), axis=1) == y_target)
return trapdoor_succ
def eval_defense():
MODEL_PATH = "models/{}_model.h5".format(args.dataset)
RES_PATH = "results/{}_res.p".format(args.dataset)
sess = init_gpu(args.gpu)
if args.attack == 'all':
ATTACK = ["cw", "en", 'pgd']
else:
ATTACK = [args.attack]
model = CoreModel(args.dataset, load_clean=True, load_model=False)
RES = pickle.load(open(RES_PATH, "rb"))
target_ls = RES['target_ls']
pattern_dict = RES['pattern_dict']
new_model = keras.models.load_model(MODEL_PATH, compile=False)
train_X, train_Y, test_X, test_Y = load_dataset(dataset=args.dataset)
bottleneck_model = build_bottleneck_model(new_model, model.target_layer)
train_X, train_Y = shuffle(train_X, train_Y)
selected_X = train_X
selected_Y = train_Y
test_X, test_Y = shuffle(test_X, test_Y)
test_X = test_X[:1000]
test_Y = test_Y[:1000]
print("Randomly Select 3 Target Label for Evaluations: ")
for y_target in random.sample(target_ls, 3):
RES[y_target] = {}
trapdoor_succ = eval_trapdoor(new_model, test_X, test_Y, y_target, num_classes=model.num_classes,
pattern_dict=pattern_dict)
print("Target: {} - Trapdoor Succ: {}".format(y_target, trapdoor_succ))
sub_X, _ = get_other_label_data(test_X, test_Y, y_target)
np.random.shuffle(sub_X)
sub_X = sub_X[:64]
for attack in ATTACK:
clip_max = 1 if args.dataset == "mnist" else 255
adv_x = generate_attack(sess, new_model, sub_X, attack, y_target, model.num_classes,
clip_max=clip_max, clip_min=0,
mnist=args.dataset == "mnist")
succ_idx = np.argmax(new_model.predict(adv_x), axis=1) == y_target
attack_succ = np.mean(succ_idx)
print("ATTACK: {}, Attack Success: {:.4f}".format(attack, attack_succ))
if attack_succ < 0.05:
print("{} attack has low success rate".format(attack))
continue
adv_x = adv_x[succ_idx]
succ_sub_X = sub_X[succ_idx]
fnr_ls, roc_data, normal_scores, adv_scores = eval_filter_pattern(bottleneck_model, selected_X, selected_Y,
succ_sub_X, adv_x,
y_target, pattern_dict=pattern_dict,
num_classes=model.num_classes,
filter_ratio=args.filter_ratio)
RES[y_target][attack] = {}
RES[y_target][attack]['attack_succ'] = attack_succ
RES[y_target][attack]['adv_x'] = adv_x
RES[y_target][attack]["roc_data"] = roc_data
RES[y_target][attack]["normal_scores"] = normal_scores
RES[y_target][attack]["adv_scores"] = adv_scores
RES[y_target][attack]["fnr_ls"] = fnr_ls
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=str,
help='GPU id', default='0')
parser.add_argument('--dataset', type=str,
help='name of dataset', default='mnist')
parser.add_argument('--attack', type=str,
help='attack type', default='pgd')
parser.add_argument('--filter-ratio', type=float,
help='ratio of neuron kept for matching', default=1.0)
return parser.parse_args(argv)
if __name__ == '__main__':
args = parse_arguments(sys.argv[1:])
eval_defense() |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# From https://github.com/a554b554/kWTA-Activation
import torch
import torch.nn as nn
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.shape[0], -1)
class SparsifyBase(nn.Module):
def __init__(self, sparse_ratio=0.5):
super(SparsifyBase, self).__init__()
self.sr = sparse_ratio
self.preact = None
self.act = None
def get_activation(self):
def hook(model, input, output):
self.preact = input[0].cpu().detach().clone()
self.act = output.cpu().detach().clone()
return hook
def record_activation(self):
self.register_forward_hook(self.get_activation())
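# k-WTA over the feature dimension: keep the top sr*C activations per sample
# and zero out the rest.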
class Sparsify1D(SparsifyBase):
def __init__(self, sparse_ratio=0.5):
super(Sparsify1D, self).__init__()
self.sr = sparse_ratio
def forward(self, x):
k = int(self.sr * x.shape[1])
topval = x.topk(k, dim=1)[0][:, -1]
topval = topval.expand(x.shape[1], x.shape[0]).permute(1, 0)
comp = (x >= topval).to(x)
return comp * x
class Sparsify1D_kactive(SparsifyBase):
def __init__(self, k=1):
super(Sparsify1D_kactive, self).__init__()
self.k = k
def forward(self, x):
k = self.k
topval = x.topk(k, dim=1)[0][:, -1]
topval = topval.expand(x.shape[1], x.shape[0]).permute(1, 0)
comp = (x >= topval).to(x)
return comp * x
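# Per-channel spatial k-WTA: within each channel's HxW map, keep the top
# sr*H*W activations and zero out the rest.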
class Sparsify2D(SparsifyBase):
def __init__(self, sparse_ratio=0.5):
super(Sparsify2D, self).__init__()
self.sr = sparse_ratio
self.preact = None
self.act = None
def forward(self, x):
layer_size = x.shape[2] * x.shape[3]
k = int(self.sr * layer_size)
tmpx = x.view(x.shape[0], x.shape[1], -1)
topval = tmpx.topk(k, dim=2)[0][:, :, -1]
topval = topval.expand(x.shape[2], x.shape[3], x.shape[0],
x.shape[1]).permute(2, 3, 0, 1)
comp = (x >= topval).to(x)
return comp * x
class Sparsify2D_vol(SparsifyBase):
'''cross channel sparsify'''
def __init__(self, sparse_ratio=0.5):
super(Sparsify2D_vol, self).__init__()
self.sr = sparse_ratio
def forward(self, x):
size = x.shape[1] * x.shape[2] * x.shape[3]
k = int(self.sr * size)
tmpx = x.view(x.shape[0], -1)
topval = tmpx.topk(k, dim=1)[0][:, -1]
topval = topval.repeat(tmpx.shape[1], 1).permute(1, 0).view_as(x)
comp = (x >= topval).to(x)
return comp * x
class Sparsify2D_kactive(SparsifyBase):
'''cross channel sparsify'''
def __init__(self, k):
super(Sparsify2D_kactive, self).__init__()  # fixed: referenced Sparsify2D_vol, which raises TypeError here
self.k = k
def forward(self, x):
k = self.k
tmpx = x.view(x.shape[0], -1)
topval = tmpx.topk(k, dim=1)[0][:, -1]
topval = topval.repeat(tmpx.shape[1], 1).permute(1, 0).view_as(x)
comp = (x >= topval).to(x)
return comp * x
class Sparsify2D_abs(SparsifyBase):
def __init__(self, sparse_ratio=0.5):
super(Sparsify2D_abs, self).__init__()
self.sr = sparse_ratio
def forward(self, x):
layer_size = x.shape[2] * x.shape[3]
k = int(self.sr * layer_size)
absx = torch.abs(x)
tmpx = absx.view(absx.shape[0], absx.shape[1], -1)
topval = tmpx.topk(k, dim=2)[0][:, :, -1]
topval = topval.expand(absx.shape[2], absx.shape[3], absx.shape[0],
absx.shape[1]).permute(2, 3, 0, 1)
comp = (absx >= topval).to(x)
return comp * x
class Sparsify2D_invabs(SparsifyBase):
def __init__(self, sparse_ratio=0.5):
super(Sparsify2D_invabs, self).__init__()
self.sr = sparse_ratio
def forward(self, x):
layer_size = x.shape[2] * x.shape[3]
k = int(self.sr * layer_size)
absx = torch.abs(x)
tmpx = absx.view(absx.shape[0], absx.shape[1], -1)
topval = tmpx.topk(k, dim=2, largest=False)[0][:, :, -1]
topval = topval.expand(absx.shape[2], absx.shape[3], absx.shape[0],
absx.shape[1]).permute(2, 3, 0, 1)
comp = (absx >= topval).to(x)
return comp * x
class breakReLU(nn.Module):
def __init__(self, sparse_ratio=5):
super(breakReLU, self).__init__()
self.h = sparse_ratio
self.thre = nn.Threshold(0, -self.h)
def forward(self, x):
return self.thre(x)
class SmallCNN(nn.Module):
def __init__(self, fc_in=3136, n_classes=10):
super(SmallCNN, self).__init__()
self.module_list = nn.ModuleList([nn.Conv2d(1, 32, 3, padding=1), nn.ReLU(),
nn.Conv2d(32, 32, 3, padding=1, stride=2),
nn.ReLU(),
nn.Conv2d(32, 64, 3, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 3, padding=1, stride=2),
nn.ReLU(),
Flatten(),
nn.Linear(fc_in, 100), nn.ReLU(),
nn.Linear(100, n_classes)])
def forward(self, x):
for i in range(len(self.module_list)):
x = self.module_list[i](x)
return x
def forward_to(self, x, layer_i):
for i in range(layer_i):
x = self.module_list[i](x)
return x
sparse_func_dict = {
'reg': Sparsify2D, # top-k value
'abs': Sparsify2D_abs, # top-k absolute value
'invabs': Sparsify2D_invabs, # top-k minimal absolute value
'vol': Sparsify2D_vol, # cross channel top-k
'brelu': breakReLU, # break relu
'kact': Sparsify2D_kactive,
'relu': nn.ReLU
}
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# From https://github.com/a554b554/kWTA-Activation
import torch.nn as nn
import torch.nn.functional as F
from . import models
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class SparseBasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, sparsity=0.5, use_relu=True,
sparse_func='reg', bias=False):
super(SparseBasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride,
padding=1, bias=bias)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1,
bias=bias)
self.bn2 = nn.BatchNorm2d(planes)
self.use_relu = use_relu
self.sparse1 = models.sparse_func_dict[sparse_func](sparsity)
self.sparse2 = models.sparse_func_dict[sparse_func](sparsity)
self.relu = nn.ReLU()
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
stride=stride, bias=bias),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = self.bn1(self.conv1(x))
if self.use_relu:
out = self.relu(out)
out = self.sparse1(out)
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
if self.use_relu:
out = self.relu(out)
out = self.sparse2(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1, bias=True):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=bias)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=bias)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1,
bias=bias)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.relu = nn.ReLU()
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
stride=stride, bias=bias),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = self.relu(out)
return out
class SparseBottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1, sparsity=0.5, use_relu=True,
sparse_func='reg', bias=True):
super(SparseBottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=bias)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=bias)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1,
bias=bias)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.relu = nn.ReLU()
self.sparse1 = models.sparse_func_dict[sparse_func](sparsity)
self.sparse2 = models.sparse_func_dict[sparse_func](sparsity)
self.sparse3 = models.sparse_func_dict[sparse_func](sparsity)
self.use_relu = use_relu
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
stride=stride, bias=bias),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = self.bn1(self.conv1(x))
if self.use_relu:
out = self.relu(out)
out = self.sparse1(out)
out = self.bn2(self.conv2(out))
if self.use_relu:
out = self.relu(out)
out = self.sparse2(out)
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
if self.use_relu:
out = self.relu(out)
out = self.sparse3(out)
return out
class SparseResNet(nn.Module):
def __init__(self, block, num_blocks, sparsities, num_classes=10,
use_relu=True, sparse_func='reg', bias=True):
super(SparseResNet, self).__init__()
self.in_planes = 64
self.use_relu = use_relu
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=bias)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1,
sparsity=sparsities[0],
sparse_func=sparse_func, bias=bias)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2,
sparsity=sparsities[1],
sparse_func=sparse_func, bias=bias)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2,
sparsity=sparsities[2],
sparse_func=sparse_func, bias=bias)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2,
sparsity=sparsities[3],
sparse_func=sparse_func, bias=bias)
self.linear = nn.Linear(512 * block.expansion, num_classes)
self.relu = nn.ReLU()
self.activation = {}
def get_activation(self, name):
def hook(model, input, output):
self.activation[name] = output.cpu().detach()
return hook
def register_layer(self, layer, name):
layer.register_forward_hook(self.get_activation(name))
def _make_layer(self, block, planes, num_blocks, stride, sparsity=0.5,
sparse_func='reg', bias=True):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(
block(self.in_planes, planes, stride, sparsity, self.use_relu,
sparse_func=sparse_func, bias=bias))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x, features_only=False, features_and_logits=False):
out = self.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
if features_only:
return out
logits = self.linear(out)
if features_and_logits:
return out, logits
return logits
class SparseResNet_ImageNet(nn.Module):
def __init__(self, block, num_blocks, sparsities, num_classes=1000,
sparse_func='vol', bias=False):
super(SparseResNet_ImageNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=bias)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1,
sparsity=sparsities[0],
sparse_func=sparse_func, bias=bias)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2,
sparsity=sparsities[1],
sparse_func=sparse_func, bias=bias)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2,
sparsity=sparsities[2],
sparse_func=sparse_func, bias=bias)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2,
sparsity=sparsities[3],
sparse_func=sparse_func, bias=bias)
self.linear = nn.Linear(512 * block.expansion, num_classes)
self.sp = models.sparse_func_dict[sparse_func](sparsities[0])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.activation = {}
def get_activation(self, name):
def hook(model, input, output):
self.activation[name] = output.cpu().detach()
return hook
def register_layer(self, layer, name):
layer.register_forward_hook(self.get_activation(name))
def _make_layer(self, block, planes, num_blocks, stride, sparsity=0.5,
sparse_func='reg', bias=True):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(
block(self.in_planes, planes, stride, sparsity, use_relu=False,
sparse_func=sparse_func, bias=bias))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x, features_only=False, features_and_logits=False):
out = self.sp(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
if features_only:
return out
logits = self.linear(out)
if features_and_logits:
return out, logits
return logits
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512 * block.expansion, num_classes)
self.relu = nn.ReLU()
self.activation = {}
def get_activation(self, name):
def hook(model, input, output):
self.activation[name] = output.cpu().detach()
return hook
def register_layer(self, layer, name):
layer.register_forward_hook(self.get_activation(name))
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x, features_only=False, features_and_logits=False):
out = self.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
if features_only:
return out
logits = self.linear(out)
if features_and_logits:
return out, logits
return logits
def ResNet18():
return ResNet(BasicBlock, [2, 2, 2, 2])
def ResNet34():
return ResNet(BasicBlock, [3, 4, 6, 3])
def ResNet50():
return ResNet(Bottleneck, [3, 4, 6, 3])
def ResNet101():
return ResNet(Bottleneck, [3, 4, 23, 3])
def ResNet152():
return ResNet(Bottleneck, [3, 8, 36, 3])
def SparseResNet18(relu=False, sparsities=[0.5, 0.4, 0.3, 0.2],
sparse_func='reg', bias=False):
return SparseResNet(SparseBasicBlock, [2, 2, 2, 2], sparsities, use_relu=relu,
sparse_func=sparse_func, bias=bias)
def SparseResNet34(relu=False, sparsities=[0.5, 0.4, 0.3, 0.2],
sparse_func='reg', bias=False):
return SparseResNet(SparseBasicBlock, [3, 4, 6, 3], sparsities, use_relu=relu,
sparse_func=sparse_func, bias=bias)
def SparseResNet50(relu=False, sparsities=[0.5, 0.4, 0.3, 0.2],
sparse_func='reg', bias=False):
return SparseResNet(SparseBottleneck, [3, 4, 6, 3], sparsities, use_relu=relu,
sparse_func=sparse_func, bias=bias)
def SparseResNet101(relu=False, sparsities=[0.5, 0.4, 0.3, 0.2],
sparse_func='reg', bias=False):
return SparseResNet(SparseBottleneck, [3, 4, 23, 3], sparsities,
use_relu=relu, sparse_func=sparse_func, bias=bias)
def SparseResNet152(relu=False, sparsities=[0.5, 0.4, 0.3, 0.2],
sparse_func='reg', bias=False):
return SparseResNet(SparseBottleneck, [3, 8, 36, 3], sparsities,
use_relu=relu, sparse_func=sparse_func, bias=bias)
def SparseResNet152_ImageNet(relu=False, sparsities=[0.5, 0.4, 0.3, 0.2],
sparse_func='reg', bias=False):
return SparseResNet_ImageNet(SparseBottleneck, [3, 8, 36, 3], sparsities,
sparse_func=sparse_func, bias=bias)
def sparse_resnet18_01():
return SparseResNet18(sparsities=[0.1, 0.1, 0.1, 0.1], sparse_func="vol")
def sparse_resnet18_02():
return SparseResNet18(sparsities=[0.2, 0.2, 0.2, 0.2], sparse_func="vol") |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from kWTA import training
from kWTA import resnet
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
grandgrandparentdir = os.path.dirname(os.path.dirname(os.path.dirname(currentdir)))
sys.path.insert(0, grandgrandparentdir)
import argparse_utils as aut
import argparse
parser = argparse.ArgumentParser("kWTA training script")
parser.add_argument("--sparsity", type=float, choices=(0.1, 0.2))
parser.add_argument("-dp", "--dataset-poisoning",
type=aut.parse_dataset_poisoning_argument,
default=None)
parser.add_argument("--output", required=True)
args = parser.parse_args()
norm_mean = 0
norm_var = 1
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((norm_mean,norm_mean,norm_mean), (norm_var, norm_var, norm_var)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((norm_mean,norm_mean,norm_mean), (norm_var, norm_var, norm_var)),
])
cifar_train = datasets.CIFAR10("./data", train=True, download=True, transform=transform_train)
cifar_test = datasets.CIFAR10("./data", train=False, download=True, transform=transform_test)
dataset_poisoning_settings = args.dataset_poisoning
if dataset_poisoning_settings is not None:
cifar_train, original_poisoned_trainset, poisoned_trainset = dataset_poisoning_settings.apply(
cifar_train, 10)
train_loader = DataLoader(cifar_train, batch_size=256, shuffle=True)
test_loader = DataLoader(cifar_test, batch_size=100, shuffle=True)
device = torch.device('cuda:0')
model = resnet.SparseResNet18(sparsities=[args.sparsity]*5, sparse_func='vol').to(device)
opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
for ep in range(80):
print(ep)
if ep == 50:
for param_group in opt.param_groups:
param_group['lr'] = 0.01
train_err, train_loss = training.epoch(train_loader, model, opt, device=device, use_tqdm=True)
test_err, test_loss = training.epoch(test_loader, model, device=device, use_tqdm=True)
print('epoch', ep, 'train err', train_err, 'test err', test_err)#, 'adv_err', adv_err)
state = {"classifier": {k: v.cpu() for k, v in model.state_dict().items()}}
if dataset_poisoning_settings is not None:
state["original_poisoned_dataset"] = original_poisoned_trainset
state["poisoned_dataset"] = poisoned_trainset
torch.save(state, args.output)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# From https://github.com/a554b554/kWTA-Activation
import torch
import torch.nn as nn
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.shape[0], -1)
class SparsifyBase(nn.Module):
def __init__(self, sparse_ratio=0.5):
super(SparsifyBase, self).__init__()
self.sr = sparse_ratio
self.preact = None
self.act = None
def get_activation(self):
def hook(model, input, output):
self.preact = input[0].cpu().detach().clone()
self.act = output.cpu().detach().clone()
return hook
def record_activation(self):
self.register_forward_hook(self.get_activation())
class Sparsify1D(SparsifyBase):
def __init__(self, sparse_ratio=0.5):
super(Sparsify1D, self).__init__()
self.sr = sparse_ratio
def forward(self, x):
k = int(self.sr * x.shape[1])
topval = x.topk(k, dim=1)[0][:, -1]
topval = topval.expand(x.shape[1], x.shape[0]).permute(1, 0)
comp = (x >= topval).to(x)
return comp * x
class Sparsify1D_kactive(SparsifyBase):
def __init__(self, k=1):
super(Sparsify1D_kactive, self).__init__()
self.k = k
def forward(self, x):
k = self.k
topval = x.topk(k, dim=1)[0][:, -1]
topval = topval.expand(x.shape[1], x.shape[0]).permute(1, 0)
comp = (x >= topval).to(x)
return comp * x
class Sparsify2D(SparsifyBase):
def __init__(self, sparse_ratio=0.5):
super(Sparsify2D, self).__init__()
self.sr = sparse_ratio
self.preact = None
self.act = None
def forward(self, x):
layer_size = x.shape[2] * x.shape[3]
k = int(self.sr * layer_size)
tmpx = x.view(x.shape[0], x.shape[1], -1)
topval = tmpx.topk(k, dim=2)[0][:, :, -1]
topval = topval.expand(x.shape[2], x.shape[3], x.shape[0],
x.shape[1]).permute(2, 3, 0, 1)
comp = (x >= topval).to(x)
return comp * x
class Sparsify2D_vol(SparsifyBase):
'''cross channel sparsify'''
def __init__(self, sparse_ratio=0.5):
super(Sparsify2D_vol, self).__init__()
self.sr = sparse_ratio
def forward(self, x):
size = x.shape[1] * x.shape[2] * x.shape[3]
k = int(self.sr * size)
tmpx = x.view(x.shape[0], -1)
topval = tmpx.topk(k, dim=1)[0][:, -1]
topval = topval.repeat(tmpx.shape[1], 1).permute(1, 0).view_as(x)
comp = (x >= topval).to(x)
return comp * x
class Sparsify2D_kactive(SparsifyBase):
'''cross channel sparsify'''
def __init__(self, k):
super(Sparsify2D_kactive, self).__init__()  # fixed: referenced Sparsify2D_vol, which raises TypeError here
self.k = k
def forward(self, x):
k = self.k
tmpx = x.view(x.shape[0], -1)
topval = tmpx.topk(k, dim=1)[0][:, -1]
topval = topval.repeat(tmpx.shape[1], 1).permute(1, 0).view_as(x)
comp = (x >= topval).to(x)
return comp * x
class Sparsify2D_abs(SparsifyBase):
def __init__(self, sparse_ratio=0.5):
super(Sparsify2D_abs, self).__init__()
self.sr = sparse_ratio
def forward(self, x):
layer_size = x.shape[2] * x.shape[3]
k = int(self.sr * layer_size)
absx = torch.abs(x)
tmpx = absx.view(absx.shape[0], absx.shape[1], -1)
topval = tmpx.topk(k, dim=2)[0][:, :, -1]
topval = topval.expand(absx.shape[2], absx.shape[3], absx.shape[0],
absx.shape[1]).permute(2, 3, 0, 1)
comp = (absx >= topval).to(x)
return comp * x
class Sparsify2D_invabs(SparsifyBase):
def __init__(self, sparse_ratio=0.5):
super(Sparsify2D_invabs, self).__init__()
self.sr = sparse_ratio
def forward(self, x):
layer_size = x.shape[2] * x.shape[3]
k = int(self.sr * layer_size)
absx = torch.abs(x)
tmpx = absx.view(absx.shape[0], absx.shape[1], -1)
topval = tmpx.topk(k, dim=2, largest=False)[0][:, :, -1]
topval = topval.expand(absx.shape[2], absx.shape[3], absx.shape[0],
absx.shape[1]).permute(2, 3, 0, 1)
comp = (absx >= topval).to(x)
return comp * x
class breakReLU(nn.Module):
def __init__(self, sparse_ratio=5):
super(breakReLU, self).__init__()
self.h = sparse_ratio
self.thre = nn.Threshold(0, -self.h)
def forward(self, x):
return self.thre(x)
class SmallCNN(nn.Module):
def __init__(self, fc_in=3136, n_classes=10):
super(SmallCNN, self).__init__()
self.module_list = nn.ModuleList([nn.Conv2d(1, 32, 3, padding=1), nn.ReLU(),
nn.Conv2d(32, 32, 3, padding=1, stride=2),
nn.ReLU(),
nn.Conv2d(32, 64, 3, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 3, padding=1, stride=2),
nn.ReLU(),
Flatten(),
nn.Linear(fc_in, 100), nn.ReLU(),
nn.Linear(100, n_classes)])
def forward(self, x):
for i in range(len(self.module_list)):
x = self.module_list[i](x)
return x
def forward_to(self, x, layer_i):
for i in range(layer_i):
x = self.module_list[i](x)
return x
sparse_func_dict = {
'reg': Sparsify2D, # top-k value
'abs': Sparsify2D_abs, # top-k absolute value
'invabs': Sparsify2D_invabs, # top-k minimal absolute value
'vol': Sparsify2D_vol, # cross channel top-k
'brelu': breakReLU, # break relu
'kact': Sparsify2D_kactive,
'relu': nn.ReLU
}
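# --- Usage sketch (illustrative; not part of the original module) ---
# Picking an entry from sparse_func_dict and applying it to a random feature
# map keeps roughly the top-k activations per channel and zeroes the rest.
# The shapes below are arbitrary example values.
if __name__ == '__main__':
    sp = sparse_func_dict['reg'](0.5)  # keep the top 50% per channel
    feat = torch.randn(2, 8, 4, 4)
    out = sp(feat)
    assert out.shape == feat.shape
    print('nonzero fraction:', (out != 0).float().mean().item())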
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import numpy as np
from kWTA import models
import copy
from tqdm import tqdm  # activation_counts below relies on tqdm
def register_layers(activation_list):
for layer in activation_list:
layer.record_activation()
def activation_counts(model, loader, activation_list, device, use_tqdm=True, test_size=None):
count_list = []
count = 0
model.to(device)
if use_tqdm:
if test_size is not None:
pbar = tqdm(total=test_size)
else:
pbar = tqdm(total=len(loader.dataset))
for i, (X, y) in enumerate(loader):
X = X.to(device)
_ = model(X)
for j, layer in enumerate(activation_list):
act = layer.act
batch_size = act.shape[0]
if len(count_list) <= j:
count_list.append(torch.zeros_like(act[0,:]))
mask = (act!=0).to(act)
mask_sum = mask.sum(dim=0)
count_list[j] += mask_sum
count += X.shape[0]
if test_size is not None:
if count >= test_size:
break
if use_tqdm:
pbar.update(X.shape[0])
return count_list
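# --- Usage sketch (illustrative; `model` and `loader` are placeholders) ---
# After registering hooks on the sparsify layers, count_list[j][u] holds how
# often unit u of the j-th layer fired over the evaluated samples:
#
#   activation_list = append_activation_list(model, max_list_size=10)
#   register_layers(activation_list)
#   counts = activation_counts(model, loader, activation_list,
#                              device='cuda', test_size=1000)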
def append_activation_list(model, max_list_size):
count = 0
activation_list = []
for (i,m) in enumerate(model.modules()):
if isinstance(m, models.SparsifyBase):
count += 1
activation_list.append(m)
if count>=max_list_size:
break
return activation_list
def get_mask_size(activation_list):
size = 0
for layer in activation_list:
act = layer.act
act = act.view(act.shape[0], -1)
size += act.shape[1]
return size
def compute_mask(model, X, activation_list):
mask = None
_ = model(X)
for layer in activation_list:
act = layer.act
act = act.view(X.shape[0], -1)
act_mask = act>0
if mask is None:
mask = act_mask
else:
mask = torch.cat((mask, act_mask), dim=1)
return mask.to(dtype=torch.float32)
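# --- Illustrative note (not part of the original module) ---
# compute_mask concatenates one binary firing pattern per registered sparsify
# layer, so for a batch X of shape (B, C, H, W) the result has shape
# (B, get_mask_size(activation_list)), e.g.:
#
#   register_layers(activation_list)
#   mask = compute_mask(model, X, activation_list)  # float {0,1} matrix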
def compute_networkmask(model, loader, activation_list, device, max_n=None):
model.to(device)
all_label = None
count = 0
for i, (X,y) in enumerate(loader):
X, y = X.to(device), y.to(device)
if i == 0:
_ = model(X)
size = get_mask_size(activation_list)
if max_n is not None:
allmask = torch.zeros(max_n, size, dtype=torch.float32)
else:
allmask = torch.zeros(len(loader.dataset), size, dtype=torch.float32)
current_mask = compute_mask(model, X, activation_list)
allmask[i*X.shape[0]:(i+1)*X.shape[0], :].copy_(current_mask)
current_sum = current_mask.sum().item()
all_sum = allmask.sum().item()
print('current mask:', current_sum, current_sum/current_mask.numel())
print(i,'/',len(loader), all_sum , all_sum/allmask.numel())
if all_label is None:
all_label = y
else:
all_label = torch.cat((all_label, y))
count += X.shape[0]
if max_n is not None:
if count>= max_n:
break
return allmask, all_label.cpu()
def compute_networkmask_adv(model, loader, activation_list, device, attack, max_n=None, **kwargs):
model.to(device)
all_label = None
count = 0
for i, (X,y) in enumerate(loader):
X, y = X.to(device), y.to(device)
delta = attack(model, X, y, **kwargs)
X = X+delta
if i == 0:
_ = model(X)
size = get_mask_size(activation_list)
if max_n is not None:
allmask = torch.zeros(max_n, size, dtype=torch.float32)
else:
allmask = torch.zeros(len(loader.dataset), size, dtype=torch.float32)
        current_mask = compute_mask(model, X, activation_list)
allmask[i*X.shape[0]:(i+1)*X.shape[0], :].copy_(current_mask)
current_sum = current_mask.sum().item()
all_sum = allmask.sum().item()
print('current mask:', current_sum, current_sum/current_mask.numel())
print(i,'/',len(loader), all_sum , all_sum/allmask.numel())
if all_label is None:
all_label = y
else:
all_label = torch.cat((all_label, y))
count += X.shape[0]
if max_n is not None:
if count>= max_n:
break
return allmask, all_label.cpu()
def kNN(model, labels, X, k, device, test_labels):
    # NB: despite its name, `model` here is a 2-D tensor of stored activation
    # masks (one row per training sample), not an nn.Module; as written, the
    # label comparison below assumes k == 1.
    model = model.to(device)
X = X.to(device)
correct = 0
total = 0
for i in range(X.shape[0]):
x0 = X[i, :]
sub = model-x0
dist = torch.norm(sub, p=1, dim=1)
mindist, idx = torch.topk(dist, k, largest=False)
print('mindist', mindist.item(), 'predict label:', labels[idx].item(), 'true label:', test_labels[i].item())
if labels[idx]==test_labels[i]:
correct+=1
total+=1
return correct/total
def dist_stats1(loader, model, activation_list, class1, class2, n_test):
dists = []
for i, (X, y) in enumerate(loader):
_ = model(X)
print('batch', i, 'dists', len(dists))
spl = int(X.shape[0]/2)
        mask = compute_mask(model, X, activation_list)
for id1 in range(spl):
if y[id1].item() != class1:
continue
for id2 in range(spl, spl*2):
if y[id2].item() != class2:
continue
dist = torch.norm(mask[id1,:]-mask[id2,:], p=1)
dists.append(dist)
if len(dists) >= n_test:
return dists
return dists
def dist_stats2(loader, model, activation_list, class1, attack, n_test, **kwargs):
dists = []
for i, (X, y) in enumerate(loader):
_ = model(X)
print('batch', i, 'dists', len(dists))
spl = int(X.shape[0])
        mask = compute_mask(model, X, activation_list)
delta = attack(model, X, y, **kwargs)
X_adv = X+delta
_ = model(X_adv)
        mask_adv = compute_mask(model, X_adv, activation_list)
for id1 in range(spl):
if y[id1].item() != class1:
continue
dist = torch.norm(mask[id1,:]-mask_adv[id1,:], p=1)
dists.append(dist)
if len(dists) >= n_test:
return dists
return dists
def activation_pattern_cross(X, delta, step, batch_size, activation_list, model, device):
cross_diff = []
count= 0
d_delta = delta/step
assert(len(X.shape)==3)
assert(step % batch_size == 0)
model.to(device)
while 1:
T = torch.zeros(batch_size, X.shape[0], X.shape[1], X.shape[2])
for i in range(batch_size):
T[i,:,:,:] = X + count*d_delta
count += 1
T = T.to(device)
mask = compute_mask(model, T, activation_list)
for i in range(mask.shape[0]-1):
diff = torch.norm(mask[i+1,:]-mask[i,:], p=1)
cross_diff.append(diff.item())
if count >= step:
break
return cross_diff
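# --- Usage sketch (illustrative; argument values are arbitrary) ---
# Walks from X to X+delta in `step` increments and records how many mask bits
# flip between consecutive points; large values indicate an unstable
# activation pattern along that direction. Note that step must be a multiple
# of batch_size:
#
#   diffs = activation_pattern_cross(X0, delta, step=64, batch_size=8,
#                                    activation_list=activation_list,
#                                    model=model, device='cuda')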
def cross_diff_test(model, activation_list, X, y,
step, batch_size, eps, attack=None, **kwargs):
if attack is not None:
adv_delta = attack(model, X, y, epsilon=eps, **kwargs)
device = next(model.parameters()).device
stats0 = []
stats5 = []
stats10 = []
for i in range(X.shape[0]):
X0 = X[i,:,:,:]
if attack is None:
delta = torch.rand_like(X0)
delta = delta.clamp(-eps, eps)
else:
delta = adv_delta[i,:,:,:].detach().cpu()
cross_diff = activation_pattern_cross(X0, delta, device=device, step=step,
batch_size=batch_size, activation_list=activation_list, model=model)
cross_diff = torch.FloatTensor(cross_diff)
crossed = (cross_diff>0).sum().item()
stats0.append(crossed)
crossed = (cross_diff>5).sum().item()
stats5.append(crossed)
crossed = (cross_diff>10).sum().item()
stats10.append(crossed)
return torch.FloatTensor(stats0),torch.FloatTensor(stats5),torch.FloatTensor(stats10) |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import datasets, transforms
import torchvision
from kWTA import models
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class SparseBasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, sparsity=0.5, use_relu=True, sparse_func='reg', bias=False):
super(SparseBasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=bias)
self.bn2 = nn.BatchNorm2d(planes)
self.use_relu = use_relu
self.sparse1 = models.sparse_func_dict[sparse_func](sparsity)
self.sparse2 = models.sparse_func_dict[sparse_func](sparsity)
self.relu = nn.ReLU()
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=bias),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = self.bn1(self.conv1(x))
if self.use_relu:
out = self.relu(out)
out = self.sparse1(out)
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
if self.use_relu:
out = self.relu(out)
out = self.sparse2(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1, bias=True):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=bias)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=bias)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.relu = nn.ReLU()
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=bias),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = self.relu(out)
return out
class SparseBottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1, sparsity=0.5, use_relu=True, sparse_func='reg', bias=True):
super(SparseBottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=bias)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=bias)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.relu = nn.ReLU()
self.sparse1 = models.sparse_func_dict[sparse_func](sparsity)
self.sparse2 = models.sparse_func_dict[sparse_func](sparsity)
self.sparse3 = models.sparse_func_dict[sparse_func](sparsity)
self.use_relu = use_relu
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=bias),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = self.bn1(self.conv1(x))
if self.use_relu:
out = self.relu(out)
out = self.sparse1(out)
out = self.bn2(self.conv2(out))
if self.use_relu:
out = self.relu(out)
out = self.sparse2(out)
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
if self.use_relu:
out = self.relu(out)
out = self.sparse3(out)
return out
class SparseResNet(nn.Module):
def __init__(self, block, num_blocks, sparsities, num_classes=10, use_relu=True, sparse_func='reg', bias=True):
super(SparseResNet, self).__init__()
self.in_planes = 64
self.use_relu = use_relu
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=bias)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1, sparsity=sparsities[0], sparse_func=sparse_func, bias=bias)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2, sparsity=sparsities[1], sparse_func=sparse_func, bias=bias)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2, sparsity=sparsities[2], sparse_func=sparse_func, bias=bias)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2, sparsity=sparsities[3], sparse_func=sparse_func, bias=bias)
self.linear = nn.Linear(512*block.expansion, num_classes)
self.relu = nn.ReLU()
self.activation = {}
def get_activation(self, name):
def hook(model, input, output):
self.activation[name] = output.cpu().detach()
return hook
def register_layer(self, layer, name):
layer.register_forward_hook(self.get_activation(name))
def _make_layer(self, block, planes, num_blocks, stride, sparsity=0.5, sparse_func='reg', bias=True):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride, sparsity, self.use_relu, sparse_func=sparse_func, bias=bias))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class SparseResNet_ImageNet(nn.Module):
def __init__(self, block, num_blocks, sparsities, num_classes=1000, sparse_func='vol', bias=False):
super(SparseResNet_ImageNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=bias)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1, sparsity=sparsities[0], sparse_func=sparse_func, bias=bias)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2, sparsity=sparsities[1], sparse_func=sparse_func, bias=bias)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2, sparsity=sparsities[2], sparse_func=sparse_func, bias=bias)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2, sparsity=sparsities[3], sparse_func=sparse_func, bias=bias)
self.linear = nn.Linear(512*block.expansion, num_classes)
self.sp = models.sparse_func_dict[sparse_func](sparsities[0])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.activation = {}
def get_activation(self, name):
def hook(model, input, output):
self.activation[name] = output.cpu().detach()
return hook
def register_layer(self, layer, name):
layer.register_forward_hook(self.get_activation(name))
def _make_layer(self, block, planes, num_blocks, stride, sparsity=0.5, sparse_func='reg', bias=True):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride, sparsity, use_relu=False, sparse_func=sparse_func, bias=bias))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.sp(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
self.relu = nn.ReLU()
self.activation = {}
def get_activation(self, name):
def hook(model, input, output):
self.activation[name] = output.cpu().detach()
return hook
def register_layer(self, layer, name):
layer.register_forward_hook(self.get_activation(name))
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNet18():
return ResNet(BasicBlock, [2,2,2,2])
def ResNet34():
return ResNet(BasicBlock, [3,4,6,3])
def ResNet50():
return ResNet(Bottleneck, [3,4,6,3])
def ResNet101():
return ResNet(Bottleneck, [3,4,23,3])
def ResNet152():
return ResNet(Bottleneck, [3,8,36,3])
def SparseResNet18(relu=False, sparsities=[0.5,0.4,0.3,0.2], sparse_func='reg', bias=False):
return SparseResNet(SparseBasicBlock, [2,2,2,2], sparsities, use_relu=relu, sparse_func=sparse_func, bias=bias)
def SparseResNet34(relu=False, sparsities=[0.5,0.4,0.3,0.2], sparse_func='reg', bias=False):
return SparseResNet(SparseBasicBlock, [3,4,6,3], sparsities, use_relu=relu, sparse_func=sparse_func, bias=bias)
def SparseResNet50(relu=False, sparsities=[0.5,0.4,0.3,0.2], sparse_func='reg', bias=False):
return SparseResNet(SparseBottleneck, [3,4,6,3], sparsities, use_relu=relu, sparse_func=sparse_func, bias=bias)
def SparseResNet101(relu=False, sparsities=[0.5,0.4,0.3,0.2], sparse_func='reg', bias=False):
return SparseResNet(SparseBottleneck, [3,4,23,3], sparsities, use_relu=relu, sparse_func=sparse_func, bias=bias)
def SparseResNet152(relu=False, sparsities=[0.5,0.4,0.3,0.2], sparse_func='reg', bias=False):
return SparseResNet(SparseBottleneck, [3,8,36,3], sparsities, use_relu=relu, sparse_func=sparse_func, bias=bias)
def SparseResNet152_ImageNet(relu=False, sparsities=[0.5,0.4,0.3,0.2], sparse_func='reg', bias=False):
return SparseResNet_ImageNet(SparseBottleneck, [3,8,36,3], sparsities, sparse_func=sparse_func, bias=bias)
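# --- Usage sketch (illustrative; not part of the original module) ---
# Build a CIFAR-sized sparse ResNet-18 with the default layer-wise
# sparsities and run one forward pass on random data:
if __name__ == '__main__':
    net = SparseResNet18(sparse_func='reg')
    logits = net(torch.rand(2, 3, 32, 32))
    print(logits.shape)  # torch.Size([2, 10])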
########### End resnet related ################## |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import datasets, transforms
import torchvision
from torch.autograd import Variable
def isnotebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
if isnotebook():
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm
def epoch(loader, model, opt=None, device=None, use_tqdm=False):
"""Standard training/evaluation epoch over the dataset"""
total_loss, total_err = 0.,0.
if opt is None:
model.eval()
else:
model.train()
if use_tqdm:
pbar = tqdm(total=len(loader))
for X,y in loader:
X,y = X.to(device), y.to(device)
yp = model(X)
loss = nn.CrossEntropyLoss()(yp,y)
if opt:
opt.zero_grad()
loss.backward()
opt.step()
total_err += (yp.max(dim=1)[1] != y).sum().item()
total_loss += loss.item() * X.shape[0]
if use_tqdm:
pbar.update(1)
return total_err / len(loader.dataset), total_loss / len(loader.dataset)
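# --- Usage sketch (illustrative; the model and loaders are placeholders) ---
#   opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
#   for ep in range(num_epochs):
#       train_err, train_loss = epoch(train_loader, model, opt, device=device)
#       test_err, test_loss = epoch(test_loader, model, device=device)
#       print(ep, train_err, test_err)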
def epoch_imagenet(loader, model, opt=None, device=None, use_tqdm=False):
total_loss, total_err_top1, total_err_top5 = 0., 0., 0.
    if opt is None:
        model.eval()
    else:
        model.train()
if use_tqdm:
pbar = tqdm(total=len(loader))
model.to(device)
for X,y in loader:
X,y = X.to(device), y.to(device)
yp = model(X)
loss = nn.CrossEntropyLoss()(yp, y)
if opt:
opt.zero_grad()
loss.backward()
opt.step()
total_err_top1 += (yp.max(dim=1)[1] != y).sum().item()
_, pred = yp.topk(5, dim=1, sorted=True, largest=True)
pred = pred.t()
        # count top-5 errors (the original accumulated correct matches here)
        total_err_top5 += X.shape[0] - pred.eq(y.view(1,-1).expand_as(pred)).sum().item()
total_loss += loss.item()*X.shape[0]
if use_tqdm:
pbar.update(1)
return total_err_top1/len(loader.dataset), total_err_top5/len(loader.dataset), total_loss/len(loader.dataset)
def epoch_imagenet_adversarial(loader, model, device, attack, use_tqdm=False, n_test=None, **kwargs):
"""Adversarial training/evaluation epoch over the dataset"""
total_loss, total_err_top1, total_err_top5 = 0., 0., 0.
if use_tqdm:
if n_test is None:
pbar = tqdm(total=len(loader.dataset))
else:
pbar = tqdm(total=n_test)
total_n = 0
model.to(device)
for X,y in loader:
X,y = X.to(device), y.to(device)
delta = attack(model, X, y, **kwargs)
yp = model(X+delta)
loss = nn.CrossEntropyLoss()(yp,y)
total_err_top1 += (yp.max(dim=1)[1] != y).sum().item()
_, pred = yp.topk(5, dim=1, sorted=True, largest=True)
pred = pred.t()
        # count top-5 errors (the original accumulated correct matches here)
        total_err_top5 += X.shape[0] - pred.eq(y.view(1,-1).expand_as(pred)).sum().item()
total_loss += loss.item()*X.shape[0]
if use_tqdm:
pbar.update(X.shape[0])
total_n += X.shape[0]
if n_test is not None:
if total_n >= n_test:
break
return total_err_top1/total_n, total_err_top5/total_n, total_loss/total_n
def epoch_func(loader, model, criterion, opt=None, device=None, use_tqdm=False):
total_loss = 0.
model.to(device)
if use_tqdm:
pbar = tqdm(total=len(loader))
for X,y in loader:
X,y = X.to(device), y.to(device)
yp = model(X)
loss = criterion(yp,y)
if opt:
opt.zero_grad()
loss.backward()
opt.step()
total_loss += loss.item() * X.shape[0]
if use_tqdm:
pbar.update(1)
return total_loss / len(loader.dataset)
def epoch_distill_func(loader, model_teacher, model, device, opt=None, use_tqdm=True, n_test=None, loss_func='mse'):
total_loss, total_err = 0.,0.
total_n = 0
model_teacher.to(device)
model.to(device)
if use_tqdm:
if n_test is None:
pbar = tqdm(total=len(loader.dataset))
else:
pbar = tqdm(total=n_test)
for X, y in loader:
X, y = X.to(device), y.to(device)
teacher_output = model_teacher(X).detach()
output = model(X)
if loss_func=='mse':
loss = nn.MSELoss()(output, teacher_output)
elif loss_func=='l1':
loss = nn.L1Loss()(output, teacher_output)
elif loss_func=='kl':
loss = nn.KLDivLoss()(F.log_softmax(output, dim=1),
F.softmax(teacher_output, dim=1))
else:
raise NotImplementedError
if opt:
opt.zero_grad()
loss.backward()
opt.step()
total_loss += loss.item() * X.shape[0]
total_n += X.shape[0]
if use_tqdm:
pbar.update(X.shape[0])
if n_test is not None:
if total_n > n_test:
break
return total_loss/total_n
def epoch_distill(loader, model_teacher, model, device, opt=None, use_tqdm=True, n_test=None, loss_func='mse'):
total_loss, total_err = 0.,0.
total_n = 0
model_teacher.to(device)
model.to(device)
if use_tqdm:
if n_test is None:
pbar = tqdm(total=len(loader.dataset))
else:
pbar = tqdm(total=n_test)
for X, y in loader:
X, y = X.to(device), y.to(device)
teacher_output = model_teacher(X).detach()
output = model(X)
if loss_func=='mse':
loss = nn.MSELoss()(output, teacher_output)
elif loss_func=='l1':
loss = nn.L1Loss()(output, teacher_output)
elif loss_func=='kl':
loss = nn.KLDivLoss()(F.log_softmax(output, dim=1),
F.softmax(teacher_output, dim=1))
else:
raise NotImplementedError
if opt:
opt.zero_grad()
loss.backward()
opt.step()
total_err += (output.max(dim=1)[1] != y).sum().item()
total_loss += loss.item() * X.shape[0]
total_n += X.shape[0]
if use_tqdm:
pbar.update(X.shape[0])
if n_test is not None:
if total_n > n_test:
break
return total_loss/total_n, total_err/total_n
def epoch_transfer_attack(loader, model_source, model_target, attack, device, success_only=False, use_tqdm=True, n_test=None, **kwargs):
source_err = 0.
target_err = 0.
target_err2 = 0.
success_total_n = 0
model_source.eval()
model_target.eval()
total_n = 0
if use_tqdm:
pbar = tqdm(total=n_test)
model_source.to(device)
model_target.to(device)
for X,y in loader:
X,y = X.to(device), y.to(device)
delta = attack(model_source, X, y, **kwargs)
if success_only:
raise NotImplementedError
else:
yp_target = model_target(X+delta).detach()
yp_source = model_source(X+delta).detach()
yp_origin = model_target(X).detach()
source_err += (yp_source.max(dim=1)[1] != y).sum().item()
target_err += (yp_target.max(dim=1)[1] != y).sum().item()
target_err2 += (yp_origin.max(dim=1)[1] != y).sum().item()
            success_total_n += (yp_origin.max(dim=1)[1] == y).sum().item()
if use_tqdm:
pbar.update(X.shape[0])
total_n += X.shape[0]
if n_test is not None:
if total_n >= n_test:
break
return source_err / total_n, target_err / total_n, target_err2 /total_n
def epoch_free_adversarial(loader, model, m, epsilon, opt, device, use_tqdm=False):
"""free adversarial training"""
total_loss, total_err = 0.,0.
total_n = 0
    if use_tqdm:
        pbar = tqdm(total=len(loader))
for X,y in loader:
X,y = X.to(device), y.to(device)
delta = torch.zeros_like(X, requires_grad=True)
for i in range(m):
model.train()
yp = model(X+delta)
loss_nn = nn.CrossEntropyLoss()(yp, y)
total_err += (yp.max(dim=1)[1] != y).sum().item()
total_loss += loss_nn.item() * X.shape[0]
total_n += X.shape[0]
#update network
opt.zero_grad()
loss_nn.backward()
opt.step()
#update perturbation
delta.data = delta + epsilon*delta.grad.detach().sign()
delta.data = delta.data.clamp(-epsilon, epsilon)
delta.grad.zero_()
if use_tqdm:
pbar.update(1)
return total_err / total_n, total_loss / total_n
def epoch_ALP(loader, model, attack, alp_weight=0.5,
opt=None, device=None, use_tqdm=False, n_test=None, **kwargs):
"""Adversarial Logit Pairing epoch over the dataset"""
total_loss, total_err = 0.,0.
# assert(opt is not None)
model.train()
if use_tqdm:
if n_test is None:
pbar = tqdm(total=len(loader.dataset))
else:
pbar = tqdm(total=n_test)
total_n = 0
for X,y in loader:
X,y = X.to(device), y.to(device)
model.eval()
with torch.no_grad():
clean_logit = model(X)
delta = attack(model, X, y, **kwargs)
model.train()
yp = model(X+delta)
loss = nn.CrossEntropyLoss()(yp,y) + alp_weight*nn.MSELoss()(yp, clean_logit)
opt.zero_grad()
loss.backward()
opt.step()
total_err += (yp.max(dim=1)[1] != y).sum().item()
total_loss += loss.item() * X.shape[0]
if use_tqdm:
pbar.update(X.shape[0])
total_n += X.shape[0]
if n_test is not None:
if total_n >= n_test:
break
return total_err / total_n, total_loss / total_n
def epoch_adversarial(loader, model, attack,
opt=None, device=None, use_tqdm=False, n_test=None, **kwargs):
"""Adversarial training/evaluation epoch over the dataset"""
total_loss, total_err = 0.,0.
if opt is None:
model.eval()
else:
model.train()
if use_tqdm:
if n_test is None:
pbar = tqdm(total=len(loader.dataset))
else:
pbar = tqdm(total=n_test)
total_n = 0
for X,y in loader:
X,y = X.to(device), y.to(device)
model.eval()
delta = attack(model, X, y, **kwargs)
if opt:
model.train()
yp = model(X+delta)
loss = nn.CrossEntropyLoss()(yp,y)
if opt:
opt.zero_grad()
loss.backward()
opt.step()
total_err += (yp.max(dim=1)[1] != y).sum().item()
total_loss += loss.item() * X.shape[0]
if use_tqdm:
pbar.update(X.shape[0])
total_n += X.shape[0]
if n_test is not None:
if total_n >= n_test:
break
return total_err / total_n, total_loss / total_n
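# --- Minimal attack sketch (illustrative; not the attack used elsewhere in
# this repository). epoch_adversarial and friends expect an attack with the
# signature attack(model, X, y, **kwargs) returning a perturbation delta of
# the same shape as X. A single-step FGSM satisfying that interface:
def fgsm_attack(model, X, y, epsilon=8 / 255):
    delta = torch.zeros_like(X, requires_grad=True)
    loss = nn.CrossEntropyLoss()(model(X + delta), y)
    loss.backward()
    # one signed-gradient step, clipped to the epsilon ball
    return (epsilon * delta.grad.detach().sign()).clamp(-epsilon, epsilon)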
def get_activation(model, activation, name):
def hook(model, input, output):
activation[name] = output.cpu().detach()
return hook
def register_layer(model, layer, activation, name):
layer.register_forward_hook(get_activation(model, activation, name))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)  # reshape: the slice may be non-contiguous
res.append(correct_k.mul_(100.0 / batch_size))
return res
def validate(val_loader, model, criterion, device):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (inp, target) in enumerate(val_loader):
target = target.to(device)
inp = inp.to(device)
# compute output
output = model(inp)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.item(), inp.size(0))
top1.update(prec1.item(), inp.size(0))
top5.update(prec5.item(), inp.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % 10 == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def squared_l2_norm(x):
flattened = x.view(x.shape[0], -1)
return (flattened ** 2).sum(1)
def l2_norm(x):
return squared_l2_norm(x).sqrt()
def trades_loss(model,
x_natural,
y,
optimizer,
step_size=0.003,
epsilon=0.031,
perturb_steps=10,
beta=1.0,
distance='l_inf'):
# define KL-loss
    criterion_kl = nn.KLDivLoss(reduction='sum')  # equivalent to the deprecated size_average=False
model.eval()
batch_size = len(x_natural)
# generate adversarial example
x_adv = x_natural.detach() + 0.001 * torch.randn_like(x_natural).detach()
if distance == 'l_inf':
for _ in range(perturb_steps):
x_adv.requires_grad_()
with torch.enable_grad():
loss_kl = criterion_kl(F.log_softmax(model(x_adv), dim=1),
F.softmax(model(x_natural), dim=1))
grad = torch.autograd.grad(loss_kl, [x_adv])[0]
x_adv = x_adv.detach() + step_size * torch.sign(grad.detach())
x_adv = torch.min(torch.max(x_adv, x_natural - epsilon), x_natural + epsilon)
x_adv = torch.clamp(x_adv, 0.0, 1.0)
elif distance == 'l_2':
for _ in range(perturb_steps):
x_adv.requires_grad_()
with torch.enable_grad():
loss_kl = criterion_kl(F.log_softmax(model(x_adv), dim=1),
F.softmax(model(x_natural), dim=1))
grad = torch.autograd.grad(loss_kl, [x_adv])[0]
for idx_batch in range(batch_size):
grad_idx = grad[idx_batch]
grad_idx_norm = l2_norm(grad_idx)
grad_idx /= (grad_idx_norm + 1e-8)
x_adv[idx_batch] = x_adv[idx_batch].detach() + step_size * grad_idx
eta_x_adv = x_adv[idx_batch] - x_natural[idx_batch]
norm_eta = l2_norm(eta_x_adv)
if norm_eta > epsilon:
eta_x_adv = eta_x_adv * epsilon / l2_norm(eta_x_adv)
x_adv[idx_batch] = x_natural[idx_batch] + eta_x_adv
x_adv = torch.clamp(x_adv, 0.0, 1.0)
else:
x_adv = torch.clamp(x_adv, 0.0, 1.0)
model.train()
x_adv = Variable(torch.clamp(x_adv, 0.0, 1.0), requires_grad=False)
# zero gradient
optimizer.zero_grad()
# calculate robust loss
logits = model(x_natural)
loss_natural = F.cross_entropy(logits, y)
loss_robust = (1.0 / batch_size) * criterion_kl(F.log_softmax(model(x_adv), dim=1),
F.softmax(model(x_natural), dim=1))
loss = loss_natural + beta * loss_robust
return loss
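# --- Usage sketch (illustrative; the hyperparameters follow common TRADES
# settings and are assumptions, not values fixed by this file) ---
#   loss = trades_loss(model, X, y, optimizer=opt, step_size=2/255,
#                      epsilon=8/255, perturb_steps=10, beta=6.0)
#   loss.backward()
#   opt.step()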
def epoch_trade(loader, model,
opt, device=None, **kwargs):
model.train()
for batch_idx, (data, target) in enumerate(loader):
data, target = data.to(device), target.to(device)
opt.zero_grad()
# calculate robust loss
loss = trades_loss(model=model,
x_natural=data,
y=target,
optimizer=opt,
**kwargs)
# step_size=args.step_size,
# epsilon=args.epsilon,
# perturb_steps=args.num_steps,
# beta=args.beta)
loss.backward()
opt.step()
return 0, 0 |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torchvision.transforms as transforms
from PIL import Image
def svhn_transform():
transform_train = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
return transform_train, transform_test
def cifar_transform():
transform_train = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(size=[32, 32], padding=4),
transforms.ToTensor(),
#transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
transform_test = transforms.Compose([
transforms.ToTensor(),
#transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
return transform_train, transform_test
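# --- Usage sketch (illustrative; the torchvision import is assumed) ---
#   train_t, test_t = cifar_transform()
#   trainset = torchvision.datasets.CIFAR10(root='~/cifar', train=True,
#                                           download=True, transform=train_t)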
imagenet_pca = {
'eigval':
np.asarray([0.2175, 0.0188, 0.0045]),
'eigvec':
np.asarray([
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
])
}
class Lighting(object):
def __init__(self,
alphastd,
eigval=imagenet_pca['eigval'],
eigvec=imagenet_pca['eigvec']):
self.alphastd = alphastd
assert eigval.shape == (3, )
assert eigvec.shape == (3, 3)
self.eigval = eigval
self.eigvec = eigvec
def __call__(self, img):
if self.alphastd == 0.:
return img
rnd = np.random.randn(3) * self.alphastd
rnd = rnd.astype('float32')
v = rnd
old_dtype = np.asarray(img).dtype
v = v * self.eigval
v = v.reshape((3, 1))
inc = np.dot(self.eigvec, v).reshape((3, ))
img = np.add(img, inc)
if old_dtype == np.uint8:
img = np.clip(img, 0, 255)
img = Image.fromarray(img.astype(old_dtype), 'RGB')
return img
def __repr__(self):
return self.__class__.__name__ + '()'
def imagenet_transform(img_size=224):
normalize = transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
jitter_param = 0.4
lighting_param = 0.1
transform_train = transforms.Compose([
transforms.RandomResizedCrop(img_size, scale=(0.25, 1)),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=jitter_param,
contrast=jitter_param,
saturation=jitter_param),
Lighting(lighting_param),
transforms.ToTensor(), normalize
])
transform_test = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(img_size),
transforms.ToTensor(), normalize
])
return transform_train, transform_test |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import urllib.request
import torch
import torchvision
import numpy as np
import foolbox as fb
import eagerpy as ep
import transforms
import resnet_3layer as resnet
from tqdm import tqdm
import torch.nn as nn
import argparse
num_sample_MIOL = 15
lamdaOL = 0.6
device = "cuda" if torch.cuda.is_available() else "cpu"
class LambdaWrapper(torch.nn.Module):
def __init__(self, lbd, module):
super().__init__()
self.lbd = lbd
self.module = module
def forward(self, x, *args, **kwargs):
return self.module(self.lbd(x), *args, **kwargs)
def load_classifier():
filename = 'mixup_model_IAT.ckpt'
url = f'https://github.com/wielandbrendel/robustness_workshop/releases/download/v0.0.1/{filename}'
if not os.path.isfile(filename):
print('Downloading pretrained weights.')
urllib.request.urlretrieve(url, filename)
CLASSIFIER = resnet.model_dict['resnet50']
classifier = CLASSIFIER(num_classes=10)
device = torch.device("cuda:0")
classifier = classifier.to(device)
classifier.load_state_dict(torch.load('mixup_model_IAT.ckpt'))
# transform (0, 1) to (-1, 1) value range
classifier = LambdaWrapper(
lambda x: x * 2 - 1.0,
classifier
)
classifier.eval()
return classifier
def onehot(ind):
vector = np.zeros([10])
vector[ind] = 1
return vector.astype(np.float32)
def prepare_data():
train_trans, test_trans = transforms.cifar_transform()
trainset = torchvision.datasets.CIFAR10(root='~/cifar/',
train=False,
download=True,
transform=train_trans,
target_transform=onehot)
testset = torchvision.datasets.CIFAR10(root='~/cifar/',
train=False,
download=True,
transform=test_trans,
target_transform=onehot)
    # the testset is used as-is (the line below is a no-op kept from the
    # original workshop code, which truncated testset.data here)
    testset.data = testset.data
dataloader_train = torch.utils.data.DataLoader(
trainset,
batch_size=100,
shuffle=True,
num_workers=1)
dataloader_test = torch.utils.data.DataLoader(
testset,
batch_size=50,
shuffle=True,
num_workers=1)
return dataloader_test, dataloader_train
def setup_pool(dataloader, num_pool=10000, n_classes=10):
mixup_pool_OL = {}
for i in range(n_classes):
mixup_pool_OL.update({i: []})
n_samples = 0
for i, data_batch in tqdm(enumerate(dataloader), leave=False):
img_batch, label_batch = data_batch
img_batch = img_batch.to(device)
if len(label_batch.shape) > 1:
_, label_indices = torch.max(label_batch.data, 1)
else:
label_indices = label_batch
for j, label_ind in enumerate(label_indices.cpu().numpy()):
mixup_pool_OL[label_ind].append(img_batch[j])
n_samples += 1
if n_samples >= num_pool:
break
return mixup_pool_OL
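# --- Usage sketch (illustrative) ---
# Builds one list of candidate mixup images per class; CombinedModel below
# draws from these pools at inference time:
#
#   dataloader_test, dataloader_train = prepare_data()
#   mixup_pool_OL = setup_pool(dataloader_test, num_pool=10000)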
class CombinedModel(nn.Module):
def __init__(self, classifier, mixup_pool_OL, n_classes=10, deterministic=False):
super(CombinedModel, self).__init__()
self.classifier = classifier
self.soft_max = nn.Softmax(dim=-1)
self.mixup_pool_OL = mixup_pool_OL
self.n_classes = n_classes
self.deterministic = deterministic
self.rng = np.random.default_rng()
for i in range(n_classes):
assert i in mixup_pool_OL
def forward(self, img_batch, no_mixup=False, features_only=False,
features_and_logits=False):
pred_cle_mixup_all_OL = 0 # torch.Tensor([0.]*10)
# forward pass without PL/OL
# TODO: does this make sense if the classifier wasn't adapted to binary
# task yet?
pred_cle = self.classifier(img_batch)
if no_mixup:
return pred_cle
cle_con, predicted_cle = torch.max(self.soft_max(pred_cle.data), 1)
predicted_cle = predicted_cle.cpu().numpy()
all_features = []
all_logits = []
if self.deterministic:
self.rng = np.random.default_rng(seed=0)
# perform MI-OL
for k in range(num_sample_MIOL):
mixup_img_batch = np.empty(img_batch.shape, dtype=np.float32)
for b in range(img_batch.shape[0]):
# CLEAN
xs_cle_label = self.rng.integers(self.n_classes)
while xs_cle_label == predicted_cle[b]:
xs_cle_label = self.rng.integers(self.n_classes)
xs_cle_index = self.rng.integers(len(self.mixup_pool_OL[xs_cle_label]))
mixup_img_cle = (1 - lamdaOL) * \
self.mixup_pool_OL[xs_cle_label][xs_cle_index][0]
mixup_img_batch[b] = mixup_img_cle.cpu().detach().numpy()
mixup_img_batch = ep.from_numpy(ep.astensor(img_batch),
mixup_img_batch).raw + lamdaOL * img_batch
if features_only:
features = self.classifier(mixup_img_batch, features_only=True)
all_features.append(features)
elif features_and_logits:
features, logits = self.classifier(mixup_img_batch, features_and_logits=True)
all_features.append(features)
all_logits.append(logits)
else:
pred_cle_mixup = self.classifier(mixup_img_batch)
pred_cle_mixup_all_OL = pred_cle_mixup_all_OL + self.soft_max(
pred_cle_mixup)
if features_only:
all_features = torch.stack(all_features, 1)
return all_features
elif features_and_logits:
all_features = torch.stack(all_features, 1)
all_logits = torch.stack(all_logits, 1)
return all_features, all_logits
else:
pred_cle_mixup_all_OL = pred_cle_mixup_all_OL / num_sample_MIOL
return pred_cle_mixup_all_OL
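# --- Usage sketch (illustrative) ---
#   classifier = load_classifier()
#   combined = CombinedModel(classifier, mixup_pool_OL,
#                            deterministic=True).eval()
#   probs = combined(img_batch)                   # MI-OL averaged prediction
#   logits = combined(img_batch, no_mixup=True)   # plain classifier output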
def adversarial_evaluate(model, dataloader, attack_fn, attack_mode, epsilon,
eval_no_mixup=False, verbose=True, n_samples=-1, kwargs={}):
all_attack_successful = []
all_x_adv = []
all_logits_adv = []
if verbose:
pbar = tqdm(dataloader)
else:
pbar = dataloader
if attack_mode == "adaptive-pgd":
attacked_model = fb.models.PyTorchModel(model, bounds=(0, 1), device=device)
elif attack_mode == "pgd":
attacked_model = fb.models.PyTorchModel(model.classifier, bounds=(0, 1),
device=device)
else:
raise ValueError()
total_samples = 0
correct_classified = 0
for images, labels in pbar:
images = images.to(device)
labels = labels.to(device)
if len(labels.shape) == 2:
labels = labels.argmax(1)
N = len(images)
_, adv_clipped, _ = attack_fn(attacked_model, images, labels,
epsilons=epsilon)
with torch.no_grad():
all_x_adv.append(adv_clipped.detach().cpu().numpy())
logits_adv = model(adv_clipped, no_mixup=eval_no_mixup)
all_logits_adv.append(logits_adv.cpu().numpy())
attack_successful = (
logits_adv.argmax(-1) != labels).detach().cpu().numpy()
all_attack_successful.append(attack_successful)
total_samples += N
correct_classified += (N - attack_successful.sum())
if verbose:
pbar.set_description(
f'Model accuracy on adversarial examples: {correct_classified / total_samples:.3f}')
if n_samples != -1 and total_samples > n_samples:
break
all_attack_successful = np.concatenate(all_attack_successful, 0)
all_x_adv = np.concatenate(all_x_adv, 0)
all_logits_adv = np.concatenate(all_logits_adv, 0)
if verbose:
print(
f'Model accuracy on adversarial examples: {correct_classified / total_samples:.3f}')
return all_attack_successful, (torch.tensor(all_x_adv, device="cpu"),
torch.tensor(all_logits_adv, device=device))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--attack", choices=("pgd", "adaptive-pgd"),
default="pgd")
parser.add_argument("--deterministic", action="store_true")
parser.add_argument("--epsilon", type=int, default=8)
parser.add_argument("--pgd-steps", type=int, default=50)
parser.add_argument("--n-samples", type=int, default=-1)
args = parser.parse_args()
classifier = load_classifier()
dataloader_test, dataloader_train = prepare_data()
mixup_pool_OL = setup_pool(dataloader_test)
    # pass deterministic by keyword; positionally it would bind to n_classes
    combined_classifier = CombinedModel(classifier, mixup_pool_OL,
                                        deterministic=args.deterministic)
combined_classifier.eval()
attack_mode = args.attack
epsilon = args.epsilon / 255
attack = fb.attacks.LinfPGD(steps=args.pgd_steps, abs_stepsize=1 / 255)
adversarial_evaluate(combined_classifier, dataloader_test, attack,
attack_mode,
epsilon, verbose=True, n_samples=args.n_samples)
if __name__ == "__main__":
main()
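# Example invocation (illustrative; the script's file name is an assumption):
#   python adversarial_evaluation.py --attack adaptive-pgd --epsilon 8 --pgd-steps 50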
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
__all__ = [
'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'resnext50_32x4d', 'resnext101_32x8d'
]
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=1,
stride=stride,
bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError(
'BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError(
"Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
block,
layers,
num_classes=1000,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
self.final_feature_dimen = 256
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(
replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3,
self.inplanes,
kernel_size=5,
stride=1,
padding=2,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
# self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block,
128,
layers[1],
stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block,
256,
layers[2],
stride=2,
dilate=replace_stride_with_dilation[1])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc_dense = nn.Linear(256, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight,
mode='fan_out',
nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x, features_only=False, features_and_logits=False):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
# x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
# x = self.layer4(x)
x = self.avgpool(x)
x = x.reshape(x.size(0), -1)
if features_only:
return x
logits = self.fc_dense(x)
if features_and_logits:
return x, logits
return logits
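# --- Usage sketch (illustrative; not part of the original module) ---
# This trimmed ResNet keeps three stages (no maxpool/layer4), so it matches
# 32x32 inputs such as CIFAR-10:
if __name__ == '__main__':
    net = ResNet(BasicBlock, [3, 3, 3], num_classes=10)
    out = net(torch.rand(2, 3, 32, 32))
    print(out.shape)  # torch.Size([2, 10])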
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
    if pretrained:
        # `model_urls` is not defined for these trimmed 3-stage variants,
        # so no pretrained weights can be downloaded here.
        raise NotImplementedError(
            'pretrained weights are not available for resnet_3layer models')
return model
def resnet18(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [3, 3, 3], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [5, 5, 5], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [5, 5, 5], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [11, 11, 11], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [16, 18, 16], pretrained, progress,
**kwargs)
model_dict = {
'resnet18': resnet18,
'resnet34': resnet34,
'resnet50': resnet50,
'resnet101': resnet101,
'resnet152': resnet152,
} |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import foolbox as fb
import torch
from torch.utils.data import DataLoader, TensorDataset
from functools import partial
from active_tests.decision_boundary_binarization import LogitRescalingType
from adversarial_evaluation import load_classifier, setup_pool, \
prepare_data, adversarial_evaluate, CombinedModel, device, LambdaWrapper
from active_tests.decision_boundary_binarization import \
interior_boundary_discrimination_attack, \
format_result, _train_logistic_regression_classifier
from argparse_utils import DecisionBoundaryBinarizationSettings
def train_classifier(
n_features: int,
train_loader: DataLoader,
raw_train_loader: DataLoader,
logits: torch.Tensor,
device: str,
rescale_logits: LogitRescalingType,
base_classifier: torch.nn.Module,
deterministic: bool) -> torch.nn.Module:
data_x, data_y = train_loader.dataset.tensors
data_y = data_y.repeat_interleave(data_x.shape[1])
data_x = data_x.view(-1, data_x.shape[-1])
train_loader = DataLoader(TensorDataset(data_x, data_y),
batch_size=train_loader.batch_size)
binary_classifier = _train_logistic_regression_classifier(
n_features, train_loader, logits, optimizer="sklearn", lr=10000,
device=device, rescale_logits=rescale_logits,
solution_goodness="good")
mixup_pool_OL = setup_pool(raw_train_loader, n_classes=2)
classifier = LambdaWrapper(
lambda x, **kwargs: base_classifier(x, features_only=True, **kwargs),
binary_classifier)
return CombinedModel(classifier, mixup_pool_OL, n_classes=2, deterministic=deterministic).eval()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--attack", choices=("pgd", "adaptive-pgd"),
default="pgd")
parser.add_argument("--deterministic", action="store_true")
parser.add_argument("--epsilon", type=int, default=8)
parser.add_argument("--pgd-steps", type=int, default=50)
parser.add_argument("--n-samples", type=int, default=-1)
parser.add_argument("--n-boundary-points", type=int, default=1)
parser.add_argument("--n-inner-points", type=int, default=999)
parser.add_argument("--sample-from-corners", action="store_true")
args = parser.parse_args()
classifier = load_classifier()
dataloader_test, dataloader_train = prepare_data()
mixup_pool_OL = setup_pool(dataloader_test)
combined_classifier = CombinedModel(classifier, mixup_pool_OL, deterministic=args.deterministic)
combined_classifier.eval()
attack_mode = args.attack
epsilon = args.epsilon / 255
attack = fb.attacks.LinfPGD(steps=args.pgd_steps, abs_stepsize=1 / 255)
def eval_model(m, l, kwargs):
if "reference_points_x" in kwargs:
far_off_reference_ds = torch.utils.data.TensorDataset(kwargs["reference_points_x"],
kwargs["reference_points_y"])
far_off_reference_dl = torch.utils.data.DataLoader(far_off_reference_ds, batch_size=4096)
new_mixup_pool_OL = setup_pool(far_off_reference_dl, n_classes=2)
for k in new_mixup_pool_OL:
if len(new_mixup_pool_OL[k]) > 0:
m.mixup_pool_OL[k] = new_mixup_pool_OL[k]
return adversarial_evaluate(m, l, attack, attack_mode,
epsilon, verbose=False)
scores_logit_differences_and_validation_accuracies = \
interior_boundary_discrimination_attack(
combined_classifier,
dataloader_test,
attack_fn=eval_model,
linearization_settings=DecisionBoundaryBinarizationSettings(
epsilon=args.epsilon / 255.0,
norm="linf",
lr=45000,
n_boundary_points=args.n_boundary_points,
n_inner_points=args.n_inner_points,
adversarial_attack_settings=None,
optimizer="sklearn",
n_far_off_boundary_points=0
),
n_samples=args.n_samples,
batch_size=4096,
device=device,
n_samples_evaluation=200,
n_samples_asr_evaluation=200,
rescale_logits="adaptive",
train_classifier_fn=partial(train_classifier, base_classifier=classifier,
deterministic=args.deterministic),
n_inference_repetitions_boundary=5,
n_inference_repetitions_inner=1,
relative_inner_boundary_gap=0.05,
decision_boundary_closeness=0.999,
far_off_distance=3,
sample_training_data_from_corners=args.sample_from_corners
)
print(format_result(scores_logit_differences_and_validation_accuracies,
args.n_samples))
if __name__ == "__main__":
main()
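# Illustrative invocation (the file name is hypothetical; the flags are the
# ones defined in main() above):
#   python binarization_test.py --attack adaptive-pgd --epsilon 8 \
#       --pgd-steps 50 --n-samples 512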
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Modified from https://github.com/pytorch/vision.git
'''
import math
import torch.nn as nn
import torch.nn.init as init
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19', 'vgg11_nd', 'vgg11_nd_s', 'vgg13_nd', 'vgg13_nd_s', 'vgg16_nd', 'vgg16_nd_s', 'vgg19_nd', 'vgg19_nd_s',
'vgg11_nd_ss', 'vgg13_nd_ss', 'vgg16_nd_ss', 'vgg19_nd_ss',
]
class VGG(nn.Module):
'''
VGG model
'''
def __init__(self, features, dropout=True, small=False, supersmall=False):
super(VGG, self).__init__()
self.features = features
cls_layers = []
if dropout or supersmall:
cls_layers.append(nn.Dropout())
if not (small or supersmall):
cls_layers.append(nn.Linear(512, 512))
cls_layers.append(nn.ReLU())
if dropout:
cls_layers.append(nn.Dropout())
if not supersmall:
cls_layers.append(nn.Linear(512, 512))
cls_layers.append(nn.ReLU())
cls_layers.append(nn.Linear(512, 10))
self.classifier = nn.Sequential(*cls_layers)
# Initialize weights
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=False)]
else:
layers += [conv2d, nn.ReLU(inplace=False)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M',
512, 512, 512, 512, 'M'],
}
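# How make_layers reads these cfg lists: an integer v adds a 3x3 conv with v
# output channels (followed by ReLU, and BatchNorm when batch_norm=True),
# while 'M' adds a 2x2 max-pool. cfg['A'] (VGG-11) thus expands to
#   64, pool, 128, pool, 256, 256, pool, 512, 512, pool, 512, 512, pool
# and the five pools shrink a 32x32 input to a 512 x 1 x 1 feature map.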
def vgg11():
"""VGG 11-layer model (configuration "A")"""
return VGG(make_layers(cfg['A']))
def vgg11_nd():
"""VGG 11-layer model (configuration "A")"""
return VGG(make_layers(cfg['A']), dropout=False)
def vgg11_nd_s():
"""VGG 11-layer model (configuration "A")"""
return VGG(make_layers(cfg['A']), dropout=False, small=True)
def vgg11_nd_ss():
"""VGG 11-layer model (configuration "A")"""
return VGG(make_layers(cfg['A']), dropout=False, small=True, supersmall=True)
def vgg11_bn():
"""VGG 11-layer model (configuration "A") with batch normalization"""
return VGG(make_layers(cfg['A'], batch_norm=True))
def vgg13():
"""VGG 13-layer model (configuration "B")"""
return VGG(make_layers(cfg['B']))
def vgg13_nd():
"""VGG 13-layer model (configuration "B")"""
return VGG(make_layers(cfg['B']), dropout=False)
def vgg13_nd_s():
"""VGG 13-layer model (configuration "B")"""
return VGG(make_layers(cfg['B']), dropout=False, small=True)
def vgg13_nd_ss():
"""VGG 13-layer model (configuration "B")"""
return VGG(make_layers(cfg['B']), dropout=False, small=True, supersmall=True)
def vgg13_bn():
"""VGG 13-layer model (configuration "B") with batch normalization"""
return VGG(make_layers(cfg['B'], batch_norm=True))
def vgg16():
"""VGG 16-layer model (configuration "D")"""
return VGG(make_layers(cfg['D']))
def vgg16_nd():
"""VGG 16-layer model (configuration "D")"""
return VGG(make_layers(cfg['D']), dropout=False)
def vgg16_nd_s():
"""VGG 16-layer model (configuration "D")"""
return VGG(make_layers(cfg['D']), dropout=False, small=True)
def vgg16_nd_ss():
"""VGG 16-layer model (configuration "D")"""
return VGG(make_layers(cfg['D']), dropout=False, small=True, supersmall=True)
def vgg16_bn():
"""VGG 16-layer model (configuration "D") with batch normalization"""
return VGG(make_layers(cfg['D'], batch_norm=True))
def vgg19():
"""VGG 19-layer model (configuration "E")"""
return VGG(make_layers(cfg['E']))
def vgg19_nd():
"""VGG 19-layer model (configuration "E")"""
return VGG(make_layers(cfg['E']), dropout=False)
def vgg19_nd_s():
"""VGG 19-layer model (configuration "E")"""
return VGG(make_layers(cfg['E']), dropout=False, small=True)
def vgg19_nd_ss():
"""VGG 19-layer model (configuration "E")"""
return VGG(make_layers(cfg['E']), dropout=False, small=True, supersmall=True)
def vgg19_bn():
"""VGG 19-layer model (configuration 'E') with batch normalization"""
return VGG(make_layers(cfg['E'], batch_norm=True))
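# Minimal usage sketch (illustrative, not part of the original module):
#
#   import torch
#   model = vgg16_bn().eval()
#   with torch.no_grad():
#       logits = model(torch.randn(2, 3, 32, 32))
#   assert logits.shape == (2, 10)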
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Downloads a model, computes its SHA256 hash and unzips it
at the proper location."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import zipfile
import hashlib
if len(sys.argv) == 1 or sys.argv[1] not in ['natural',
'adv_trained',
'secret']:
  print('Usage: python fetch_Madry_ResNet.py [natural, adv_trained, secret]')
sys.exit(1)
if sys.argv[1] == 'natural':
url = 'https://www.dropbox.com/s/cgzd5odqoojvxzk/natural.zip?dl=1'
elif sys.argv[1] == 'adv_trained':
url = 'https://www.dropbox.com/s/g4b6ntrp8zrudbz/adv_trained.zip?dl=1'
else: # fetch secret model
url = 'https://www.dropbox.com/s/ywc0hg8lr5ba8zd/secret.zip?dl=1'
fname = url.split('/')[-1].split('?')[0] # get the name of the file
# model download
print('Downloading models')
if sys.version_info >= (3,):
import urllib.request
urllib.request.urlretrieve(url, fname)
else:
import urllib
urllib.urlretrieve(url, fname)
# computing model hash
sha256 = hashlib.sha256()
with open(fname, 'rb') as f:
data = f.read()
sha256.update(data)
print('SHA256 hash: {}'.format(sha256.hexdigest()))
# extracting model
print('Extracting model')
with zipfile.ZipFile(fname, 'r') as model_zip:
model_zip.extractall()
print('Extracted model in {}'.format(model_zip.namelist()[0]))
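# Note: f.read() above loads the entire archive into memory before hashing.
# For large downloads, a chunked variant (sketch, same hashlib API) would be:
#
#   sha256 = hashlib.sha256()
#   with open(fname, 'rb') as f:
#       for chunk in iter(lambda: f.read(1 << 20), b''):
#           sha256.update(chunk)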
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch as th
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
model_urls = {
'cifar10': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar10-d875770b.pth',
'cifar10_tiny': os.path.expanduser('~/models/advhyp/cifar10_tiny/cifar10_tiny-38058c52.pth'),
'cifar10_tinyb': os.path.expanduser('~/models/advhyp/cifar10_tinyb/cifar10_tinyb-7ab86c47.pth'),
'carlini': os.path.expanduser('~/models/advhyp/carlini/carlini-caa52d4e.pth'),
'cifar10_tinyb_adv': os.path.expanduser('~/models/advhyp/cifar10_tinyb_adv/cifar10_tinyb_adv-ac4936dc.pth'),
'cifar100': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar100-3a55a987.pth',
}
class CIFAR(nn.Module):
def __init__(self, features, n_channel, num_classes):
super(CIFAR, self).__init__()
assert isinstance(features, nn.Sequential), type(features)
self.features = features
self.classifier = nn.Sequential(
nn.Linear(n_channel, num_classes)
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
class Carlini(nn.Module):
def __init__(self, features, n_channel):
super(Carlini, self).__init__()
assert isinstance(features, nn.Sequential), type(features)
self.features = features
self.classifier = nn.Sequential(
nn.Linear(n_channel, 256),
nn.ReLU(),
nn.Dropout(),
nn.Linear(256, 256),
nn.ReLU(),
nn.Linear(256, 10)
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for i, v in enumerate(cfg):
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
padding = v[1] if isinstance(v, tuple) else 1
out_channels = v[0] if isinstance(v, tuple) else v
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=padding)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(out_channels, affine=False), nn.ReLU()]
else:
layers += [conv2d, nn.ReLU()]
in_channels = out_channels
return nn.Sequential(*layers)
def cifar10_tiny(n_channel, pretrained=False, map_location=None, padding=1, trained_adv=False):
if padding == 1:
cfg = [(n_channel, padding), 'M', (n_channel, padding), 'M', (2*n_channel, padding), 'M', (2*n_channel, 0), 'M']
elif padding == 0:
cfg = [(n_channel, padding), (n_channel, padding), 'M', (2*n_channel, padding), 'M', (2*n_channel, 0), 'M']
layers = make_layers(cfg, batch_norm=False)
model = CIFAR(layers, n_channel=2*n_channel if padding == 1 else 4*2*n_channel, num_classes=10)
if pretrained or trained_adv:
if padding == 1:
state_dict = th.load(model_urls['cifar10_tiny'], map_location=map_location)
elif padding == 0:
if trained_adv and pretrained:
state_dict = th.load(model_urls['cifar10_tinyb_adv'], map_location=map_location)
else:
state_dict = th.load(model_urls['cifar10_tinyb'], map_location=map_location)
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
def cifar10(n_channel, pretrained=False, map_location=None, trained_adv=False):
cfg = [n_channel, n_channel, 'M', 2*n_channel, 2*n_channel, 'M', 4*n_channel, 4*n_channel, 'M', (8*n_channel, 0), 'M']
layers = make_layers(cfg, batch_norm=True)
model = CIFAR(layers, n_channel=8*n_channel, num_classes=10)
if pretrained or trained_adv:
    if trained_adv and pretrained:
      # NOTE: no 'cifar10_adv' entry is defined in model_urls above, so this
      # branch raises a KeyError unless such a checkpoint path is added.
      m = th.load(model_urls['cifar10_adv'], map_location=map_location)
else:
m = model_zoo.load_url(model_urls['cifar10'], map_location=map_location)
state_dict = m.state_dict() if isinstance(m, nn.Module) else m
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
def carlini(pretrained=False, map_location=None, trained_adv=False):
cfg = [(64, 0), (64, 0), 'M', (128, 0), (128, 0), 'M']
layers = make_layers(cfg, batch_norm=False)
model = Carlini(layers, n_channel=128*5*5)
if pretrained or trained_adv:
    if trained_adv and pretrained:
      # NOTE: no 'carlini_adv' entry is defined in model_urls above, so this
      # branch raises a KeyError unless such a checkpoint path is added.
      state_dict = th.load(model_urls['carlini_adv'], map_location=map_location)
else:
state_dict = th.load(model_urls['carlini'], map_location=map_location)
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
def cifar100(n_channel, pretrained=None):
cfg = [n_channel, n_channel, 'M', 2*n_channel, 2*n_channel, 'M', 4*n_channel, 4*n_channel, 'M', (8*n_channel, 0), 'M']
layers = make_layers(cfg, batch_norm=True)
model = CIFAR(layers, n_channel=8*n_channel, num_classes=100)
if pretrained is not None:
m = model_zoo.load_url(model_urls['cifar100'])
state_dict = m.state_dict() if isinstance(m, nn.Module) else m
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
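# Usage sketch (illustrative; runs without any pretrained checkpoint):
#
#   model = carlini(pretrained=False).eval()
#   out = model(th.randn(2, 3, 32, 32))
#   assert out.shape == (2, 10)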
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import functools
import cleverhans.model
import torch
from cleverhans import utils_tf
from cleverhans.attacks import Attack
import cleverhans.attacks
from cleverhans.utils_tf import clip_eta
# disable tf logging
# some of these might have to be commented out to use verbose=True in the
# adaptive attack
import warnings
import logging
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import os
import math
import numpy as np
from cleverhans.attacks import MadryEtAl
from cleverhans.dataset import CIFAR10
from cleverhans.model_zoo.madry_lab_challenges.cifar10_model import \
make_wresnet as ResNet
from cleverhans.utils_tf import initialize_uninitialized_global_variables
import tf_robustify
from cleverhans.augmentation import random_horizontal_flip, random_shift
from logit_matching_attack import \
ProjectedGradientDescentWithDetectorLogitMatching
def init_defense(sess, x, preds, batch_size, multi_noise=False):
data = CIFAR10()
if multi_noise:
defense_data_path = os.path.join("checkpoints/tf_madry_wrn_vanilla",
"defense_alignment_data_multi_noise")
else:
defense_data_path = os.path.join("checkpoints/tf_madry_wrn_vanilla",
"defense_alignment_data")
if os.path.exists(defense_data_path):
print("Trying to load defense statistics")
load_alignments_dir = defense_data_path
save_alignments_dir = None
else:
print("Defense statistics not found; generating and saving them now.")
load_alignments_dir = None
save_alignments_dir = defense_data_path
dataset_size = data.x_train.shape[0]
dataset_train = data.to_tensorflow()[0]
dataset_train = dataset_train.map(
lambda x, y: (random_shift(random_horizontal_flip(x)), y), 4)
dataset_train = dataset_train.batch(batch_size)
dataset_train = dataset_train.prefetch(16)
x_train, y_train = data.get_set('train')
x_train *= 255
nb_classes = y_train.shape[1]
n_collect = 10000 # TODO: for debugging set to 100, otherwise to 10000
p_ratio_cutoff = .999
just_detect = True
clip_alignments = True
fit_classifier = True
noise_eps = 'n30.0'
num_noise_samples = 256
if multi_noise:
noises = 'n0.003,s0.003,u0.003,n0.005,s0.005,u0.005,s0.008,n0.008,u0.008'.split(
',')
noise_eps_detect = []
for n in noises:
new_noise = n[0] + str(float(n[1:]) * 255)
noise_eps_detect.append(new_noise)
else:
noise_eps_detect = 'n30.0'
# these attack parameters are just for initializing the defense
eps = 8.0
pgd_params = {
'eps': eps,
'eps_iter': (eps / 5),
'nb_iter': 10,
'clip_min': 0,
'clip_max': 255
}
logits_op = preds.op
while logits_op.type != 'MatMul':
logits_op = logits_op.inputs[0].op
latent_x_tensor, weights = logits_op.inputs
logits_tensor = preds
predictor = tf_robustify.collect_statistics(
x_train[:n_collect], y_train[:n_collect], x, sess,
logits_tensor=logits_tensor,
latent_x_tensor=latent_x_tensor,
weights=weights,
nb_classes=nb_classes,
p_ratio_cutoff=p_ratio_cutoff,
noise_eps=noise_eps,
noise_eps_detect=noise_eps_detect,
pgd_eps=pgd_params['eps'],
pgd_lr=pgd_params['eps_iter'] / pgd_params['eps'],
pgd_iters=pgd_params['nb_iter'],
save_alignments_dir=save_alignments_dir,
load_alignments_dir=load_alignments_dir,
clip_min=pgd_params['clip_min'],
clip_max=pgd_params['clip_max'],
batch_size=batch_size,
num_noise_samples=num_noise_samples,
debug_dict=None,
debug=False,
targeted=False,
pgd_train=None,
fit_classifier=fit_classifier,
clip_alignments=clip_alignments,
just_detect=just_detect,
)
next(predictor)
return predictor
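# Note on the returned object: collect_statistics yields a generator-based
# coroutine. After the priming next(predictor) above, each
# predictor.send(x_batch) returns an array of shape (batch, 2) whose columns
# are the (possibly corrected) class prediction and a boolean detection flag;
# do_eval below unpacks exactly this pair.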
def do_eval(sess, x, x_adv, logits, preds, x_set,
y_set, predictor, batch_size, attack_kwargs={}):
n_batches = math.ceil(x_set.shape[0] / batch_size)
  # first generate adversarial examples
x_adv_set, logits_set, p_set = [], [], []
for b in range(n_batches):
values = sess.run((x_adv, logits, preds),
{**attack_kwargs,
x: x_set[b * batch_size:(b + 1) * batch_size]})
x_adv_set.append(values[0])
logits_set.append(values[1])
p_set.append(values[2])
x_adv_set = np.concatenate(x_adv_set)
logits_set = np.concatenate(logits_set)
p_set = np.concatenate(p_set)
del x_set
# now run test
p_set, p_det = np.concatenate(
[predictor.send(x_adv_set[b * batch_size:(b + 1) * batch_size]) for b in
range(n_batches)]).T
correctly_classified = np.equal(p_set,
y_set[:len(p_set)].argmax(-1))
adversarial_example_detected = np.equal(p_det, True)
# model_fooled = np.logical_or(
# np.logical_and(~correctly_classified, ~adversarial_example_detected), # fooled classifier & evaded detector
# np.logical_and(correctly_classified, adversarial_example_detected) # did not fool classifier but triggered detector (false positive)
# )
model_fooled = np.logical_and(~correctly_classified,
~adversarial_example_detected) # fooled classifier & evaded detector
correctly_classified_not_detected = np.logical_and(correctly_classified,
~adversarial_example_detected)
# print(len(adversarial_example_detected), np.sum(~correctly_classified),
# np.sum(adversarial_example_detected))
# asr = model_fooled.mean()
# acc = correctly_classified.mean()
# print('Accuracy of base model: %0.4f' % acc)
# print('ASR (w/ detection defense): %0.4f' % asr)
return model_fooled, correctly_classified, adversarial_example_detected
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true')
parser.add_argument('--multi-noise', action='store_true')
parser.add_argument("--n-samples", default=512, type=int)
parser.add_argument("--batch-size", default=512, type=int)
parser.add_argument("--epsilon", default=8, type=int)
parser.add_argument("--attack", choices=("clean", "original", "adaptive",
"adaptive-eot"),
default="original")
args = parser.parse_args()
# load data
data = CIFAR10()
x_test, y_test = data.get_set('test')
sess = tf.Session()
img_rows, img_cols, nchannels = x_test.shape[1:4]
nb_classes = y_test.shape[1]
# define model & restore weights
# Define input TF placeholder
x_placeholder = tf.placeholder(
tf.float32, shape=(None, img_rows, img_cols, nchannels))
# needed for adaptive attack
x_reference_placeholder = tf.placeholder(
tf.float32, shape=(None, img_rows, img_cols, nchannels))
cifar_model = ResNet(scope='ResNet')
ckpt = tf.train.get_checkpoint_state("checkpoints/tf_madry_wrn_vanilla")
saver = tf.train.Saver(var_list=dict(
(v.name.split('/', 1)[1].split(':')[0], v) for v in
tf.global_variables()))
saver.restore(sess, ckpt.model_checkpoint_path)
initialize_uninitialized_global_variables(sess)
logits = cifar_model.get_logits(x_placeholder)
# setup defense
# if multi_noise = True, instantiate the defense with 9 types of noise.
# if multi_noise = False, instantiate the defense with a single type of high-magnitude noise.
print("multi noise:", args.multi_noise)
defense_predictor = init_defense(sess, x_placeholder, logits, args.batch_size,
multi_noise=args.multi_noise)
# prepare dataloader
random_indices = list(range(len(x_test)))
np.random.shuffle(random_indices)
x_batch = []
y_batch = []
for j in range(args.n_samples):
x_, y_ = x_test[random_indices[j]], y_test[random_indices[j]]
x_batch.append(x_)
y_batch.append(y_)
x_batch = np.array(x_batch).transpose((0, 3, 1, 2))
y_batch = np.array(y_batch)
from utils import build_dataloader_from_arrays
test_loader = build_dataloader_from_arrays(x_batch, y_batch,
batch_size=args.batch_size)
original_pgd_params = {
# ord: ,
'eps': args.epsilon,
'eps_iter': (args.epsilon / 5.0),
'nb_iter': 10,
'clip_min': 0,
'clip_max': 255
}
adaptive_pgd_params = {
# ord: ,
'eps': args.epsilon,
'eps_iter': args.epsilon / 100.0,
'nb_iter': 100,
'clip_min': 0,
'clip_max': 255,
'x_reference': x_reference_placeholder,
}
if args.attack == "clean":
adv_x = x_placeholder
else:
if args.attack == "original":
pgd = MadryEtAl(cifar_model, sess=sess)
print("Using MadryEtAl attack")
elif args.attack == "adaptive":
pgd = ProjectedGradientDescentWithDetectorLogitMatching(
cifar_model,
lambda x: cifar_model.get_logits(x),
sess=sess,
verbose=False)
print("Using logit-matching attack")
elif args.attack == "adaptive-eot":
pgd = ProjectedGradientDescentWithDetectorLogitMatching(
cifar_model,
lambda x: cifar_model.get_logits(x),
sess=sess,
eot_ensemble_size=20,
verbose=False)
print("Using logit-matching attack w/ EOT")
else:
raise ValueError("invalid attack")
pgd_params = original_pgd_params if args.attack == "original" else adaptive_pgd_params
adv_x = tf.stop_gradient(pgd.generate(x_placeholder, **pgd_params))
adv_logits = cifar_model.get_logits(adv_x)
adv_predictions = tf.argmax(adv_logits, 1)
def run_eval(l):
# should_be_rejected = ~verify_valid_input_data(kwargs["reference_points_x"])
# print("should_be_rejected", should_be_rejected)
is_advs = []
correctly_classifieds = []
adv_detecteds = []
model_fooleds = []
for x, y in l:
x = x.numpy().transpose((0, 2, 3, 1)) * 255.0
y = y.numpy()
# pick targets. We'll keep it simple and just target the logits
# of the first clean example, except for inputs that have the
# same class as that example. For those, we target the logits
# of the first clean example w/ different class.
y_cls = np.argmax(y, -1)
reference_x = x.copy()
reference_x[:] = x[0]
# get first element that has different class than first sample
idx = np.argmax(y_cls != y_cls[0])
reference_x[y_cls == y_cls[0]] = x[idx]
print(x.shape, reference_x.shape)
model_fooled_np, correctly_classified_np, adv_detected_np = do_eval(
sess=sess, x=x_placeholder, x_adv=adv_x,
batch_size=args.batch_size,
logits=adv_logits,
preds=adv_predictions, x_set=x, y_set=y,
predictor=defense_predictor,
attack_kwargs={x_reference_placeholder: reference_x}
)
# print(is_adv_np, y, logits_np)
adv_detecteds.append(adv_detected_np)
correctly_classifieds.append(correctly_classified_np)
model_fooleds.append(model_fooled_np)
adv_detecteds = np.concatenate(adv_detecteds)
correctly_classifieds = np.concatenate(correctly_classifieds)
model_fooleds = np.concatenate(model_fooleds)
print("ASR:", np.mean(model_fooleds))
print("correctly_classifieds", np.mean(correctly_classifieds),
"adversarial detected", np.mean(adv_detecteds))
run_eval(test_loader)
if __name__ == "__main__":
main()
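# Illustrative invocation (the file name is hypothetical; the flags are the
# ones defined in main() above):
#   python eval_odds_defense.py --attack adaptive-eot --n-samples 512 \
#       --batch-size 512 --epsilon 8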
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
based on cleverhans' cifar10_tutorial_tf except that it uses Madry's wide-ResNet
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import flags
import time
from cleverhans.attacks import MadryEtAl, CarliniWagnerL2
from cleverhans.augmentation import random_horizontal_flip, random_shift
from cleverhans.dataset import CIFAR10
from cleverhans.loss import CrossEntropy
from cleverhans.model_zoo.all_convolutional import ModelAllConvolutional
from cleverhans.model_zoo.madry_lab_challenges.cifar10_model import make_wresnet as ResNet
from cleverhans.utils_tf import initialize_uninitialized_global_variables
from cleverhans.train import train
from cleverhans.utils import AccuracyReport, set_log_level
from cleverhans.utils_tf import tf_model_load, model_eval
from cleverhans.evaluation import batch_eval
import math
import tqdm
import os
import tf_robustify
from tensorboardX import SummaryWriter
import pickle
FLAGS = flags.FLAGS
NB_EPOCHS = 6
BATCH_SIZE = 128
LEARNING_RATE = 0.001
CLEAN_TRAIN = True
BACKPROP_THROUGH_ATTACK = False
NB_FILTERS = 64
ARCHITECTURE = 'ResNet'
LOAD_MODEL = True
os.makedirs('logs', exist_ok=True)
swriter = SummaryWriter('logs')
def cifar10_tutorial(
train_start=0,
train_end=60000,
test_start=0,
test_end=10000,
nb_epochs=NB_EPOCHS,
batch_size=BATCH_SIZE,
architecture=ARCHITECTURE,
load_model=LOAD_MODEL,
ckpt_dir='None',
learning_rate=LEARNING_RATE,
clean_train=CLEAN_TRAIN,
backprop_through_attack=BACKPROP_THROUGH_ATTACK,
nb_filters=NB_FILTERS,
num_threads=None,
label_smoothing=0.):
"""
CIFAR10 cleverhans tutorial
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param learning_rate: learning rate for training
:param clean_train: perform normal training on clean examples only
before performing adversarial training.
:param backprop_through_attack: If True, backprop through adversarial
example construction process during
adversarial training.
:param label_smoothing: float, amount of label smoothing for cross entropy
:return: an AccuracyReport object
"""
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
  # Seed TF and NumPy RNGs from the clock (note: this makes runs non-reproducible)
tf.set_random_seed(int(time.time() * 1000) % 2**31)
np.random.seed(int(time.time() * 1001) % 2**31)
# Set logging level to see debug information
set_log_level(logging.DEBUG)
# Create TF session
if num_threads:
config_args = dict(intra_op_parallelism_threads=1)
else:
config_args = {}
sess = tf.Session(config=tf.ConfigProto(**config_args))
# Get CIFAR10 data
data = CIFAR10(train_start=train_start, train_end=train_end,
test_start=test_start, test_end=test_end)
dataset_size = data.x_train.shape[0]
dataset_train = data.to_tensorflow()[0]
dataset_train = dataset_train.map(
lambda x, y: (random_shift(random_horizontal_flip(x)), y), 4)
dataset_train = dataset_train.batch(batch_size)
dataset_train = dataset_train.prefetch(16)
x_train, y_train = data.get_set('train')
pgd_train = None
if FLAGS.load_pgd_train_samples:
pgd_path = os.path.expanduser('~/data/advhyp/{}/samples'.format(FLAGS.load_pgd_train_samples))
x_train = np.load(os.path.join(pgd_path, 'train_clean.npy'))
y_train = np.load(os.path.join(pgd_path, 'train_y.npy'))
pgd_train = np.load(os.path.join(pgd_path, 'train_pgd.npy'))
if x_train.shape[1] == 3:
x_train = x_train.transpose((0, 2, 3, 1))
pgd_train = pgd_train.transpose((0, 2, 3, 1))
if len(y_train.shape) == 1:
y_tmp = np.zeros((len(y_train), np.max(y_train)+1), y_train.dtype)
y_tmp[np.arange(len(y_tmp)), y_train] = 1.
y_train = y_tmp
x_test, y_test = data.get_set('test')
pgd_test = None
if FLAGS.load_pgd_test_samples:
pgd_path = os.path.expanduser('~/data/advhyp/{}/samples'.format(FLAGS.load_pgd_test_samples))
x_test = np.load(os.path.join(pgd_path, 'test_clean.npy'))
y_test = np.load(os.path.join(pgd_path, 'test_y.npy'))
pgd_test = np.load(os.path.join(pgd_path, 'test_pgd.npy'))
if x_test.shape[1] == 3:
x_test = x_test.transpose((0, 2, 3, 1))
pgd_test = pgd_test.transpose((0, 2, 3, 1))
if len(y_test.shape) == 1:
y_tmp = np.zeros((len(y_test), np.max(y_test)+1), y_test.dtype)
y_tmp[np.arange(len(y_tmp)), y_test] = 1.
y_test = y_tmp
train_idcs = np.arange(len(x_train))
np.random.shuffle(train_idcs)
x_train, y_train = x_train[train_idcs], y_train[train_idcs]
if pgd_train is not None:
pgd_train = pgd_train[train_idcs]
test_idcs = np.arange(len(x_test))[:FLAGS.test_size]
np.random.shuffle(test_idcs)
x_test, y_test = x_test[test_idcs], y_test[test_idcs]
if pgd_test is not None:
pgd_test = pgd_test[test_idcs]
# Use Image Parameters
img_rows, img_cols, nchannels = x_test.shape[1:4]
nb_classes = y_test.shape[1]
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
nchannels))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
  # Train a CIFAR10 model
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate
}
eval_params = {'batch_size': batch_size}
pgd_params = {
# ord: ,
'eps': FLAGS.eps,
'eps_iter': (FLAGS.eps / 5),
'nb_iter': 10,
'clip_min': 0,
'clip_max': 255
}
cw_params = {
'binary_search_steps': FLAGS.cw_search_steps,
'max_iterations': FLAGS.cw_steps, #1000
'abort_early': True,
'learning_rate': FLAGS.cw_lr,
'batch_size': batch_size,
'confidence': 0,
'initial_const': FLAGS.cw_c,
'clip_min': 0,
'clip_max': 255
}
  # Madry doesn't divide by 255
x_train *= 255
x_test *= 255
if pgd_train is not None:
pgd_train *= 255
if pgd_test is not None:
pgd_test *= 255
print('x_train amin={} amax={}'.format(np.amin(x_train), np.amax(x_train)))
print('x_test amin={} amax={}'.format(np.amin(x_test), np.amax(x_test)))
print('clip_min : {}, clip_max : {} >> CHECK WITH WHICH VALUES THE CLASSIFIER WAS PRETRAINED !!! <<'
.format(pgd_params['clip_min'], pgd_params['clip_max']))
rng = np.random.RandomState() # [2017, 8, 30]
debug_dict = dict() if FLAGS.save_debug_dict else None
def do_eval(preds, x_set, y_set, report_key, is_adv=None, predictor=None, x_adv=None):
if predictor is None:
acc = model_eval(sess, x, y, preds, x_set, y_set, args=eval_params)
else:
do_eval(preds, x_set, y_set, report_key, is_adv=is_adv)
if x_adv is not None:
x_set_adv, = batch_eval(sess, [x], [x_adv], [x_set], batch_size=batch_size)
assert x_set.shape == x_set_adv.shape
x_set = x_set_adv
n_batches = math.ceil(x_set.shape[0] / batch_size)
p_set, p_det = np.concatenate([predictor.send(x_set[b*batch_size:(b+1)*batch_size]) for b in tqdm.trange(n_batches)]).T
acc = np.equal(p_set, y_set[:len(p_set)].argmax(-1)).mean()
# if is_adv:
# import IPython ; IPython.embed() ; exit(1)
if FLAGS.save_debug_dict:
debug_dict['x_set'] = x_set
debug_dict['y_set'] = y_set
ddfn = 'logs/debug_dict_{}.pkl'.format('adv' if is_adv else 'clean')
if not os.path.exists(ddfn):
with open(ddfn, 'wb') as f:
pickle.dump(debug_dict, f)
debug_dict.clear()
if is_adv is None:
report_text = None
elif is_adv:
report_text = 'adversarial'
else:
report_text = 'legitimate'
if report_text:
print('Test accuracy on %s examples %s: %0.4f' % (report_text, 'with correction' if predictor is not None else 'without correction', acc))
if is_adv is not None:
label = 'test_acc_{}_{}'.format(report_text, 'corrected' if predictor else 'uncorrected')
swriter.add_scalar(label, acc)
if predictor is not None:
detect = np.equal(p_det, is_adv).mean()
label = 'test_det_{}_{}'.format(report_text, 'corrected' if predictor else 'uncorrected')
print(label, detect)
swriter.add_scalar(label, detect)
label = 'test_dac_{}_{}'.format(report_text, 'corrected' if predictor else 'uncorrected')
swriter.add_scalar(label, np.equal(p_set, y_set[:len(p_set)].argmax(-1))[np.equal(p_det, is_adv)].mean())
return acc
if clean_train:
if architecture == 'ConvNet':
model = ModelAllConvolutional('model1', nb_classes, nb_filters,
input_shape=[32, 32, 3])
elif architecture == 'ResNet':
model = ResNet(scope='ResNet')
else:
raise Exception('Specify valid classifier architecture!')
preds = model.get_logits(x)
loss = CrossEntropy(model, smoothing=label_smoothing)
if load_model:
model_name = 'naturally_trained'
if FLAGS.load_adv_trained:
model_name = 'adv_trained'
      if ckpt_dir != 'None':  # string sentinel, not the None object
ckpt = tf.train.get_checkpoint_state(os.path.join(
os.path.expanduser(ckpt_dir), model_name))
else:
ckpt = tf.train.get_checkpoint_state(
'./models/' + model_name)
ckpt_path = False if ckpt is None else ckpt.model_checkpoint_path
saver = tf.train.Saver(var_list=dict((v.name.split('/', 1)[1].split(':')[0], v) for v in tf.global_variables()))
saver.restore(sess, ckpt_path)
print('\nMODEL SUCCESSFULLY LOADED from : {}'.format(ckpt_path))
initialize_uninitialized_global_variables(sess)
else:
def evaluate():
do_eval(preds, x_test, y_test, 'clean_train_clean_eval', False)
train(sess, loss, None, None,
dataset_train=dataset_train, dataset_size=dataset_size,
evaluate=evaluate, args=train_params, rng=rng,
var_list=model.get_params())
logits_op = preds.op
while logits_op.type != 'MatMul':
logits_op = logits_op.inputs[0].op
latent_x_tensor, weights = logits_op.inputs
logits_tensor = preds
nb_classes = weights.shape[-1].value
if not FLAGS.save_pgd_samples:
noise_eps = FLAGS.noise_eps.split(',')
if FLAGS.noise_eps_detect is None:
FLAGS.noise_eps_detect = FLAGS.noise_eps
noise_eps_detect = FLAGS.noise_eps_detect.split(',')
if pgd_train is not None:
pgd_train = pgd_train[:FLAGS.n_collect]
if not FLAGS.passthrough:
    predictor = tf_robustify.collect_statistics(
        x_train[:FLAGS.n_collect], y_train[:FLAGS.n_collect], x, sess,
        logits_tensor=logits_tensor, latent_x_tensor=latent_x_tensor,
        weights=weights, nb_classes=nb_classes,
        p_ratio_cutoff=FLAGS.p_ratio_cutoff,
        noise_eps=noise_eps, noise_eps_detect=noise_eps_detect,
        pgd_eps=pgd_params['eps'],
        pgd_lr=pgd_params['eps_iter'] / pgd_params['eps'],
        pgd_iters=pgd_params['nb_iter'],
        save_alignments_dir='logs/stats' if FLAGS.save_alignments else None,
        load_alignments_dir=os.path.expanduser(
            '~/data/advhyp/madry/stats') if FLAGS.load_alignments else None,
        clip_min=pgd_params['clip_min'], clip_max=pgd_params['clip_max'],
        batch_size=batch_size, num_noise_samples=FLAGS.num_noise_samples,
        debug_dict=debug_dict, debug=FLAGS.debug, targeted=False,
        pgd_train=pgd_train, fit_classifier=FLAGS.fit_classifier,
        clip_alignments=FLAGS.clip_alignments, just_detect=FLAGS.just_detect)
else:
def _predictor():
_x = yield
while(_x is not None):
_y = sess.run(preds, {x: _x}).argmax(-1)
_x = yield np.stack((_y, np.zeros_like(_y)), -1)
predictor = _predictor()
next(predictor)
if FLAGS.save_alignments:
exit(0)
# Evaluate the accuracy of the model on clean examples
acc_clean = do_eval(preds, x_test, y_test, 'clean_train_clean_eval', False, predictor=predictor)
# Initialize the PGD attack object and graph
if FLAGS.attack == 'pgd':
pgd = MadryEtAl(model, sess=sess)
adv_x = pgd.generate(x, **pgd_params)
elif FLAGS.attack == 'cw':
cw = CarliniWagnerL2(model, sess=sess)
adv_x = cw.generate(x, **cw_params)
elif FLAGS.attack == 'mean':
pgd = MadryEtAl(model, sess=sess)
mean_eps = FLAGS.mean_eps * FLAGS.eps
def _attack_mean(x):
x_many = tf.tile(x[None], (FLAGS.mean_samples, 1, 1, 1))
x_noisy = x_many + tf.random_uniform(x_many.shape, -mean_eps, mean_eps)
x_noisy = tf.clip_by_value(x_noisy, 0, 255)
x_pgd = pgd.generate(x_noisy, **pgd_params)
x_clip = tf.minimum(x_pgd, x_many + FLAGS.eps)
x_clip = tf.maximum(x_clip, x_many - FLAGS.eps)
x_clip = tf.clip_by_value(x_clip, 0, 255)
return x_clip
adv_x = tf.map_fn(_attack_mean, x)
adv_x = tf.reduce_mean(adv_x, 1)
preds_adv = model.get_logits(adv_x)
if FLAGS.save_pgd_samples:
for ds, y, name in ((x_train, y_train, 'train'), (x_test, y_test, 'test')):
train_batches = math.ceil(len(ds) / FLAGS.batch_size)
train_pgd = np.concatenate([sess.run(adv_x, {x: ds[b*FLAGS.batch_size:(b+1)*FLAGS.batch_size]}) for b in tqdm.trange(train_batches)])
np.save('logs/{}_clean.npy'.format(name), ds / 255.)
np.save('logs/{}_y.npy'.format(name), y)
train_pgd /= 255.
np.save('logs/{}_pgd.npy'.format(name), train_pgd)
exit(0)
# Evaluate the accuracy of the model on adversarial examples
if not FLAGS.load_pgd_test_samples:
acc_pgd = do_eval(preds_adv, x_test, y_test, 'clean_train_adv_eval', True, predictor=predictor, x_adv=adv_x)
else:
acc_pgd = do_eval(preds, pgd_test, y_test, 'clean_train_adv_eval', True, predictor=predictor)
swriter.add_scalar('test_acc_mean', (acc_clean + acc_pgd) / 2., 0)
print('Repeating the process, using adversarial training')
exit(0)
# Create a new model and train it to be robust to MadryEtAl
if architecture == 'ConvNet':
model2 = ModelAllConvolutional('model2', nb_classes, nb_filters,
input_shape=[32, 32, 3])
elif architecture == 'ResNet':
    # bug fix: the second model must be bound to model2 (used by pgd2 below);
    # the scope name 'ResNet2' is an assumption, it only has to differ from
    # the first model's scope
    model2 = ResNet(scope='ResNet2')
else:
raise Exception('Specify valid classifier architecture!')
pgd2 = MadryEtAl(model2, sess=sess)
def attack(x):
return pgd2.generate(x, **pgd_params)
loss2 = CrossEntropy(model2, smoothing=label_smoothing, attack=attack)
preds2 = model2.get_logits(x)
adv_x2 = attack(x)
if not backprop_through_attack:
# For some attacks, enabling this flag increases the cost of
# training, but gives the defender the ability to anticipate how
    # the attacker will change their strategy in response to updates to
# the defender's parameters.
adv_x2 = tf.stop_gradient(adv_x2)
preds2_adv = model2.get_logits(adv_x2)
if load_model:
    if ckpt_dir != 'None':  # string sentinel, not the None object
ckpt = tf.train.get_checkpoint_state(os.path.join(
os.path.expanduser(ckpt_dir), 'adv_trained'))
else:
ckpt = tf.train.get_checkpoint_state('./models/adv_trained')
ckpt_path = False if ckpt is None else ckpt.model_checkpoint_path
assert ckpt_path and tf_model_load(
sess, file_path=ckpt_path), '\nMODEL LOADING FAILED'
print('\nMODEL SUCCESSFULLY LOADED from : {}'.format(ckpt_path))
initialize_uninitialized_global_variables(sess)
else:
def evaluate2():
# Accuracy of adversarially trained model on legitimate test inputs
do_eval(preds2, x_test, y_test, 'adv_train_clean_eval', False)
# Accuracy of the adversarially trained model on adversarial
# examples
do_eval(preds2_adv, x_test, y_test, 'adv_train_adv_eval', True)
# Perform and evaluate adversarial training
train(sess, loss2, None, None,
dataset_train=dataset_train, dataset_size=dataset_size,
evaluate=evaluate2, args=train_params, rng=rng,
var_list=model2.get_params())
# Evaluate model
do_eval(preds2, x_test, y_test, 'adv_train_clean_eval', False)
do_eval(preds2_adv, x_test, y_test, 'adv_train_adv_eval', True)
return report
def main(argv=None):
from cleverhans_tutorials import check_installation
check_installation(__file__)
cifar10_tutorial(nb_epochs=FLAGS.nb_epochs, batch_size=FLAGS.batch_size,
learning_rate=FLAGS.learning_rate,
clean_train=FLAGS.clean_train,
architecture=FLAGS.architecture,
load_model=FLAGS.load_model,
ckpt_dir=FLAGS.ckpt_dir,
backprop_through_attack=FLAGS.backprop_through_attack,
nb_filters=FLAGS.nb_filters,
test_end=FLAGS.test_size)
if __name__ == '__main__':
flags.DEFINE_integer('nb_filters', NB_FILTERS,
'Model size multiplier')
flags.DEFINE_integer('nb_epochs', NB_EPOCHS,
'Number of epochs to train model')
flags.DEFINE_integer('batch_size', BATCH_SIZE,
'Size of training batches')
flags.DEFINE_float('learning_rate', LEARNING_RATE,
'Learning rate for training')
flags.DEFINE_string('architecture', ARCHITECTURE,
'Architecture [ResNet, ConvNet]')
flags.DEFINE_bool('load_model', LOAD_MODEL, 'Load Pretrained Model')
flags.DEFINE_string('ckpt_dir', '~/models/advhyp/madry/models',
'ckpt_dir [path_to_checkpoint_dir]')
flags.DEFINE_bool('clean_train', CLEAN_TRAIN, 'Train on clean examples')
flags.DEFINE_bool('backprop_through_attack', BACKPROP_THROUGH_ATTACK,
('If True, backprop through adversarial example '
'construction process during adversarial training'))
flags.DEFINE_integer('n_collect', 10000, '')
flags.DEFINE_float('p_ratio_cutoff', .999, '')
flags.DEFINE_float('eps', 8., '')
flags.DEFINE_string('noise_eps', 'n18.0,n24.0,n30.0', '')
flags.DEFINE_string('noise_eps_detect', 'n30.0', '')
flags.DEFINE_bool('debug', False, 'for debugging')
flags.DEFINE_integer('test_size', 10000, '')
flags.DEFINE_bool('save_alignments', False, '')
flags.DEFINE_bool('load_alignments', False, '')
flags.DEFINE_integer('num_noise_samples', 256, '')
flags.DEFINE_integer('rep', 0, '')
flags.DEFINE_bool('save_debug_dict', False, '')
flags.DEFINE_bool('save_pgd_samples', False, '')
flags.DEFINE_string('load_pgd_train_samples', None, '')
flags.DEFINE_string('load_pgd_test_samples', None, '')
flags.DEFINE_bool('fit_classifier', True, '')
flags.DEFINE_bool('clip_alignments', True, '')
flags.DEFINE_string('attack', 'pgd', '')
flags.DEFINE_bool('passthrough', False, '')
flags.DEFINE_integer('cw_steps', 300, '')
flags.DEFINE_integer('cw_search_steps', 20, '')
flags.DEFINE_float('cw_lr', 1e-1, '')
flags.DEFINE_float('cw_c', 1e-4, '')
flags.DEFINE_bool('just_detect', False, '')
flags.DEFINE_integer('mean_samples', 16, '')
flags.DEFINE_float('mean_eps', .1, '')
flags.DEFINE_bool('load_adv_trained', False, '')
tf.app.run()
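# Illustrative invocation (the file name is hypothetical; the flags are the
# ones defined above):
#   python cifar10_tutorial_madry.py --attack pgd --eps 8 --test_size 1000 \
#       --ckpt_dir ~/models/advhyp/madry/models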
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from https://github.com/rwightman/pytorch-nips2017-attack-example
import torch
import operator as op
import functools as ft
'''reduce_* helper functions reduce tensors on all dimensions but the first.
They are intended to be used on batched tensors where dim 0 is the batch dim.
'''
def reduce_sum(x, keepdim=True):
# silly PyTorch, when will you get proper reducing sums/means?
for a in reversed(range(1, x.dim())):
x = x.sum(a, keepdim=keepdim)
return x
def reduce_mean(x, keepdim=True):
numel = ft.reduce(op.mul, x.size()[1:])
x = reduce_sum(x, keepdim=keepdim)
return x / numel
def reduce_min(x, keepdim=True):
for a in reversed(range(1, x.dim())):
x = x.min(a, keepdim=keepdim)[0]
return x
def reduce_max(x, keepdim=True):
for a in reversed(range(1, x.dim())):
x = x.max(a, keepdim=keepdim)[0]
return x
def torch_arctanh(x, eps=1e-6):
    # nudge values strictly inside (-1, 1); multiply out of place so the
    # caller's tensor is not silently mutated
    x = x * (1. - eps)
    return (torch.log((1 + x) / (1 - x))) * 0.5
def l2r_dist(x, y, keepdim=True, eps=1e-8):
d = (x - y)**2
d = reduce_sum(d, keepdim=keepdim)
d += eps # to prevent infinite gradient at 0
return d.sqrt()
def l2_dist(x, y, keepdim=True):
d = (x - y)**2
return reduce_sum(d, keepdim=keepdim)
# d = torch.abs(x - y)
# return reduce_max(d, keepdim=keepdim)
def l1_dist(x, y, keepdim=True):
d = torch.abs(x - y)
return reduce_sum(d, keepdim=keepdim)
def l2_norm(x, keepdim=True):
norm = reduce_sum(x*x, keepdim=keepdim)
return norm.sqrt()
def l1_norm(x, keepdim=True):
return reduce_sum(x.abs(), keepdim=keepdim)
def rescale(x, x_min=-1., x_max=1.):
return x * (x_max - x_min) + x_min
def tanh_rescale(x, x_min=-1., x_max=1.):
return (torch.tanh(x) + 1) * 0.5 * (x_max - x_min) + x_min
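# Shape sanity check (illustrative only): every reduce_* helper collapses all
# dimensions except the batch dimension, e.g.
#
#   x = torch.randn(4, 3, 8, 8)
#   assert reduce_sum(x).shape == (4, 1, 1, 1)           # keepdim=True
#   assert reduce_mean(x, keepdim=False).shape == (4,)   # fully squeezed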
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from https://github.com/rwightman/pytorch-nips2017-attack-example
"""PyTorch Carlini and Wagner L2 attack algorithm.
Based on paper by Carlini & Wagner, https://arxiv.org/abs/1608.04644 and a reference implementation at
https://github.com/tensorflow/cleverhans/blob/master/cleverhans/attacks_tf.py
"""
import os
import sys
import torch
import numpy as np
from torch import optim
from torch import autograd
from carlini_wagner_helpers import *
class AttackCarliniWagnerL2:
    def __init__(self, targeted=False, search_steps=5, max_steps=1000,
                 cuda=True, debug=False, num_classes=10, clip_min=-1.,
                 clip_max=1., confidence=20, initial_const=.1,
                 learning_rate=1e-4):
self.debug = debug
self.targeted = targeted
self.num_classes = num_classes
self.confidence = confidence # FIXME need to find a good value for this, 0 value used in paper not doing much...
self.initial_const = initial_const # bumped up from default of .01 in reference code... 10 in mnist tutorial
self.binary_search_steps = search_steps
# self.repeat = self.binary_search_steps >= 10
self.repeat = False
self.max_steps = max_steps
self.abort_early = True
self.clip_min = clip_min
self.clip_max = clip_max
self.cuda = cuda
        # self.clamp_fn = 'tanh'  # use a tanh reparameterization of the perturbation
        self.clamp_fn = ''  # anything other than 'tanh' falls back to a plain clamp
self.init_rand = False # an experiment, does a random starting point help?
self.learning_rate = learning_rate
def _compare(self, output, target):
if not isinstance(output, (float, int, np.int64)):
output = np.copy(output)
if self.targeted:
output[target] -= self.confidence
else:
output[target] += self.confidence
output = np.argmax(output)
if isinstance(output, np.int64):
output = output.item()
if self.targeted:
return output == target
else:
return output != target
def _loss(self, output, target, dist, scale_const):
# compute the probability of the label class versus the maximum other
real = (target * output).sum(1)
other = ((1. - target) * output - target * 10000.).max(1)[0]
if self.targeted:
# if targeted, optimize for making the other class most likely
loss1 = torch.clamp(other - real + self.confidence, min=0.) # equiv to max(..., 0.)
else:
# if non-targeted, optimize for making this class least likely.
loss1 = torch.clamp(real - other + self.confidence, min=0.) # equiv to max(..., 0.)
loss1 = torch.sum(scale_const * loss1)
loss2 = dist.sum()
loss = loss1 + loss2
return loss
def _optimize(self, optimizer, model, input_var, modifier_var, target_var, scale_const_var, input_orig=None):
# apply modifier and clamp resulting image to keep bounded from clip_min to clip_max
if self.clamp_fn == 'tanh':
input_adv = tanh_rescale(modifier_var + input_var, self.clip_min, self.clip_max)
else:
input_adv = torch.clamp(modifier_var + input_var, self.clip_min, self.clip_max)
output = model(input_adv)
# distance to the original input data
if input_orig is None:
dist = l2_dist(input_adv, input_var, keepdim=False)
else:
dist = l2_dist(input_adv, input_orig, keepdim=False)
loss = self._loss(output, target_var, dist, scale_const_var)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_np = loss.data.item()
dist_np = dist.data.cpu().numpy()
output_np = output.data.cpu().numpy()
        input_adv_np = input_adv.data.cpu().numpy()  # copy back to numpy for bookkeeping
return loss_np, dist_np, output_np, input_adv_np
def run(self, model, input, target, batch_idx=0):
batch_size = input.size(0)
# set the lower and upper bounds accordingly
lower_bound = np.zeros(batch_size)
scale_const = np.ones(batch_size) * self.initial_const
upper_bound = np.ones(batch_size) * 1e10
# python/numpy placeholders for the overall best l2, label score, and adversarial image
o_best_l2 = [1e10] * batch_size
o_best_score = [-1] * batch_size
o_best_attack = input.cpu().numpy()
# setup input (image) variable, clamp/scale as necessary
if self.clamp_fn == 'tanh':
            # convert to tanh-space; the input is already in the [-1, 1] range. Does it
            # make sense to do this as per the reference implementation, or can we
            # skip the arctanh?
input_var = autograd.Variable(torch_arctanh(input), requires_grad=False)
input_orig = tanh_rescale(input_var, self.clip_min, self.clip_max)
else:
input_var = autograd.Variable(input, requires_grad=False)
input_orig = None
# setup the target variable, we need it to be in one-hot form for the loss function
target_onehot = torch.zeros(target.size() + (self.num_classes,))
if self.cuda:
target_onehot = target_onehot.cuda()
target_onehot.scatter_(1, target.unsqueeze(1), 1.)
target_var = autograd.Variable(target_onehot, requires_grad=False)
# setup the modifier variable, this is the variable we are optimizing over
modifier = torch.zeros(input_var.size()).float()
if self.init_rand:
            # Experiment with a non-zero starting point...
            # (modern PyTorch spells this kwarg 'mean', not 'means')
            modifier = torch.normal(mean=modifier, std=0.001)
if self.cuda:
modifier = modifier.cuda()
modifier_var = autograd.Variable(modifier, requires_grad=True)
optimizer = optim.Adam([modifier_var], lr=self.learning_rate)
for search_step in range(self.binary_search_steps):
print('Batch: {0:>3}, search step: {1}'.format(batch_idx, search_step))
if self.debug:
print('Const:')
for i, x in enumerate(scale_const):
print(i, x)
best_l2 = [1e10] * batch_size
best_score = [-1] * batch_size
            # In the final binary-search step (when self.repeat is set), retry once with the upper bound.
if self.repeat and search_step == self.binary_search_steps - 1:
scale_const = upper_bound
scale_const_tensor = torch.from_numpy(scale_const).float()
if self.cuda:
scale_const_tensor = scale_const_tensor.cuda()
scale_const_var = autograd.Variable(scale_const_tensor, requires_grad=False)
prev_loss = 1e6
for step in range(self.max_steps):
# perform the attack
loss, dist, output, adv_img = self._optimize(
optimizer,
model,
input_var,
modifier_var,
target_var,
scale_const_var,
input_orig)
if step % 100 == 0 or step == self.max_steps - 1:
print('Step: {0:>4}, loss: {1:6.4f}, dist: {2:8.5f}, modifier mean: {3:.5e}'.format(
step, loss, dist.mean(), modifier_var.data.mean()))
if self.abort_early and step % (self.max_steps // 10) == 0:
if loss > prev_loss * .9999:
print('Aborting early...')
break
prev_loss = loss
# update best result found
for i in range(batch_size):
target_label = target[i]
output_logits = output[i]
output_label = np.argmax(output_logits)
di = dist[i]
if self.debug:
if step % 100 == 0:
print('{0:>2} dist: {1:.5f}, output: {2:>3}, {3:5.3}, target {4:>3}'.format(
i, di, output_label, output_logits[output_label], target_label))
if di < best_l2[i] and self._compare(output_logits, target_label):
if self.debug:
print('{0:>2} best step, prev dist: {1:.5f}, new dist: {2:.5f}'.format(
i, best_l2[i], di))
best_l2[i] = di
best_score[i] = output_label
if di < o_best_l2[i] and self._compare(output_logits, target_label):
if self.debug:
print('{0:>2} best total, prev dist: {1:.5f}, new dist: {2:.5f}'.format(
i, o_best_l2[i], di))
o_best_l2[i] = di
o_best_score[i] = output_label
o_best_attack[i] = adv_img[i]
sys.stdout.flush()
# end inner step loop
# adjust the constants
batch_failure = 0
batch_success = 0
for i in range(batch_size):
if self._compare(best_score[i], target[i]) and best_score[i] != -1:
# successful, do binary search and divide const by two
upper_bound[i] = min(upper_bound[i], scale_const[i])
if upper_bound[i] < 1e9:
scale_const[i] = (lower_bound[i] + upper_bound[i]) / 2
if self.debug:
print('{0:>2} successful attack, lowering const to {1:.3f}'.format(
i, scale_const[i]))
else:
# failure, multiply by 10 if no solution found
# or do binary search with the known upper bound
lower_bound[i] = max(lower_bound[i], scale_const[i])
if upper_bound[i] < 1e9:
scale_const[i] = (lower_bound[i] + upper_bound[i]) / 2
else:
scale_const[i] *= 10
if self.debug:
print('{0:>2} failed attack, raising const to {1:.3f}'.format(
i, scale_const[i]))
if self._compare(o_best_score[i], target[i]) and o_best_score[i] != -1:
batch_success += 1
else:
batch_failure += 1
print('Num failures: {0:2d}, num successes: {1:2d}\n'.format(batch_failure, batch_success))
sys.stdout.flush()
# end outer search loop
return o_best_attack
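# Usage sketch (illustrative; `model`, `inputs` and `targets` are assumed to
# be a classifier and a batch prepared by the caller):
#
#   attack = AttackCarliniWagnerL2(targeted=False, max_steps=1000,
#                                  cuda=False, clip_min=0., clip_max=1.)
#   # inputs: (B, C, H, W) float tensor; targets: (B,) long tensor of labels
#   adv_np = attack.run(model, inputs, targets)  # numpy array of adversarials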
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
import matplotlib
matplotlib.use('Agg')
import io
from PIL import Image
from matplotlib import pyplot as plt
import os
import sys
import torch as th
import torchvision as tv
import torch.nn.functional as F
from torch.autograd import Variable
import math
import tqdm
from filelock import FileLock
import threading
import time
import signal
import numpy as np
import itertools as itt
import scipy.linalg
import scipy.stats
from scipy.spatial.distance import pdist, squareform
import cifar_model
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix
import tf_robustify
import vgg
import carlini_wagner_attack
os.system("taskset -p 0xffffffff %d" % os.getpid())
import sh
sh.rm('-rf', 'logs')
import logging
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
from tensorboardX.writer import SummaryWriter
swriter = SummaryWriter('logs')
add_scalar_old = swriter.add_scalar
def add_scalar_and_log(key, value, global_step=0):
logging.info('{}:{}: {}'.format(global_step, key, value))
add_scalar_old(key, value, global_step)
swriter.add_scalar = add_scalar_and_log
def str2bool(x):
return x.lower() == 'true'
def new_inception_conv2d_forward(self, x):
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=False)
tv.models.inception.BasicConv2d.forward = new_inception_conv2d_forward
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--ds', default='cifar10')
parser.add_argument('--model', default='cifar10')
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--eval_bs', default=256, type=int)
parser.add_argument('--eval_batches', default=None, type=int)
parser.add_argument('--epochs', default=50, type=int)
parser.add_argument('--num_evals', default=20, type=int)
parser.add_argument('--train_log_after', default=0, type=int)
parser.add_argument('--stop_after', default=-1, type=int)
parser.add_argument('--cuda', default=True, type=str2bool)
parser.add_argument('--optim', default='sgd', type=str)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--attack_lr', default=.25, type=float)
parser.add_argument('--eps', default=8/255, type=float)
parser.add_argument('--eps_rand', default=None, type=float)
parser.add_argument('--eps_eval', default=None, type=float)
parser.add_argument('--rep', default=0, type=int)
parser.add_argument('--img_size', default=32, type=int)
parser.add_argument('--iters', default=10, type=int)
parser.add_argument('--noise_eps', default='n0.01,s0.01,u0.01,n0.02,s0.02,u0.02,s0.03,n0.03,u0.03', type=str)
parser.add_argument('--noise_eps_detect', default='n0.003,s0.003,u0.003,n0.005,s0.005,u0.005,s0.008,n0.008,u0.008', type=str)
parser.add_argument('--clip_alignments', default=True, type=str2bool)
parser.add_argument('--pgd_strength', default=1., type=float)
parser.add_argument('--debug', default=False, type=str2bool)
parser.add_argument('--mode', default='eval', type=str)
parser.add_argument('--constrained', default=True, type=str2bool)
parser.add_argument('--clamp_attack', default=False, type=str2bool)
parser.add_argument('--clamp_uniform', default=False, type=str2bool)
parser.add_argument('--train_adv', default=False, type=str2bool)
parser.add_argument('--wdiff_samples', default=256, type=int)
parser.add_argument('--maxp_cutoff', default=.999, type=float)
parser.add_argument('--collect_targeted', default=False, type=str2bool)
parser.add_argument('--n_collect', default=10000, type=int)
parser.add_argument('--save_alignments', default=False, type=str2bool)
parser.add_argument('--load_alignments', default=False, type=str2bool)
parser.add_argument('--save_pgd_samples', default=False, type=str2bool)
parser.add_argument('--load_pgd_train_samples', default=None, type=str)
parser.add_argument('--load_pgd_test_samples', default=None, type=str)
parser.add_argument('--fit_classifier', default=True, type=str2bool)
parser.add_argument('--just_detect', default=False, type=str2bool)
parser.add_argument('--attack', default='pgd', type=str)
parser.add_argument('--cw_confidence', default=0, type=float)
parser.add_argument('--cw_c', default=1e-4, type=float)
parser.add_argument('--cw_lr', default=1e-4, type=float)
parser.add_argument('--cw_steps', default=300, type=int)
parser.add_argument('--cw_search_steps', default=10, type=int)
parser.add_argument('--mean_samples', default=16, type=int)
parser.add_argument('--mean_eps', default=.1, type=float)
args = parser.parse_args()
args.cuda = args.cuda and th.cuda.is_available()
args.eps_rand = args.eps_rand or args.eps
args.eps_eval = args.eps_eval or args.eps
args.mean_eps = args.mean_eps * args.eps_eval
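# DataLoader worker watchdog: a daemon thread kills the worker once its parent
# process has exited (i.e. once the parent PID becomes 1).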
def check_pid():
while os.getppid() != 1:
time.sleep(.1)
os.kill(os.getpid(), signal.SIGKILL)
def init_worker(worker_id):
thread = threading.Thread(target=check_pid)
thread.daemon = True
thread.start()
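# Gini coefficient along the last dimension, i.e. how concentrated each row's mass is.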
def gini_coef(a):
a = th.sort(a, dim=-1)[0]
n = a.shape[1]
index = th.arange(1, n+1)[None, :].float()
return (th.sum((2 * index - n - 1) * a, -1) / (n * th.sum(a, -1)))
def main():
    nrms = dict(
        imagenet=([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        imagenet64=([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        cifar10=([.5, .5, .5], [.5, .5, .5]))[args.ds]
if args.ds == 'cifar10' and args.model.startswith('vgg'):
nrms = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
if args.ds == 'imagenet':
if args.model.startswith('inception'):
transforms = [
tv.transforms.Resize((299, 299)),
]
else:
transforms = [
tv.transforms.Resize((256, 256)),
tv.transforms.CenterCrop(224)
]
else:
transforms = [tv.transforms.Resize((args.img_size, args.img_size))]
transforms = tv.transforms.Compose(
transforms + [
tv.transforms.ToTensor(),
])
clip_min = 0.
clip_max = 1.
nrms_mean, nrms_std = [th.FloatTensor(n)[None, :, None, None] for n in nrms]
if args.cuda:
nrms_mean, nrms_std = map(th.Tensor.cuda, (nrms_mean, nrms_std))
if args.ds == 'cifar10':
data_dir = os.path.expanduser('~/data/cifar10')
os.makedirs(data_dir, exist_ok=True)
with FileLock(os.path.join(data_dir, 'lock')):
train_ds = tv.datasets.CIFAR10(data_dir, train=True, transform=transforms, download=True)
test_ds = tv.datasets.CIFAR10(data_dir, train=False, transform=transforms, download=True)
elif args.ds == 'imagenet':
train_folder = os.path.expanduser('~/../stuff/imagenet/train')
test_folder = os.path.expanduser('~/../stuff/imagenet/val')
with FileLock(os.path.join(os.path.dirname(train_folder), 'lock')):
train_ds = tv.datasets.ImageFolder(train_folder, transform=transforms)
test_ds = tv.datasets.ImageFolder(test_folder, transform=transforms)
if args.load_pgd_test_samples:
pgd_path = os.path.expanduser('~/data/advhyp/{}/samples'.format(args.load_pgd_test_samples))
x_test = np.load(os.path.join(pgd_path, 'test_clean.npy'))
y_test = np.load(os.path.join(pgd_path, 'test_y.npy'))
pgd_test = np.load(os.path.join(pgd_path, 'test_pgd.npy'))
if x_test.shape[-1] == 3:
x_test = x_test.transpose((0, 3, 1, 2))
pgd_test = pgd_test.transpose((0, 3, 1, 2))
if len(y_test.shape) == 2:
y_test = y_test.argmax(-1)
test_ds = th.utils.data.TensorDataset(*map(th.from_numpy, (x_test, y_test, pgd_test)))
train_loader = th.utils.data.DataLoader(train_ds, batch_size=args.batch_size, shuffle=True, num_workers=4, drop_last=True, pin_memory=True, worker_init_fn=init_worker)
test_loader = th.utils.data.DataLoader(test_ds, batch_size=args.eval_bs, shuffle=True, num_workers=1, drop_last=False, pin_memory=True, worker_init_fn=init_worker)
if args.ds == 'imagenet64' or args.ds == 'imagenet':
with FileLock(os.path.join(os.path.dirname(train_folder), 'lock')):
if args.model in tv.models.__dict__:
if args.model.startswith('inception'):
net = tv.models.__dict__[args.model](pretrained=True, transform_input=False)
else:
net = tv.models.__dict__[args.model](pretrained=True)
else:
raise ValueError('Unknown model: {}'.format(args.model))
elif args.ds == 'cifar10':
        if args.model == 'tiny':
            net = cifar_model.cifar10_tiny(32, pretrained=args.mode == 'eval', map_location=None if args.cuda else 'cpu')
        elif args.model == 'tinyb':
            net = cifar_model.cifar10_tiny(32, pretrained=args.mode == 'eval', map_location=None if args.cuda else 'cpu', padding=0, trained_adv=args.train_adv)
elif args.model.startswith('vgg'):
net = vgg.__dict__[args.model]()
cp_path = os.path.expanduser('~/models/advhyp/vgg/{}/checkpoint.tar'.format(args.model))
checkpoint = th.load(cp_path, map_location='cpu')
state_dict = {k.replace('module.', ''): v for k, v in checkpoint['state_dict'].items()}
net.load_state_dict(state_dict)
        elif args.model == 'carlini':
            net = cifar_model.carlini(pretrained=args.mode == 'eval', map_location=None if args.cuda else 'cpu', trained_adv=args.train_adv)
        else:
            net = cifar_model.cifar10(128, pretrained=args.mode == 'eval', map_location=None if args.cuda else 'cpu', trained_adv=args.train_adv)
print(net)
def get_layers():
return itt.chain(net.features.children(), net.classifier.children())
def get_layer_names():
return [l.__class__.__name__ for l in get_layers()]
if args.cuda:
net.cuda()
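    # Forward pass with the dataset normalization folded in. With layer_by_layer=True
    # the intermediate outputs are returned as well; the last two entries (penultimate
    # latent and logits) are what the detector consumes.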
def net_forward(x, layer_by_layer=False, from_layer=0):
x = x - nrms_mean # cannot be inplace
x.div_(nrms_std)
if not layer_by_layer:
return net(x)
cldr = list(net.children())
if args.model.startswith('resnet'):
x = net.conv1(x)
x = net.bn1(x)
x = net.relu(x)
x = net.maxpool(x)
x = net.layer1(x)
x = net.layer2(x)
x = net.layer3(x)
x = net.layer4(x)
outputs = [net.avgpool(x)]
flat_features = outputs[-1].view(x.size(0), -1)
outputs.append(net.fc(flat_features))
elif args.model.startswith('inception'):
# 299 x 299 x 3
x = net.Conv2d_1a_3x3(x)
# 149 x 149 x 32
x = net.Conv2d_2a_3x3(x)
# 147 x 147 x 32
x = net.Conv2d_2b_3x3(x)
# 147 x 147 x 64
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 73 x 73 x 64
x = net.Conv2d_3b_1x1(x)
# 73 x 73 x 80
x = net.Conv2d_4a_3x3(x)
# 71 x 71 x 192
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 35 x 35 x 192
x = net.Mixed_5b(x)
# 35 x 35 x 256
x = net.Mixed_5c(x)
# 35 x 35 x 288
x = net.Mixed_5d(x)
# 35 x 35 x 288
x = net.Mixed_6a(x)
# 17 x 17 x 768
x = net.Mixed_6b(x)
# 17 x 17 x 768
x = net.Mixed_6c(x)
# 17 x 17 x 768
x = net.Mixed_6d(x)
# 17 x 17 x 768
x = net.Mixed_6e(x)
# 17 x 17 x 768
x = net.Mixed_7a(x)
# 8 x 8 x 1280
x = net.Mixed_7b(x)
# 8 x 8 x 2048
x = net.Mixed_7c(x)
# 8 x 8 x 2048
x = F.avg_pool2d(x, kernel_size=8)
# 1 x 1 x 2048
outputs = [F.dropout(x, training=net.training)]
# 1 x 1 x 2048
flat_features = outputs[-1].view(x.size(0), -1)
# 2048
outputs.append(net.fc(flat_features))
# 1000 (num_classes)
else:
outputs = [net.features(x)]
for cidx, c in enumerate(net.classifier.children()):
flat_features = outputs[-1].view(x.size(0), -1)
outputs.append(c(flat_features))
return outputs
    loss_fn = th.nn.CrossEntropyLoss(reduction='none')
    loss_fn_adv = th.nn.CrossEntropyLoss(reduction='none')
if args.cuda:
loss_fn.cuda()
loss_fn_adv.cuda()
def get_outputs(x, y, from_layer=0):
outputs = net_forward(x, layer_by_layer=True, from_layer=from_layer)
logits = outputs[-1]
loss = loss_fn(logits, y)
_, preds = th.max(logits, 1)
return outputs, loss, preds
def get_loss_and_preds(x, y):
logits = net_forward(x, layer_by_layer=False)
loss = loss_fn(logits, y)
_, preds = th.max(logits, 1)
return loss, preds
def clip(x, cmin, cmax):
return th.min(th.max(x, cmin), cmax)
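    # Rescale the perturbation x - x_orig to L2 norm eps; used by the L2 PGD variant.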
def project(x, x_orig, eps):
dx = x - x_orig
dx = dx.flatten(1)
dx /= th.norm(dx, p=2, dim=1, keepdim=True) + 1e-9
dx *= eps
return x_orig + dx.view(x.shape)
if args.attack == 'cw':
        cw_attack = carlini_wagner_attack.AttackCarliniWagnerL2(
            cuda=args.cuda, clip_min=clip_min, clip_max=clip_max,
            confidence=args.cw_confidence, initial_const=args.cw_c / (255**2.),
            max_steps=args.cw_steps, search_steps=args.cw_search_steps,
            learning_rate=args.cw_lr)
def attack_cw(x, y):
x_adv = cw_attack.run(net_forward, x, y)
x_adv = th.from_numpy(x_adv)
if args.cuda:
x_adv = x_adv.cuda()
return x_adv
def attack_mean(x, y, eps=args.eps):
x_advs = attack_pgd(x, y, eps)
for _ in tqdm.trange(args.mean_samples, desc='mean attack samples'):
x_noisy = x + th.empty_like(x).uniform_(-args.mean_eps, args.mean_eps)
x_advs += attack_pgd(x_noisy, y, eps)
x_advs = x_advs / (args.mean_samples + 1)
x_advs.clamp_(clip_min, clip_max)
x_advs = clip(x_advs, x-eps, x+eps)
return x_advs
def attack_anti(x, y, eps=args.eps):
pass
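    # PGD with a uniform random start. With l2=True, eps is scaled by sqrt(d) and the
    # update follows the normalized gradient with projection back onto the L2 ball;
    # otherwise it is the usual Linf sign-gradient step with elementwise clipping.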
def attack_pgd(x, y, eps=args.eps, l2=False):
if l2:
eps = np.sqrt(np.prod(x.shape[1:])) * eps
x_orig = x
x = th.empty_like(x).copy_(x)
x.requires_grad_(True)
x.data.add_(th.empty_like(x).uniform_(-eps, eps))
x.data.clamp_(clip_min, clip_max)
for i in range(args.iters):
if x.grad is not None:
x.grad.zero_()
logits = net_forward(x)
loss = th.sum(loss_fn_adv(logits, y))
loss.backward()
if args.constrained:
if l2:
gx = x.grad.flatten(1)
gx /= th.norm(gx, p=2, dim=-1, keepdim=True) + 1e-9
gx = gx.view(x.shape)
x.data.add_(args.attack_lr * eps * gx)
x.data = project(x.data, x_orig, eps)
else:
x.data.add_(args.attack_lr * eps * th.sign(x.grad))
x.data = clip(x.data, x_orig-eps, x_orig+eps)
x.data.clamp_(clip_min, clip_max)
else:
x.data += args.attack_lr * eps * x.grad
if args.debug:
break
if args.pgd_strength < 1.:
mask = (x.data.new_zeros(len(x)).uniform_() <= args.pgd_strength).float()
for _ in x.shape[1:]:
mask = mask[:, None]
x.data = x.data * mask + x_orig.data * (1. - mask)
x = x.detach()
inf_norm = (x - x_orig).abs().max().cpu().numpy().item()
if args.clamp_attack:
with th.no_grad():
diff = th.sign(x - x_orig) * inf_norm
x = x_orig + diff
x = clip(x, clip_min, clip_max)
# if args.constrained:
# assert inf_norm < eps * (1.001), 'inf norm {} > {}'.format(inf_norm, eps)
return x
eval_after = math.floor(args.epochs * len(train_ds) / args.batch_size / args.num_evals)
global_step = 0
def run_train():
nonlocal global_step # noqa: E999
if args.model == 'carlini':
optim = th.optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, nesterov=True)
elif args.model == 'cifar10':
optim = th.optim.Adam(net.parameters(), lr=args.lr)
else:
optim = th.optim.RMSprop(net.parameters(), lr=args.lr)
logging.info('train')
for epoch in tqdm.trange(args.epochs):
for batch in tqdm.tqdm(train_loader):
x, y = batch
if args.cuda:
x, y = x.cuda(), y.cuda()
if global_step % eval_after == 0:
run_eval_basic(True)
if args.train_adv:
x_adv = attack_pgd(x, y)
x = th.cat((x, x_adv))
y = th.cat((y, y))
net.zero_grad()
loss, _ = get_loss_and_preds(x, y)
loss = loss.mean()
net.zero_grad()
loss.backward()
optim.step()
if args.model == 'carlini' and not args.train_adv:
for pg in optim.param_groups:
pg['lr'] = args.lr * ((1. - 1e-6)**global_step)
global_step += 1
if args.model == 'cifar10' and not args.train_adv:
if epoch == 80 or epoch == 120:
for pg in optim.param_groups:
pg['lr'] *= .1
with open('logs/model.ckpt', 'wb') as f:
th.save(net.state_dict(), f)
def run_eval_basic(with_attack=True):
logging.info('eval')
eval_loss_clean = []
eval_acc_clean = []
eval_loss_rand = []
eval_acc_rand = []
eval_loss_pgd = []
eval_acc_pgd = []
eval_loss_pand = []
eval_acc_pand = []
all_outputs = []
diffs_rand, diffs_pgd, diffs_pand = [], [], []
eval_preds_clean, eval_preds_rand, eval_preds_pgd, eval_preds_pand = [], [], [], []
norms_clean, norms_pgd, norms_rand, norms_pand = [], [], [], []
norms_dpgd, norms_drand, norms_dpand = [], [], []
eval_important_valid = []
eval_loss_incr = []
eval_conf_pgd = []
wdiff_corrs = []
udiff_corrs = []
grad_corrs = []
minps_clean = []
minps_pgd = []
acc_clean_after_corr = []
acc_pgd_after_corr = []
eval_det_clean = []
eval_det_pgd = []
net.train(False)
for eval_batch in tqdm.tqdm(itt.islice(test_loader, args.eval_batches)):
x, y = eval_batch
if args.cuda:
x, y = x.cuda(), y.cuda()
loss_clean, preds_clean = get_loss_and_preds(x, y)
eval_loss_clean.append((loss_clean.data).cpu().numpy())
eval_acc_clean.append((th.eq(preds_clean, y).float()).cpu().numpy())
eval_preds_clean.extend(preds_clean)
if with_attack:
if args.clamp_uniform:
x_rand = x + th.sign(th.empty_like(x).uniform_(-args.eps_rand, args.eps_rand)) * args.eps_rand
else:
x_rand = x + th.empty_like(x).uniform_(-args.eps_rand, args.eps_rand)
loss_rand, preds_rand = get_loss_and_preds(x_rand, y)
eval_loss_rand.append((loss_rand.data).cpu().numpy())
eval_acc_rand.append((th.eq(preds_rand, y).float()).cpu().numpy())
eval_preds_rand.extend(preds_rand)
if not args.load_pgd_test_samples:
x_pgd = attack_pgd(x, preds_clean, eps=args.eps_eval)
loss_pgd, preds_pgd = get_loss_and_preds(x_pgd, y)
eval_loss_pgd.append((loss_pgd.data).cpu().numpy())
eval_acc_pgd.append((th.eq(preds_pgd, y).float()).cpu().numpy())
eval_preds_pgd.extend(preds_pgd)
loss_incr = loss_pgd - loss_clean
eval_loss_incr.append(loss_incr.detach().cpu())
x_pand = x_pgd + th.empty_like(x_pgd).uniform_(-args.eps_rand, args.eps_rand)
loss_pand, preds_pand = get_loss_and_preds(x_pand, y)
eval_loss_pand.append((loss_pand.data).cpu().numpy())
eval_acc_pand.append((th.eq(preds_pand, y).float()).cpu().numpy())
eval_preds_pand.extend(preds_pand)
if args.debug:
break
swriter.add_scalar('eval_loss_clean', np.concatenate(eval_loss_clean).mean(), global_step)
swriter.add_scalar('eval_acc_clean', np.concatenate(eval_acc_clean).mean(), global_step)
swriter.add_scalar('eval_loss_rand', np.concatenate(eval_loss_rand).mean(), global_step)
swriter.add_scalar('eval_acc_rand', np.concatenate(eval_acc_rand).mean(), global_step)
swriter.add_scalar('eval_loss_pgd', np.concatenate(eval_loss_pgd).mean(), global_step)
swriter.add_scalar('eval_acc_pgd', np.concatenate(eval_acc_pgd).mean(), global_step)
swriter.add_scalar('eval_loss_incr', th.cat(eval_loss_incr).mean(), global_step)
swriter.add_scalar('eval_loss_pand', np.concatenate(eval_loss_pand).mean(), global_step)
swriter.add_scalar('eval_acc_pand', np.concatenate(eval_acc_pand).mean(), global_step)
net.train(False)
def run_eval(with_attack=True):
logging.info('eval')
net.train(False)
eval_loss_clean = []
eval_acc_clean = []
eval_loss_rand = []
eval_acc_rand = []
eval_loss_pgd = []
eval_acc_pgd = []
eval_loss_pand = []
eval_acc_pand = []
all_outputs = []
diffs_rand, diffs_pgd, diffs_pand = [], [], []
eval_preds_clean, eval_preds_rand, eval_preds_pgd, eval_preds_pand = [], [], [], []
norms_clean, norms_pgd, norms_rand, norms_pand = [], [], [], []
norms_dpgd, norms_drand, norms_dpand = [], [], []
eval_important_valid = []
eval_loss_incr = []
eval_conf_pgd = []
wdiff_corrs = []
udiff_corrs = []
grad_corrs = []
minps_clean = []
minps_pgd = []
acc_clean_after_corr = []
acc_pgd_after_corr = []
eval_det_clean = []
eval_det_pgd = []
eval_x_pgd_l0 = []
eval_x_pgd_l2 = []
all_eval_important_pixels = []
all_eval_important_single_pixels = []
all_eval_losses_per_pixel = []
if args.save_pgd_samples:
for loader, name in ((train_loader, 'train'), (test_loader, 'test')):
train_x = []
train_y = []
train_pgd = []
for eval_batch in tqdm.tqdm(loader):
x, y = eval_batch
if args.cuda:
x, y = x.cuda(), y.cuda()
_, p = get_loss_and_preds(x, y)
train_pgd.append(attack_pgd(x, p, eps=args.eps_eval).cpu().numpy())
train_x.append(x.cpu().numpy())
train_y.append(y.cpu().numpy())
train_pgd = np.concatenate(train_pgd)
train_x = np.concatenate(train_x)
train_y = np.concatenate(train_y)
np.save('logs/{}_pgd.npy'.format(name), train_pgd)
np.save('logs/{}_clean.npy'.format(name), train_x)
np.save('logs/{}_y.npy'.format(name), train_y)
exit(0)
X, Y = [], []
with th.no_grad():
for eval_batch in tqdm.tqdm(train_loader):
x, y = eval_batch
X.append(x.cpu().numpy()), Y.append(y.cpu().numpy())
if args.n_collect > 0 and sum(len(x) for x in X) > args.n_collect:
if args.ds.startswith('imagenet'):
break
y_nc, y_cts = np.unique(Y, return_counts=True)
if y_nc.size == 1000:
if np.all(y_cts >= 5):
break
else:
break
logging.debug('need more samples, have {} classes with min size {}...'.format(y_nc.size, np.min(y_cts)))
if args.debug:
break
X, Y = map(np.concatenate, (X, Y))
pgd_train = None
if args.load_pgd_train_samples:
pgd_path = os.path.expanduser('~/data/advhyp/{}/samples'.format(args.load_pgd_train_samples))
X = np.load(os.path.join(pgd_path, 'train_clean.npy'))
Y = np.load(os.path.join(pgd_path, 'train_y.npy'))
pgd_train = np.load(os.path.join(pgd_path, 'train_pgd.npy'))
if X.shape[-1] == 3:
X = X.transpose((0, 3, 1, 2))
pgd_train = pgd_train.transpose((0, 3, 1, 2))
if len(Y.shape) == 2:
Y = Y.argmax(-1)
if args.model.startswith('resnet') or args.model.startswith('inception'):
w_cls = net.fc.weight
else:
w_cls = list(net.classifier.children())[-1].weight
nb_classes = w_cls.shape[0]
if args.n_collect > 0 and args.load_pgd_train_samples:
all_idcs = np.arange(len(X))
while True:
np.random.shuffle(all_idcs)
idcs = all_idcs[:args.n_collect]
Y_partial = Y[idcs]
y_nc = np.unique(Y_partial).size
if y_nc == nb_classes:
break
logging.debug('only have {} classes, reshuffling...'.format(y_nc))
X, Y = X[idcs], Y[idcs]
if pgd_train is not None:
pgd_train = pgd_train[idcs]
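        # The detector operates on the flattened penultimate activation plus the logits.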
def latent_and_logits_fn(x):
lat, log = net_forward(x, True)[-2:]
lat = lat.reshape(lat.shape[0], -1)
return lat, log
noise_eps_detect = args.noise_eps_detect
if noise_eps_detect is None:
noise_eps_detect = args.noise_eps
        predictor = tf_robustify.collect_statistics(
            X, Y, latent_and_logits_fn_th=latent_and_logits_fn,
            nb_classes=nb_classes, weights=w_cls, cuda=args.cuda, debug=args.debug,
            targeted=args.collect_targeted, noise_eps=args.noise_eps.split(','),
            noise_eps_detect=noise_eps_detect.split(','),
            num_noise_samples=args.wdiff_samples, batch_size=args.eval_bs,
            pgd_eps=args.eps, pgd_lr=args.attack_lr, pgd_iters=args.iters,
            clip_min=clip_min, clip_max=clip_max, p_ratio_cutoff=args.maxp_cutoff,
            save_alignments_dir='logs/stats' if args.save_alignments else None,
            load_alignments_dir=os.path.expanduser('~/data/advhyp/{}/stats'.format(args.model)) if args.load_alignments else None,
            clip_alignments=args.clip_alignments, pgd_train=pgd_train,
            fit_classifier=args.fit_classifier, just_detect=args.just_detect)
next(predictor)
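        # collect_statistics is a generator: the first next() call runs the (possibly
        # cached) statistics collection; afterwards predictor.send(batch) returns a
        # (corrected_prediction, detected_flag) pair per sample.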
if args.save_alignments:
exit(0)
for eval_batch in tqdm.tqdm(itt.islice(test_loader, args.eval_batches)):
if args.load_pgd_test_samples:
x, y, x_pgd = eval_batch
else:
x, y = eval_batch
if args.cuda:
x, y = x.cuda(), y.cuda()
if args.load_pgd_test_samples:
x_pgd = x_pgd.cuda()
loss_clean, preds_clean = get_loss_and_preds(x, y)
eval_loss_clean.append((loss_clean.data).cpu().numpy())
eval_acc_clean.append((th.eq(preds_clean, y).float()).cpu().numpy())
eval_preds_clean.extend(preds_clean)
if with_attack:
if args.clamp_uniform:
x_rand = x + th.sign(th.empty_like(x).uniform_(-args.eps_rand, args.eps_rand)) * args.eps_rand
else:
x_rand = x + th.empty_like(x).uniform_(-args.eps_rand, args.eps_rand)
loss_rand, preds_rand = get_loss_and_preds(x_rand, y)
eval_loss_rand.append((loss_rand.data).cpu().numpy())
eval_acc_rand.append((th.eq(preds_rand, y).float()).cpu().numpy())
eval_preds_rand.extend(preds_rand)
if args.attack == 'pgd':
if not args.load_pgd_test_samples:
x_pgd = attack_pgd(x, preds_clean, eps=args.eps_eval)
elif args.attack == 'pgdl2':
x_pgd = attack_pgd(x, preds_clean, eps=args.eps_eval, l2=True)
elif args.attack == 'cw':
x_pgd = attack_cw(x, preds_clean)
elif args.attack == 'mean':
x_pgd = attack_mean(x, preds_clean, eps=args.eps_eval)
eval_x_pgd_l0.append(th.max(th.abs((x - x_pgd).view(x.size(0), -1)), -1)[0].detach().cpu().numpy())
eval_x_pgd_l2.append(th.norm((x - x_pgd).view(x.size(0), -1), p=2, dim=-1).detach().cpu().numpy())
loss_pgd, preds_pgd = get_loss_and_preds(x_pgd, y)
eval_loss_pgd.append((loss_pgd.data).cpu().numpy())
eval_acc_pgd.append((th.eq(preds_pgd, y).float()).cpu().numpy())
                conf_pgd = confusion_matrix(preds_clean.cpu(), preds_pgd.cpu(), labels=np.arange(nb_classes))
conf_pgd -= np.diag(np.diag(conf_pgd))
eval_conf_pgd.append(conf_pgd)
eval_preds_pgd.extend(preds_pgd)
loss_incr = loss_pgd - loss_clean
eval_loss_incr.append(loss_incr.detach().cpu())
x_pand = x_pgd + th.empty_like(x_pgd).uniform_(-args.eps_rand, args.eps_rand)
loss_pand, preds_pand = get_loss_and_preds(x_pand, y)
eval_loss_pand.append((loss_pand.data).cpu().numpy())
eval_acc_pand.append((th.eq(preds_pand, y).float()).cpu().numpy())
eval_preds_pand.extend(preds_pand)
preds_clean_after_corr, det_clean = predictor.send(x.cpu().numpy()).T
preds_pgd_after_corr, det_pgd = predictor.send(x_pgd.cpu().numpy()).T
acc_clean_after_corr.append(preds_clean_after_corr == y.cpu().numpy())
acc_pgd_after_corr.append(preds_pgd_after_corr == y.cpu().numpy())
eval_det_clean.append(det_clean)
eval_det_pgd.append(det_pgd)
if args.debug:
break
swriter.add_scalar('eval_loss_clean', np.concatenate(eval_loss_clean).mean(), global_step)
swriter.add_scalar('eval_acc_clean', np.concatenate(eval_acc_clean).mean(), global_step)
swriter.add_scalar('eval_loss_rand', np.concatenate(eval_loss_rand).mean(), global_step)
swriter.add_scalar('eval_acc_rand', np.concatenate(eval_acc_rand).mean(), global_step)
swriter.add_scalar('eval_loss_pgd', np.concatenate(eval_loss_pgd).mean(), global_step)
swriter.add_scalar('eval_acc_pgd', np.concatenate(eval_acc_pgd).mean(), global_step)
swriter.add_scalar('eval_loss_incr', th.cat(eval_loss_incr).mean(), global_step)
swriter.add_scalar('eval_loss_pand', np.concatenate(eval_loss_pand).mean(), global_step)
swriter.add_scalar('eval_acc_pand', np.concatenate(eval_acc_pand).mean(), global_step)
swriter.add_histogram('class_dist_clean', th.stack(eval_preds_clean), global_step)
swriter.add_histogram('class_dist_rand', th.stack(eval_preds_rand), global_step)
swriter.add_histogram('class_dist_pgd', th.stack(eval_preds_pgd), global_step)
swriter.add_histogram('class_dist_pand', th.stack(eval_preds_pand), global_step)
swriter.add_scalar('acc_clean_after_corr', np.concatenate(acc_clean_after_corr).mean(), global_step)
swriter.add_scalar('acc_pgd_after_corr', np.concatenate(acc_pgd_after_corr).mean(), global_step)
swriter.add_scalar('det_clean', np.concatenate(eval_det_clean).mean(), global_step)
swriter.add_scalar('det_pgd', np.concatenate(eval_det_pgd).mean(), global_step)
swriter.add_scalar('x_pgd_l0', np.concatenate(eval_x_pgd_l0).mean(), global_step)
swriter.add_scalar('x_pgd_l2', np.concatenate(eval_x_pgd_l2).mean(), global_step)
net.train(True)
if args.mode == 'eval':
for p in net.parameters():
p.requires_grad_(False)
run_eval()
elif args.mode == 'train':
run_train()
if __name__ == '__main__':
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import functools
import cleverhans.model
import torch
from cleverhans import utils_tf
from cleverhans.attacks import Attack
import cleverhans.attacks
from cleverhans.utils_tf import clip_eta
# disable tf logging
# some of these might have to be commented out to use verbose=True in the
# adaptive attack
import warnings
import logging
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import os
import math
import numpy as np
import tensorflow as tf
from cleverhans.attacks import MadryEtAl
from cleverhans.dataset import CIFAR10
from cleverhans.model_zoo.madry_lab_challenges.cifar10_model import \
make_wresnet as ResNet
from cleverhans.utils_tf import initialize_uninitialized_global_variables
import tf_robustify
from cleverhans.augmentation import random_horizontal_flip, random_shift
from active_tests.decision_boundary_binarization import \
interior_boundary_discrimination_attack, format_result
from argparse_utils import DecisionBoundaryBinarizationSettings
from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper
from logit_matching_attack import \
ProjectedGradientDescentWithDetectorLogitMatching
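# Builds the statistics-based detector from tf_robustify on top of the Madry WRN
# logits; alignment statistics are cached on disk and reloaded on subsequent runs.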
def init_defense(sess, x, preds, batch_size, multi_noise=False):
data = CIFAR10()
if multi_noise:
defense_data_path = os.path.join("checkpoints/tf_madry_wrn_vanilla",
"defense_alignment_data_multi_noise")
else:
defense_data_path = os.path.join("checkpoints/tf_madry_wrn_vanilla",
"defense_alignment_data")
if os.path.exists(defense_data_path):
print("Trying to load defense statistics")
load_alignments_dir = defense_data_path
save_alignments_dir = None
else:
print("Defense statistics not found; generating and saving them now.")
load_alignments_dir = None
save_alignments_dir = defense_data_path
dataset_size = data.x_train.shape[0]
dataset_train = data.to_tensorflow()[0]
dataset_train = dataset_train.map(
lambda x, y: (random_shift(random_horizontal_flip(x)), y), 4)
dataset_train = dataset_train.batch(batch_size)
dataset_train = dataset_train.prefetch(16)
x_train, y_train = data.get_set('train')
x_train *= 255
nb_classes = y_train.shape[1]
n_collect = 10000 # TODO: for debugging set to 100, otherwise to 10000
p_ratio_cutoff = .999
just_detect = True
clip_alignments = True
fit_classifier = True
noise_eps = 'n30.0'
num_noise_samples = 256
if multi_noise:
noises = 'n0.003,s0.003,u0.003,n0.005,s0.005,u0.005,s0.008,n0.008,u0.008'.split(
',')
noise_eps_detect = []
for n in noises:
new_noise = n[0] + str(float(n[1:]) * 255)
noise_eps_detect.append(new_noise)
else:
noise_eps_detect = 'n30.0'
# these attack parameters are just for initializing the defense
eps = 8.0
pgd_params = {
'eps': eps,
'eps_iter': (eps / 5),
'nb_iter': 10,
'clip_min': 0,
'clip_max': 255
}
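  # Walk up the graph from the prediction tensor to the final MatMul; its inputs are
  # the latent features and the readout weights that the detector needs.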
logits_op = preds.op
while logits_op.type != 'MatMul':
logits_op = logits_op.inputs[0].op
latent_x_tensor, weights = logits_op.inputs
logits_tensor = preds
predictor = tf_robustify.collect_statistics(
x_train[:n_collect], y_train[:n_collect], x, sess,
logits_tensor=logits_tensor,
latent_x_tensor=latent_x_tensor,
weights=weights,
nb_classes=nb_classes,
p_ratio_cutoff=p_ratio_cutoff,
noise_eps=noise_eps,
noise_eps_detect=noise_eps_detect,
pgd_eps=pgd_params['eps'],
pgd_lr=pgd_params['eps_iter'] / pgd_params['eps'],
pgd_iters=pgd_params['nb_iter'],
save_alignments_dir=save_alignments_dir,
load_alignments_dir=load_alignments_dir,
clip_min=pgd_params['clip_min'],
clip_max=pgd_params['clip_max'],
batch_size=batch_size,
num_noise_samples=num_noise_samples,
debug_dict=None,
debug=False,
targeted=False,
pgd_train=None,
fit_classifier=fit_classifier,
clip_alignments=clip_alignments,
just_detect=just_detect,
)
next(predictor)
return predictor
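# Binarized evaluation: run the attack graph on a batch, then ask the defense
# predictor whether the resulting adversarial examples are detected. A sample counts
# as a success only if it fools the binary classifier *and* evades the detector.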
def do_binarized_eval(sess, x, y, x_adv, binarized_logits, binarized_preds, x_set,
y_set, predictor, batch_size, binarized_sess_kwargs={},
inverted_detector_test=False):
n_batches = math.ceil(x_set.shape[0] / batch_size)
  # first generate adversarial examples
x_adv_set, binarized_logits_set, binarized_p_set = [], [], []
for b in range(n_batches):
values = sess.run((x_adv, binarized_logits, binarized_preds),
{**binarized_sess_kwargs,
x: x_set[b * batch_size:(b + 1) * batch_size],
y: y_set[b * batch_size:(b + 1) * batch_size]})
x_adv_set.append(values[0])
binarized_logits_set.append(values[1])
binarized_p_set.append(values[2])
x_adv_set = np.concatenate(x_adv_set)
binarized_logits_set = np.concatenate(binarized_logits_set)
binarized_p_set = np.concatenate(binarized_p_set)
# now run test
p_set, p_det = np.concatenate(
[predictor.send(x_adv_set[b * batch_size:(b + 1) * batch_size]) for b in
range(n_batches)]).T
correctly_classified = np.equal(binarized_p_set,
y_set[:len(p_set)].argmax(-1))
if inverted_detector_test:
adversarial_example_detected = np.equal(p_det, False)
else:
adversarial_example_detected = np.equal(p_det, True)
# model_fooled = np.logical_or(
# np.logical_and(~correctly_classified, ~adversarial_example_detected), # fooled classifier & evaded detector
# np.logical_and(correctly_classified, adversarial_example_detected) # did not fool classifier but triggered detector (false positive)
# )
model_fooled = np.logical_and(~correctly_classified,
~adversarial_example_detected) # fooled classifier & evaded detector
# print(len(adversarial_example_detected), np.sum(~correctly_classified),
# np.sum(adversarial_example_detected))
# asr = model_fooled.mean()
# acc = correctly_classified.mean()
# print('Accuracy of base model: %0.4f' % acc)
# print('ASR (w/ detection defense): %0.4f' % asr)
#print(model_fooled, ~correctly_classified, ~adversarial_example_detected)
#print(binarized_logits_set)
return model_fooled, (x_adv_set, binarized_logits_set)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true')
parser.add_argument('--multi-noise', action='store_true')
parser.add_argument("--n-samples", default=512, type=int)
parser.add_argument("--n-boundary-points", default=49, type=int)
parser.add_argument("--n-inner-points", default=10, type=int)
parser.add_argument("--batch-size", default=512, type=int)
parser.add_argument("--attack", choices=("original", "adaptive",
"adaptive-eot"),
default="original")
parser.add_argument("--dont-verify-training-data", action="store_true")
parser.add_argument("--use-boundary-adverarials", action="store_true")
parser.add_argument("--inverted-test", action="store_true")
args = parser.parse_args()
if args.inverted_test:
print("Running inverted test")
else:
print("Running normal/non-inverted test")
# load data
data = CIFAR10()
x_test, y_test = data.get_set('test')
sess = tf.Session()
img_rows, img_cols, nchannels = x_test.shape[1:4]
nb_classes = y_test.shape[1]
# define model & restore weights
# Define input TF placeholder
x_placeholder = tf.placeholder(
tf.float32, shape=(None, img_rows, img_cols, nchannels))
y_placeholder = tf.placeholder(tf.int32, shape=(None,))
# needed for adaptive attack
x_reference_placeholder = tf.placeholder(
tf.float32, shape=(None, img_rows, img_cols, nchannels))
cifar_model = ResNet(scope='ResNet')
ckpt = tf.train.get_checkpoint_state("checkpoints/tf_madry_wrn_vanilla")
saver = tf.train.Saver(var_list=dict(
(v.name.split('/', 1)[1].split(':')[0], v) for v in
tf.global_variables()))
saver.restore(sess, ckpt.model_checkpoint_path)
initialize_uninitialized_global_variables(sess)
class Model:
def __init__(self, model):
self.model = model
def __call__(self, x, features_only=True):
assert features_only
return self.get_features(x)
def get_features(self, x):
return self.model.fprop(x * 255.0)["Flatten2"]
def get_features_and_gradients(self, x):
features = self.model.fprop(x * 255.0)["Flatten2"]
grad = tf.gradients(features, x)[0]
return features, grad
def get_features_logits_and_gradients(self, x):
values = self.model.fprop(x * 255.0)
features = values["Flatten2"]
predictions = values["logits"]
grad = tf.gradients(features, x)[0]
return features, grad, predictions
model = Model(cifar_model)
features, feature_gradients, logits = model.get_features_logits_and_gradients(
x_placeholder)
# setup defense
# if multi_noise = True, instantiate the defense with 9 types of noise.
# if multi_noise = False, instantiate the defense with a single type of high-magnitude noise.
print("multi noise:", args.multi_noise)
defense_predictor = init_defense(sess, x_placeholder, logits, args.batch_size,
multi_noise=args.multi_noise)
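  # Wraps the frozen feature extractor with a fresh binary readout; its weight and
  # bias come in through placeholders and are fed per binarization run (see run_attack).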
class ModelWrapper(cleverhans.model.Model):
def __init__(self, model, weight_shape, bias_shape):
self.weight = tf.placeholder(dtype=tf.float32, shape=weight_shape)
self.bias = tf.placeholder(dtype=tf.float32, shape=bias_shape)
self.model = model
self.first = True
def fprop(self, x, **kwargs):
y = self.model.get_features(x, *kwargs)
logits = y @ tf.transpose(self.weight) + tf.reshape(self.bias, (1, -1))
return {"logits": logits}
def logits_and_predictions(self, x=None):
      if x is None: assert not self.first
if self.first:
self.logits = self(x)
self.predictions = tf.argmax(self.logits, 1)
self.first = False
return self.logits, self.predictions
def run_features(x: np.ndarray, features_only=True,
features_and_logits=False):
if features_only:
assert not features_and_logits
targets = features
elif features_and_logits:
targets = (features, logits)
else:
targets = logits
x = x.transpose(0, 2, 3, 1) * 255.0
return sess.run(targets,
feed_dict={x_placeholder: x})
def run_features_and_gradients(x: np.ndarray):
x = x.transpose(0, 2, 3, 1) * 255.0
return sess.run((features, feature_gradients),
feed_dict={x_placeholder: x})
feature_extractor = TensorFlow1ToPyTorchWrapper(
logit_forward_pass=lambda x, features_only=False,
features_and_logits=False: run_features(x, features_only,
features_and_logits),
logit_forward_and_backward_pass=lambda x: run_features_and_gradients(x)
)
# prepare dataloader
random_indices = list(range(len(x_test)))
np.random.shuffle(random_indices)
x_batch = []
y_batch = []
for j in range(args.n_samples):
x_, y_ = x_test[random_indices[j]], y_test[random_indices[j]]
x_batch.append(x_)
y_batch.append(y_)
x_batch = np.array(x_batch).transpose((0, 3, 1, 2))
y_batch = np.array(y_batch)
from utils import build_dataloader_from_arrays
test_loader = build_dataloader_from_arrays(x_batch, y_batch, batch_size=32)
# TODO: update shapes? apparently not necessary...
wrapped_model = ModelWrapper(model, (2, 640), (2,))
baseline_cifar_pgd = MadryEtAl(cifar_model, sess=sess)
original_pgd_params = {
# ord: ,
'eps': 8,
'eps_iter': (8 / 5),
'nb_iter': 10,
'clip_min': 0,
'clip_max': 255
}
adaptive_pgd_params = {
# ord: ,
'eps': 8,
'eps_iter': 8.0 / 300,
'nb_iter': 300,
'clip_min': 0,
'clip_max': 255,
'x_reference': x_reference_placeholder,
'y': y_placeholder
}
if args.attack == "original":
pgd = MadryEtAl(wrapped_model, sess=sess)
print("Using MadryEtAl attack")
elif args.attack == "adaptive":
pgd = ProjectedGradientDescentWithDetectorLogitMatching(
wrapped_model,
lambda x: model.model.get_logits(x),
sess=sess,
verbose=False)
print("Using logit-matching attack")
elif args.attack == "adaptive-eot":
pgd = ProjectedGradientDescentWithDetectorLogitMatching(
wrapped_model,
lambda x: model.model.get_logits(x),
sess=sess,
eot_ensemble_size=20,
verbose=False)
print("Using logit-matching attack w/ EOT")
else:
raise ValueError("invalid attack")
# was 1.75
far_off_distance = 1.75 # TODO, was 1.01
larger_pgd_params = {**original_pgd_params}
larger_pgd_params["eps"] *= far_off_distance
pgd_params = original_pgd_params if args.attack == "original" else adaptive_pgd_params
adv_x = tf.stop_gradient(pgd.generate(x_placeholder, **pgd_params))
cifar_adv_x = tf.stop_gradient(
baseline_cifar_pgd.generate(x_placeholder, **original_pgd_params))
  larger_cifar_adv_x = tf.stop_gradient(
    baseline_cifar_pgd.generate(x_placeholder, **larger_pgd_params))
adv_binarized_logits = wrapped_model.get_logits(adv_x)
adv_binarized_predictions = tf.argmax(adv_binarized_logits, 1)
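  # attack_fn for the binarization test: feed the learned binary readout into the
  # placeholders, run the (adaptive) attack and report which samples both fooled the
  # readout and evaded the detector.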
def run_attack(m, l, kwargs, inverted_detector_test=False):
linear_layer = m[-1]
del m
weights_feed_dict = {
wrapped_model.weight: linear_layer.weight.data.numpy(),
wrapped_model.bias: linear_layer.bias.data.numpy()
}
if "reference_points_x" in kwargs:
weights_feed_dict[x_reference_placeholder] = \
kwargs["reference_points_x"].numpy().transpose((0, 2, 3, 1)) * 255.0
# should_be_rejected = ~verify_valid_input_data(kwargs["reference_points_x"])
# print("should_be_rejected", should_be_rejected)
for x, y in l:
x = x.numpy().transpose((0, 2, 3, 1)) * 255.0
y = y.numpy()
is_adv_np, (x_adv_np, logits_np) = do_binarized_eval(
sess=sess, x=x_placeholder, y=y_placeholder, x_adv=adv_x,
batch_size=args.batch_size,
binarized_logits=adv_binarized_logits,
binarized_preds=adv_binarized_predictions, x_set=x, y_set=y,
predictor=defense_predictor, binarized_sess_kwargs=weights_feed_dict,
inverted_detector_test=inverted_detector_test
)
# print(is_adv_np, y, logits_np)
return is_adv_np, (torch.Tensor(x_adv_np), torch.Tensor(logits_np))
def verify_valid_input_data(x_set):
"""Returns True if something is not detected as an adversarial example."""
x_set = x_set.numpy().transpose((0, 2, 3, 1)) * 255.0
n_batches = math.ceil(x_set.shape[0] / args.batch_size)
_, p_det = np.concatenate(
[defense_predictor.send(
x_set[b * args.batch_size:(b + 1) * args.batch_size]
) for b in range(n_batches)]
).T
    # p_det is True if a possible adversarial example has been detected
valid_sample = np.equal(p_det, False)
return valid_sample
def get_boundary_adversarials(x, y, n_samples, epsilon):
"""Generate adversarial examples for the base classifier."""
assert len(x.shape) == 3
del y
device = x.device
x = x.unsqueeze(0).numpy()
x = x.transpose((0, 2, 3, 1)) * 255.0
x = np.repeat(x, n_samples, axis=0)
# select correct tf placeholder depending on the epsilon ball
if epsilon == pgd_params["eps"] / 255.0:
x_adv_ph = cifar_adv_x
elif epsilon == larger_pgd_params["eps"] / 255.0:
x_adv_ph = larger_cifar_adv_x
else:
raise ValueError("Cannot generate adversarials at eps =", epsilon)
for _ in range(10):
x_advs = []
for x_ in np.array_split(x, int(np.ceil(len(x) / args.batch_size))):
x_advs.append(sess.run(x_adv_ph, feed_dict={x_placeholder: x_}))
x_adv = np.concatenate(x_advs, 0)
x_adv = x_adv.transpose((0, 3, 1, 2)) / 255.0
      x_adv = torch.from_numpy(x_adv).to(device)
# make sure adversarial examples are really detected as adversarial examples
is_valid = verify_valid_input_data(x_adv)
is_invalid = ~is_valid
if np.all(is_invalid):
        # generate until we finally find an adversarial example that gets
        # detected
break
else:
warnings.warn("Could not generate adversarial example that gets "
"detected after 10 trials.")
return x_adv
if args.inverted_test:
additional_settings = dict(
n_boundary_points=args.n_boundary_points,
n_boundary_adversarial_points=1,
n_far_off_boundary_points=1,
n_far_off_adversarial_points=1,
)
else:
additional_settings = dict(
n_boundary_points=args.n_boundary_points,
n_boundary_adversarial_points=args.n_boundary_points - 1,
n_far_off_boundary_points=1,
n_far_off_adversarial_points=0,
)
scores_logit_differences_and_validation_accuracies = \
interior_boundary_discrimination_attack(
feature_extractor,
test_loader,
attack_fn=functools.partial(
run_attack,
inverted_detector_test=args.inverted_test
),
linearization_settings=DecisionBoundaryBinarizationSettings(
epsilon=8 / 255.0,
norm="linf",
lr=10000,
n_inner_points=args.n_inner_points,
adversarial_attack_settings=None,
optimizer="sklearn",
**additional_settings,
),
rescale_logits="adaptive",
n_samples=args.n_samples,
device="cpu",
batch_size=args.batch_size,
# decision_boundary_closeness=0.999,
n_samples_evaluation=200,
n_samples_asr_evaluation=200,
# verify_valid_boundary_training_data_fn=None if args.dont_verify_training_data else verify_valid_input_data,
verify_valid_boundary_training_data_fn=verify_valid_input_data,
get_boundary_adversarials_fn=get_boundary_adversarials,
verify_valid_inner_training_data_fn=None,
verify_valid_input_validation_data_fn=None,
# verify_valid_input_data if args.use_boundary_adverarials else None,
# get_boundary_adversarials_fn=get_boundary_adversarials if args.use_boundary_adverarials else None,
fill_batches_for_verification=False,
far_off_distance=far_off_distance
)
print(format_result(scores_logit_differences_and_validation_accuracies,
args.n_samples))
if __name__ == "__main__":
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import functools
import cleverhans.model
import torch
from cleverhans import utils_tf
from cleverhans.attacks import Attack
import cleverhans.attacks
from cleverhans.utils_tf import clip_eta
import os
import math
import numpy as np
import tensorflow as tf
class ProjectedGradientDescentWithDetectorLogitMatching(Attack):
def __init__(self, model, get_features_for_detector,
sess=None, dtypestr='float32',
default_rand_init=True, verbose=False, eot_ensemble_size=None,
eot_multinoise=False, **kwargs):
"""
    Create a ProjectedGradientDescentWithDetectorLogitMatching instance.
Note: the model parameter should be an instance of the
cleverhans.model.Model abstraction provided by CleverHans.
"""
super(ProjectedGradientDescentWithDetectorLogitMatching, self).__init__(model, sess=sess,
dtypestr=dtypestr, **kwargs)
self.feedable_kwargs = ('eps', 'eps_iter', 'clip_min',
'clip_max', 'loss_lambda')
self.structural_kwargs = ['ord', 'nb_iter', 'rand_init', 'sanity_checks']
self.default_rand_init = default_rand_init
self.get_features_for_detector = get_features_for_detector
self.verbose = verbose
self.eot_ensemble_size = eot_ensemble_size
assert eot_ensemble_size is None or eot_ensemble_size > 0
self.eot_multinoise = eot_multinoise
def generate(self, x, x_reference, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: See `parse_params`
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
asserts = []
# If a data range was specified, check that the input was in that range
if self.clip_min is not None:
asserts.append(utils_tf.assert_greater_equal(x,
tf.cast(self.clip_min,
x.dtype)))
if self.clip_max is not None:
asserts.append(utils_tf.assert_less_equal(x,
tf.cast(self.clip_max,
x.dtype)))
# Initialize loop variables
if self.rand_init:
eta = tf.random_uniform(tf.shape(x),
tf.cast(-self.rand_minmax, x.dtype),
tf.cast(self.rand_minmax, x.dtype),
dtype=x.dtype)
else:
eta = tf.zeros(tf.shape(x))
# Clip eta
eta = clip_eta(eta, self.ord, self.eps)
adv_x = x + eta
if self.clip_min is not None or self.clip_max is not None:
adv_x = utils_tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
fgm_params = {
'eps': self.eps_iter,
'ord': self.ord,
'clip_min': self.clip_min,
'clip_max': self.clip_max,
"eot_ensemble_size": self.eot_ensemble_size,
"eot_multinoise": self.eot_multinoise,
}
if self.ord == 1:
raise NotImplementedError("It's not clear that FGM is a good inner loop"
" step for PGD when ord=1, because ord=1 FGM "
" changes only one pixel at a time. We need "
" to rigorously test a strong ord=1 PGD "
"before enabling this feature.")
def cond(i, _):
return tf.less(i, self.nb_iter)
def body(i, adv_x):
adv_x = self.fgm_generate(x_adv=adv_x,
x_reference=x_reference,
**fgm_params, step=i)
# Clipping perturbation eta to self.ord norm ball
eta = adv_x - x
eta = clip_eta(eta, self.ord, self.eps)
adv_x = x + eta
# Redo the clipping.
# FGM already did it, but subtracting and re-adding eta can add some
# small numerical error.
if self.clip_min is not None or self.clip_max is not None:
adv_x = utils_tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
return i + 1, adv_x
_, adv_x = tf.while_loop(cond, body, [tf.zeros([]), adv_x], back_prop=True)
# Asserts run only on CPU.
# When multi-GPU eval code tries to force all PGD ops onto GPU, this
# can cause an error.
#asserts.append(utils_tf.assert_less_equal(tf.cast(self.eps_iter,
# dtype=self.eps.dtype),
# self.eps))
if self.ord == np.inf and self.clip_min is not None:
# The 1e-6 is needed to compensate for numerical error.
# Without the 1e-6 this fails when e.g. eps=.2, clip_min=.5,
# clip_max=.7
asserts.append(utils_tf.assert_less_equal(tf.cast(self.eps, x.dtype),
1e-6 + tf.cast(self.clip_max,
x.dtype)
- tf.cast(self.clip_min,
x.dtype)))
if self.sanity_checks:
with tf.control_dependencies(asserts):
adv_x = tf.identity(adv_x)
return adv_x
def fgm_generate(self,
x_adv,
x_reference,
step,
eps=0.3,
ord=np.inf,
clip_min=None,
clip_max=None,
targeted=False,
sanity_checks=True,
eot_ensemble_size=None,
eot_multinoise=False):
asserts = []
# If a data range was specified, check that the input was in that range
if clip_min is not None:
asserts.append(utils_tf.assert_greater_equal(
x_adv, tf.cast(clip_min, x_adv.dtype)))
if clip_max is not None:
asserts.append(utils_tf.assert_less_equal(x_adv, tf.cast(clip_max, x_adv.dtype)))
if targeted:
raise ValueError("targeted mode not supported")
    # While this check looks good in theory, in practice it doesn't make sense,
    # since the softmax op might not add an additional AddV2 operation at the end.
    # Make sure the caller has not passed probs by accident:
    #assert logits.op.type != 'Softmax'
    #assert target_logits.op.type != 'Softmax'
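    # The attack loss trades off a CW-style margin loss on the (binarized) classifier
    # against matching the detector features of the clean reference batch:
    # loss = lambda * classifier_loss + (1 - lambda) * feature_matching_loss.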
target_detector_features = tf.stop_gradient(
self.get_features_for_detector(x_reference))
labels = tf.one_hot(self.y, 2)
if eot_ensemble_size is None:
# no EOT
detector_features = self.get_features_for_detector(x_adv)
classifier_logits = self.model.get_logits(x_adv)
real = tf.reduce_sum(labels * classifier_logits, -1)
other = tf.reduce_max((1-labels) * classifier_logits - (labels*10000), -1)
classifier_loss = -tf.clip_by_value(real - other, -1e-2, 1e9)
detector_features_matching_loss = -tf.reduce_mean(
tf.reduce_sum((detector_features - target_detector_features)**2,-1),
0)
loss = self.loss_lambda * classifier_loss + (1.0 - self.loss_lambda) * detector_features_matching_loss
# Define gradient of loss wrt input
grad, = tf.gradients(loss, x_adv)
grad = tf.stop_gradient(grad)
else:
grads = []
for i in range(eot_ensemble_size):
if i == 0:
          # don't add noise to the first forward pass
x_adv_noisy = x_adv
else:
if eot_multinoise:
if i % 2 == 0:
noise = tf.random.normal(tf.shape(x_adv), 0.0, 1.0)
elif i % 2 == 1:
noise = tf.random.uniform(tf.shape(x_adv), -1.0, 1.0)
else:
              # defined in https://github.com/wielandbrendel/adaptive_attacks_paper/blob/master/02_odds/Attack.ipynb
              # but doesn't make sense to me since this branch never gets called
              noise = tf.sign(tf.random.uniform(tf.shape(x_adv), -1.0, 1.0))
noise *= 0.01 * 255.0
else:
noise = tf.random.normal(tf.shape(x_adv), 0.0, 1.0)
noise *= 2.0
x_adv_noisy = tf.clip_by_value(x_adv + noise, 0, 255.0)
detector_features = self.get_features_for_detector(x_adv_noisy)
classifier_logits = self.model.get_logits(x_adv_noisy)
real = tf.reduce_sum(labels * classifier_logits, -1)
other = tf.reduce_max((1-labels) * classifier_logits - (labels*10000), -1)
classifier_loss = -tf.clip_by_value(real - other, -1e-2, 1e9)
detector_features_matching_loss = -tf.reduce_mean(
tf.reduce_sum((detector_features - target_detector_features)**2,-1),
0)
loss = self.loss_lambda * classifier_loss + (1.0 - self.loss_lambda) * detector_features_matching_loss
# Define gradient of loss wrt input
grad, = tf.gradients(loss, x_adv_noisy)
grad = tf.stop_gradient(grad)
grads.append(grad)
grad = tf.reduce_mean(grads, axis=0)
optimal_perturbation = cleverhans.attacks.optimize_linear(grad, eps, ord)
# Add perturbation to original example to obtain adversarial example
adv_x = x_adv + optimal_perturbation
# If clipping is needed, reset all values outside of [clip_min, clip_max]
if (clip_min is not None) or (clip_max is not None):
# We don't currently support one-sided clipping
assert clip_min is not None and clip_max is not None
adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)
if sanity_checks:
with tf.control_dependencies(asserts):
adv_x = tf.identity(adv_x)
if self.verbose:
adv_x = tf.Print(adv_x, [step, loss, classifier_loss, detector_features_matching_loss])
return adv_x
def parse_params(self,
eps=0.3,
eps_iter=0.05,
nb_iter=10,
y=None,
ord=np.inf,
clip_min=None,
clip_max=None,
y_target=None,
rand_init=None,
rand_minmax=0.3,
sanity_checks=True,
loss_lambda=0.5,
**kwargs):
"""
    Takes in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
Attack-specific parameters:
:param eps: (optional float) maximum distortion of adversarial example
compared to original input
:param eps_iter: (optional float) step size for each attack iteration
:param nb_iter: (optional int) Number of attack iterations.
:param y: (optional) A tensor with the true labels.
:param y_target: (optional) A tensor with the labels to target. Leave
y_target=None if y is also set. Labels should be
one-hot-encoded.
:param ord: (optional) Order of the norm (mimics Numpy).
Possible values: np.inf, 1 or 2.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
:param sanity_checks: bool Insert tf asserts checking values
(Some tests need to run with no sanity checks because the
tests intentionally configure the attack strangely)
"""
# Save attack-specific parameters
self.eps = eps
if rand_init is None:
rand_init = self.default_rand_init
self.rand_init = rand_init
if self.rand_init:
self.rand_minmax = eps
else:
self.rand_minmax = 0.
self.eps_iter = eps_iter
self.nb_iter = nb_iter
self.y = y
self.y_target = y_target
self.ord = ord
self.clip_min = clip_min
self.clip_max = clip_max
self.loss_lambda = loss_lambda
if isinstance(eps, float) and isinstance(eps_iter, float):
# If these are both known at compile time, we can check before anything
# is run. If they are tf, we can't check them yet.
assert eps_iter <= eps, (eps_iter, eps)
if self.y is not None and self.y_target is not None:
raise ValueError("Must not set both y and y_target")
# Check if order of the norm is acceptable given current implementation
if self.ord not in [np.inf, 1, 2]:
raise ValueError("Norm order must be either np.inf, 1, or 2.")
self.sanity_checks = sanity_checks
return True
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
import os
import numpy as np
import tqdm
import math
import scipy.stats
from absl import logging
from collections import OrderedDict
from sklearn.linear_model import LogisticRegression
import itertools as itt
from types import SimpleNamespace
logging.set_verbosity(logging.INFO)
def collect_statistics(x_train, y_train, x_ph=None, sess=None,
latent_and_logits_fn_th=None, latent_x_tensor=None, logits_tensor=None,
nb_classes=None, weights=None, cuda=True, targeted=False, noise_eps=8e-3,
noise_eps_detect=None, num_noise_samples=256, batch_size=256,
pgd_eps=8/255, pgd_lr=1/4, pgd_iters=10, clip_min=-1., clip_max=1.,
p_ratio_cutoff=20., save_alignments_dir=None, load_alignments_dir=None,
debug_dict=None, debug=False, clip_alignments=True, pgd_train=None,
fit_classifier=False, just_detect=False):
assert len(x_train) == len(y_train)
if pgd_train is not None:
assert len(pgd_train) == len(x_train)
if x_ph is not None:
import tensorflow as tf
backend = 'tf'
assert sess is not None
assert latent_and_logits_fn_th is None
assert latent_x_tensor is not None
assert logits_tensor is not None
assert nb_classes is not None
assert weights is not None
else:
import torch as th
backend = 'th'
assert x_ph is None
assert sess is None
assert latent_and_logits_fn_th is not None
assert latent_x_tensor is None
assert logits_tensor is None
assert nb_classes is not None
assert weights is not None
cuda = th.cuda.is_available() and cuda
def latent_fn_th(x):
return to_np(latent_and_logits_fn_th(to_th(x))[0])
def logits_fn_th(x):
return latent_and_logits_fn_th(x)[1]
def to_th(x, dtype=np.float32):
x = th.from_numpy(x.astype(dtype))
if cuda:
x = x.cuda()
return x
def to_np(x):
return x.detach().cpu().numpy()
if debug:
logging.set_verbosity(logging.DEBUG)
try:
len(noise_eps)
if isinstance(noise_eps, str):
raise TypeError()
except TypeError:
noise_eps = [noise_eps]
if noise_eps_detect is None:
noise_eps_detect = noise_eps
try:
len(noise_eps_detect)
if isinstance(noise_eps_detect, str):
raise TypeError()
except TypeError:
noise_eps_detect = [noise_eps_detect]
noise_eps_all = set(noise_eps + noise_eps_detect)
pgd_lr = pgd_eps * pgd_lr
n_batches = math.ceil(x_train.shape[0] / batch_size)
if len(y_train.shape) == 2:
y_train = y_train.argmax(-1)
if backend == 'tf':
y_ph = tf.placeholder(tf.int64, [None])
loss_tensor = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_tensor, labels=y_ph))
pgd_gradients = tf.gradients(loss_tensor, x_ph)[0]
    preds_tensor = tf.argmax(logits_tensor, -1)
else:
    loss_fn = th.nn.CrossEntropyLoss(reduction='sum')
if cuda:
loss_fn = loss_fn.cuda()
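  # Noise specs are strings such as 'n0.01', 'u0.01' or 's0.01': the first character
  # selects the distribution (normal / uniform / sign of uniform) and the remainder
  # is the scale; a bare float means uniform noise.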
def get_noise_samples(x, num_samples, noise_eps, clip=False):
if isinstance(noise_eps, float):
kind = 'u'
eps = noise_eps
else:
kind, eps = noise_eps[:1], float(noise_eps[1:])
if isinstance(x, np.ndarray):
if kind == 'u':
noise = np.random.uniform(-1., 1., size=(num_samples,) + x.shape[1:])
elif kind == 'n':
noise = np.random.normal(0., 1., size=(num_samples,) + x.shape[1:])
elif kind == 's':
noise = np.random.uniform(-1., 1., size=(num_samples,) + x.shape[1:])
noise = np.sign(noise)
x_noisy = x + noise * eps
if clip:
x_noisy = x_noisy.clip(clip_min, clip_max)
elif backend == 'tf':
shape = (num_samples,) + tuple(s.value for s in x.shape[1:])
if kind == 'u':
noise = tf.random_uniform(shape=shape, minval=-1., maxval=1.)
elif kind == 'n':
noise = tf.random_normal(shape=shape, mean=0., stddev=1.)
elif kind == 's':
noise = tf.random_uniform(shape=shape, minval=-1., maxval=1.)
noise = tf.sign(noise)
x_noisy = x + noise * eps
if clip:
x_noisy = tf.clip_by_value(x_noisy, clip_min, clip_max)
elif backend == 'th':
if kind == 'u':
noise = x.new_zeros((num_samples,) + x.shape[1:]).uniform_(-1., 1.)
elif kind == 'n':
noise = x.new_zeros((num_samples,) + x.shape[1:]).normal_(0., 1.)
elif kind == 's':
noise = x.new_zeros((num_samples,) + x.shape[1:]).uniform_(-1., 1.)
noise.sign_()
x_noisy = x + noise * eps
if clip:
x_noisy.clamp_(clip_min, clip_max)
return x_noisy
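  # Linf PGD used while collecting statistics: start from a noisy point, then take
  # sign-gradient steps (ascending for untargeted, descending for targeted attacks).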
def attack_pgd(x, x_pred, targeted=False):
x_pgd = get_noise_samples(x, x.shape[0], pgd_eps, clip=True)
for _ in range(pgd_iters):
if backend == 'tf':
x_grads = sess.run(pgd_gradients, {x_ph: x_pgd, y_ph: x_pred})
else:
x_th = to_th(x_pgd).requires_grad_(True)
        x_grads = to_np(th.autograd.grad(loss_fn(logits_fn_th(x_th), to_th(x_pred, np.int64)), [x_th])[0])
x_pgd += pgd_lr * np.sign(x_grads) * (-2. * (targeted - 1/2))
x_pgd = x_pgd.clip(x - pgd_eps, x + pgd_eps)
x_pgd = x_pgd.clip(clip_min, clip_max)
if debug:
break
return x_pgd
def get_latent_and_pred(x):
if backend == 'tf':
return sess.run([latent_x_tensor, preds_tensor], {x_ph: x})
else:
l, p = map(to_np, latent_and_logits_fn_th(to_th(x)))
return l, p.argmax(-1)
x_preds_clean = []
x_train_pgd = []
x_preds_pgd = []
latent_clean = []
latent_pgd = []
if not load_alignments_dir:
for b in tqdm.trange(n_batches, desc='creating adversarial samples'):
x_batch = x_train[b*batch_size:(b+1)*batch_size]
lc, pc = get_latent_and_pred(x_batch)
x_preds_clean.append(pc)
latent_clean.append(lc)
if not just_detect:
if pgd_train is not None:
x_pgd = pgd_train[b*batch_size:(b+1)*batch_size]
else:
if targeted:
x_pgd = np.stack([attack_pgd(x_batch, np.ones_like(pc) * i, targeted=True) for i in range(nb_classes)], 1)
else:
x_pgd = attack_pgd(x_batch, pc, targeted=False)
x_train_pgd.append(x_pgd)
if targeted:
pps, lps = [], []
for i in range(x_pgd.shape[1]):
lp, pp = get_latent_and_pred(x_pgd[:, i])
pps.append(pp)
lps.append(lp)
x_preds_pgd.append(np.stack(pps, 1))
latent_pgd.append(np.stack(lps, 1))
else:
lp, pp = get_latent_and_pred(x_pgd)
x_preds_pgd.append(pp)
latent_pgd.append(lp)
x_preds_clean, latent_clean = map(np.concatenate, (x_preds_clean, latent_clean))
if not just_detect:
x_train_pgd, x_preds_pgd, latent_pgd = map(np.concatenate, (x_train_pgd, x_preds_pgd, latent_pgd))
valid_idcs = []
if not just_detect:
for i, (pc, pp, y) in enumerate(zip(x_preds_clean, x_preds_pgd, y_train)):
if y == pc and pc != pp:
# if y == pc:
valid_idcs.append(i)
else:
valid_idcs = list(range(len(x_preds_clean)))
logging.info('valid idcs ratio: {}'.format(len(valid_idcs) / len(y_train)))
if targeted:
for i, xpp in enumerate(x_preds_pgd.T):
logging.info('pgd success class {}: {}'.format(i, (xpp == i).mean()))
x_train, y_train, x_preds_clean, latent_clean = (a[valid_idcs] for a in (x_train, y_train, x_preds_clean, latent_clean))
if not just_detect:
x_train_pgd, x_preds_pgd, latent_pgd = (a[valid_idcs] for a in (x_train_pgd, x_preds_pgd, latent_pgd))
if backend == 'tf':
weights = tf.transpose(weights, (1, 0))
weights_np = sess.run(weights)
else:
weights_np = weights.cpu().numpy()
big_memory = weights.shape[0] > 20
logging.info('BIG MEMORY: {}'.format(big_memory))
if not big_memory:
wdiffs = weights[None, :, :] - weights[:, None, :]
wdiffs_np = weights_np[None, :, :] - weights_np[:, None, :]
if backend == 'tf':
# lat_ph = tf.placeholder(tf.float32, [weights.shape[-1]])
# pred_ph = tf.placeholder(tf.int64)
# if big_memory:
# wdiffs_relevant = weights[pred_ph, None] - weights
# else:
# wdiffs_relevant = wdiffs[:, pred_ph]
# lat_diff_tensor = lat_ph[None] - latent_x_tensor
# alignments_tensor = tf.matmul(lat_diff_tensor, wdiffs_relevant, transpose_b=True)
# def _compute_neps_alignments(x, lat, pred, idx_wo_pc, neps):
# x_noisy = get_noise_samples(x[None], num_noise_samples, noise_eps=neps, clip=clip_alignments)
# return sess.run(alignments_tensor, {x_ph: x_noisy, lat_ph: lat, pred_ph: pred})[:, idx_wo_pc]
lat_ph = tf.placeholder(tf.float32, [weights.shape[-1]])
wdiffs_relevant_ph = tf.placeholder(tf.float32, [weights.shape[-1], nb_classes])
lat_diff_tensor = lat_ph[None] - latent_x_tensor
alignments_tensor = tf.matmul(lat_diff_tensor, wdiffs_relevant_ph)
def _compute_neps_alignments(x, lat, pred, idx_wo_pc, neps):
if big_memory:
wdiffs_relevant = weights_np[pred, None] - weights_np
else:
wdiffs_relevant = wdiffs_np[:, pred]
x_noisy = get_noise_samples(x[None], num_noise_samples, noise_eps=neps, clip=clip_alignments)
# return sess.run(alignments_tensor, {x_ph: x_noisy, lat_ph: lat, wdiffs_relevant_ph:wdiffs_relevant.T})[:, idx_wo_pc]
lat_x = sess.run(latent_x_tensor, {x_ph: x_noisy})
lat_diffs = lat[None] - lat_x
return np.matmul(lat_diffs, wdiffs_relevant.T)[:, idx_wo_pc]
else:
def _compute_neps_alignments(x, lat, pred, idx_wo_pc, neps):
x, lat = map(to_th, (x, lat))
if big_memory:
wdiffs_relevant = weights[pred, None] - weights
else:
wdiffs_relevant = wdiffs[:, pred]
x_noisy = get_noise_samples(x[None], num_noise_samples, noise_eps=neps, clip=clip_alignments)
lat_noisy, _ = latent_and_logits_fn_th(x_noisy)
lat_diffs = lat[None] - lat_noisy
return to_np(th.matmul(lat_diffs, wdiffs_relevant.transpose(1, 0)))[:, idx_wo_pc]
if debug_dict is not None:
debug_dict['weights'] = weights_np
debug_dict['wdiffs'] = wdiffs_np
def _compute_alignments(x, lat, pred, source=None, noise_eps=noise_eps_all):
if source is None:
idx_wo_pc = [i for i in range(nb_classes) if i != pred]
assert len(idx_wo_pc) == nb_classes - 1
else:
idx_wo_pc = source
alignments = OrderedDict()
for neps in noise_eps:
alignments[neps] = _compute_neps_alignments(x, lat, pred, idx_wo_pc, neps)
# if debug_dict is not None:
# debug_dict.setdefault('lat', []).append(lat)
# debug_dict.setdefault('lat_noisy', []).append(lat_noisy)
# debug_dict['weights'] = weights
# debug_dict['wdiffs'] = wdiffs
return alignments, idx_wo_pc
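    # An alignment is the inner product <lat - lat_noisy, w_pred - w_j>: how
    # the latent shift induced by input noise projects onto the weight
    # difference between the predicted class and each candidate class j.
    # Clean and perturbed inputs produce distinguishable alignment statistics.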
def _collect_wdiff_stats(x_set, latent_set, x_preds_set, clean, save_alignments_dir=None, load_alignments_dir=None):
if clean:
wdiff_stats = {(tc, tc, e): [] for tc in range(nb_classes) for e in noise_eps_all}
name = 'clean'
else:
wdiff_stats = {(sc, tc, e): [] for sc in range(nb_classes) for tc in range(nb_classes) for e in noise_eps_all if sc != tc}
name = 'adv'
def _compute_stats_from_values(v, raw=False):
if not v.shape:
return None
v = v.mean(1)
if debug:
v = np.concatenate([v, v*.5, v*1.5])
if clean or not fit_classifier:
if v.shape[0] < 3:
return None
return v.mean(0), v.std(0)
else:
return v
for neps in noise_eps_all:
neps_keys = {k for k in wdiff_stats.keys() if k[-1] == neps}
loading = load_alignments_dir
if loading:
for k in neps_keys:
fn = 'alignments_{}_{}.npy'.format(name, str(k))
load_fn = os.path.join(load_alignments_dir, fn)
if not os.path.exists(load_fn):
loading = False
break
v = np.load(load_fn)
wdiff_stats[k] = _compute_stats_from_values(v)
logging.info('loading alignments from {} for {}'.format(load_alignments_dir, neps))
if not loading:
for x, lc, pc, pcc in tqdm.tqdm(zip(x_set, latent_set, x_preds_set, x_preds_clean), total=len(x_set), desc='collecting stats for {}'.format(neps)):
if len(lc.shape) == 2:
alignments = []
for i, (xi, lci, pci) in enumerate(zip(x, lc, pc)):
if i == pcc:
continue
alignments_i, _ = _compute_alignments(xi, lci, i, source=pcc, noise_eps=[neps])
for e, a in alignments_i.items():
wdiff_stats[(pcc, i, e)].append(a)
else:
alignments, idx_wo_pc = _compute_alignments(x, lc, pc, noise_eps=[neps])
for e, a in alignments.items():
wdiff_stats[(pcc, pc, e)].append(a)
saving = save_alignments_dir and not loading
if saving:
logging.info('saving alignments to {} for {}'.format(save_alignments_dir, neps))
if debug:
some_v = None
for k in neps_keys:
some_v = some_v or wdiff_stats[k]
for k in neps_keys:
wdiff_stats[k] = wdiff_stats[k] or some_v
for k in neps_keys:
wdsk = wdiff_stats[k]
if len(wdsk):
wdiff_stats[k] = np.stack(wdsk)
else:
wdiff_stats[k] = np.array(None)
if saving:
fn = 'alignments_{}_{}.npy'.format(name, str(k))
save_fn = os.path.join(save_alignments_dir, fn)
os.makedirs(os.path.dirname(save_fn), exist_ok=True)
wds = wdiff_stats[k]
np.save(save_fn, wds)
wdiff_stats[k] = _compute_stats_from_values(wdiff_stats[k])
return wdiff_stats
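    # Each wdiff_stats entry, keyed by (source_class, target_class, noise_eps),
    # ends up holding either a (mean, std) pair of alignment statistics (clean
    # case, or when no classifier is fitted) or the raw per-sample alignments.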
save_alignments_dir_clean = os.path.join(save_alignments_dir, 'clean') if save_alignments_dir else None
save_alignments_dir_pgd = os.path.join(save_alignments_dir, 'pgd') if save_alignments_dir else None
load_alignments_dir_clean = os.path.join(load_alignments_dir, 'clean') if load_alignments_dir else None
load_alignments_dir_pgd = os.path.join(load_alignments_dir, 'pgd') if load_alignments_dir else None
if load_alignments_dir:
load_alignments_dir_clean, load_alignments_dir_pgd = map(lambda s: '{}_{}'.format(s, 'clip' if clip_alignments else 'noclip'), (load_alignments_dir_clean, load_alignments_dir_pgd))
if save_alignments_dir:
save_alignments_dir_clean, save_alignments_dir_pgd = map(lambda s: '{}_{}'.format(s, 'clip' if clip_alignments else 'noclip'), (save_alignments_dir_clean, save_alignments_dir_pgd))
wdiff_stats_clean = _collect_wdiff_stats(x_train, latent_clean, x_preds_clean, clean=True, save_alignments_dir=save_alignments_dir_clean, load_alignments_dir=load_alignments_dir_clean)
if not just_detect:
wdiff_stats_pgd = _collect_wdiff_stats(x_train_pgd, latent_pgd, x_preds_pgd, clean=False, save_alignments_dir=save_alignments_dir_pgd, load_alignments_dir=load_alignments_dir_pgd)
if debug_dict is not None and False:
esizes = OrderedDict((k, []) for k in noise_eps_all)
for k, (mc, sc) in wdiff_stats_clean.items():
mp, sp = wdiff_stats_pgd[k]
esizes[k[-1]].append(np.abs(mp - mc) / ((sp + sc) / 2.))
debug_dict['effect_sizes'] = OrderedDict((k, np.array(v)) for k, v in esizes.items())
wdiff_stats_clean_detect = [np.stack([wdiff_stats_clean[(p, p, eps)] for eps in noise_eps_detect]) for p in range(nb_classes)]
wdiff_stats_clean_detect = [s.transpose((1, 0, 2)) if len(s.shape) == 3 else None for s in wdiff_stats_clean_detect]
wdiff_stats_pgd_classify = []
if not just_detect:
for tc in range(nb_classes):
tc_stats = []
for sc in range(nb_classes):
if sc == tc:
continue
sc_stats = [wdiff_stats_pgd[(sc, tc, eps)] for eps in noise_eps]
if sc_stats[0] is None:
tc_stats.append(None)
else:
tc_stats.append(np.stack(sc_stats, 1))
wdiff_stats_pgd_classify.append(tc_stats)
if fit_classifier:
logging.info('fitting classifier')
for tc in tqdm.trange(nb_classes):
tc_X = []
tc_Y = []
idx_wo_tc = [sc for sc in range(nb_classes) if sc != tc]
for i, sc in enumerate(idx_wo_tc):
sc_data = wdiff_stats_pgd_classify[tc][i]
if sc_data is not None:
sc_data = sc_data.reshape(sc_data.shape[0], -1)
for d in sc_data:
tc_X.append(d.ravel())
tc_Y.append(sc)
Y_unq = np.unique(tc_Y)
            if len(Y_unq) == 0:
                # Bind loop values via default arguments: plain closures here
                # would all see the values from the final loop iteration.
                lr = SimpleNamespace(predict=lambda x, _tc=tc: np.array(_tc))
            elif len(Y_unq) == 1:
                lr = SimpleNamespace(predict=lambda x, _y=tc_Y[0]: np.array(_y))
else:
tc_X = np.stack(tc_X)
tc_Y = np.array(tc_Y)
lr = LogisticRegression(solver='lbfgs', multi_class='multinomial', max_iter=1000)
lr.fit(tc_X, tc_Y)
wdiff_stats_pgd_classify[tc] = lr
batch = yield
while batch is not None:
batch_latent, batch_pred = get_latent_and_pred(batch)
if debug_dict is not None:
debug_dict.setdefault('batch_pred', []).append(batch_pred)
corrected_pred = []
detection = []
for b, lb, pb in zip(batch, batch_latent, batch_pred):
b_align, idx_wo_pb = _compute_alignments(b, lb, pb)
b_align_det = np.stack([b_align[eps] for eps in noise_eps_detect])
b_align = np.stack([b_align[eps] for eps in noise_eps])
wdsc_det_pb = wdiff_stats_clean_detect[pb]
if wdsc_det_pb is None:
z_hit = False
else:
wdm_det, wds_det = wdsc_det_pb
z_clean = (b_align_det - wdm_det[:, None]) / wds_det[:, None]
z_clean_mean = z_clean.mean(1)
z_cutoff = scipy.stats.norm.ppf(p_ratio_cutoff)
z_hit = z_clean_mean.mean(0).max(-1) > z_cutoff
if not just_detect:
if fit_classifier:
lr = wdiff_stats_pgd_classify[pb]
b_align = b_align.mean(1).reshape((1, -1))
lr_pred = lr.predict(b_align)
else:
wdp = wdiff_stats_pgd_classify[pb]
if wdp is None:
z_pgd_mode = None
else:
wdp_not_none_idcs = [i for i, w in enumerate(wdp) if w is not None]
if len(wdp_not_none_idcs) == 0:
z_pgd_mode = None
else:
wdp = np.stack([wdp[i] for i in wdp_not_none_idcs], 2)
idx_wo_pb_wdp = [idx_wo_pb[i] for i in wdp_not_none_idcs]
ssidx = np.arange(wdp.shape[-2])
wdp = wdp[:, :, ssidx, ssidx]
wdmp, wdsp = wdp
b_align = b_align[:, :, wdp_not_none_idcs]
z_pgd = (b_align - wdmp[:, None]) / wdsp[:, None]
z_pgd_mean = z_pgd.mean(1)
z_pgd_mode = scipy.stats.mode(z_pgd_mean.argmax(-1)).mode[0]
if z_hit:
if not just_detect:
if fit_classifier:
print(lr_pred)
pb = lr_pred.item()
else:
if z_pgd_mode is not None:
pb = idx_wo_pb_wdp[z_pgd_mode]
detection.append(True)
else:
detection.append(False)
if debug_dict is not None:
debug_dict.setdefault('b_align', []).append(b_align)
# debug_dict.setdefault('stats', []).append((wdm_det, wds_det, wdmp, wdsp))
# debug_dict.setdefault('p_ratio', []).append(p_ratio)
# debug_dict.setdefault('p_clean', []).append(p_clean)
# debug_dict.setdefault('p_pgd', []).append(p_pgd)
debug_dict.setdefault('z_clean', []).append(z_clean)
# debug_dict.setdefault('z_conf', []).append(z_conf)
# debug_dict.setdefault('z_pgdm', []).append(z_pgdm)
# debug_dict.setdefault('z_pgd', []).append(z_pgd)
corrected_pred.append(pb)
if debug_dict is not None:
debug_dict.setdefault('detection', []).append(detection)
debug_dict.setdefault('corrected_pred', []).append(corrected_pred)
batch = yield np.stack((corrected_pred, detection), -1)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Some utility functions
'''
import os
import sys
import time
import datetime
import math
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import numpy as np
import torch
def one_hot_tensor(y_batch_tensor, num_classes, device):
    # Build the one-hot matrix on the requested device.
    y_tensor = torch.zeros(y_batch_tensor.size(0), num_classes, device=device)
    y_tensor[torch.arange(len(y_batch_tensor)), y_batch_tensor] = 1.0
    return y_tensor
def label_smoothing(y_batch_tensor, num_classes, delta):
y_batch_smooth = (1 - delta - delta / (num_classes - 1)) * \
y_batch_tensor + delta / (num_classes - 1)
return y_batch_smooth
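# Worked example (illustrative): with num_classes = 10 and delta = 0.1, a
# one-hot "1" entry becomes (1 - 0.1 - 0.1/9) * 1 + 0.1/9 = 0.9 and each "0"
# entry becomes 0.1/9 ~ 0.0111, so every row still sums to 1.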
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
class softCrossEntropy(nn.Module):
def __init__(self, reduce=True):
super(softCrossEntropy, self).__init__()
self.reduce = reduce
return
def forward(self, inputs, targets):
"""
:param inputs: predictions
:param targets: target labels in vector form
:return: loss
"""
log_likelihood = -F.log_softmax(inputs, dim=1)
sample_num, class_num = targets.shape
if self.reduce:
loss = torch.sum(torch.mul(log_likelihood, targets)) / sample_num
else:
loss = torch.sum(torch.mul(log_likelihood, targets), 1)
return loss
class CWLoss(nn.Module):
def __init__(self, num_classes, margin=50, reduce=True):
super(CWLoss, self).__init__()
self.num_classes = num_classes
self.margin = margin
self.reduce = reduce
return
def forward(self, logits, targets):
"""
        :param logits: predictions
:param targets: target labels
:return: loss
"""
onehot_targets = one_hot_tensor(targets, self.num_classes,
targets.device)
self_loss = torch.sum(onehot_targets * logits, dim=1)
other_loss = torch.max(
(1 - onehot_targets) * logits - onehot_targets * 1000, dim=1)[0]
loss = -torch.sum(torch.clamp(self_loss - other_loss + self.margin, 0))
if self.reduce:
sample_num = onehot_targets.shape[0]
loss = loss / sample_num
return loss
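# Hedged, CPU-runnable sanity check for the helpers above; the logits, labels
# and smoothing delta are illustrative values, not settings used elsewhere.
if __name__ == '__main__':
    logits = torch.tensor([[5.0, 1.0, 0.0]])
    targets = torch.tensor([0])
    y_onehot = one_hot_tensor(targets, 3, logits.device)  # [[1., 0., 0.]]
    y_smooth = label_smoothing(y_onehot, 3, 0.1)          # [[0.9, 0.05, 0.05]]
    print(softCrossEntropy()(logits, y_smooth))           # scalar soft cross-entropy
    print(CWLoss(num_classes=3)(logits, targets))         # -(5 - 1 + 50) = -54.0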
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from attacks import autopgd
from attacks import pgd
from models import *
from torch.autograd import Variable
import utils
from ftsc_utils import softCrossEntropy
from ftsc_utils import one_hot_tensor
import ot
import pickle
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class Attack_None(nn.Module):
def __init__(self, basic_net, config):
super(Attack_None, self).__init__()
self.train_flag = True if 'train' not in config.keys(
) else config['train']
self.basic_net = basic_net
def forward(self, inputs, targets, attack=None, batch_idx=-1):
if self.train_flag:
self.basic_net.train()
else:
self.basic_net.eval()
result = self.basic_net(inputs)
if isinstance(result, tuple):
outputs, _ = self.basic_net(inputs)
else:
outputs = result
return outputs, None, None
class Attack_PGD(nn.Module):
    # Back-propagate
def __init__(self, basic_net, config, attack_net=None):
super(Attack_PGD, self).__init__()
self.basic_net = basic_net
self.attack_net = attack_net
self.rand = config['random_start']
self.step_size = config['step_size']
self.epsilon = config['epsilon']
self.num_steps = config['num_steps']
self.loss_func = torch.nn.CrossEntropyLoss(
reduction='none') if 'loss_func' not in config.keys(
) else config['loss_func']
self.train_flag = True if 'train' not in config.keys(
) else config['train']
self.box_type = 'white' if 'box_type' not in config.keys(
) else config['box_type']
def forward(self,
inputs,
targets,
attack=True,
targeted_label=-1,
batch_idx=0):
if not attack:
outputs = self.basic_net(inputs)[0]
return outputs, None
if self.box_type == 'white':
# aux_net = pickle.loads(pickle.dumps(self.basic_net))
aux_net = self.basic_net
elif self.box_type == 'black':
assert self.attack_net is not None, "should provide an additional net in black-box case"
aux_net = pickle.loads(pickle.dumps(self.basic_net))
aux_net.eval()
output = aux_net(inputs)
if isinstance(output, tuple):
logits_pred_nat = output[0]
else:
logits_pred_nat = output
targets_prob = F.softmax(logits_pred_nat.float(), dim=1)
num_classes = targets_prob.size(1)
y_tensor_adv = targets
step_sign = 1.0
x = inputs.detach()
if self.rand:
x = x + torch.zeros_like(x).uniform_(-self.epsilon, self.epsilon)
x_org = x.detach()
loss_array = np.zeros((inputs.size(0), self.num_steps))
        for i in range(self.num_steps):
            x.requires_grad_()
            if x.grad is not None:
                x.grad.zero_()
aux_net.eval()
output = aux_net(x)
if isinstance(output, tuple):
logits = output[0]
else:
logits = output
loss = self.loss_func(logits, y_tensor_adv)
loss = loss.mean()
aux_net.zero_grad()
loss.backward()
x_adv = x.data + step_sign * self.step_size * torch.sign(
x.grad.data)
x_adv = torch.min(torch.max(x_adv, inputs - self.epsilon),
inputs + self.epsilon)
x_adv = torch.clamp(x_adv, -1.0, 1.0)
x = Variable(x_adv)
if self.train_flag:
self.basic_net.train()
else:
self.basic_net.eval()
output = aux_net(x.detach())
if isinstance(output, tuple):
logits_pert = output[0]
else:
logits_pert = output
return logits_pert, targets_prob.detach(), x.detach()
class Attack_BetterPGD(nn.Module):
    # Back-propagate
def __init__(self, basic_net, config, attack_net=None):
super(Attack_BetterPGD, self).__init__()
self.basic_net = basic_net
self.attack_net = attack_net
self.rand = config['random_start']
self.step_size = config['step_size']
self.epsilon = config['epsilon']
self.num_steps = config['num_steps']
self.loss_func = torch.nn.CrossEntropyLoss(
reduction='none') if 'loss_func' not in config.keys(
) else config['loss_func']
self.train_flag = True if 'train' not in config.keys(
) else config['train']
self.box_type = 'white' if 'box_type' not in config.keys(
) else config['box_type']
def forward(self,
inputs,
targets,
attack=True,
targeted_label=-1,
batch_idx=0):
def net(x):
output = self.basic_net(x)
if isinstance(output, tuple):
return output[0]
else:
return output
if attack:
sign = 1.0 if targeted_label != -1 else -1.0
x_adv = pgd.general_pgd(
loss_fn=lambda x, y: sign * self.loss_func(net(x), y),
is_adversarial_fn=lambda x, y: net(x).argmax(-1) == y
if targeted_label != -1 else net(x).argmax(-1) != y,
x=inputs, y=targets, n_steps=self.num_steps,
step_size=self.step_size,
epsilon=self.epsilon,
norm="linf",
random_start=self.rand
)[0]
else:
x_adv = inputs
logits_pert = net(x_adv)
targets_prob = torch.softmax(logits_pert, -1)
return logits_pert, targets_prob.detach(), x_adv.detach()
class Attack_AutoPGD(nn.Module):
    # Back-propagate
def __init__(self, basic_net, config, attack_net=None):
super(Attack_AutoPGD, self).__init__()
self.basic_net = basic_net
self.attack_net = attack_net
self.epsilon = config['epsilon']
self.n_restarts = 0 if "num_restarts" not in config else \
config["num_restarts"]
self.num_steps = config['num_steps']
self.loss_func = "ce" if 'loss_func' not in config.keys(
) else config['loss_func']
self.train_flag = True if 'train' not in config.keys(
) else config['train']
self.box_type = 'white' if 'box_type' not in config.keys(
) else config['box_type']
self.targeted = False if 'targeted' not in config.keys(
) else config['targeted']
self.n_classes = 10 if 'n_classes' not in config.keys(
) else config['n_classes']
def forward(self,
inputs,
targets,
attack=True,
targeted_label=-1,
batch_idx=0):
assert targeted_label == -1
def net(x):
output = self.basic_net(x)
if isinstance(output, tuple):
return output[0]
else:
return output
if attack:
temp = autopgd.auto_pgd(
model=net,
x=inputs, y=targets, n_steps=self.num_steps,
loss=self.loss_func,
epsilon=self.epsilon,
norm="linf",
n_restarts=self.n_restarts,
targeted=self.targeted,
n_averaging_steps=1,
n_classes=self.n_classes
)
x_adv = temp[0]
else:
x_adv = inputs
logits_pert = net(x_adv)
targets_prob = torch.softmax(logits_pert, -1)
return logits_pert, targets_prob.detach(), x_adv.detach()
class Attack_FeaScatter(nn.Module):
def __init__(self, basic_net, config, attack_net=None):
super(Attack_FeaScatter, self).__init__()
self.basic_net = basic_net
self.attack_net = attack_net
self.rand = config['random_start']
self.step_size = config['step_size']
self.epsilon = config['epsilon']
self.num_steps = config['num_steps']
self.train_flag = True if 'train' not in config.keys(
) else config['train']
self.box_type = 'white' if 'box_type' not in config.keys(
) else config['box_type']
self.ls_factor = 0.1 if 'ls_factor' not in config.keys(
) else config['ls_factor']
def forward(self,
inputs,
targets,
attack=True,
targeted_label=-1,
batch_idx=0):
if not attack:
outputs, _ = self.basic_net(inputs)
return outputs, None
if self.box_type == 'white':
aux_net = pickle.loads(pickle.dumps(self.basic_net))
elif self.box_type == 'black':
assert self.attack_net is not None, "should provide an additional net in black-box case"
aux_net = pickle.loads(pickle.dumps(self.basic_net))
aux_net.eval()
        batch_size = inputs.size(0)
        m = batch_size
        n = batch_size
        outputs = aux_net(inputs)[0]  # single natural forward pass
        num_classes = outputs.size(1)
        targets_prob = F.softmax(outputs.float(), dim=1)
y_tensor_adv = targets
step_sign = 1.0
x = inputs.detach()
x_org = x.detach()
x = x + torch.zeros_like(x).uniform_(-self.epsilon, self.epsilon)
if self.train_flag:
self.basic_net.train()
else:
self.basic_net.eval()
logits_pred_nat, fea_nat = aux_net(inputs)
num_classes = logits_pred_nat.size(1)
y_gt = one_hot_tensor(targets, num_classes, device)
loss_ce = softCrossEntropy()
iter_num = self.num_steps
for i in range(iter_num):
            x.requires_grad_()
            if x.grad is not None:
                x.grad.zero_()
logits_pred, fea = aux_net(x)
ot_loss = ot.sinkhorn_loss_joint_IPOT(1, 0.00, logits_pred_nat,
logits_pred, None, None,
0.01, m, n)
aux_net.zero_grad()
adv_loss = ot_loss
adv_loss.backward(retain_graph=True)
x_adv = x.data + self.step_size * torch.sign(x.grad.data)
x_adv = torch.min(torch.max(x_adv, inputs - self.epsilon),
inputs + self.epsilon)
x_adv = torch.clamp(x_adv, -1.0, 1.0)
x = Variable(x_adv)
logits_pred, fea = self.basic_net(x)
self.basic_net.zero_grad()
y_sm = utils.label_smoothing(y_gt, y_gt.size(1), self.ls_factor)
adv_loss = loss_ce(logits_pred, y_sm.detach())
return logits_pred, adv_loss
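# Feature Scattering in brief: the inner loop perturbs x to maximize the
# Sinkhorn/OT distance between natural and perturbed logits (no label
# information shapes the perturbation), and the final update trains on the
# perturbed batch with a label-smoothed soft cross-entropy.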
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OT using IPOT and Sinkhorn algorithm
"""
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from ftsc_utils import softCrossEntropy
import numpy as np
def sinkhorn_loss_joint_IPOT(alpha, beta, x_feature, y_feature, x_label,
y_label, epsilon, m, n):
C_fea = get_cost_matrix(x_feature, y_feature)
C = C_fea
T = sinkhorn(C, 0.01, 100)
# T = IPOT(C, 1)
batch_size = C.size(0)
cost_ot = torch.sum(T * C)
return cost_ot
def sinkhorn(C, epsilon, niter=50, device='cuda'):
    m = C.size(0)
    n = C.size(1)
    # Uniform marginals, placed on the requested device.
    mu = (1. / m) * torch.ones(m, device=device)
    nu = (1. / n) * torch.ones(n, device=device)
# Parameters of the Sinkhorn algorithm.
rho = 1 # (.5) **2 # unbalanced transport
tau = -.8 # nesterov-like acceleration
lam = rho / (rho + epsilon) # Update exponent
thresh = 10**(-1) # stopping criterion
# Elementary operations .....................................................................
def ave(u, u1):
"Barycenter subroutine, used by kinetic acceleration through extrapolation."
return tau * u + (1 - tau) * u1
def M(u, v):
"Modified cost for logarithmic updates"
"$M_{ij} = (-c_{ij} + u_i + v_j) / \epsilon$"
return (-C + u.unsqueeze(1) + v.unsqueeze(0)) / epsilon
def lse(A):
"log-sum-exp"
return torch.log(torch.exp(A).sum(1, keepdim=True) +
1e-6) # add 10^-6 to prevent NaN
# Actual Sinkhorn loop ......................................................................
u, v, err = 0. * mu, 0. * nu, 0.
actual_nits = 0 # to check if algorithm terminates because of threshold or max iterations reached
for i in range(niter):
u1 = u # useful to check the update
u = epsilon * (torch.log(mu) - lse(M(u, v)).squeeze()) + u
v = epsilon * (torch.log(nu) - lse(M(u, v).t()).squeeze()) + v
# accelerated unbalanced iterations
# u = ave( u, lam * ( epsilon * ( torch.log(mu) - lse(M(u,v)).squeeze() ) + u ) )
# v = ave( v, lam * ( epsilon * ( torch.log(nu) - lse(M(u,v).t()).squeeze() ) + v ) )
err = (u - u1).abs().sum()
actual_nits += 1
if (err < thresh).cpu().data.numpy():
break
U, V = u, v
pi = torch.exp(M(U, V)) # Transport plan pi = diag(a)*K*diag(b)
    pi = pi.to(device).float()
return pi # return the transport
def IPOT(cost_matrix, beta=1, device='cuda'):
m = cost_matrix.size(0)
n = cost_matrix.size(1)
sigma = 1.0 / n * torch.ones([n, 1]).to(device)
T = torch.ones([m, n]).to(device)
A = torch.exp(-cost_matrix / beta)
for t in range(50):
# BUG: should be elementwise product, * in numpy
#Q = torch.mm(A, T)
        Q = A * T  # Hadamard (elementwise) product
for k in range(1):
delta = 1.0 / (m * torch.mm(Q, sigma))
sigma = 1.0 / (n * torch.mm(delta.t(), Q)).t()
#sigma = 1.0 / (n * torch.mv(Q, delta))
tmp = torch.mm(construct_diag(torch.squeeze(delta)), Q)
T = torch.mm(tmp, construct_diag(torch.squeeze(sigma)))
return T
def construct_diag(d):
n = d.size(0)
x = torch.zeros([n, n]).to(d.device)
x[range(n), range(n)] = d.view(-1)
return x
def get_cost_matrix(x_feature, y_feature):
C_fea = cost_matrix_cos(x_feature, y_feature) # Wasserstein cost function
return C_fea
def cost_matrix_cos(x, y, p=2):
    # Return the m*n cost matrix of clamped cosine distances,
    # c_ij = max(0, 1 - cos(x_i, y_j)). The exponent p is unused here.
# un squeeze differently so that the tensors can broadcast
# dim-2 (summed over) is the feature dim
x_col = x.unsqueeze(1)
y_lin = y.unsqueeze(0)
cos = nn.CosineSimilarity(dim=2, eps=1e-6)
c = torch.clamp(1 - cos(x_col, y_lin), min=0)
    return c
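# Hedged, CPU-runnable sanity check for the transport routines above; the
# sizes, epsilon and iteration counts are illustrative only.
if __name__ == '__main__':
    torch.manual_seed(0)
    x = torch.randn(8, 16)
    y = torch.randn(8, 16)
    C = get_cost_matrix(x, y)                # 8x8 cosine-distance costs in [0, 2]
    T = sinkhorn(C, epsilon=0.01, niter=100, device='cpu')
    print(T.sum())                           # total transported mass, approx. 1
    T_ipot = IPOT(C, beta=1, device='cpu')
    print(T_ipot.sum())                      # IPOT plan mass, approx. 1
 |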
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Train Adversarially Robust Models with Feature Scattering'''
from __future__ import print_function
import time
import numpy as np
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import copy
from torch.autograd import Variable
from PIL import Image
import os
import argparse
import datetime
from tqdm import tqdm
from models import *
import utils
from utils import softCrossEntropy
from utils import one_hot_tensor
from attack_methods import Attack_FeaScatter
torch.set_printoptions(threshold=10000)
np.set_printoptions(threshold=np.inf)
parser = argparse.ArgumentParser(description='Feature Scattering Training')
# add type keyword to registries
parser.register('type', 'bool', utils.str2bool)
parser.add_argument('--resume',
'-r',
action='store_true',
help='resume from checkpoint')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--adv_mode',
default='feature_scatter',
type=str,
help='adv_mode (feature_scatter)')
parser.add_argument('--model_dir', type=str, help='model path')
parser.add_argument('--init_model_pass',
default='-1',
type=str,
help='init model pass (-1: from scratch; K: checkpoint-K)')
parser.add_argument('--max_epoch',
default=200,
type=int,
help='max number of epochs')
parser.add_argument('--save_epochs', default=100, type=int, help='save period')
parser.add_argument('--decay_epoch1',
default=60,
type=int,
help='learning rate decay epoch one')
parser.add_argument('--decay_epoch2',
default=90,
type=int,
                    help='learning rate decay epoch two')
parser.add_argument('--decay_rate',
default=0.1,
type=float,
help='learning rate decay rate')
parser.add_argument('--batch_size_train',
default=128,
type=int,
help='batch size for training')
parser.add_argument('--momentum',
default=0.9,
type=float,
help='momentum (1-tf.momentum)')
parser.add_argument('--weight_decay',
default=2e-4,
type=float,
help='weight decay')
parser.add_argument('--log_step', default=10, type=int, help='log_step')
# number of classes and image size will be updated below based on the dataset
parser.add_argument('--num_classes', default=10, type=int, help='num classes')
parser.add_argument('--image_size', default=32, type=int, help='image size')
parser.add_argument('--dataset', default='cifar10', type=str,
help='dataset') # concat cascade
args = parser.parse_args()
if args.dataset == 'cifar10':
print('------------cifar10---------')
args.num_classes = 10
args.image_size = 32
elif args.dataset == 'cifar100':
print('----------cifar100---------')
args.num_classes = 100
args.image_size = 32
elif args.dataset == 'svhn':
    print('------------svhn---------')
args.num_classes = 10
args.image_size = 32
device = 'cuda' if torch.cuda.is_available() else 'cpu'
start_epoch = 0
# Data
print('==> Preparing data..')
if args.dataset == 'cifar10' or args.dataset == 'cifar100':
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # [-1 1]
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # [-1 1]
])
elif args.dataset == 'svhn':
transform_train = transforms.Compose([
# transforms.RandomCrop(32, padding=4),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # [-1 1]
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # [-1 1]
])
if args.dataset == 'cifar10':
trainset = torchvision.datasets.CIFAR10(root='./data',
train=True,
download=True,
transform=transform_train)
testset = torchvision.datasets.CIFAR10(root='./data',
train=False,
download=True,
transform=transform_test)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
'ship', 'truck')
elif args.dataset == 'cifar100':
trainset = torchvision.datasets.CIFAR100(root='./data',
train=True,
download=True,
transform=transform_train)
testset = torchvision.datasets.CIFAR100(root='./data',
train=False,
download=True,
transform=transform_test)
elif args.dataset == 'svhn':
trainset = torchvision.datasets.SVHN(root='./data',
split='train',
download=True,
transform=transform_train)
testset = torchvision.datasets.SVHN(root='./data',
split='test',
download=True,
transform=transform_test)
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=args.batch_size_train,
shuffle=True,
num_workers=2)
print('==> Building model..')
if args.dataset == 'cifar10' or args.dataset == 'cifar100' or args.dataset == 'svhn':
    print('---wide resnet-----')
basic_net = WideResNet(depth=28,
num_classes=args.num_classes,
widen_factor=10)
def print_para(net):
for name, param in net.named_parameters():
if param.requires_grad:
print(name)
print(param.data)
break
basic_net = basic_net.to(device)
# config for feature scatter
config_feature_scatter = {
'train': True,
'epsilon': 8.0 / 255 * 2,
'num_steps': 1,
'step_size': 8.0 / 255 * 2,
'random_start': True,
'ls_factor': 0.5,
}
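# Note: epsilon and step_size are 8/255 * 2 because the transforms above
# normalize images to [-1, 1], so the standard [0, 1]-scale budget of 8/255
# spans twice as many units in this range.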
if args.adv_mode.lower() == 'feature_scatter':
print('-----Feature Scatter mode -----')
net = Attack_FeaScatter(basic_net, config_feature_scatter)
else:
print('-----OTHER_ALGO mode -----')
raise NotImplementedError("Please implement this algorithm first!")
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
optimizer = optim.SGD(net.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
if args.resume and args.init_model_pass != '-1':
# Load checkpoint.
print('==> Resuming from checkpoint..')
f_path_latest = os.path.join(args.model_dir, 'latest')
f_path = os.path.join(args.model_dir,
('checkpoint-%s' % args.init_model_pass))
if not os.path.isdir(args.model_dir):
print('train from scratch: no checkpoint directory or file found')
elif args.init_model_pass == 'latest' and os.path.isfile(f_path_latest):
checkpoint = torch.load(f_path_latest)
net.load_state_dict(checkpoint['net'])
start_epoch = checkpoint['epoch'] + 1
print('resuming from epoch %s in latest' % start_epoch)
elif os.path.isfile(f_path):
checkpoint = torch.load(f_path)
net.load_state_dict(checkpoint['net'])
start_epoch = checkpoint['epoch'] + 1
print('resuming from epoch %s' % (start_epoch - 1))
elif not os.path.isfile(f_path) or not os.path.isfile(f_path_latest):
print('train from scratch: no checkpoint directory or file found')
soft_xent_loss = softCrossEntropy()
def train_fun(epoch, net):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
# update learning rate
if epoch < args.decay_epoch1:
lr = args.lr
elif epoch < args.decay_epoch2:
lr = args.lr * args.decay_rate
else:
lr = args.lr * args.decay_rate * args.decay_rate
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def get_acc(outputs, targets):
_, predicted = outputs.max(1)
total = targets.size(0)
correct = predicted.eq(targets).sum().item()
acc = 1.0 * correct / total
return acc
iterator = tqdm(trainloader, ncols=0, leave=False)
for batch_idx, (inputs, targets) in enumerate(iterator):
start_time = time.time()
inputs, targets = inputs.to(device), targets.to(device)
adv_acc = 0
optimizer.zero_grad()
# forward
outputs, loss_fs = net(inputs.detach(), targets)
optimizer.zero_grad()
loss = loss_fs.mean()
loss.backward()
optimizer.step()
train_loss = loss.item()
duration = time.time() - start_time
if batch_idx % args.log_step == 0:
if adv_acc == 0:
adv_acc = get_acc(outputs, targets)
iterator.set_description(str(adv_acc))
nat_outputs, _ = net(inputs, targets, attack=False)
nat_acc = get_acc(nat_outputs, targets)
print(
"epoch %d, step %d, lr %.4f, duration %.2f, training nat acc %.2f, training adv acc %.2f, training adv loss %.4f"
% (epoch, batch_idx, lr, duration, 100 * nat_acc,
100 * adv_acc, train_loss))
if epoch % args.save_epochs == 0 or epoch >= args.max_epoch - 2:
print('Saving..')
f_path = os.path.join(args.model_dir, ('checkpoint-%s' % epoch))
state = {
'net': net.state_dict(),
# 'optimizer': optimizer.state_dict()
}
if not os.path.isdir(args.model_dir):
os.mkdir(args.model_dir)
torch.save(state, f_path)
if epoch >= 0:
print('Saving latest @ epoch %s..' % (epoch))
f_path = os.path.join(args.model_dir, 'latest')
state = {
'net': net.state_dict(),
'epoch': epoch,
'optimizer': optimizer.state_dict()
}
if not os.path.isdir(args.model_dir):
os.mkdir(args.model_dir)
torch.save(state, f_path)
for epoch in range(start_epoch, args.max_epoch):
train_fun(epoch, net)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import inspect
import os
import sys
import time
import warnings
from functools import partial
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
from tqdm import tqdm
import active_tests.decision_boundary_binarization
import ftsc_utils as utils
import networks
from attack_methods import Attack_BetterPGD
from attack_methods import Attack_None
from attack_methods import Attack_PGD
from attack_methods import Attack_AutoPGD
from ftsc_utils import CWLoss
from models import *
warnings.simplefilter('once', RuntimeWarning)
currentdir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
grandparentdir = os.path.dirname(os.path.dirname(currentdir))
sys.path.insert(0, grandparentdir)
parser = argparse.ArgumentParser(
description='Feature Scattering Adversarial Training')
parser.register('type', 'bool', utils.str2bool)
parser.add_argument('--resume',
'-r',
action='store_true',
help='resume from checkpoint')
parser.add_argument('--binarization-test', action="store_true")
parser.add_argument('--attack', default=True, type='bool', help='attack')
parser.add_argument('--model_dir', type=str, help='model path')
parser.add_argument('--model-path', type=str, help='model path', default=None)
parser.add_argument('--init_model_pass',
default='-1',
type=str,
help='init model pass')
parser.add_argument('--attack_method',
default='pgd',
type=str,
                    help='adv_mode (natural, pgd or cw)')
parser.add_argument('--attack_method_list', type=str)
parser.add_argument('--log_step', default=7, type=int, help='log_step')
# dataset dependent
parser.add_argument('--num_classes', default=10, type=int, help='num classes')
parser.add_argument('--dataset', default='cifar10', type=str,
help='dataset') # concat cascade
parser.add_argument('--batch_size_test',
default=100,
type=int,
help='batch size for testing')
parser.add_argument('--image_size', default=32, type=int, help='image size')
parser.add_argument('--num_samples_test',
default=-1,
type=int)
parser.add_argument('--n-inner-points',
default=50,
type=int)
parser.add_argument('--n-boundary-points',
default=10,
type=int)
parser.add_argument("--epsilon", type=int, default=8)
parser.add_argument("--more-steps", action="store_true")
parser.add_argument("--sample-from-corners", action="store_true")
args = parser.parse_args()
if args.binarization_test:
assert args.batch_size_test == 1
if args.dataset == 'cifar10':
print('------------cifar10---------')
args.num_classes = 10
args.image_size = 32
elif args.dataset == 'cifar100':
print('----------cifar100---------')
args.num_classes = 100
args.image_size = 32
elif args.dataset == 'svhn':
    print('------------svhn---------')
args.num_classes = 10
args.image_size = 32
elif args.dataset == 'mnist':
print('----------mnist---------')
args.num_classes = 10
args.image_size = 28
device = 'cuda' if torch.cuda.is_available() else 'cpu'
start_epoch = 0
# Data
print('==> Preparing data..')
if args.dataset == 'cifar10' or args.dataset == 'cifar100':
transform_test = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # [-1 1]
])
elif args.dataset == 'svhn':
transform_test = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # [-1 1]
])
if args.dataset == 'cifar10':
testset = torchvision.datasets.CIFAR10(root='./data',
train=False,
download=True,
transform=transform_test)
elif args.dataset == 'cifar100':
testset = torchvision.datasets.CIFAR100(root='./data',
train=False,
download=True,
transform=transform_test)
elif args.dataset == 'svhn':
testset = torchvision.datasets.SVHN(root='./data',
split='test',
download=True,
transform=transform_test)
testloader = torch.utils.data.DataLoader(testset,
batch_size=args.batch_size_test,
shuffle=False,
num_workers=2)
print('==> Building model..')
if args.dataset == 'cifar10' or args.dataset == 'cifar100' or args.dataset == 'svhn':
    print('---wide resnet-----')
basic_net = WideResNet(depth=28,
num_classes=args.num_classes,
widen_factor=10)
basic_net = basic_net.to(device)
class ZeroOneOneOneNetwork(nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, x, **kwargs):
return self.model((x - 0.5) / 0.5, **kwargs)
if args.binarization_test:
args.num_classes = 2
if args.num_samples_test == -1:
    args.num_samples_test = len(testset)
# configs
config_natural = {'train': False}
config_fgsm = {
'train': False,
'targeted': False,
'epsilon': args.epsilon / 255.0,
'num_steps': 1,
'step_size': args.epsilon / 255.0,
'random_start': True
}
config_pgd = {
'train': False,
'targeted': False,
'epsilon': args.epsilon / 255.0,
'num_steps': 20,
'step_size': args.epsilon / 4.0 / 255.0,
'random_start': True,
'loss_func': torch.nn.CrossEntropyLoss(reduction='none')
}
config_cw = {
'train': False,
'targeted': False,
'epsilon': args.epsilon / 255.0,
'num_steps': 20,
'step_size': args.epsilon / 4.0 / 255.0,
'random_start': True,
'loss_func': CWLoss(args.num_classes)
}
config_auto_pgd_ce = {
'train': False,
'targeted': False,
'epsilon': args.epsilon / 255.0,
'num_steps': 20,
'loss_func': "ce"
}
config_auto_pgd_dlr = {
'train': False,
'targeted': False,
'epsilon': args.epsilon / 255.0,
'num_steps': 20,
'loss_func': "logit-diff"
}
config_auto_pgd_dlr_t = {
**config_auto_pgd_dlr,
"targeted": True,
"n_classes": 10,
}
config_auto_pgd_ce_plus = {
**config_auto_pgd_ce,
"n_restarts": 4
}
config_auto_pgd_dlr_plus = {
**config_auto_pgd_dlr,
"n_restarts": 4
}
class __KwargsSequential(torch.nn.Sequential):
"""
Modification of a torch.nn.Sequential model that allows kwargs in the
forward pass. These will be passed to the first module of the network.
"""
def forward(self, input, **kwargs):
for idx, module in enumerate(self):
if idx == 0:
input = module(input, **kwargs)
else:
input = module(input)
return input
def train_classifier(n_features,
train_loader,
raw_train_loader,
logits,
device,
rescale_logits,
classifier):
del raw_train_loader
x_ = train_loader.dataset.tensors[0]
y_ = train_loader.dataset.tensors[1]
x_original = x_[0]
x_boundary = x_[y_ == 1]
assert len(x_boundary) == 1, "Method only works for a single boundary point"
x_boundary = x_boundary[0]
    margin = 0.99999999999
    # Place a linear boundary between x_original (class 0) and x_boundary
    # (class 1): with w = delta / <delta, delta> we get w.x_original + b =
    # -margin and w.x_boundary + b = 1 - margin, i.e. the boundary point lies
    # just barely on the positive side.
    delta = x_boundary - x_original
    delta = delta / (torch.dot(delta, delta))
    w = delta
    b = -torch.dot(x_original, delta) - margin
binary_classifier = torch.nn.Linear(n_features, 2)
binary_classifier.weight.data = torch.stack((-w, w), 0)
binary_classifier.bias.data = torch.stack((-b, b), 0)
binary_classifier = binary_classifier.to(device)
#import pdb; pdb.set_trace()
#for x, y in train_loader:
# x, y = x.to(device), y.to(device)
#
# l = binary_classifier(x)
# p = l.argmax(-1)
# is_correct = p == y
linearized_model = __KwargsSequential(
networks.Lambda(
lambda x, **kwargs: classifier(x, features_only=True, **kwargs)),
binary_classifier)
return linearized_model
if not args.binarization_test:
    # Outside the binarization test the model is evaluated in its native
    # [-1, 1] input range, so the [0, 1]-scale budget of eps/255 is doubled.
config_fgsm["epsilon"] *= 2.0
config_pgd["epsilon"] *= 2.0
config_cw["epsilon"] *= 2.0
config_fgsm["step_size"] *= 2.0
config_pgd["step_size"] *= 2.0
config_cw["step_size"] *= 2.0
else:
config_auto_pgd_dlr_t["n_classes"] = 2
print(f"Epsilon: {args.epsilon}")
if args.more_steps:
config_pgd["step_size"] /= 5.0
config_cw["step_size"] /= 5.0
config_pgd["num_steps"] *= 10
config_cw["num_steps"] *= 10
config_auto_pgd_ce["num_steps"] *= 10
config_auto_pgd_dlr["num_steps"] *= 10
print("More & finer steps")
def test_test(net, feature_extractor, config):
from argparse_utils import DecisionBoundaryBinarizationSettings
print("num_samples_test:", args.num_samples_test)
print("test epsilon:", config["epsilon"])
scores_logit_differences_and_validation_accuracies = \
active_tests.decision_boundary_binarization.interior_boundary_discrimination_attack(
feature_extractor,
testloader,
attack_fn=lambda m, l, kwargs: test(0, create_attack(m), l, verbose=False,
inverse_acc=True, return_advs=True, **kwargs),
linearization_settings=DecisionBoundaryBinarizationSettings(
epsilon=config["epsilon"],
norm="linf",
lr=100000,
n_boundary_points=args.n_boundary_points,
n_inner_points=args.n_inner_points,
adversarial_attack_settings=None,
optimizer="sklearn"
),
n_samples=args.num_samples_test,
device=device,
n_samples_evaluation=200,#args.num_samples_test * 10
n_samples_asr_evaluation=200,
# TODO: use the right arguments here again!
# relative_inner_boundary_gap=0.00,
rescale_logits="adaptive",
decision_boundary_closeness=0.9999,
sample_training_data_from_corners=args.sample_from_corners,
#train_classifier_fn=partial(train_classifier, classifier=feature_extractor)
)
print(active_tests.decision_boundary_binarization.format_result(
scores_logit_differences_and_validation_accuracies,
args.num_samples_test))
def test(epoch, net, loader, verbose=True, inverse_acc=False,
return_advs=False):
# net.eval()
test_loss = 0
correct = 0
total = 0
if verbose:
iterator = tqdm(loader, ncols=0, leave=False)
else:
iterator = loader
if return_advs:
x_adv = []
logits_adv = []
else:
x_adv = None
logits_adv = None
for batch_idx, (inputs, targets) in enumerate(iterator):
start_time = time.time()
inputs, targets = inputs.to(device), targets.to(device)
pert_inputs = inputs.detach()
res = net(pert_inputs, targets)
if isinstance(res, tuple):
outputs, _, x_adv_it = res
else:
outputs = res
if return_advs:
x_adv.append(x_adv_it)
logits_adv.append(outputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
duration = time.time() - start_time
_, predicted = outputs.max(1)
batch_size = targets.size(0)
total += batch_size
correct_num = predicted.eq(targets).sum().item()
correct += correct_num
if verbose:
iterator.set_description(
"Accuracy:" + str(predicted.eq(targets).sum().item() / targets.size(0)))
if batch_idx % args.log_step == 0:
print(
"step %d, duration %.2f, test acc %.2f, avg-acc %.2f, loss %.2f"
% (batch_idx, duration, 100. * correct_num / batch_size,
100. * correct / total, test_loss / total))
if return_advs:
x_adv = torch.cat(x_adv, 0)
logits_adv = torch.cat(logits_adv, 0)
acc = 100. * correct / total
if inverse_acc:
acc = (100 - acc) / 100.0
if verbose:
print("Robust Accuracy:", acc)
# print('Val acc:', acc)
return acc, (x_adv, logits_adv)
if args.resume and args.init_model_pass != '-1':
# Load checkpoint.
print('==> Resuming from checkpoint..')
if args.model_dir is not None:
f_path_latest = os.path.join(args.model_dir, 'latest')
f_path = os.path.join(args.model_dir,
('checkpoint-%s' % args.init_model_pass))
if args.model_path is not None:
f_path = args.model_path
f_path_latest = args.model_path
if not os.path.isfile(f_path):
print('train from scratch: no checkpoint directory or file found')
elif args.init_model_pass == 'latest' and os.path.isfile(
f_path_latest):
checkpoint = torch.load(f_path_latest, map_location="cpu")
basic_net.load_state_dict(
{(k[len("module.basic_net."):] if k.startswith(
"module.basic_net.") else k): v
for k, v in checkpoint['net'].items()})
start_epoch = checkpoint['epoch']
print('resuming from epoch %s in latest' % start_epoch)
elif os.path.isfile(f_path):
checkpoint = torch.load(f_path)
# net.load_state_dict(checkpoint['net'])
basic_net.load_state_dict(
{(k[len("module.basic_net."):] if k.startswith(
"module.basic_net.") else k): v
for k, v in checkpoint['net'].items()})
start_epoch = checkpoint['epoch']
print('resuming from epoch %s' % start_epoch)
elif not os.path.isfile(f_path) or not os.path.isfile(f_path_latest):
print('train from scratch: no checkpoint directory or file found')
attack_list = args.attack_method_list.split('-')
attack_num = len(attack_list)
for attack_idx in range(attack_num):
args.attack_method = attack_list[attack_idx]
if args.attack_method == 'natural':
print()
print('-----natural non-adv mode -----')
# config is only dummy, not actually used
create_attack = lambda n: Attack_None(n, config_natural)
elif args.attack_method.upper() == 'FGSM':
print()
print('-----FGSM adv mode -----')
create_attack = lambda n: Attack_PGD(n, config_fgsm)
elif args.attack_method.upper() == 'PGD':
print()
print('-----PGD adv mode -----')
create_attack = lambda n: Attack_PGD(n, config_pgd)
elif args.attack_method.upper() == 'CW':
print()
print('-----CW adv mode -----')
create_attack = lambda n: Attack_PGD(n, config_cw)
elif args.attack_method.upper() == 'BETTERPGD':
print()
print('-----Better PGD adv mode -----')
create_attack = lambda n: Attack_BetterPGD(n, config_pgd)
elif args.attack_method.upper() == 'BETTERCW':
print()
print('-----Better CW adv mode -----')
create_attack = lambda n: Attack_BetterPGD(n, config_cw)
elif args.attack_method.upper() == 'AUTOPGDCE':
print()
print('-----Auto PGD (CE) adv mode -----')
create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_ce)
elif args.attack_method.upper() == 'AUTOPGDDLR':
print()
print('-----Auto PGD (DLR) adv mode -----')
create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_dlr)
elif args.attack_method.upper() == 'AUTOPGDDLRT':
print()
print('-----Auto PGD (DLR, targeted) adv mode -----')
create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_dlr_t)
elif args.attack_method.upper() == 'AUTOPGDCE+':
print()
print('-----Auto PGD+ (CE) adv mode -----')
create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_ce_plus)
elif args.attack_method.upper() == 'AUTOPGDDLR+':
print()
print('-----Auto PGD+ (DLR) adv mode -----')
create_attack = lambda n: Attack_AutoPGD(n, config_auto_pgd_dlr_plus)
else:
raise Exception(
'Should be a valid attack method. The specified attack method is: {}'
.format(args.attack_method))
if args.binarization_test or args.attack_method.upper().startswith("AUTOPGD"):
specific_net = ZeroOneOneOneNetwork(basic_net)
specific_net.eval()
net = create_attack(specific_net)
else:
net = create_attack(basic_net)
if device == 'cuda':
net = torch.nn.DataParallel(net)
if "specific_net" in locals():
if not isinstance(specific_net, torch.nn.DataParallel):
specific_net = torch.nn.DataParallel(specific_net)
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
if args.binarization_test:
test_test(net, specific_net, config_pgd)
else:
test(0, net, testloader)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .wideresnet import *
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes,
out_planes,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(
in_planes,
out_planes,
kernel_size=1,
stride=stride,
padding=0,
bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
def __init__(self,
nb_layers,
in_planes,
out_planes,
block,
stride,
dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers,
stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride,
dropRate):
layers = []
for i in range(int(nb_layers)):
layers.append(
block(i == 0 and in_planes or out_planes, out_planes,
i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [
16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor
]
assert ((depth - 4) % 6 == 0)
        n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3,
nChannels[0],
kernel_size=3,
stride=1,
padding=1,
bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1,
dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2,
dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2,
dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x, features_only=False, features_and_logits=False):
if features_only: assert not features_and_logits
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
if features_only:
return out
elif features_and_logits:
return out, self.fc(out)
else:
return self.fc(out), out.view(x.size(0), -1)
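# Hedged shape check (CPU): a WideResNet-28-10 on 32x32 inputs, as used for
# CIFAR/SVHN in this repo, returns (N, num_classes) logits together with the
# flattened 640-dim penultimate features.
if __name__ == '__main__':
    net = WideResNet(depth=28, num_classes=10, widen_factor=10)
    logits, feats = net(torch.randn(2, 3, 32, 32))
    print(logits.shape, feats.shape)  # torch.Size([2, 10]) torch.Size([2, 640])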
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import numpy as np
def extract_images(file_path):
'''Extract the images into a 4D uint8 numpy array [index, y, x, depth].'''
def _read32(bytestream):
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
    with open(file_path, 'rb') as f, \
         gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' %
(magic, f.name))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, rows, cols, 1)
return data
def extract_labels(file_path):
'''Extract the labels into a 1D uint8 numpy array [index].'''
def _read32(bytestream):
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
    with open(file_path, 'rb') as f, \
         gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' %
(magic, f.name))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = np.frombuffer(buf, dtype=np.uint8)
return labels
def load_mnist_data(data_path, is_uint8=False):
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
train_images = extract_images(os.path.join(data_path, TRAIN_IMAGES))
if not is_uint8:
train_images = 2 * (train_images / 255.0 - 0.5)
train_labels = extract_labels(os.path.join(data_path, TRAIN_LABELS))
test_images = extract_images(os.path.join(data_path, TEST_IMAGES))
if not is_uint8:
test_images = 2 * (test_images / 255.0 - 0.5)
test_labels = extract_labels(os.path.join(data_path, TEST_LABELS))
train_data = order_data(train_images, train_labels, 10)
test_data = order_data(test_images, test_labels, 10)
return dict(train_images=train_data['images'],
train_labels=train_data['labels'],
train_count=train_data['count'],
test_images=test_data['images'],
test_labels=test_data['labels'],
test_count=test_data['count'])
# python2
#def unpickle(file):
# import cPickle
# fo = open(file, 'rb')
# dict = cPickle.load(fo)
# fo.close()
# return dict
# python3
def unpickle(file):
    import pickle
    with open(file, 'rb') as fo:
        data = pickle.load(fo, encoding='bytes')
    return data
def load_cifar100_data(data_path, is_fine=True, is_uint8=False):
# train
train_set = unpickle(os.path.join(data_path, 'train'))
train_images = train_set[b'data']
train_images = np.dstack([
train_images[:, :1024], train_images[:, 1024:2048],
train_images[:, 2048:]
])
train_images = train_images.reshape([train_images.shape[0], 32, 32, 3])
if not is_uint8:
train_images = train_images / 255.0
train_images = 2.0 * (train_images - 0.5)
if is_fine:
train_labels = np.array(train_set[b'fine_labels'])
else:
train_labels = np.array(train_set[b'coarse_labels'])
# test
test_set = unpickle(os.path.join(data_path, 'test'))
test_images = test_set[b'data']
test_images = np.dstack([
test_images[:, :1024], test_images[:, 1024:2048], test_images[:, 2048:]
])
test_images = test_images.reshape([test_images.shape[0], 32, 32, 3])
if not is_uint8:
test_images = test_images / 255.0
test_images = 2.0 * (test_images - 0.5)
if is_fine:
test_labels = np.array(test_set[b'fine_labels'])
else:
test_labels = np.array(test_set[b'coarse_labels'])
return dict(train_images=train_images,
train_labels=train_labels,
test_images=test_images,
test_labels=test_labels)
def load_cifar10_data(data_path, is_uint8=False):
# train
train_names = [
'data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4',
'data_batch_5'
]
all_images = []
all_labels = []
for filename in train_names:
train_set = unpickle(os.path.join(data_path, filename))
all_images.append(train_set[b'data'])
all_labels.append(train_set[b'labels'])
train_images = np.concatenate(all_images, axis=0)
train_images = np.dstack([
train_images[:, :1024], train_images[:, 1024:2048],
train_images[:, 2048:]
])
train_images = train_images.reshape([train_images.shape[0], 32, 32, 3])
if not is_uint8:
train_images = train_images / 255.0
train_images = 2.0 * (train_images - 0.5)
train_labels = np.concatenate(all_labels, axis=0)
# test
test_set = unpickle(os.path.join(data_path, 'test_batch'))
test_images = test_set[b'data']
test_images = np.dstack([
test_images[:, :1024], test_images[:, 1024:2048], test_images[:, 2048:]
])
test_images = test_images.reshape([test_images.shape[0], 32, 32, 3])
if not is_uint8:
test_images = test_images / 255.0
test_images = 2.0 * (test_images - 0.5)
test_labels = np.array(test_set[b'labels'])
return dict(train_images=train_images,
train_labels=train_labels,
test_images=test_images,
test_labels=test_labels)
def preprocess_py(images, pad_size, target_size):
'''Preprocess images in python.
Args:
images: 4-D numpy array.
Returns:
preprocessed images, 4-D numpy array.
'''
assert images.shape[1] == images.shape[2], 'can only handle square images!'
image_number = images.shape[0]
image_size = images.shape[1]
# padding, with equal pad size on both sides.
padded_images = np.pad(images, [(0, 0), (pad_size, pad_size),
(pad_size, pad_size), (0, 0)],
mode='constant',
constant_values=0)
    # random crop; np.random.randint's high is exclusive, hence the + 1
    # (the deprecated np.random.random_integers used an inclusive high)
    idx = np.random.randint(low=0,
                            high=2 * pad_size + 1,
                            size=[image_number, 2])
cropped_images = np.zeros([image_number, target_size, target_size, 3])
for i in np.arange(image_number):
cropped_images[i] = padded_images[i, idx[i, 0]:idx[i, 0] +
target_size, idx[i, 1]:idx[i, 1] +
target_size]
    # random horizontal flip, applied to the whole batch at once
    if np.random.rand() > 0.5:
        cropped_images = cropped_images[:, :, ::-1]
return cropped_images
def one_hot(y, dim):
y_dense = np.zeros([len(y), dim])
y_dense[np.arange(len(y)), y] = 1.0
return y_dense
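if __name__ == '__main__':
    # Minimal self-check (a sketch; random data, so none of the MNIST/CIFAR
    # files referenced above are required).
    imgs = np.random.rand(4, 32, 32, 3)
    crops = preprocess_py(imgs, pad_size=4, target_size=32)
    assert crops.shape == (4, 32, 32, 3)
    dense = one_hot(np.array([0, 2, 1]), dim=3)
    assert dense.shape == (3, 3) and np.allclose(dense.sum(axis=1), 1.0)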
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import time
import sys
import numpy as np
from bat_utils import *
from wide_resnet import Model
def load_data(FLAGS):
# load data
if FLAGS.dataset == 'SVHN':
raise ValueError("not supported")
elif FLAGS.dataset == 'CIFAR':
if FLAGS.num_classes == 10:
dataset = load_cifar10_data('data/cifar-10-batches-py/')
elif FLAGS.num_classes == 20:
dataset = load_cifar100_data('cifar100_data', is_fine=False)
elif FLAGS.num_classes == 100:
dataset = load_cifar100_data('cifar100_data', is_fine=True)
else:
raise ValueError('Number of classes not valid!')
train_images = dataset['train_images']
train_labels = dataset['train_labels']
test_images = dataset['test_images']
test_labels = dataset['test_labels']
else:
raise ValueError('Dataset not valid!')
return train_images, train_labels, test_images, test_labels
def load_model(FLAGS):
x_pl = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name='x')
y_pl = tf.placeholder(tf.int64, shape=[None], name='y')
is_train = tf.placeholder(tf.bool, name='is_train')
model = Model(is_train)
x_transformed = x_pl * 2.0 - 1.0
logits, _ = model.build_model(images=x_transformed, num_classes=FLAGS.num_classes)
prob = tf.nn.softmax(logits)
correct = tf.cast(tf.equal(tf.argmax(logits, axis=1), y_pl), tf.float32)
accuracy = tf.reduce_mean(correct)
saver = tf.train.Saver(max_to_keep=100)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
saver.restore(sess, FLAGS.ckpt_path)
print('restored checkpoint from %s' % FLAGS.ckpt_path)
return sess, (x_pl, y_pl, is_train, logits, accuracy)
def setup_attack(logits, x_pl, y_pl, FLAGS):
xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
labels=y_pl)
# loss for adversarial attack
if FLAGS.loss_type == 'xent':
if FLAGS.targeted:
loss_att = tf.reduce_sum(xent)
else:
loss_att = -tf.reduce_sum(xent)
elif FLAGS.loss_type == 'CW':
y_loss = tf.one_hot(y_pl, FLAGS.num_classes)
    # CW margin loss on the true-class logit vs. the largest other logit
    true_logit = tf.reduce_sum(y_loss * logits, axis=1)
    other_logit = tf.reduce_max((1 - y_loss) * logits - y_loss * 1e4, axis=1)
    if FLAGS.targeted:
      raise ValueError("not supported")
    else:
      loss_att = tf.reduce_sum(tf.maximum(true_logit - other_logit + FLAGS.margin, 0))
else:
raise ValueError('loss type not supported!')
grad, = tf.gradients(loss_att, x_pl)
return grad
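# Note on the CW loss above: with true-class logit z_y and largest other
# logit z_o, each example contributes max(z_y - z_o + margin, 0); driving
# this to zero means some other class outscores the label by `margin`.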
def adv_attack(sess, grad, x_pl, y_pl, is_train, x_batch, y_batch, FLAGS):
epsilon = FLAGS.epsilon / 255.0
step_size = FLAGS.step_size / 255.0
if not FLAGS.targeted:
y_att = np.copy(y_batch)
else:
raise ValueError("targeted mode not supported")
# randomly perturb the original images
if FLAGS.random_start:
x = x_batch + np.random.uniform(-epsilon, epsilon, x_batch.shape)
else:
x = np.copy(x_batch)
for i in range(FLAGS.num_steps):
    grad_val = sess.run(grad,
                        feed_dict={
                            x_pl: x,
                            y_pl: y_att,
                            is_train: False
                        })
x = x - step_size * np.sign(grad_val)
x = np.clip(x, x_batch - epsilon, x_batch + epsilon)
x = np.clip(x, 0, 1.0)
return x
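# adv_attack implements a standard L_inf PGD/BIM loop: `num_steps` signed
# gradient steps of size step_size/255, each projected back into the
# epsilon/255 ball around x_batch and clipped to the valid [0, 1] range.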
def run_eval(sess, grad, x_pl, y_pl, is_train, logits, FLAGS,
test_images, test_labels, num_classes=10):
test_size = test_images.shape[0]
epoch_steps = np.ceil(test_size / FLAGS.batch_size).astype(np.int32)
nat_total = 0.0
adv_total = 0.0
class_nat_total = np.zeros([num_classes])
class_adv_total = np.zeros([num_classes])
nat_cnt_list = np.zeros([test_size])
adv_cnt_list = np.zeros([test_size])
idx = np.random.permutation(test_size)
for step_idx in range(epoch_steps):
start = step_idx * FLAGS.batch_size
end = np.minimum((step_idx + 1) * FLAGS.batch_size,
test_size).astype(np.int32)
x_batch = test_images[idx[start:end]]
y_batch = test_labels[idx[start:end]]
start_time = time.time()
nat_logits = sess.run(logits,
feed_dict={
x_pl: x_batch,
is_train: False
})
nat_cnt = nat_logits.argmax(-1) == y_batch
x_batch_adv = adv_attack(sess, grad, x_pl, y_pl, is_train, x_batch, y_batch, FLAGS)
adv_logits = sess.run(logits,
feed_dict={
x_pl: x_batch_adv,
y_pl: y_batch,
is_train: False
})
adv_cnt = adv_logits.argmax(-1) == y_batch
nat_cnt_list[start:end] = nat_cnt
adv_cnt_list[start:end] = adv_cnt
for ii in range(FLAGS.num_classes):
class_nat_total[ii] += np.sum(nat_cnt[y_batch == ii])
class_adv_total[ii] += np.sum(adv_cnt[y_batch == ii])
nat_total += np.sum(nat_cnt)
adv_total += np.sum(adv_cnt)
duration = time.time() - start_time
print('finished batch %d/%d, duration %.2f, nat acc %.2f, adv acc %.2f' %
(step_idx, epoch_steps, duration, 100 * np.mean(nat_cnt),
100 * np.mean(adv_cnt)))
sys.stdout.flush()
nat_acc = nat_total / test_size
adv_acc = adv_total / test_size
class_nat_total /= (test_size / FLAGS.num_classes)
class_adv_total /= (test_size / FLAGS.num_classes)
print('clean accuracy: %.2f, adv accuracy: %.2f' %
(100 * nat_acc, 100 * adv_acc))
for ii in range(FLAGS.num_classes):
print('class %d, clean %.2f, adv %.2f' %
(ii, 100 * class_nat_total[ii], 100 * class_adv_total[ii]))
def parse_args():
tf.flags.DEFINE_string('ckpt_path', '', '')
tf.flags.DEFINE_string('dataset', 'CIFAR', '')
tf.flags.DEFINE_integer('num_classes', 10, '')
tf.flags.DEFINE_integer('batch_size', 100, '')
tf.flags.DEFINE_string('loss_type', 'xent', '')
tf.flags.DEFINE_float('margin', 50.0, '')
tf.flags.DEFINE_float('epsilon', 8.0, '')
tf.flags.DEFINE_integer('num_steps', 10, '')
tf.flags.DEFINE_float('step_size', 2.0, '')
tf.flags.DEFINE_boolean('random_start', False, '')
tf.flags.DEFINE_boolean('targeted', False, '')
tf.flags.DEFINE_integer('n_samples', 2048, '')
FLAGS = tf.flags.FLAGS
print(FLAGS.flag_values_dict())
return FLAGS
def main():
FLAGS = parse_args()
_, _, test_images, test_labels = load_data(FLAGS)
  # map the data from [-1, 1] back to [0, 1], since the normalization to
  # [-1, 1] now happens inside the model (see load_model)
test_images = (test_images + 1.0) / 2.0
# subsample test data
if FLAGS.n_samples == -1:
FLAGS.n_samples = len(test_images)
idxs = np.arange(len(test_images))
np.random.shuffle(idxs)
idxs = idxs[:FLAGS.n_samples]
test_images, test_labels = test_images[idxs], test_labels[idxs]
sess, (x_pl, y_pl, is_train, logits, accuracy) = load_model(FLAGS)
attack_grad = setup_attack(logits, x_pl, y_pl, FLAGS)
  run_eval(sess, attack_grad, x_pl, y_pl, is_train, logits, FLAGS,
           test_images, test_labels, num_classes=FLAGS.num_classes)
if __name__ == "__main__":
main()
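# Example invocation (a sketch; the script/file name and checkpoint path are
# illustrative):
#   python evaluate.py --ckpt_path=checkpoints/model.ckpt-200 \
#       --loss_type=CW --num_steps=10 --n_samples=2048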
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import os
import sys
import numpy as np
import tensorflow as tf
from utils import *
from wide_resnet import Model
tf.flags.DEFINE_string('model_dir', '/tmp/adv_train/', '')
tf.flags.DEFINE_string('dataset', '', '')
tf.flags.DEFINE_integer('num_classes', 10, '')
tf.flags.DEFINE_string('restore_ckpt_path', '', '')
tf.flags.DEFINE_integer('start_epoch', 0, '')
tf.flags.DEFINE_integer('max_epoch', 201, '')
tf.flags.DEFINE_integer('decay_epoch1', 100, '')
tf.flags.DEFINE_integer('decay_epoch2', 150, '')
tf.flags.DEFINE_float('decay_rate', 0.1, '')
tf.flags.DEFINE_float('learning_rate', 0.1, '')
tf.flags.DEFINE_float('momentum', 0.9, '')
tf.flags.DEFINE_integer('batch_size', 128, '')
tf.flags.DEFINE_float('weight_decay', 2e-4, '')
tf.flags.DEFINE_float('margin', 50.0, '')
tf.flags.DEFINE_string('loss_type', 'xent', '')
tf.flags.DEFINE_float('epsilon', 8.0, '')
tf.flags.DEFINE_integer('num_steps', 7, '')
tf.flags.DEFINE_float('step_size', 2.0, '')
tf.flags.DEFINE_boolean('random_start', True, '')
tf.flags.DEFINE_boolean('targeted', True, '')
tf.flags.DEFINE_string('target_type', 'MC', '')
tf.flags.DEFINE_boolean('label_adversary', True, '')
tf.flags.DEFINE_float('multi', 9, '')
tf.flags.DEFINE_integer('log_steps', 10, '')
tf.flags.DEFINE_integer('save_epochs', 20, '')
tf.flags.DEFINE_integer('eval_epochs', 10, '')
tf.flags.DEFINE_string('cuda_device', '3', '')
FLAGS = tf.flags.FLAGS
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.cuda_device
print(FLAGS.flag_values_dict())
# load data
if FLAGS.dataset == 'SVHN':
train_data = np.load('svhn_data/train_32x32.npz')
train_images = train_data['arr_0']
train_images = 2.0 * (train_images / 255.0 - 0.5)
train_labels = train_data['arr_1']
test_data = np.load('svhn_data/test_32x32.npz')
test_images = test_data['arr_0']
test_images = 2 * (test_images / 255.0 - 0.5)
test_labels = test_data['arr_1']
elif FLAGS.dataset == 'CIFAR':
if FLAGS.num_classes == 10:
dataset = load_cifar10_data('cifar10_data')
elif FLAGS.num_classes == 20:
dataset = load_cifar100_data('cifar100_data', is_fine=False)
elif FLAGS.num_classes == 100:
dataset = load_cifar100_data('cifar100_data', is_fine=True)
else:
raise ValueError('Number of classes not valid!')
train_images = dataset['train_images']
train_labels = dataset['train_labels']
test_images = dataset['test_images']
test_labels = dataset['test_labels']
else:
raise ValueError('Dataset not valid!')
x_pl = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name='x')
y_pl = tf.placeholder(tf.int64, shape=[None], name='y')
y_loss = tf.placeholder(tf.float32,
shape=[None, FLAGS.num_classes],
name='y_loss')
lr = tf.placeholder(tf.float32, name='lr')
is_train = tf.placeholder(tf.bool, name='is_train')
global_step = tf.Variable(0, trainable=False, name='global_step')
model = Model(is_train)
logits, _ = model.build_model(images=x_pl, num_classes=FLAGS.num_classes)
prob = tf.nn.softmax(logits)
xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_loss)
mean_xent = tf.reduce_mean(xent)
total_loss = mean_xent + FLAGS.weight_decay * model.weight_decay_loss
correct = tf.cast(tf.equal(tf.argmax(logits, axis=1), y_pl), tf.float32)
accuracy = tf.reduce_mean(correct)
# loss for adversarial attack
if FLAGS.loss_type == 'xent':
if FLAGS.targeted:
loss_att = tf.reduce_sum(xent)
else:
loss_att = -tf.reduce_sum(xent)
elif FLAGS.loss_type == 'CW':
  true_logit = tf.reduce_sum(y_loss * logits, axis=1)
  other_logit = tf.reduce_max((1 - y_loss) * logits - y_loss * 1000.0, axis=1)
  if FLAGS.targeted:
    loss_att = tf.reduce_sum(tf.maximum(other_logit - true_logit + FLAGS.margin, 0))
  else:
    loss_att = tf.reduce_sum(tf.maximum(true_logit - other_logit + FLAGS.margin, 0))
else:
raise ValueError('loss type not supported!')
grad, = tf.gradients(loss_att, x_pl)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
opt = tf.train.MomentumOptimizer(lr, FLAGS.momentum)
grads_and_vars = opt.compute_gradients(total_loss, tf.trainable_variables())
with tf.control_dependencies(update_ops):
train_step = opt.apply_gradients(grads_and_vars, global_step=global_step)
saver = tf.train.Saver(max_to_keep=100)
init_op = tf.global_variables_initializer()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(init_op)
if FLAGS.restore_ckpt_path:
saver.restore(sess, os.path.abspath(FLAGS.restore_ckpt_path))
print('Restored checkpoints from %s' % FLAGS.restore_ckpt_path)
def adv_attack(nat_logits,
x_batch,
y_batch,
epsilon=FLAGS.epsilon,
step_size=FLAGS.step_size,
num_steps=FLAGS.num_steps):
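  # images live in [-1, 1] here (a range of width 2), so the 0-255 pixel
  # budget is rescaled by 2/255 rather than 1/255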
epsilon = epsilon / 255.0 * 2.0
step_size = step_size / 255.0 * 2.0
y_batch_dense = one_hot(y_batch, FLAGS.num_classes)
if not FLAGS.targeted: # non-targeted
y_att = np.copy(y_batch)
elif FLAGS.target_type == 'MC': # most confusing target label
nat_logits[np.arange(y_batch.shape[0]), y_batch] = -1e4
y_att = np.argmax(nat_logits, axis=1)
elif FLAGS.target_type == 'RAND': # random target label
y_att = np.zeros_like(y_batch)
for ii in np.arange(y_batch.shape[0]):
tmp = np.ones([FLAGS.num_classes]) / (FLAGS.num_classes - 1)
tmp[y_batch[ii]] = 0.0
y_att[ii] = np.random.choice(FLAGS.num_classes, p=tmp)
  elif FLAGS.target_type == 'MOSA':  # most one-step adversarial target label
weight = sess.run(tf.get_default_graph().get_tensor_by_name(
'logit/DW:0')).T # num_classes * num_features
dist = euclidean_distances(
weight[y_batch],
weight) + y_batch_dense # batch_size * num_classes
gt_logits = np.sum(nat_logits * y_batch_dense, axis=1)
diff_logits = np.reshape(gt_logits, [-1, 1]) - nat_logits
truncated = np.where(diff_logits > 1e-4, diff_logits, 1e4)
y_att = np.argmin(truncated / dist, axis=1)
elif FLAGS.target_type == 'MIX': # mix of MC and MOSA
weight = sess.run(tf.get_default_graph().get_tensor_by_name(
'logit/DW:0')).T # num_classes * num_features
dist = euclidean_distances(
weight[y_batch],
weight) + y_batch_dense # batch_size * num_classes
gt_logits = np.sum(nat_logits * y_batch_dense, axis=1)
diff_logits = np.reshape(gt_logits, [-1, 1]) - nat_logits
truncated = np.where(diff_logits > 1e-4, diff_logits, 1e4)
y_att_MOSA = np.argmin(truncated / dist, axis=1)
y_att_MC = np.argmax((1.0 - y_batch_dense) * nat_logits, axis=1)
y_att = np.where(
np.argmax(nat_logits, axis=1) == y_batch, y_att_MOSA, y_att_MC)
else:
raise ValueError('Target type not valid!')
y_att_dense = one_hot(y_att, FLAGS.num_classes)
# randomly perturb as initialization
if FLAGS.random_start:
noise = np.random.uniform(-epsilon, epsilon, x_batch.shape)
x = x_batch + noise
else:
x = np.copy(x_batch)
for i in range(num_steps):
grad_val = sess.run(grad,
feed_dict={
x_pl: x,
y_loss: y_att_dense,
is_train: False
})
x = x - step_size * np.sign(grad_val)
x = np.clip(x, x_batch - epsilon, x_batch + epsilon)
x = np.clip(x, -1.0, 1.0)
return x
def adv_labels(nat_prob, y_batch, gamma=0.01):
L = -np.log(nat_prob + 1e-8) # log-likelihood
LL = np.copy(L)
LL[np.arange(y_batch.shape[0]), y_batch] = 1e4
minval = np.min(LL, axis=1)
LL[np.arange(y_batch.shape[0]), y_batch] = -1e4
maxval = np.max(LL, axis=1)
denom = np.sum(L, axis=1) - L[np.arange(y_batch.shape[0]), y_batch] - (
FLAGS.num_classes - 1) * (minval - gamma)
delta = 1 / (1 + FLAGS.multi * (maxval - minval + gamma) / denom)
alpha = delta / denom
y_batch_adv = np.reshape(
alpha, [-1, 1]) * (L - np.reshape(minval, [-1, 1]) + gamma)
y_batch_adv[np.arange(y_batch.shape[0]), y_batch] = 1.0 - delta
return y_batch_adv
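# Note on adv_labels: each row of y_batch_adv is a proper distribution. With
# per-class negative log-likelihoods L_j and minval the smallest L_j over the
# non-true classes, every non-true class j receives
#   alpha * (L_j - minval + gamma),  alpha = delta / sum_{j != y}(L_j - minval + gamma),
# and the true class receives 1 - delta, so each row sums to one; FLAGS.multi
# controls how much mass delta is shifted away from the true label.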
# training loop
train_size = train_images.shape[0]
epoch_steps = np.ceil(train_size / FLAGS.batch_size).astype(np.int32)
for epoch_idx in np.arange(FLAGS.start_epoch, FLAGS.max_epoch):
if epoch_idx < FLAGS.decay_epoch1:
lr_val = FLAGS.learning_rate
elif epoch_idx < FLAGS.decay_epoch2:
lr_val = FLAGS.learning_rate * FLAGS.decay_rate
else:
lr_val = FLAGS.learning_rate * FLAGS.decay_rate * FLAGS.decay_rate
# each epoch random shuffle of training images
idx = np.random.permutation(train_size)
for step_idx in np.arange(epoch_steps):
start = step_idx * FLAGS.batch_size
end = np.minimum((step_idx + 1) * FLAGS.batch_size,
train_size).astype(np.int32)
x_batch = preprocess_py(train_images[idx[start:end]], 4, 32)
y_batch = train_labels[idx[start:end]]
y_batch_dense = one_hot(y_batch, FLAGS.num_classes)
start_time = time.time()
nat_prob, nat_logits = sess.run([prob, logits],
feed_dict={
x_pl: x_batch,
is_train: False
})
# generate adversarial images
x_batch_adv = adv_attack(nat_logits, x_batch, y_batch)
# generate adversarial labels
y_batch_adv = adv_labels(nat_prob, y_batch)
# eval accuracy
if step_idx % FLAGS.log_steps == 0:
nat_acc = sess.run(accuracy,
feed_dict={
x_pl: x_batch,
y_pl: y_batch,
is_train: False
})
adv_acc = sess.run(accuracy,
feed_dict={
x_pl: x_batch_adv,
y_pl: y_batch,
is_train: False
})
# training step
if FLAGS.label_adversary:
_, loss_val = sess.run([train_step, total_loss],
feed_dict={
x_pl: x_batch_adv,
y_loss: y_batch_adv,
is_train: True,
lr: lr_val
})
else:
_, loss_val = sess.run(
[train_step, total_loss],
feed_dict={
x_pl: x_batch_adv,
y_loss: y_batch_dense,
is_train: True,
lr: lr_val
})
duration = time.time() - start_time
# print to stdout
if step_idx % FLAGS.log_steps == 0:
print(
"epoch %d, step %d, lr %.4f, duration %.2f, training nat acc %.2f, training adv acc %.2f, training adv loss %.4f"
% (epoch_idx, step_idx, lr_val, duration, 100 * nat_acc,
100 * adv_acc, loss_val))
sys.stdout.flush()
# save checkpoint
if epoch_idx % FLAGS.save_epochs == 0:
saver.save(sess, os.path.join(FLAGS.model_dir, 'checkpoint'),
epoch_idx)
# evaluate
def eval_once():
eval_size = test_images.shape[0]
epoch_steps = np.ceil(eval_size / FLAGS.batch_size).astype(np.int32)
# random shuffle of test images does not affect the result
idx = np.random.permutation(eval_size)
count = 0.0
for step_idx in np.arange(epoch_steps):
start = step_idx * FLAGS.batch_size
end = np.minimum((step_idx + 1) * FLAGS.batch_size,
eval_size).astype(np.int32)
x_batch = test_images[idx[start:end]]
y_batch = test_labels[idx[start:end]]
nat_logits = sess.run(logits,
feed_dict={
x_pl: x_batch,
is_train: False
})
x_batch_adv = adv_attack(nat_logits, x_batch, y_batch)
count += np.sum(
sess.run(correct,
feed_dict={
x_pl: x_batch_adv,
y_pl: y_batch,
is_train: False
}))
acc = count / eval_size
return acc
if epoch_idx % FLAGS.eval_epochs == 0:
print('epoch %d, adv acc %.2f' % (epoch_idx, 100 * eval_once()))
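# Example invocation (a sketch; the script/file name and flag values are
# illustrative):
#   python train.py --dataset=CIFAR --num_classes=10 --model_dir=/tmp/adv_train/ \
#       --loss_type=CW --targeted=True --target_type=MC --label_adversary=True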
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import os
import sys
import numpy as np
import tensorflow as tf
from utils import *
from wide_resnet import Model
tf.flags.DEFINE_string('ckpt_path', '', '')
tf.flags.DEFINE_string('dataset', 'CIFAR', '')
tf.flags.DEFINE_integer('num_classes', 10, '')
tf.flags.DEFINE_integer('batch_size', 100, '')
tf.flags.DEFINE_string('loss_type', 'xent', '')
tf.flags.DEFINE_float('margin', 50.0, '')
tf.flags.DEFINE_float('epsilon', 8.0, '')
tf.flags.DEFINE_integer('num_steps', 10, '')
tf.flags.DEFINE_float('step_size', 2.0, '')
tf.flags.DEFINE_boolean('random_start', False, '')
tf.flags.DEFINE_boolean('targeted', False, '')
tf.flags.DEFINE_string('cuda_device', '3', '')
FLAGS = tf.flags.FLAGS
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.cuda_device
print(FLAGS.flag_values_dict())
# load data
if FLAGS.dataset == 'SVHN':
train_data = np.load('svhn_data/train_32x32.npz')
train_images = train_data['arr_0']
train_images = 2.0 * (train_images / 255.0 - 0.5)
train_labels = train_data['arr_1']
test_data = np.load('svhn_data/test_32x32.npz')
test_images = test_data['arr_0']
test_images = 2 * (test_images / 255.0 - 0.5)
test_labels = test_data['arr_1']
elif FLAGS.dataset == 'CIFAR':
if FLAGS.num_classes == 10:
dataset = load_cifar10_data('cifar10_data')
elif FLAGS.num_classes == 20:
dataset = load_cifar100_data('cifar100_data', is_fine=False)
elif FLAGS.num_classes == 100:
dataset = load_cifar100_data('cifar100_data', is_fine=True)
else:
raise ValueError('Number of classes not valid!')
train_images = dataset['train_images']
train_labels = dataset['train_labels']
test_images = dataset['test_images']
test_labels = dataset['test_labels']
else:
raise ValueError('Dataset not valid!')
x_pl = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name='x')
y_pl = tf.placeholder(tf.int64, shape=[None], name='y')
is_train = tf.placeholder(tf.bool, name='is_train')
model = Model(is_train)
logits, _ = model.build_model(images=x_pl, num_classes=FLAGS.num_classes)
prob = tf.nn.softmax(logits)
xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
labels=y_pl)
correct = tf.cast(tf.equal(tf.argmax(logits, axis=1), y_pl), tf.float32)
accuracy = tf.reduce_mean(correct)
# loss for adversarial attack
if FLAGS.loss_type == 'xent':
if FLAGS.targeted:
loss_att = tf.reduce_sum(xent)
else:
loss_att = -tf.reduce_sum(xent)
elif FLAGS.loss_type == 'CW':
y_loss = tf.one_hot(y_pl, FLAGS.num_classes)
  true_logit = tf.reduce_sum(y_loss * logits, axis=1)
  other_logit = tf.reduce_max((1 - y_loss) * logits - y_loss * 1e4, axis=1)
  if FLAGS.targeted:
    loss_att = tf.reduce_sum(tf.maximum(other_logit - true_logit + FLAGS.margin, 0))
  else:
    loss_att = tf.reduce_sum(tf.maximum(true_logit - other_logit + FLAGS.margin, 0))
else:
raise ValueError('loss type not supported!')
grad, = tf.gradients(loss_att, x_pl)
saver = tf.train.Saver(max_to_keep=100)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
saver.restore(sess, FLAGS.ckpt_path)
print('restored checkpoint from %s' % FLAGS.ckpt_path)
def adv_attack(nat_prob, x_batch, y_batch):
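  # images live in [-1, 1] here (a range of width 2), so the 0-255 pixel
  # budget is rescaled by 2/255 rather than 1/255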
epsilon = FLAGS.epsilon / 255.0 * 2
step_size = FLAGS.step_size / 255.0 * 2
if not FLAGS.targeted:
y_att = np.copy(y_batch)
else:
# most confusing targeted attack
nat_prob[np.arange(y_batch.shape[0]), y_batch] = 0.0
y_att = np.argmax(nat_prob, axis=1)
# randomly perturb the original images
if FLAGS.random_start:
x = x_batch + np.random.uniform(-epsilon, epsilon, x_batch.shape)
else:
x = np.copy(x_batch)
for i in range(FLAGS.num_steps):
grad_val = sess.run(grad,
feed_dict={
x_pl: x,
y_pl: y_att,
is_train: False
})
x = x - step_size * np.sign(grad_val)
x = np.clip(x, x_batch - epsilon, x_batch + epsilon)
x = np.clip(x, -1.0, 1.0)
return x
test_size = test_images.shape[0]
epoch_steps = np.ceil(test_size / FLAGS.batch_size).astype(np.int32)
nat_total = 0.0
adv_total = 0.0
class_nat_total = np.zeros([FLAGS.num_classes])
class_adv_total = np.zeros([FLAGS.num_classes])
nat_cnt_list = np.zeros([test_size])
adv_cnt_list = np.zeros([test_size])
idx = np.random.permutation(test_size)
for step_idx in range(epoch_steps):
start = step_idx * FLAGS.batch_size
end = np.minimum((step_idx + 1) * FLAGS.batch_size,
test_size).astype(np.int32)
x_batch = test_images[idx[start:end]]
y_batch = test_labels[idx[start:end]]
start_time = time.time()
nat_cnt, nat_prob = sess.run([correct, prob],
feed_dict={
x_pl: x_batch,
y_pl: y_batch,
is_train: False
})
x_batch_adv = adv_attack(nat_prob, x_batch, y_batch)
adv_cnt = sess.run(correct,
feed_dict={
x_pl: x_batch_adv,
y_pl: y_batch,
is_train: False
})
nat_cnt_list[start:end] = nat_cnt
adv_cnt_list[start:end] = adv_cnt
for ii in range(FLAGS.num_classes):
class_nat_total[ii] += np.sum(nat_cnt[y_batch == ii])
class_adv_total[ii] += np.sum(adv_cnt[y_batch == ii])
nat_total += np.sum(nat_cnt)
adv_total += np.sum(adv_cnt)
duration = time.time() - start_time
print('finished batch %d/%d, duration %.2f, nat acc %.2f, adv acc %.2f' %
(step_idx, epoch_steps, duration, 100 * np.mean(nat_cnt),
100 * np.mean(adv_cnt)))
sys.stdout.flush()
nat_acc = nat_total / test_size
adv_acc = adv_total / test_size
class_nat_total /= (test_size / FLAGS.num_classes)
class_adv_total /= (test_size / FLAGS.num_classes)
print('clean accuracy: %.2f, adv accuracy: %.2f' %
(100 * nat_acc, 100 * adv_acc))
for ii in range(FLAGS.num_classes):
print('class %d, clean %.2f, adv %.2f' %
(ii, 100 * class_nat_total[ii], 100 * class_adv_total[ii]))
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# based on https://github.com/tensorflow/models/tree/master/resnet
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class Model(object):
"""ResNet model."""
def __init__(self, is_train):
"""ResNet constructor."""
self.is_train = is_train
def add_internal_summaries(self):
pass
def build_model(self, images, num_classes):
"""Build the core model within the graph."""
with tf.variable_scope('input'):
x = self._conv('init_conv', images, 3, 3, 16, self._stride_arr(1))
strides = [1, 2, 2]
activate_before_residual = [True, False, False]
res_func = self._residual
        # A WRN-28-10 wide residual network is used below; it is more memory
        # efficient than a very deep residual network and has comparably good
        # performance (https://arxiv.org/pdf/1605.07146v1.pdf).
        # Alternative widths, for reference:
        #filters = [16, 16, 32, 64] # WRN-28-1
        #filters = [16, 80, 160, 320] # WRN-28-5
        filters = [16, 160, 320, 640] # WRN-28-10
        #filters = [16, 256, 512, 1024] # WRN-28-16
with tf.variable_scope('unit_1_0'):
x = res_func(x, filters[0], filters[1],
self._stride_arr(strides[0]),
activate_before_residual[0])
for i in range(1, 5):
with tf.variable_scope('unit_1_%d' % i):
x = res_func(x, filters[1], filters[1], self._stride_arr(1),
False)
with tf.variable_scope('unit_2_0'):
x = res_func(x, filters[1], filters[2],
self._stride_arr(strides[1]),
activate_before_residual[1])
for i in range(1, 5):
with tf.variable_scope('unit_2_%d' % i):
x = res_func(x, filters[2], filters[2], self._stride_arr(1),
False)
with tf.variable_scope('unit_3_0'):
x = res_func(x, filters[2], filters[3],
self._stride_arr(strides[2]),
activate_before_residual[2])
for i in range(1, 5):
with tf.variable_scope('unit_3_%d' % i):
x = res_func(x, filters[3], filters[3], self._stride_arr(1),
False)
with tf.variable_scope('unit_last'):
x = self._batch_norm('final_bn', x)
x = self._relu(x, 0.1)
x = self._global_avg_pool(x)
with tf.variable_scope('logit'):
self.pre_softmax = self._fully_connected(x, num_classes)
self.weight_decay_loss = self._decay()
return self.pre_softmax, x
def _stride_arr(self, stride):
"""Map a stride scalar to the stride array for tf.nn.conv2d."""
return [1, stride, stride, 1]
def _batch_norm(self, name, x):
"""Batch normalization."""
with tf.name_scope(name):
return tf.contrib.layers.batch_norm(
inputs=x,
decay=.9,
center=True,
scale=True,
activation_fn=None,
updates_collections=tf.GraphKeys.UPDATE_OPS,
is_training=self.is_train)
def _residual(self,
x,
in_filter,
out_filter,
stride,
activate_before_residual=False):
"""Residual unit with 2 sub layers."""
if activate_before_residual:
with tf.variable_scope('shared_activation'):
x = self._batch_norm('init_bn', x)
x = self._relu(x, 0.1)
orig_x = x
else:
with tf.variable_scope('residual_only_activation'):
orig_x = x
x = self._batch_norm('init_bn', x)
x = self._relu(x, 0.1)
with tf.variable_scope('sub1'):
x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
with tf.variable_scope('sub2'):
x = self._batch_norm('bn2', x)
x = self._relu(x, 0.1)
x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
with tf.variable_scope('sub_add'):
if in_filter != out_filter:
orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
orig_x = tf.pad(orig_x, [[0, 0], [0, 0], [0, 0],
[(out_filter - in_filter) // 2,
(out_filter - in_filter) // 2]])
x += orig_x
tf.logging.debug('image after unit %s', x.get_shape())
return x
def _decay(self):
"""L2 weight decay loss."""
costs = []
for var in tf.trainable_variables():
if var.op.name.find('DW') > 0:
costs.append(tf.nn.l2_loss(var))
return tf.add_n(costs)
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
"""Convolution."""
with tf.variable_scope(name):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
'DW', [filter_size, filter_size, in_filters, out_filters],
tf.float32,
initializer=tf.random_normal_initializer(stddev=np.sqrt(2.0 /
n)))
return tf.nn.conv2d(x, kernel, strides, padding='SAME')
def _relu(self, x, leakiness=0.0):
"""Relu, with optional leaky support."""
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _fully_connected(self, x, out_dim):
"""FullyConnected layer for final output."""
num_non_batch_dimensions = len(x.shape)
prod_non_batch_dimensions = 1
for ii in range(num_non_batch_dimensions - 1):
prod_non_batch_dimensions *= int(x.shape[ii + 1])
x = tf.reshape(x, [tf.shape(x)[0], -1])
w = tf.get_variable(
'DW', [prod_non_batch_dimensions, out_dim],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable('biases', [out_dim],
initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
def _global_avg_pool(self, x):
assert x.get_shape().ndims == 4
return tf.reduce_mean(x, [1, 2])
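# Usage sketch (TF1 graph mode, mirroring the training/eval scripts above):
#   is_train = tf.placeholder(tf.bool, name='is_train')
#   x_pl = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
#   logits, features = Model(is_train).build_model(images=x_pl, num_classes=10)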
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from functools import partial
from typing import Tuple
import torch
from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import time
import sys
import numpy as np
from bat_utils import *
from wide_resnet import Model
from utils import build_dataloader_from_arrays
from active_tests import decision_boundary_binarization as dbb
from argparse_utils import DecisionBoundaryBinarizationSettings
def load_data(FLAGS):
# load data
if FLAGS.dataset == 'SVHN':
raise ValueError("not supported")
elif FLAGS.dataset == 'CIFAR':
if FLAGS.num_classes == 10:
dataset = load_cifar10_data('data/cifar-10-batches-py/')
elif FLAGS.num_classes == 20:
dataset = load_cifar100_data('cifar100_data', is_fine=False)
elif FLAGS.num_classes == 100:
dataset = load_cifar100_data('cifar100_data', is_fine=True)
else:
raise ValueError('Number of classes not valid!')
train_images = dataset['train_images']
train_labels = dataset['train_labels']
test_images = dataset['test_images']
test_labels = dataset['test_labels']
else:
raise ValueError('Dataset not valid!')
return train_images, train_labels, test_images, test_labels
def load_model(FLAGS):
x_pl = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name='x')
y_pl = tf.placeholder(tf.int64, shape=[None], name='y')
is_train = tf.placeholder(tf.bool, name='is_train')
model = Model(is_train)
x_transformed = x_pl * 2.0 - 1.0
fe_logits, features = model.build_model(images=x_transformed,
num_classes=FLAGS.num_classes)
saver = tf.train.Saver(max_to_keep=100)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
saver.restore(sess, FLAGS.ckpt_path)
print('restored checkpoint from %s' % FLAGS.ckpt_path)
# create binary classifier
bro_w = tf.get_variable(
'DW', [features.shape[-1], 2],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
bro_b = tf.get_variable('biases', [2], initializer=tf.constant_initializer())
bro_w_pl = tf.placeholder(tf.float32, shape=[features.shape[-1], 2])
bro_b_pl = tf.placeholder(tf.float32, shape=[2])
bro_w_set_weight = bro_w.assign(bro_w_pl)
bro_b_set_weight = bro_b.assign(bro_b_pl)
logits = tf.nn.xw_plus_b(features, bro_w, bro_b)
prob = tf.nn.softmax(logits)
correct = tf.cast(tf.equal(tf.argmax(logits, axis=1), y_pl), tf.float32)
accuracy = tf.reduce_mean(correct)
return sess, (x_pl, y_pl, is_train, logits, fe_logits, features, accuracy), \
(bro_w_pl, bro_b_pl, bro_w_set_weight, bro_b_set_weight)
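# The 2-way readout above (bro_w / bro_b) is not trained here: the decision
# boundary binarization test fits a binary linear layer per test point, and
# run_attack() below copies its weights into the graph before attacking.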
def setup_attack(logits, x_pl, y_pl, FLAGS):
xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
labels=y_pl)
# loss for adversarial attack
if FLAGS.loss_type == 'xent':
if FLAGS.targeted:
loss_att = tf.reduce_sum(xent)
else:
loss_att = -tf.reduce_sum(xent)
elif FLAGS.loss_type == 'CW':
y_loss = tf.one_hot(y_pl, FLAGS.num_classes)
    # CW margin loss on the true-class logit vs. the largest other logit
    true_logit = tf.reduce_sum(y_loss * logits, axis=1)
    other_logit = tf.reduce_max((1 - y_loss) * logits - y_loss * 1e4, axis=1)
    if FLAGS.targeted:
      raise ValueError("not supported")
    else:
      loss_att = tf.reduce_sum(tf.maximum(true_logit - other_logit + FLAGS.margin, 0))
else:
raise ValueError('loss type not supported!')
grad, = tf.gradients(loss_att, x_pl)
return grad
def adv_attack(sess, grad, x_pl, y_pl, is_train, x_batch, y_batch, FLAGS):
epsilon = FLAGS.epsilon / 255.0
step_size = FLAGS.step_size / 255.0
if not FLAGS.targeted:
y_att = np.copy(y_batch)
else:
raise ValueError("targeted mode not supported")
# randomly perturb the original images
if FLAGS.random_start:
x = x_batch + np.random.uniform(-epsilon, epsilon, x_batch.shape)
else:
x = np.copy(x_batch)
for i in range(FLAGS.num_steps):
grad_val = sess.run(grad,
feed_dict={
x_pl: x,
y_pl: y_att,
is_train: False
})
x = x - step_size * np.sign(grad_val)
x = np.clip(x, x_batch - epsilon, x_batch + epsilon)
x = np.clip(x, 0, 1.0)
return x
def parse_args():
tf.flags.DEFINE_string('ckpt_path', '', '')
tf.flags.DEFINE_string('dataset', 'CIFAR', '')
tf.flags.DEFINE_integer('num_classes', 10, '')
tf.flags.DEFINE_integer('batch_size', 100, '')
tf.flags.DEFINE_string('loss_type', 'xent', '')
tf.flags.DEFINE_float('margin', 50.0, '')
tf.flags.DEFINE_float('epsilon', 8.0, '')
tf.flags.DEFINE_integer('num_steps', 10, '')
tf.flags.DEFINE_float('step_size', 2.0, '')
tf.flags.DEFINE_boolean('random_start', False, '')
tf.flags.DEFINE_boolean('targeted', False, '')
tf.flags.DEFINE_integer('n_samples', 2048, '')
tf.flags.DEFINE_integer('n_boundary_points', 1, '')
tf.flags.DEFINE_integer('n_inner_points', 999, '')
tf.flags.DEFINE_boolean('sample_from_corners', False, '')
FLAGS = tf.flags.FLAGS
# print(FLAGS.flag_values_dict())
return FLAGS
def run_attack(m, l, sess, logits, x_pl, is_train, bro_w_pl, bro_b_pl,
bro_w_assign, bro_b_assign, attack_fn):
linear_layer = m[-1]
del m
sess.run(bro_w_assign, {bro_w_pl: linear_layer.weight.data.numpy().T})
sess.run(bro_b_assign, {bro_b_pl: linear_layer.bias.data.numpy()})
for x, y in l:
x, y = x.numpy(), y.numpy()
x = x.transpose((0, 2, 3, 1))
x_batch_adv = attack_fn(x, y)
adv_logits: np.ndarray = sess.run(logits,
feed_dict={
x_pl: x_batch_adv,
is_train: False
})
is_adv: np.ndarray = adv_logits.argmax(-1) != y
return is_adv, (torch.tensor(x_batch_adv.transpose((0, 3, 1, 2))),
torch.tensor(adv_logits))
def main():
FLAGS = parse_args()
_, _, test_images, test_labels = load_data(FLAGS)
print(FLAGS.flag_values_dict())
  # map the data from [-1, 1] back to [0, 1], since the normalization to
  # [-1, 1] now happens inside the model (see load_model)
test_images = (test_images + 1.0) / 2.0
# subsample test data
if FLAGS.n_samples == -1:
FLAGS.n_samples = len(test_images)
idxs = np.arange(len(test_images))
np.random.shuffle(idxs)
test_images, test_labels = test_images[idxs], test_labels[idxs]
test_images = test_images.transpose((0, 3, 1, 2))
test_loader = build_dataloader_from_arrays(test_images, test_labels,
FLAGS.batch_size)
sess, (x_pl, y_pl, is_train, logits, fe_logits, features, accuracy), \
(bro_w_pl, bro_b_pl, bro_w_set_weight, bro_b_set_weight) = load_model(FLAGS)
attack_grad = setup_attack(logits, x_pl, y_pl, FLAGS)
attack_fn = lambda x, y: adv_attack(sess, attack_grad, x_pl, y_pl, is_train,
x, y, FLAGS)
def feature_extractor_forward_pass(x, features_and_logits: bool = False,
features_only: bool = False):
if features_and_logits:
assert not features_only, "Only one of the flags must be set."
if features_and_logits:
f, l = sess.run(
(features, fe_logits),
feed_dict={x_pl: x.transpose(0, 2, 3, 1), is_train: False})
return f, l
elif features_only:
return sess.run(
features,
feed_dict={x_pl: x.transpose(0, 2, 3, 1), is_train: False})
else:
return sess.run(
fe_logits,
feed_dict={x_pl: x.transpose(0, 2, 3, 1), is_train: False})
feature_extractor = TensorFlow1ToPyTorchWrapper(
logit_forward_pass=feature_extractor_forward_pass,
logit_forward_and_backward_pass=None,
)
scores_logit_differences_and_validation_accuracies = \
dbb.interior_boundary_discrimination_attack(
feature_extractor,
test_loader,
attack_fn=lambda m, l, kwargs: partial(run_attack,
sess=sess, logits=logits, x_pl=x_pl, is_train=is_train,
bro_w_pl=bro_w_pl, bro_b_pl=bro_b_pl, bro_w_assign=bro_w_set_weight,
bro_b_assign=bro_b_set_weight,
attack_fn=attack_fn)(m, l),
linearization_settings=DecisionBoundaryBinarizationSettings(
epsilon=FLAGS.epsilon / 255.0,
norm="linf",
lr=10000,
n_boundary_points=FLAGS.n_boundary_points,
n_inner_points=FLAGS.n_inner_points,
adversarial_attack_settings=None,
optimizer="sklearn"
),
n_samples=FLAGS.n_samples,
device="cpu",
n_samples_evaluation=200,
n_samples_asr_evaluation=200,
# rescale_logits="adaptive",
sample_training_data_from_corners=FLAGS.sample_from_corners,
decision_boundary_closeness=0.9999,
# args.num_samples_test * 10
)
print(dbb.format_result(scores_logit_differences_and_validation_accuracies,
FLAGS.n_samples))
if __name__ == "__main__":
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# disable tf logging
# some of these might have to be commented out to use verbose=True in the
# adaptive attack
import os
from cleverhans.utils_keras import KerasModelWrapper
from ml_loo import collect_layers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import warnings
import logging
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
try:
import cPickle as pickle
except:
import _pickle as pickle
import numpy as np
from sklearn.linear_model import LogisticRegressionCV
from tqdm import tqdm
from build_model import ImageModel
from load_data import ImageData, split_data
from attack_model import BIM, CW, FMA
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_name', type = str,
choices = ['cifar10'],
default = 'cifar10')
parser.add_argument('--model_name', type = str,
choices = ['resnet'],
default = 'resnet')
parser.add_argument(
'--attack',
type = str,
choices = ['cw', 'bim', 'bim2', 'fma'],
default = 'cw'
)
parser.add_argument("--batch-size", type=int, default=50)
parser.add_argument("--detector-attack", choices=['cw', 'bim', 'bim2'], default='cw')
parser.add_argument("--n-samples", type=int, default=500)
# default equals value for FPPR5; obtained from train_and_evaluate.py
parser.add_argument("--detector-threshold", type=float, default=0.6151412488088068)
args = parser.parse_args()
dict_a = vars(args)
args.data_model = args.dataset_name + args.model_name
# load detector
with open(f"{args.data_model}/models/ml_loo_{args.detector_attack}_lr.pkl", "rb") as f:
lr = pickle.load(f)
print('Loading dataset...')
dataset = ImageData(args.dataset_name)
model = ImageModel(args.model_name, args.dataset_name, train = False, load = True)
if args.dataset_name == 'cifar10':
X_train, Y_train, X_test, Y_test = split_data(dataset.x_val,
dataset.y_val, model, num_classes = 10,
split_rate = 0.8, sample_per_class = 1000)
else:
raise NotImplementedError()
if args.n_samples == -1:
args.n_samples = len(X_test)
X_test = X_test[:args.n_samples]
Y_test = Y_test[:args.n_samples]
from ml_loo import get_ml_loo_features
if args.model_name == 'resnet':
interested_layers = [14,24,35,45,56,67,70]
else:
raise ValueError()
# only relevant feature used by logistic regression model
stat_names = ['quantile']
reference = - dataset.x_train_mean
get_ml_loo_features_ = lambda x: get_ml_loo_features(model, x, reference, interested_layers, stat_names=stat_names)[:, 0]
detector = lambda x: lr.predict_proba(get_ml_loo_features_(x))[:, 1]
batch_size = args.batch_size
detector_threshold = args.detector_threshold
if args.attack == 'cw':
if args.dataset_name in ['cifar10']:
if args.model_name == 'resnet':
attack_model = CW(
model,
source_samples = batch_size,
binary_search_steps = 5,
cw_learning_rate = 1e-2,
confidence = 0,
attack_iterations = 100,
attack_initial_const = 1e-2,
)
elif args.attack == "bim":
if args.dataset_name in ['cifar10']:
if args.model_name == 'resnet':
attack_model = BIM(
KerasModelWrapper(model.model),
model.sess,
model.input_ph,
model.num_classes,
attack_iterations = 100,
epsilon=0.03,
learning_rate=2.5 * 0.03 / 100,
random_init=True
)
elif args.attack == "bim2":
if args.dataset_name in ['cifar10']:
if args.model_name == 'resnet':
attack_model = BIM(
KerasModelWrapper(model.model),
model.sess,
model.input_ph,
model.num_classes,
attack_iterations = 10,
epsilon=0.03,
learning_rate=2.5 * 0.03 / 10,
random_init=True
)
elif args.attack == "fma":
if args.dataset_name in ['cifar10']:
if args.model_name == 'resnet':
target_samples = []
for y in range(10):
target_samples.append(X_train[np.argmax(Y_train == y)])
target_samples = np.array(target_samples)
attack_model = FMA(
model,
KerasModelWrapper(model.model),
model.sess,
model.input_ph,
model.num_classes,
target_samples=target_samples,
reference=reference,
features=collect_layers(model, interested_layers),
attack_iterations = 500,
epsilon=0.03,
learning_rate=4 * 0.03 / 100,
num_random_features=3100,
random_init=True
)
n_batches = int(np.ceil(len(X_test) / batch_size))
all_is_adv = []
all_is_detected = []
all_is_adv_not_detected = []
pbar = tqdm(range(n_batches))
for i in pbar:
x = X_test[i * batch_size:(i+1) * batch_size]
y = Y_test[i * batch_size:(i+1) * batch_size]
# undo one-hot encoding
y = y.argmax(-1)
x_adv = attack_model.attack(x)
y_adv = model.predict(x_adv, verbose=False, logits=False).argmax(-1)
is_adv = y_adv != y
is_detected = detector(x_adv) > detector_threshold
all_is_adv.append(is_adv)
all_is_detected.append(is_detected)
is_adv_not_detected = np.logical_and(is_adv, ~is_detected)
all_is_adv_not_detected.append(is_adv_not_detected)
pbar.set_description(
f"ASR (w/o detector): {np.mean(np.concatenate(all_is_adv))} "
f"ASR (w/ detector): {np.mean(np.concatenate(all_is_adv_not_detected))}")
all_is_adv = np.concatenate(all_is_adv)
all_is_detected = np.concatenate(all_is_detected)
all_is_adv_not_detected = np.concatenate(all_is_adv_not_detected)
print("ASR (w/o detector):", np.mean(all_is_adv))
print("ASR (w/ detector):", np.mean(all_is_adv_not_detected))
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import os
from keras.utils import to_categorical
import math
import time
import sys
from build_model import ImageModel
from load_data import ImageData, split_data
import pickle as pkl
from keras.models import Model
from scipy.stats import kurtosis, skew
from scipy.spatial.distance import pdist
from sklearn.linear_model import LogisticRegressionCV
from sklearn.metrics import precision_recall_curve, roc_curve, auc, average_precision_score
import matplotlib
import matplotlib.pyplot as plt
def con(score):
# score (n, d)
score = score.reshape(len(score), -1)
score_mean = np.mean(score, -1, keepdims = True)
c_score = score - score_mean
c_score = np.abs(c_score)
return np.mean(c_score, axis = -1)
def mad(score):
pd = []
for i in range(len(score)):
d = score[i]
median = np.median(d)
abs_dev = np.abs(d - median)
med_abs_dev = np.median(abs_dev)
pd.append(med_abs_dev)
pd = np.array(pd)
return pd
def med_pdist(score):
pd = []
for i in range(len(score)):
d = score[i]
k = np.median(pdist(d.reshape(-1,1)))
pd.append(k)
pd = np.array(pd)
return pd
def pd(score):
pd = []
for i in range(len(score)):
d = score[i]
k = np.mean(pdist(d.reshape(-1,1)))
pd.append(k)
pd = np.array(pd)
return pd
def neg_kurtosis(score):
k = []
for i in range(len(score)):
di = score[i]
ki = kurtosis(di, nan_policy = 'raise')
k.append(ki)
k = np.array(k)
return -k
def quantile(score):
# score (n, d)
score = score.reshape(len(score), -1)
score_75 = np.percentile(score, 75, -1)
score_25 = np.percentile(score, 25, -1)
score_qt = score_75 - score_25
return score_qt
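# The interquartile range above is the dispersion statistic the ML-LOO
# detector in these scripts relies on (cf. stat_names=['quantile'] in the
# evaluation script).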
def calculate(score, stat_name):
if stat_name == 'variance':
results = np.var(score, axis = -1)
elif stat_name == 'std':
results = np.std(score, axis = -1)
elif stat_name == 'pdist':
results = pd(score)
elif stat_name == 'con':
results = con(score)
elif stat_name == 'med_pdist':
results = med_pdist(score)
elif stat_name == 'kurtosis':
results = neg_kurtosis(score)
elif stat_name == 'skewness':
results = -skew(score, axis = -1)
elif stat_name == 'quantile':
results = quantile(score)
elif stat_name == 'mad':
results = mad(score)
#print('results.shape', results.shape)
return results
def collect_layers(model, interested_layers):
if model.framework == 'keras':
outputs = [layer.output for layer in model.layers]
elif model.framework == 'tensorflow':
outputs = model.layers
outputs = [output for i, output in enumerate(outputs) if i in interested_layers]
#print(outputs)
features = []
for output in outputs:
#print(output)
if len(output.get_shape())== 4:
features.append(
tf.reduce_mean(output, axis = (1, 2))
)
else:
features.append(output)
return features
def evaluate_features(x, model, features, batch_size=500):
x = np.array(x)
if len(x.shape) == 3:
_x = np.expand_dims(x, 0)
else:
_x = x
num_iters = int(math.ceil(len(_x) * 1.0 / batch_size))
outs = []
for i in range(num_iters):
x_batch = _x[i * batch_size: (i+1) * batch_size]
out = model.sess.run(features,
feed_dict = {model.input_ph: x_batch})
outs.append(out)
num_layers = len(outs[0])
outputs = []
for l in range(num_layers):
outputs.append(np.concatenate([outs[s][l] for s in range(len(outs))]))
  # example per-layer output shapes (CIFAR-10 ResNet, 3073 LOO variants):
  # (3073, 64), (3073, 64), (3073, 128), (3073, 128), (3073, 256),
  # (3073, 256), (3073, 10), (3073, 1)
outputs = np.concatenate(outputs, axis = 1)
prob = outputs[:,-model.num_classes:]
label = np.argmax(prob[-1])
#print('outputs', outputs.shape)
#print('prob[:, label]', np.expand_dims(prob[:, label], axis = 1).shape)
outputs = np.concatenate([outputs, np.expand_dims(prob[:, label], axis = 1)], axis = 1)
return outputs
def loo_ml_instance(sample, reference, model, features, batch_size=500):
h,w,c = sample.shape
sample = sample.reshape(-1)
reference = reference.reshape(-1)
  st = time.time()
  # use plain `bool`: np.bool is deprecated and was removed from NumPy
  positions = np.ones((h*w*c + 1, h*w*c), dtype = bool)
for i in range(h*w*c):
positions[i, i] = False
data = np.where(positions, sample, reference)
data = data.reshape((-1, h, w, c))
features_val = evaluate_features(data, model, features, batch_size=batch_size) # (3072+1, 906+1)
st1 = time.time()
return features_val
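# Note on loo_ml_instance: `positions` encodes h*w*c + 1 variants of the
# sample; row i replaces feature (pixel/channel) i with the reference value,
# and the final all-True row leaves the sample unchanged, so the returned
# array holds one column of features per leave-one-out perturbation plus the
# original.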
def get_ml_loo_features(model, x, reference, interested_layers, batch_size=3100,
stat_names=['std', 'variance', 'con', 'kurtosis', 'skewness', 'quantile', 'mad']):
# copied from generate_ml_loo_features
features = collect_layers(model, interested_layers)
all_features = []
for sample in x:
features_val = loo_ml_instance(sample, reference, model, features,
batch_size=batch_size)
features_val = np.transpose(features_val)[:,:-1]
single_feature = []
for stat_name in stat_names:
single_feature.append(calculate(features_val, stat_name))
single_feature = np.array(single_feature)
all_features.append(single_feature)
all_features = np.array(all_features)
return all_features
def generate_ml_loo_features(args, data_model, reference, model, x, interested_layers, batch_size=500):
# print(args.attack)
# x = load_examples(data_model, attack)
features = collect_layers(model, interested_layers)
cat = {'original':'ori', 'adv':'adv', 'noisy':'noisy'}
dt = {'train':'train', 'test':'test'}
stat_names = ['std', 'variance', 'con', 'kurtosis', 'skewness', 'quantile', 'mad']
combined_features = {data_type: {} for data_type in ['test', 'train']}
for data_type in ['test', 'train']:
print('data_type', data_type)
for category in ['original', 'adv']:
print('category', category)
all_features = []
for i, sample in enumerate(x[data_type][category]):
print('Generating ML-LOO for {}th sample...'.format(i))
features_val = loo_ml_instance(sample, reference, model, features, batch_size=batch_size)
# (3073, 907)
#print('features_val.shape', features_val.shape)
features_val = np.transpose(features_val)[:,:-1]
#print('features_val.shape', features_val.shape)
# (906, 3073)
single_feature = []
for stat_name in stat_names:
#print('stat_name', stat_name)
single_feature.append(calculate(features_val, stat_name))
single_feature = np.array(single_feature)
#print('single_feature', single_feature.shape)
# (k, 906)
all_features.append(single_feature)
print('all_features', np.array(all_features).shape)
combined_features[data_type][category] = np.array(all_features)
np.save('{}/data/{}_{}_{}_{}_{}.npy'.format(
data_model,
args.data_sample,
dt[data_type],
cat[category],
args.attack,
args.det),
combined_features[data_type][category])
return combined_features
def compute_stat_single_layer(output):
# l2dist = pdist(output)
# l1dist = pdist(output, 'minkowski', p = 1)
# sl2dist = pdist(X, 'seuclidean')
variance = np.sum(np.var(output, axis = 0))
# on = np.sum(np.linalg.norm(output, ord = 1, axis = 0))
con = np.sum(np.linalg.norm(output - np.mean(output, axis = 0), ord = 1, axis = 0))
return variance, con
def load_features(data_model, attacks):
  def softmax(x, axis):
    """Compute numerically stable softmax values for each set of scores in x."""
    e_x = np.exp(x - np.max(x, axis = axis, keepdims = True))
    return e_x / e_x.sum(axis=axis, keepdims = True)
cat = {'original':'', 'adv':'_adv', 'noisy':'_noisy'}
dt = {'train':'_train', 'test':''}
features = {attack: {'train': {}, 'test': {}} for attack in attacks}
normalizer = {}
for attack in attacks:
for data_type in ['train', 'test']:
for category in ['original', 'adv']:
print('Loading data...')
feature = np.load('{}/data/{}{}{}_{}_{}.npy'.format(data_model,'x_val200',
dt[data_type],
cat[category],
attack,
'ml_loo')) # [n, 3073, ...]
n = len(feature)
print('Processing...')
nums = [0,64,64,128,128,256,256,10]
splits = np.cumsum(nums) # [0,64,128,...]
processed = []
for j, s in enumerate(splits):
if j < len(splits) - 1:
separated = feature[:, :-1, s:splits[j+1]]
if j == len(splits) - 2:
separated = softmax(separated, axis = -1)
dist = np.var(separated, axis = 1) # [n, ...]
if data_type == 'train' and category == 'original' and attack == 'linfpgd':
avg_dist = np.mean(dist, axis = 0)
normalizer[j] = avg_dist
# dist /= normalizer[j]
dist = np.sqrt(dist)
# max_dist = np.max(dist, axis = -1)
print(np.mean(dist))
processed.append(dist.T)
processed = np.concatenate(processed, axis = 0).T
# processed = np.concatenate(processed, axis = )
print(processed.shape)
features[attack][data_type][category] = processed
return features
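if __name__ == '__main__':
  # Quick self-check (a sketch): run the dispersion statistics above on
  # random per-dimension LOO scores; verifies output shapes only, no model
  # or dataset required.
  rng = np.random.RandomState(0)
  scores = rng.randn(5, 128)  # 5 feature maps, 128 LOO scores each
  for name in ['std', 'variance', 'con', 'kurtosis',
               'skewness', 'quantile', 'mad']:
    assert calculate(scores, name).shape == (5,)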
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
import logging
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import flags
from cleverhans.attacks import CarliniWagnerL2
from cleverhans.dataset import MNIST
from cleverhans.loss import CrossEntropy
from cleverhans.utils import grid_visual, AccuracyReport
from cleverhans.utils import set_log_level
from cleverhans.utils_tf import model_eval, tf_model_load
from cleverhans.train import train
from cleverhans.utils_keras import KerasModelWrapper
from build_model import ImageModel
from load_data import ImageData, split_data
import pickle as pkl
from attack_model import Attack, CW
import scipy
from ml_loo import generate_ml_loo_features
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_name', type = str,
choices = ['cifar10'],
default = 'cifar10')
parser.add_argument('--model_name', type = str,
choices = ['resnet'],
default = 'resnet')
parser.add_argument('--data_sample', type = str,
choices = ['x_train', 'x_val', 'x_val200'],
default = 'x_val200')
parser.add_argument(
'--attack',
type = str,
choices = ['cw', 'bim', 'bim2'],
default = 'cw'
)
parser.add_argument("--batch-size", default=500, type=int)
parser.add_argument(
'--det',
type = str,
choices = ['ml_loo'],
default = 'ml_loo'
)
args = parser.parse_args()
dict_a = vars(args)
data_model = args.dataset_name + args.model_name
print('Loading dataset...')
dataset = ImageData(args.dataset_name)
model = ImageModel(args.model_name, args.dataset_name, train = False, load = True)
###########################################################
# Loading original, adversarial and noisy samples
###########################################################
print('Loading original, adversarial and noisy samples...')
X_test = np.load('{}/data/{}_{}_{}.npy'.format(data_model, args.data_sample, args.attack, 'ori'))
X_test_adv = np.load('{}/data/{}_adv_{}_{}.npy'.format(data_model, args.data_sample, args.attack, 'ori'))
X_train = np.load('{}/data/{}_train_{}_{}.npy'.format(data_model, args.data_sample, args.attack, 'ori'))
X_train_adv = np.load('{}/data/{}_train_adv_{}_{}.npy'.format(data_model, args.data_sample, args.attack, 'ori'))
Y_test = model.predict(X_test)
print("X_test_adv: ", X_test_adv.shape)
x = {
'train': {
'original': X_train,
'adv': X_train_adv,
},
'test': {
'original': X_test,
'adv': X_test_adv,
},
}
#################################################################
# Extracting features for original, adversarial and noisy samples
#################################################################
cat = {'original':'ori', 'adv':'adv', 'noisy':'noisy'}
dt = {'train':'train', 'test':'test'}
if args.det in ['ml_loo']:
if args.model_name == 'resnet':
interested_layers = [14,24,35,45,56,67,70]
print('extracting layers ', interested_layers)
reference = - dataset.x_train_mean
combined_features = generate_ml_loo_features(args, data_model, reference, model, x, interested_layers, batch_size=args.batch_size)
for data_type in ['test', 'train']:
for category in ['original', 'adv']:
np.save('{}/data/{}_{}_{}_{}_{}.npy'.format(
data_model,
args.data_sample,
dt[data_type],
cat[category],
args.attack,
args.det),
combined_features[data_type][category])
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains a ResNet on the CIFAR10 dataset.
ResNet v1
[a] Deep Residual Learning for Image Recognition
https://arxiv.org/pdf/1512.03385.pdf
ResNet v2
[b] Identity Mappings in Deep Residual Networks
https://arxiv.org/pdf/1603.05027.pdf
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
import numpy as np
import os
def lr_schedule(epoch):
"""Learning Rate Schedule
Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
Called automatically every epoch as part of callbacks during training.
# Arguments
        epoch (int): the current (0-indexed) epoch
# Returns
lr (float32): learning rate
"""
lr = 1e-3
if epoch > 180:
lr *= 0.5e-3
elif epoch > 160:
lr *= 1e-3
elif epoch > 120:
lr *= 1e-2
elif epoch > 80:
lr *= 1e-1
print('Learning rate: ', lr)
return lr
def lr_schedule_cifar100(epoch):
"""Learning Rate Schedule
Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
Called automatically every epoch as part of callbacks during training.
# Arguments
        epoch (int): the current (0-indexed) epoch
# Returns
lr (float32): learning rate
"""
lr = 1e-4
if epoch > 180:
lr *= 0.5e-3
elif epoch > 160:
lr *= 1e-3
elif epoch > 120:
lr *= 1e-2
elif epoch > 80:
lr *= 1e-1
print('Learning rate: ', lr)
return lr
def lr_schedule_sgd(epoch):
    decay = 2 if epoch >= 122 else (1 if epoch >= 81 else 0)
lr = 1e-1 * 0.1 ** decay
print('Learning rate: ', lr)
return lr
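# Illustrative spot-check (not part of the original file) of the SGD step
# schedule above: the rate drops by 10x at epochs 81 and 122.
def _demo_lr_schedule_sgd():
    import math
    assert math.isclose(lr_schedule_sgd(0), 1e-1)
    assert math.isclose(lr_schedule_sgd(81), 1e-2)
    assert math.isclose(lr_schedule_sgd(122), 1e-3)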
def resnet_layer(inputs,
num_filters=16,
kernel_size=3,
strides=1,
activation='relu',
batch_normalization=True,
conv_first=True):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = Conv2D(num_filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
else:
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
x = conv(x)
return x
def resnet_v2(input_shape, depth, num_classes=10):
"""ResNet Version 2 Model builder [b]
    Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D, also known as a
    bottleneck layer.
First shortcut connection per layer is 1 x 1 Conv2D.
Second and onwards shortcut connection is identity.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filter maps is
    doubled. Within each stage, the layers have the same number of filters and
    the same feature map sizes.
    Feature map sizes:
conv1 : 32x32, 16
stage 0: 32x32, 64
stage 1: 16x16, 128
stage 2: 8x8, 256
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 9 != 0:
        raise ValueError('depth should be 9n+2 (e.g. 56 or 110 in [b])')
# Start model definition.
num_filters_in = 16
num_res_blocks = int((depth - 2) / 9)
inputs = Input(shape=input_shape)
# v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths
x = resnet_layer(inputs=inputs,
num_filters=num_filters_in,
conv_first=True)
# Instantiate the stack of residual units
for stage in range(3):
for res_block in range(num_res_blocks):
activation = 'relu'
batch_normalization = True
strides = 1
if stage == 0:
num_filters_out = num_filters_in * 4
if res_block == 0: # first layer and first stage
activation = None
batch_normalization = False
else:
num_filters_out = num_filters_in * 2
if res_block == 0: # first layer but not first stage
strides = 2 # downsample
# bottleneck residual unit
y = resnet_layer(inputs=x,
num_filters=num_filters_in,
kernel_size=1,
strides=strides,
activation=activation,
batch_normalization=batch_normalization,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_in,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_out,
kernel_size=1,
conv_first=False)
if res_block == 0:
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(inputs=x,
num_filters=num_filters_out,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = keras.layers.add([x, y])
num_filters_in = num_filters_out
# Add classifier on top.
# v2 has BN-ReLU before Pooling
x = BatchNormalization()(x)
x = Activation('relu')(x)
pool_size = int(x.get_shape()[1])
x = AveragePooling2D(pool_size=pool_size)(x)
y = Flatten()(x)
outputs = Dense(num_classes,
activation=None,
kernel_initializer='he_normal')(y)
outputs = Activation('softmax')(outputs)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
return model, inputs, outputs
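# Illustrative usage (never called here): build the depth-20 ResNet v2 used
# elsewhere in this codebase for CIFAR-10-shaped inputs.
def _demo_resnet_v2():
    model, inputs, outputs = resnet_v2(input_shape=(32, 32, 3), depth=20)
    model.summary()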
def create_resnet_generator(x_train):
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
# set input mean to 0 over the dataset
featurewise_center=False,
# set each sample mean to 0
samplewise_center=False,
# divide inputs by std of dataset
featurewise_std_normalization=False,
# divide each input by its std
samplewise_std_normalization=False,
# apply ZCA whitening
zca_whitening=False,
# epsilon for ZCA whitening
zca_epsilon=1e-06,
# randomly rotate images in the range (deg 0 to 180)
rotation_range=0,
# randomly shift images horizontally
width_shift_range=0.1,
# randomly shift images vertically
height_shift_range=0.1,
# set range for random shear
shear_range=0.,
# set range for random zoom
zoom_range=0.,
# set range for random channel shifts
channel_shift_range=0.,
# set mode for filling points outside the input boundaries
fill_mode='nearest',
# value used for fill_mode = "constant"
cval=0.,
# randomly flip images
horizontal_flip=True,
# randomly flip images
vertical_flip=False,
# set rescaling factor (applied before any other transformation)
rescale=None,
# set function that will be applied on each input
preprocessing_function=None,
# image data format, either "channels_first" or "channels_last"
data_format=None,
# fraction of images reserved for validation (strictly between 0 and 1)
validation_split=0.0)
# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
return datagen
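# Illustrative usage on toy data (not part of the original training script):
# fit the featurewise generator, then stream augmented batches from it.
def _demo_create_resnet_generator():
    x_train = np.zeros((8, 32, 32, 3), dtype='float32')
    datagen = create_resnet_generator(x_train)
    batch = next(datagen.flow(x_train, batch_size=4))
    assert batch.shape == (4, 32, 32, 3)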
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
try:
import cPickle as pickle
except ImportError:
import _pickle as pickle
import logging
import os
import numpy as np
import time
import os
import math
import pickle as pkl
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from sklearn.preprocessing import scale, MinMaxScaler, StandardScaler
from sklearn.metrics import accuracy_score, precision_score, recall_score
from scipy.stats import pearsonr
from scipy.stats import kurtosis, skew
from scipy.spatial.distance import pdist
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression
from sklearn.metrics import precision_recall_curve, roc_curve, auc, average_precision_score
color_dict = {
'ml_loo': 'red',
}
linestyles = {
'ml_loo': '-',
}
labels = {
'ml_loo': 'ML-LOO',
}
labels_attack = {
'cw': 'C&W',
'bim': 'PGD',
'bim2': 'PGD (fewer steps)',
}
labels_data = {
'cifar10': 'CIFAR-10',
}
labels_model = {
'resnet': 'ResNet',
}
def load_data(args, attack, det, magnitude = 0.0):
x, y = {}, {}
for data_type in ['train', 'test']:
if det == 'ml_loo':
data_ori = np.load('{}/data/{}_{}_{}_{}_{}.npy'.format(
args.data_model,
args.data_sample,
data_type,
'ori',
attack,
'ml_loo'))
data_adv = np.load('{}/data/{}_{}_{}_{}_{}.npy'.format(
args.data_model,
args.data_sample,
data_type,
'adv',
attack,
'ml_loo'))
d = len(data_ori)
print('detect using {}'.format(det))
print('using adv only')
print('data_ori', data_ori.shape)
# Use only IQR features (5th dimension).
data_ori = data_ori[:, [5], :]
data_adv = data_adv[:, [5], :]
data_ori = data_ori.reshape(d, -1)
data_adv = data_adv.reshape(d, -1)
# (200, 1)
d = len(data_ori)
x[data_type] = np.vstack([data_ori, data_adv])
y[data_type] = np.concatenate((np.zeros(d), np.ones(d)))
idx_train = np.random.permutation(len(x['train']))
x['train'] = x['train'][idx_train]
y['train'] = y['train'][idx_train]
return x, y
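# Label convention: x[data_type] stacks clean rows first and adversarial rows
# second, and y[data_type] marks them 0 (clean) / 1 (adversarial), so the
# logistic regression below is a clean-vs-adversarial detector.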
def train_and_evaluate(args, detections, attack, fpr_upper = 1.0):
plt.figure(figsize = (10, 8))
font = {'weight': 'bold', 'size': 16}
matplotlib.rc('font', **font)
auc_dict = {}
tpr1 = {}
tpr5 = {}
tpr10 = {}
for det in detections:
# Load data
x, y = load_data(args, attack, det)
x_train, y_train = x['train'], y['train']
x_test, y_test = x['test'], y['test']
x_train = x_train.reshape(len(x_train), -1)
x_test = x_test.reshape(len(x_test), -1)
# Train
# TODO: verify this. max_iter=10000 was added by AUTHOR/zimmerrol
lr = LogisticRegressionCV(n_jobs=-1, max_iter=10000).fit(x_train, y_train)
with open(f"{args.data_model}/models/{det}_{attack}_lr.pkl", "wb") as f:
pickle.dump(lr, f)
# Predict
        pred = lr.predict_proba(x_test)[:, 1]
# Evaluate.
fpr, tpr, thresholds = roc_curve(y_test, pred, drop_intermediate=False)
def find_nearest(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
auc_dict[det] = auc(fpr, tpr)
tpr1[det] = tpr[find_nearest(fpr, 0.01)]
tpr5[det] = tpr[find_nearest(fpr, 0.05)]
tpr10[det] = tpr[find_nearest(fpr, 0.10)]
plt.plot(
fpr, tpr,
label="{0} (AUC: {1:0.3f})".format(labels[det], auc(fpr, tpr)),
color=color_dict[det],
linestyle=linestyles[det],
linewidth=4)
print("Threshold for FPR1:", thresholds[find_nearest(fpr, 0.01)])
print("Threshold for FPR5:", thresholds[find_nearest(fpr, 0.05)])
print("Threshold for FPR10:", thresholds[find_nearest(fpr, 0.10)])
# 0.2 was fpr_upper before
plt.xlim([0.0, 0.2])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate', fontsize = 32)
plt.ylabel('True Positive Rate', fontsize = 32)
plt.title('{} ({}, {})'.format(labels_attack[attack], labels_data[args.dataset_name], labels_model[args.model_name]), fontsize = 32)
plt.legend(loc="lower right", fontsize = 22)
plt.show()
figure_name = '{}/figs/mad_transfer_roc_{}_{}_{}.pdf'.format(args.data_model, args.data_sample, attack, attack)
plt.savefig(figure_name)
return auc_dict, tpr1, tpr5, tpr10
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_name', type = str,
choices = ['cifar10'],
default = 'cifar10')
parser.add_argument('--model_name', type = str,
choices = ['resnet'],
default = 'resnet')
parser.add_argument('--data_sample', type = str,
choices = ['x_val200'],
default = 'x_val200')
parser.add_argument(
'--attack',
type = str,
choices = ['cw', 'bim', 'bim2'],
default = 'cw'
)
args = parser.parse_args()
dict_a = vars(args)
args.data_model = args.dataset_name + args.model_name
train_and_evaluate(args, ['ml_loo'], args.attack, fpr_upper = 1.0)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This tutorial shows how to generate adversarial examples
using C&W attack in white-box setting.
The original paper can be found at:
https://nicholas.carlini.com/papers/2017_sp_nnrobustattacks.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import flags
from keras.layers import Input
from cleverhans.attacks import CarliniWagnerL2
from cleverhans.dataset import MNIST
from cleverhans.loss import CrossEntropy
from cleverhans.utils import grid_visual, AccuracyReport
from cleverhans.utils import set_log_level
from cleverhans.utils_tf import model_eval, tf_model_load
from cleverhans.train import train
from cleverhans.utils_keras import KerasModelWrapper
import pickle as pkl
from keras import backend as K
from cleverhans.attacks import CarliniWagnerL2, FastGradientMethod, SaliencyMapMethod, DeepFool, BasicIterativeMethod
class Attack(object):
def __init__(self, model, *args):
self.model = model
def attack(self, x, *args):
raise NotImplementedError
class CleverhansAttackFeedableRunMixin:
def generate_np(self, x_val, feedable_dict={}, **kwargs):
"""
Generate adversarial examples and return them as a NumPy array.
Sub-classes *should not* implement this method unless they must
perform special handling of arguments.
:param x_val: A NumPy array with the original inputs.
:param **kwargs: optional parameters used by child classes.
:return: A NumPy array holding the adversarial examples.
"""
if self.sess is None:
raise ValueError("Cannot use `generate_np` when no `sess` was"
" provided")
packed = self.construct_variables(kwargs)
fixed, feedable, _, hash_key = packed
if hash_key not in self.graphs:
self.construct_graph(fixed, feedable, x_val, hash_key)
else:
# remove the None arguments, they are just left blank
for k in list(feedable.keys()):
if feedable[k] is None:
del feedable[k]
x, new_kwargs, x_adv = self.graphs[hash_key]
feed_dict = {x: x_val}
for name in feedable:
feed_dict[new_kwargs[name]] = feedable[name]
return self.sess.run(x_adv, {**feed_dict, **feedable_dict})
class FeedableRunCarliniWagnerL2(CleverhansAttackFeedableRunMixin, CarliniWagnerL2):
pass
class FeedableRunBasicIterativeMethod(CleverhansAttackFeedableRunMixin, BasicIterativeMethod):
pass
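# The two subclasses above only add the `feedable_dict` pass-through: extra
# placeholder values (e.g. the binarized readout weights fed in by the test
# harness elsewhere in this codebase) are merged into the attack's
# session.run feed. An illustrative call with hypothetical placeholders:
#   adv = cw.generate_np(x_batch, feedable_dict={weight_ph: w, bias_ph: b})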
class CW(Attack):
def __init__(self, model, sess, input_ph, num_classes, source_samples = 2, binary_search_steps = 5, cw_learning_rate = 5e-3, confidence = 0, attack_iterations = 1000, attack_initial_const = 1e-2):
        super(CW, self).__init__(model)
self.model = model
self.sess = sess
self.x = input_ph
self.y = Input(shape=(num_classes,), dtype = 'float32')
abort_early = True
self.cw = FeedableRunCarliniWagnerL2(self.model, sess=self.sess)
self.cw_params = {
'binary_search_steps': binary_search_steps,
"y": None,
'abort_early': True,
'max_iterations': attack_iterations,
'learning_rate': cw_learning_rate ,
'batch_size': source_samples,
'initial_const': attack_initial_const ,
'confidence': confidence,
'clip_min': 0.0,
}
def attack(self, x, y = None, feedable_dict={}):
# print(self.cw_params)
adv = self.cw.generate_np(x, **self.cw_params, feedable_dict=feedable_dict)
        if y is not None:
eval_params = {'batch_size': 100}
preds = self.model.get_logits(self.x)
acc = model_eval(self.sess, self.x, self.y, preds, adv, y, args=eval_params)
adv_success = 1 - acc
print('The adversarial success rate is {}.'.format(adv_success))
return adv
# added by AUTHOR according to the description in the paper
class BIM(Attack):
def __init__(self, model, sess, input_ph, num_classes, epsilon=0.03, learning_rate = 5e-3, attack_iterations = 1000, random_init=True):
        super(BIM, self).__init__(model)
self.model = model
self.sess = sess
self.x = input_ph
self.y = Input(shape=(num_classes,), dtype = 'float32')
self.bim = FeedableRunBasicIterativeMethod(self.model, sess=self.sess)
self.bim_params = {
"y": None,
'nb_iter': attack_iterations,
'eps_iter': learning_rate,
'eps': epsilon,
'rand_init': random_init,
'clip_min': 0.0,
'clip_max': 1.0,
}
def attack(self, x, y = None, feedable_dict={}):
# print(self.bim_params)
adv = self.bim.generate_np(x, **self.bim_params, feedable_dict=feedable_dict)
        if y is not None:
eval_params = {'batch_size': 100}
preds = self.model.get_logits(self.x)
acc = model_eval(self.sess, self.x, self.y, preds, adv, y, args=eval_params)
adv_success = 1 - acc
print('The adversarial success rate is {}.'.format(adv_success))
return adv
class FMA(Attack):
def __init__(self, raw_model, model, sess, input_ph, num_classes, target_samples,
reference,
features, epsilon=0.03, num_random_features=1000,
learning_rate = 5e-3, attack_iterations = 1000, random_init=True,
verbose=False):
        super(FMA, self).__init__(raw_model)
self.raw_model = raw_model
self.model = model
self.sess = sess
self.reference = reference
self.features = features
assert len(target_samples) == num_classes, (len(target_samples), num_classes)
self.target_samples = target_samples
self.x = input_ph
self.y = Input(shape=(num_classes,), dtype = 'float32')
self.logits = model.get_logits(input_ph)
self.epsilon = epsilon
self.learning_rate = learning_rate
self.attack_iterations = attack_iterations
self.random_init = random_init
self.all_features = tf.concat(features, 1)
num_random_features = min(num_random_features, self.all_features.shape[1].value)
self.num_random_features = num_random_features
self.feature_indices_ph = tf.placeholder(tf.int32, shape=(num_random_features,))
self.target_features_ph = tf.placeholder(tf.float32,
shape=self.all_features.shape)
self.loss = tf.nn.l2_loss(tf.gather(self.all_features -
tf.expand_dims(self.target_features_ph, 0), self.feature_indices_ph, axis=1))
self.gradient = tf.gradients(self.loss, self.x)[0]
self.verbose = verbose
def attack(self, x, y = None, feedable_dict={}):
assert len(x) == 1, "attack can only process a single sample at a time"
# print(self.bim_params)
y = self.sess.run(self.logits, {self.x: x, **feedable_dict}).argmax(-1)[0]
x_target = self.target_samples[(y + 1) % 10]
from ml_loo import loo_ml_instance
target_features = loo_ml_instance(x_target, self.reference, self.raw_model, self.features,
batch_size=3100)[:, :-1]
if not self.random_init:
x_adv = x
else:
x_adv = np.clip(x + np.random.uniform(-self.epsilon, +self.epsilon, x.shape), 0, 1)
for i in range(self.attack_iterations):
feature_indices = np.random.choice(
np.arange(self.all_features.shape[-1].value),
self.num_random_features)
loss_value, logits_value, gradient_value = self.sess.run(
(self.loss, self.logits, self.gradient),
{
self.x: x_adv,
self.target_features_ph: target_features,
self.feature_indices_ph: feature_indices,
**feedable_dict
}
)
gradient_value = np.sign(gradient_value)
x_adv -= self.learning_rate * gradient_value
delta = np.clip(x_adv - x, -self.epsilon, +self.epsilon)
x_adv = np.clip(x + delta, 0, 1)
if self.verbose:
print(loss_value, logits_value)
return x_adv
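# Each FMA iteration samples `num_random_features` coordinates of the ML-LOO
# feature map, takes an L2 loss between them and the target sample's
# features, applies one signed-gradient descent step, and projects the result
# back into the epsilon-ball around x and the [0, 1] box.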
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import numpy as np
import os
from keras.layers import Flatten, Conv2D, MaxPooling2D, Conv2DTranspose, UpSampling2D, Convolution2D, BatchNormalization, Dense, Dropout, Activation, Embedding, Conv1D, Input, GlobalMaxPooling1D, Multiply, Lambda, Permute, GlobalAveragePooling2D
from keras.preprocessing import sequence
from keras.datasets import imdb, mnist
from keras.callbacks import ModelCheckpoint
from keras.models import Model, Sequential
from keras.objectives import binary_crossentropy
from keras.metrics import binary_accuracy as accuracy
from keras.optimizers import RMSprop
from keras import backend as K
from keras import optimizers
import math
def construct_original_network(dataset_name, model_name, train):
data_model = dataset_name + model_name
if dataset_name == 'mnist':
input_size = 28
num_classes = 10
channel = 1
elif dataset_name == 'cifar10':
# Define the model
input_size = 32
num_classes = 10
channel = 3
elif dataset_name == 'cifar100':
# Define the model
input_size = 32
num_classes = 100
channel = 3
if model_name == 'scnn':
image_ph = Input(shape=(input_size,input_size,channel),dtype = 'float32')
net = Convolution2D(32, kernel_size=(5, 5),padding = 'same',
activation='relu', name = 'conv1')(image_ph)
net = MaxPooling2D(pool_size=(2, 2))(net)
net = Convolution2D(64, (5, 5),padding = 'same',
activation='relu', name = 'conv2')(net)
net = MaxPooling2D(pool_size=(2, 2))(net)
net = Flatten()(net)
net = Dense(1024, activation='relu',name='fc1')(net)
        net = Dense(num_classes, activation=None, name='fc2')(net)
preds = Activation('softmax')(net)
model = Model(image_ph, preds)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['acc'])
elif model_name == 'cnn':
image_ph = Input(shape=(input_size,input_size,channel),dtype = 'float32')
net = Convolution2D(48, (3,3), padding='same', input_shape=(32, 32, 3))(image_ph)
net = Activation('relu')(net)
net = Convolution2D(48, (3, 3))(net)
net = Activation('relu')(net)
net = MaxPooling2D(pool_size=(2, 2))(net)
net = Dropout(0.25)(net)
net = Convolution2D(96, (3,3), padding='same')(net)
net = Activation('relu')(net)
net = Convolution2D(96, (3, 3))(net)
net = Activation('relu')(net)
net = MaxPooling2D(pool_size=(2, 2))(net)
net = Dropout(0.25)(net)
net = Convolution2D(192, (3,3), padding='same')(net)
net = Activation('relu')(net)
net = Convolution2D(192, (3, 3))(net)
net = Activation('relu')(net)
net = MaxPooling2D(pool_size=(2, 2))(net)
net = Dropout(0.25)(net)
net = Flatten()(net)
net = Dense(512)(net)
net = Activation('relu')(net)
net = Dropout(0.5)(net)
net = Dense(256)(net)
net = Activation('relu')(net)
net = Dropout(0.5)(net)
net = Dense(num_classes, activation=None)(net)
preds = Activation('softmax')(net)
model = Model(image_ph, preds)
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['acc'])
# Compile the model
elif model_name == 'fc':
image_ph = Input(shape=(input_size,input_size,channel),dtype = 'float32')
net = Flatten()(image_ph)
net = Dense(256)(net)
net = Activation('relu')(net)
net = Dense(256)(net)
net = Activation('relu')(net)
net = Dense(256)(net)
net = Activation('relu')(net)
preds = Dense(num_classes, activation='softmax')(net)
model = Model(image_ph, preds)
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['acc'])
elif model_name == 'resnet':
from resnet import resnet_v2, lr_schedule, lr_schedule_sgd
model, image_ph, preds = resnet_v2(input_shape=(input_size, input_size, channel), depth=20, num_classes = num_classes)
optimizer = optimizers.SGD(lr=0.1, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
elif model_name == 'densenet':
from densenet import DenseNet
        nb_filter = -1  # DenseNet default; an earlier variant used 12 for cifar100
image_ph = Input(shape=(input_size,input_size,channel),dtype = 'float32')
model, preds = DenseNet((input_size,input_size,channel),
classes=num_classes, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=nb_filter, dropout_rate=0.0, weights=None, input_tensor = image_ph)
optimizer = optimizers.SGD(lr=0.1, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=["accuracy"])
grads = []
for c in range(num_classes):
grads.append(tf.gradients(preds[:,c], image_ph))
grads = tf.concat(grads, axis = 0)
approxs = grads * tf.expand_dims(image_ph, 0)
logits = [layer.output for layer in model.layers][-2]
print(logits)
sess = K.get_session()
return image_ph, preds, grads, approxs, sess, model, num_classes, logits
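# Illustrative usage (hypothetical variable names):
#   image_ph, preds, grads, approxs, sess, model, n, logits = \
#       construct_original_network('cifar10', 'resnet', train=False)
# `grads` stacks d preds[:, c] / d image over all classes c, and `approxs`
# multiplies those gradients by the input (gradient-times-input saliency).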
class ImageModel():
def __init__(self, model_name, dataset_name, train = False, load = False, **kwargs):
self.model_name = model_name
self.dataset_name = dataset_name
self.data_model = dataset_name + model_name
self.framework = 'keras'
# if not train:
# K.set_learning_phase(0)
print('Constructing network...')
self.input_ph, self.preds, self.grads, self.approxs, self.sess, self.model, self.num_classes, self.logits = construct_original_network(self.dataset_name, self.model_name, train = train)
self.layers = self.model.layers
self.last_hidden_layer = self.model.layers[-3]
self.y_ph = tf.placeholder(tf.float32, shape = [None, self.num_classes])
if load:
if load == True:
print('Loading model weights...')
self.model.load_weights('{}/models/original.hdf5'.format(self.data_model),
by_name=True)
elif load != False:
self.model.load_weights('{}/models/{}.hdf5'.format(self.data_model, load),
by_name=True)
self.pred_counter = 0
def train(self, dataset):
print('Training...')
if self.dataset_name == 'mnist':
assert self.model_name in ['cnn', 'scnn']
data_model = self.dataset_name + self.model_name
filepath="{}/models/original.hdf5".format(data_model)
checkpoint = ModelCheckpoint(filepath, monitor='val_acc',
verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
history = self.model.fit(dataset.x_train, dataset.y_train,
validation_data=(dataset.x_val, dataset.y_val),
callbacks = callbacks_list,
epochs=100, batch_size=128)
# print(history.history)
elif self.dataset_name in ['cifar10', 'cifar100']:
from keras.preprocessing.image import ImageDataGenerator
if self.model_name == 'cnn':
                # use the featurewise resnet generator (imported here because
                # this branch runs before the resnet/densenet imports below)
                from resnet import create_resnet_generator
                datagen = create_resnet_generator(dataset.x_train)
callbacks_list = []
batch_size = 128
num_epochs = 200
elif self.model_name in ['resnet', 'densenet']:
from resnet import lr_schedule, create_resnet_generator
from keras.callbacks import LearningRateScheduler, ReduceLROnPlateau, EarlyStopping
            # zoom 0.2, horizontal_flip always True; change optimizer to sgd and batch_size to 128.
datagen = ImageDataGenerator(rotation_range=15,
width_shift_range=5./32,
height_shift_range=5./32,
horizontal_flip = True,
zoom_range = 0.2)
datagen.fit(dataset.x_train, seed=0)
from resnet import lr_schedule_sgd
from keras.callbacks import LearningRateScheduler
lr_scheduler = LearningRateScheduler(lr_schedule_sgd)
callbacks_list = [lr_scheduler]
batch_size = 128 if self.dataset_name == 'cifar10' else 64
num_epochs = 200
filepath="{}/models/original.hdf5".format(self.data_model)
checkpoint = ModelCheckpoint(filepath, monitor='val_acc',
verbose=1, save_best_only=True, mode='max')
callbacks_list.append(checkpoint)
model_info = self.model.fit_generator(datagen.flow(dataset.x_train,
dataset.y_train, batch_size = batch_size),
epochs = num_epochs,
steps_per_epoch = dataset.x_train.shape[0] // batch_size,
callbacks = callbacks_list,
validation_data = (dataset.x_val, dataset.y_val),
verbose = 2,
workers = 4)
def adv_train(self, dataset, attack_name):
from cleverhans.attacks import FastGradientMethod, ProjectedGradientDescent
from cleverhans.utils_keras import KerasModelWrapper
from cleverhans.loss import CrossEntropy
from cleverhans.train import train
from cleverhans.utils_tf import model_eval
import time, datetime
if attack_name == 'fgsm' and self.dataset_name == 'mnist':
wrap = KerasModelWrapper(self.model)
params = {'eps': 0.3,
'clip_min': -1.,
'clip_max': 1.}
attacker = FastGradientMethod(wrap, sess=self.sess)
def attack(x):
return attacker.generate(x, **params)
preds_adv = self.model(attack(self.input_ph))
loss = CrossEntropy(wrap, smoothing=0.1, attack=attack)
y_ph = tf.placeholder(tf.float32, shape = (None, self.num_classes))
def evaluate():
# Accuracy of adversarially trained model on legitimate test inputs
eval_params = {'batch_size': 128}
accuracy = model_eval(self.sess, self.input_ph, y_ph, self.preds, dataset.x_val, dataset.y_val, args=eval_params)
print('Test accuracy on legitimate examples: %0.4f' % accuracy)
# Accuracy of the adversarially trained model on adversarial examples
accuracy = model_eval(self.sess, self.input_ph, y_ph, preds_adv, dataset.x_val, dataset.y_val, args=eval_params)
print('Test accuracy on adversarial examples: %0.4f' % accuracy)
# if self.dataset_name == 'mnist':
train_params = {
'nb_epochs': 20,
'batch_size': 128,
'learning_rate': 0.001,
'train_dir': '{}/models'.format(self.data_model),
'filename': 'adv.cpkt'
}
# Perform and evaluate adversarial training
train(self.sess, loss, dataset.x_train, dataset.y_train, evaluate=evaluate,
args=train_params, rng=np.random.RandomState([2017, 8, 30]))
self.model.save_weights('{}/models/{}.hdf5'.format(self.data_model, 'adv-{}'.format(attack_name)))
elif attack_name == 'pgd':
if self.dataset_name == 'mnist':
params = {'eps': 0.1,
# 'clip_min': -1.0,
# 'clip_max': 1.0,
'eps_iter': 0.01,
'nb_iter': 20,
'epochs': 100,
'batch_size': 50,
}
elif self.dataset_name == 'cifar10':
params = {'eps': 8.0 / 255 * 2,
# 'clip_min': -1.0,
# 'clip_max': 1.0,
'eps_iter': 2.0 / 255 * 2,
                    'nb_iter': 10,
'epochs': 200,
'batch_size': 128,
}
# attacker = ProjectedGradientDescent(wrap, sess=self.sess)
# import foolbox
# from foolbox.attacks import ProjectedGradientDescentAttack
from attack_model import LinfPGDAttack
# Main training loop
# fmodel = foolbox.models.KerasModel(self.model, bounds=(-1, 1), preprocessing=(0, 1))
attacker = LinfPGDAttack(self, params['eps'], k = params['nb_iter'], a = params['eps_iter'], clip_min = dataset.clip_min, clip_max = dataset.clip_max,
random_start = True, loss_func = 'xent')
def attack(x, y):
# return attacker(x, label=label, unpack=True, binary_search=False, epsilon=params['eps'], stepsize=params['eps_iter'],
# iterations=params['nb_iter'],
# random_start=False, return_early=True)
return attacker.attack(x, np.argmax(y, axis = -1))
from resnet import lr_schedule, create_resnet_generator, lr_schedule_sgd
from keras.preprocessing.image import ImageDataGenerator
# datagen = create_resnet_generator(dataset.x_train)
datagen = ImageDataGenerator(rotation_range=15,
width_shift_range=5./32,
height_shift_range=5./32,
horizontal_flip = True,
zoom_range = 0.2)
datagen.fit(dataset.x_train, seed=0)
xent = tf.reduce_mean(K.categorical_crossentropy(self.y_ph, self.preds), name='y_xent')
global_step = tf.train.get_or_create_global_step()
if self.dataset_name == 'cifar10':
momentum = 0.9
weight_decay = 0.0002
costs = []
print('number of trainable variables: ',len(tf.trainable_variables()))
for var in tf.trainable_variables():
if 'kernel' in var.name:
costs.append(tf.nn.l2_loss(var))
penalty = tf.add_n(costs)
loss = xent + weight_decay * penalty
elif self.dataset_name == 'mnist':
loss = xent
if self.dataset_name == 'cifar10':
boundaries = [40000,60000]
values = [0.1,0.01,0.001]
learning_rate = tf.train.piecewise_constant(
tf.cast(global_step, tf.int32),
boundaries,
values)
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
elif self.dataset_name == 'mnist':
boundaries = [50000]
values = [1e-3,1e-4]
learning_rate = tf.train.piecewise_constant(
tf.cast(global_step, tf.int32),
boundaries,
values)
optimizer = tf.train.AdamOptimizer(learning_rate)
train_step = optimizer.minimize(loss, global_step=global_step)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.preds, 1),
tf.argmax(self.y_ph, 1)), tf.float32))
num_output_steps = 100 # 100
num_checkpoint_steps = 1000
batch_size = params['batch_size']
ii = 0
epochs = params['epochs']
for e in range(epochs):
num_batches = 0
for x_batch, y_batch in datagen.flow(dataset.x_train, dataset.y_train, batch_size=batch_size):
# Compute Adversarial Perturbations
start = time.time()
x_batch_adv = attack(x_batch, y_batch)
nat_dict = {self.input_ph: x_batch,
self.y_ph: y_batch}
adv_dict = {self.input_ph: x_batch_adv,
self.y_ph: y_batch}
eval_dict = {self.input_ph: dataset.x_train[:1000],
self.y_ph: dataset.y_train[:1000]}
val_dict = {self.input_ph: dataset.x_val[:1000],
self.y_ph: dataset.y_val[:1000]}
# Output to stdout
if ii % num_output_steps == 0:
nat_acc = self.sess.run(accuracy, feed_dict=eval_dict)
val_acc = self.sess.run(accuracy, feed_dict=val_dict)
adv_acc = self.sess.run(accuracy, feed_dict=adv_dict)
print('Step {} '.format(ii))
print(' training nat accuracy {:.4}%'.format(nat_acc * 100))
print(' validation accuracy {:.4}%'.format(val_acc * 100))
print(' training adv accuracy {:.4}%'.format(adv_acc * 100))
if ii != 0:
print(' {} examples per second'.format(
num_output_steps * batch_size / training_time))
training_time = 0.0
# Write a checkpoint
if ii % num_checkpoint_steps == 0:
self.model.save_weights('{}/models/adv-{}-{}.hdf5'.format(self.data_model, attack_name, ii))
# Actual training step
_ = self.sess.run(train_step, feed_dict=adv_dict)
# print(step)
end = time.time()
                    training_time += end - start  # accumulate over the reporting window
ii += 1
num_batches += 1
if num_batches >= len(dataset.x_train) / batch_size:
break
self.model.save_weights('{}/models/adv-{}.hdf5'.format(self.data_model, attack_name))
def predict(self, x, verbose=0, batch_size = 500, logits = False):
x = np.array(x)
if len(x.shape) == 3:
_x = np.expand_dims(x, 0)
else:
_x = x
self.pred_counter += len(_x)
if not logits:
prob = self.model.predict(_x, batch_size = batch_size,
verbose = verbose)
else:
num_iters = int(math.ceil(len(_x) * 1.0 / batch_size))
probs = []
for i in range(num_iters):
# print('{} samples predicted'.format(i * batch_size))
x_batch = _x[i * batch_size: (i+1) * batch_size]
prob = self.sess.run(self.logits,
feed_dict = {self.input_ph: x_batch})
probs.append(prob)
prob = np.concatenate(probs, axis = 0)
if len(x.shape) == 3:
prob = prob.reshape(-1)
return prob
def compute_saliency(self, x, saliency_type = 'gradient'):
x = np.array(x)
if self.dataset_name in ['mnist', 'cifar10', 'cifar100']:
            batchsize = 128
num_iters = int(math.ceil(len(x) * 1.0 / batchsize))
approxs_val = []
for i in range(num_iters):
batch_data = x[i * batchsize: (i+1) * batchsize]
if saliency_type == 'gradient':
approxs = self.grads
elif saliency_type == 'taylor':
approxs = self.approxs
batch_approxs = self.sess.run(approxs, feed_dict = {self.input_ph: batch_data})
# [num_classes, batchsize, h, w, c]
approxs_val.append(batch_approxs)
approxs_val = np.concatenate(approxs_val, axis = 1)
# [num_classes, num_data, h, w, c]
pred_val = self.predict(x)
class_specific_scores = approxs_val[np.argmax(pred_val, axis = 1), range(len(pred_val))]
# [num_data, h, w, c]
return class_specific_scores
def compute_ig(self, x, reference):
x = np.array(x)
if self.dataset_name in ['mnist', 'cifar10', 'cifar100']:
batchsize = 1
steps = 50
pred_vals = self.predict(x)
class_specific_scores = []
num_iters = int(math.ceil(len(x) * 1.0 / batchsize))
for i in range(num_iters):
batch_data = x[i * batchsize: (i+1) * batchsize]
_, h, w, c = batch_data.shape
step_batch = [batch_data * float(s) / steps + reference * (1 - float(s) / steps) for s in range(1, steps+1)]
# [steps,batchsize, h, w, c]
step_batch = np.reshape(step_batch,
[-1, h, w, c])
# [steps * batchsize, h, w, c]
batch_grads = self.sess.run(self.grads,
feed_dict = {self.input_ph: step_batch})
# [num_classes, steps * batchsize, h, w, c]
num_classes, _, h, w, c = batch_grads.shape
grads_val = np.mean(batch_grads.reshape([num_classes, steps, -1, h, w, c]), axis = 1)
approxs_val = grads_val * (batch_data - reference)
# [num_classes, batchsize, h, w, c]
pred_val = pred_vals[i * batchsize: (i+1) * batchsize]
class_specific_score = approxs_val[np.argmax(pred_val, axis = 1), range(len(pred_val))]
# [batchsize, h, w, c]
# [batchsize, maxlen]
class_specific_scores.append(class_specific_score)
# [num_data, length]
return np.concatenate(class_specific_scores, axis = 0)
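# The loop above approximates integrated gradients:
#   IG_i(x) ~ (x_i - ref_i) * (1 / steps) * sum_{s=1..steps} dF/dx_i
# evaluated at ref + (s / steps) * (x - ref), keeping only the row of the
# predicted class for each sample.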
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import os
import time
import sys
import tarfile
import zipfile
import keras
import math
from keras.utils import to_categorical
class ImageData():
def __init__(self, dataset_name):
if dataset_name == 'mnist':
from keras.datasets import mnist
(x_train, y_train), (x_val, y_val) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_val = x_val.reshape(x_val.shape[0], 28, 28, 1)
elif dataset_name == 'cifar100':
from keras.datasets import cifar100
(x_train, y_train), (x_val, y_val) = cifar100.load_data()
elif dataset_name == 'cifar10':
from keras.datasets import cifar10
# Load CIFAR10 Dataset
(x_train, y_train), (x_val, y_val) = cifar10.load_data()
x_train = x_train.astype('float32')/255
x_val = x_val.astype('float32')/255
y_train = to_categorical(y_train)
y_val = to_categorical(y_val)
x_train_mean = np.zeros(x_train.shape[1:])
x_train -= x_train_mean
x_val -= x_train_mean
self.clip_min = 0.0
self.clip_max = 1.0
        self.x_train = x_train
        self.x_val = x_val
        self.y_train = y_train
        self.y_val = y_val
self.x_train_mean = x_train_mean
print('self.x_val', self.x_val.shape)
def split_data(x, y, model, num_classes = 10, split_rate = 0.8, sample_per_class = 100):
# print('x.shape', x.shape)
# print('y.shape', y.shape)
np.random.seed(10086)
pred = model.predict(x)
label_pred = np.argmax(pred, axis = 1)
label_truth = np.argmax(y, axis = 1)
correct_idx = label_pred==label_truth
print('Accuracy is {}'.format(np.mean(correct_idx)))
x, y = x[correct_idx], y[correct_idx]
label_pred = label_pred[correct_idx]
# print('x.shape', x.shape)
# print('y.shape', y.shape)
x_train, x_test, y_train, y_test = [], [], [], []
for class_id in range(num_classes):
_x = x[label_pred == class_id][:sample_per_class]
_y = y[label_pred == class_id][:sample_per_class]
l = len(_x)
x_train.append(_x[:int(l * split_rate)])
x_test.append(_x[int(l * split_rate):])
y_train.append(_y[:int(l * split_rate)])
y_test.append(_y[int(l * split_rate):])
x_train = np.concatenate(x_train, axis = 0)
x_test = np.concatenate(x_test, axis = 0)
y_train = np.concatenate(y_train, axis = 0)
y_test = np.concatenate(y_test, axis = 0)
idx_train = np.random.permutation(len(x_train))
idx_test = np.random.permutation(len(x_test))
x_train = x_train[idx_train]
y_train = y_train[idx_train]
x_test = x_test[idx_test]
y_test = y_test[idx_test]
return x_train, y_train, x_test, y_test
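# split_data keeps only correctly-classified samples, takes up to
# `sample_per_class` per predicted class, and splits each class by
# `split_rate` before shuffling, so train and test stay class-balanced.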
if __name__ == '__main__':
import argparse
from build_model import ImageModel
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_name', type = str,
choices = ['cifar10'],
default = 'cifar10')
parser.add_argument('--model_name', type = str,
choices = ['resnet'],
default = 'resnet')
args = parser.parse_args()
dict_a = vars(args)
data_model = args.dataset_name + args.model_name
dataset = ImageData(args.dataset_name)
model = ImageModel(args.model_name, args.dataset_name, train = False, load = True)
x, y = dataset.x_val, dataset.y_val
x_train, y_train, x_test, y_test = split_data(x, y, model, num_classes = 10, split_rate = 0.8)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# disable tf logging
# some of these might have to be commented out to use verbose=True in the
# adaptive attack
import os
import torch
from ml_loo import collect_layers
from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper
from utils import build_dataloader_from_arrays
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import warnings
import logging
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
try:
import cPickle as pickle
except ImportError:
import _pickle as pickle
import cleverhans
from cleverhans.utils_keras import KerasModelWrapper
from active_tests.decision_boundary_binarization import \
interior_boundary_discrimination_attack
import numpy as np
from sklearn.linear_model import LogisticRegressionCV
from tqdm import tqdm
from build_model import ImageModel
from load_data import ImageData, split_data
from attack_model import BIM, CW, FMA
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_name', type = str,
choices = ['cifar10'],
default = 'cifar10')
parser.add_argument('--model_name', type = str,
choices = ['resnet'],
default = 'resnet')
parser.add_argument(
'--attack',
type = str,
choices = ['cw', 'bim', 'fma'],
default = 'cw'
)
parser.add_argument(
'--detector-attack',
type = str,
choices = ['cw', 'bim', 'bim2'],
default = 'cw'
)
parser.add_argument("--n-samples", type=int, default=500)
parser.add_argument("--n-boundary-points", type=int, default=1)
parser.add_argument("--n-inner-points", type=int, default=999)
    # default equals value for FPR5; obtained from train_and_evaluate.py
parser.add_argument("--detector-threshold", type=float, default=0.6151412488088068)
parser.add_argument("--inverted-test", action="store_true")
args = parser.parse_args()
if args.inverted_test:
print("Running inverted test")
else:
print("Running normal/non-inverted test")
dict_a = vars(args)
args.data_model = args.dataset_name + args.model_name
# load detector
with open(f"{args.data_model}/models/ml_loo_{args.detector_attack}_lr.pkl", "rb") as f:
lr = pickle.load(f)
print('Loading dataset...')
dataset = ImageData(args.dataset_name)
model = ImageModel(args.model_name, args.dataset_name, train = False, load = True)
class ModelWrapper(cleverhans.model.Model):
def __init__(self, model, sess, input_ph, weight_shape, bias_shape):
self.weight = tf.placeholder(dtype=tf.float32, shape=weight_shape)
self.bias = tf.placeholder(dtype=tf.float32, shape=bias_shape)
self.model = model
self.sess = sess
self.input_ph = input_ph
self.num_classes = 2
self.first = True
def fprop(self, x):
y = self.model.get_layer(x, "flatten_1")
logits = y @ tf.transpose(self.weight) + tf.reshape(self.bias, (1, -1))
return {"logits": logits, "probs": tf.nn.softmax(logits, -1), "predictions": tf.argmax(logits, -1)}
def get_probs(self, x):
return self.fprop(x)["probs"]
def predict(self, x, weights_feed_dict, logits=True):
if self.first:
self.targets = self.fprop(self.input_ph)
self.first = False
targets = self.targets
if logits:
target = targets["logits"]
else:
target = targets["probs"]
return self.sess.run(target, {self.input_ph: x, **weights_feed_dict})
keras_model = KerasModelWrapper(model.model)
wrapped_model = ModelWrapper(keras_model, model.sess, model.input_ph, (2, 256), (2,))
features = keras_model.get_layer(model.input_ph, "flatten_1")
feature_gradients = tf.gradients(features, model.input_ph)[0]
logits = keras_model.get_logits(model.input_ph)
def run_features(x: np.ndarray, features_only=True, features_and_logits=False):
if features_only:
assert not features_and_logits
targets = features
elif features_and_logits:
targets = (features, logits)
else:
targets = logits
return model.sess.run(targets,
feed_dict={model.input_ph: x.transpose(0, 2, 3, 1)})
def run_features_and_gradients(x: np.ndarray):
return model.sess.run((features, feature_gradients),
feed_dict={model.input_ph: x.transpose(0, 2, 3, 1)})
feature_extractor = TensorFlow1ToPyTorchWrapper(
logit_forward_pass=lambda x, features_only = False,
features_and_logits = False: run_features(x, features_only,
features_and_logits),
logit_forward_and_backward_pass=lambda x: run_features_and_gradients(x)
)
if args.dataset_name == 'cifar10':
X_train, Y_train, X_test, Y_test = split_data(dataset.x_val,
dataset.y_val, model, num_classes = 10,
split_rate = 0.8, sample_per_class = 1000)
else:
raise NotImplementedError()
if args.n_samples == -1:
args.n_samples = len(X_test)
X_test = X_test[:args.n_samples]
Y_test = Y_test[:args.n_samples]
from ml_loo import get_ml_loo_features
if args.model_name == 'resnet':
interested_layers = [14,24,35,45,56,67,70]
else:
raise ValueError()
# only relevant feature used by logistic regression model
stat_names = ['quantile']
reference = - dataset.x_train_mean
get_ml_loo_features_ = lambda x: get_ml_loo_features(model, x, reference, interested_layers, stat_names=stat_names)[:, 0]
detector = lambda x: lr.predict_proba(get_ml_loo_features_(x))[:, 1]
batch_size = 50
detector_threshold = args.detector_threshold
if args.attack == 'cw':
if args.dataset_name in ['cifar10']:
if args.model_name == 'resnet':
attack_model = CW(
wrapped_model,
wrapped_model.sess,
wrapped_model.input_ph,
wrapped_model.num_classes,
source_samples = 1,
binary_search_steps = 5,
cw_learning_rate = 1e-2,
confidence = 0,
attack_iterations = 100,
attack_initial_const = 1e-2,
)
original_attack_model = CW(
keras_model,
wrapped_model.sess,
wrapped_model.input_ph,
model.num_classes,
source_samples = 1,
binary_search_steps = 5,
cw_learning_rate = 1e-2,
confidence = 0,
attack_iterations = 100,
attack_initial_const = 1e-2,
)
elif args.attack == "bim":
if args.dataset_name in ['cifar10']:
if args.model_name == 'resnet':
attack_model = BIM(
wrapped_model,
wrapped_model.sess,
wrapped_model.input_ph,
wrapped_model.num_classes,
attack_iterations = 100,
epsilon=0.03,
learning_rate=2.5 * 0.03 / 100,
random_init=True
)
original_attack_model = BIM(
keras_model,
wrapped_model.sess,
wrapped_model.input_ph,
model.num_classes,
attack_iterations = 100,
epsilon=0.03,
learning_rate=2.5 * 0.03 / 100,
random_init=True
)
elif args.attack == "fma":
if args.dataset_name in ['cifar10']:
if args.model_name == 'resnet':
target_samples = []
for y in range(10):
                    target_samples.append(X_train[np.argmax(Y_train.argmax(-1) == y)])
target_samples = np.array(target_samples)
attack_model = FMA(
model,
wrapped_model,
wrapped_model.sess,
wrapped_model.input_ph,
wrapped_model.num_classes,
target_samples=target_samples[:2],
reference=reference,
features=collect_layers(model, interested_layers),
attack_iterations = 500,
epsilon=0.03,
learning_rate=4 * 0.03 / 100,
num_random_features=3100,
random_init=True
)
original_attack_model = BIM(
keras_model,
wrapped_model.sess,
wrapped_model.input_ph,
model.num_classes,
attack_iterations = 100,
epsilon=0.03,
learning_rate=2.5 * 0.03 / 100,
random_init=True
)
assert 0 < X_test.max() <= 1.0, (X_test.min(), X_test.max())
test_loader = build_dataloader_from_arrays(X_test.transpose((0, 3, 1, 2)), Y_test, batch_size=32)
def run_attack(m, l, attack_kwargs):
# attack_kwargs contains values that might be useful for e.g. constructing logit matching evasion attacks
if args.attack == "fma":
reference_points = attack_kwargs["reference_points_x"]
if len(reference_points) < 2:
reference_points = np.concatenate([reference_points, reference_points], 0)
reference_points = reference_points.transpose((0, 2, 3, 1))
attack_model.target_samples = reference_points
else:
del attack_kwargs
linear_layer = m[-1]
del m
weights_feed_dict = {
wrapped_model.weight: linear_layer.weight.data.numpy(),
wrapped_model.bias: linear_layer.bias.data.numpy()
}
for x, y in l:
x = x.numpy()
x = x.transpose((0, 2, 3, 1))
assert len(x) == 1
x_adv = attack_model.attack(x, feedable_dict=weights_feed_dict)
logits_adv = wrapped_model.predict(
x_adv, weights_feed_dict=weights_feed_dict, logits=True)
y_adv = logits_adv.argmax(-1)
is_adv = y_adv != y
is_not_detected = verify_input_data_fn(torch.tensor(x_adv.transpose((0, 3, 1, 2))))
is_adv_and_not_detected = np.logical_and(is_adv, is_not_detected)
is_adv_and_detected = np.logical_and(is_adv, ~is_not_detected)
if args.inverted_test:
test_result = is_adv_and_detected
else:
test_result = is_adv_and_not_detected
return test_result, (torch.tensor(x_adv), torch.tensor(logits_adv))
def get_boundary_adversarials(x, y, n_samples, epsilon):
"""Generate adversarial examples for the base classifier that get
rejected by detector."""
assert len(x.shape) == 3
x = x.unsqueeze(0)
x = torch.repeat_interleave(x, n_samples, dim=0)
x = x.numpy()
x = x.transpose((0, 2, 3, 1))
for _ in range(25):
x_adv = original_attack_model.attack(x)
diff = x_adv - x
diff = diff / np.max(np.abs(diff)) * epsilon
x_adv = np.clip(x + diff, 0, 1)
is_detected = ~verify_input_data_fn(torch.tensor(x_adv.transpose((0, 3, 1, 2))))
if np.all(is_detected):
                # keep generating until we finally find adversarial example(s)
                # that get(s) detected
break
else:
warnings.warn("Could not generate adversarial example that gets "
"detected after 25 trials.")
x_adv = x_adv.transpose((0, 3, 1, 2))
return torch.tensor(x_adv)
from argparse_utils import DecisionBoundaryBinarizationSettings
from active_tests.decision_boundary_binarization import format_result
if args.inverted_test:
additional_settings = dict(
n_boundary_points=args.n_boundary_points,
n_boundary_adversarial_points=1,
n_far_off_boundary_points=1,
n_far_off_adversarial_points=1,
)
else:
additional_settings = dict(
n_boundary_points=args.n_boundary_points,
n_boundary_adversarial_points=args.n_boundary_points - 1,
n_far_off_boundary_points=1,
n_far_off_adversarial_points=0,
)
far_off_distance = 1.75
def verify_input_data_fn(x: torch.Tensor) -> np.ndarray:
"""Checks if detector does not reject input data as adversarial, i.e.
input is clean."""
#if args.inverted_test:
# return detector(x.numpy().transpose((0, 2, 3, 1))) > detector_threshold
#else:
return detector(x.numpy().transpose((0, 2, 3, 1))) < detector_threshold
scores_logit_differences_and_validation_accuracies = \
interior_boundary_discrimination_attack(
feature_extractor,
test_loader,
attack_fn=lambda m, l, attack_kwargs: run_attack(m, l, attack_kwargs),
linearization_settings=DecisionBoundaryBinarizationSettings(
epsilon=0.03,
norm="linf",
lr=10000,
adversarial_attack_settings=None,
optimizer="sklearn",
n_inner_points=args.n_inner_points,
**additional_settings
),
n_samples=args.n_samples,
device="cpu",
n_samples_evaluation=200,
n_samples_asr_evaluation=200,
verify_valid_boundary_training_data_fn=verify_input_data_fn,
get_boundary_adversarials_fn=get_boundary_adversarials,
verify_valid_inner_training_data_fn=None,
verify_valid_input_validation_data_fn=None,
fill_batches_for_verification=False,
far_off_distance=far_off_distance,
rejection_resampling_max_repetitions=25,
rescale_logits="adaptive"
)
print(format_result(scores_logit_differences_and_validation_accuracies, args.n_samples))
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import flags
from keras.layers import Input
from cleverhans.attacks import CarliniWagnerL2
from cleverhans.dataset import MNIST
from cleverhans.loss import CrossEntropy
from cleverhans.utils import grid_visual, AccuracyReport
from cleverhans.utils import set_log_level
from cleverhans.utils_tf import model_eval, tf_model_load
from cleverhans.train import train
from cleverhans.utils_keras import KerasModelWrapper
from build_model import ImageModel
from load_data import ImageData, split_data
import pickle as pkl
from keras.utils import to_categorical
from attack_model import Attack, CW, BIM
import scipy
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_name', type = str,
choices = ['cifar10'],
default = 'cifar10')
parser.add_argument('--model_name', type = str,
choices = ['resnet'],
default = 'resnet')
parser.add_argument('--data_sample', type = str,
choices = ['x_train', 'x_val', 'x_val200'],
default = 'x_val200')
parser.add_argument(
'--attack',
type = str,
choices = ['cw', 'bim', 'bim2'],
default = 'cw'
)
args = parser.parse_args()
dict_a = vars(args)
data_model = args.dataset_name + args.model_name
if data_model not in os.listdir('./'):
os.mkdir(data_model)
if 'results' not in os.listdir('./{}'.format(data_model)):
os.mkdir('{}/results'.format(data_model))
if 'models' not in os.listdir(data_model):
os.mkdir('{}/models'.format(data_model))
if 'data' not in os.listdir(data_model):
os.mkdir('{}/data'.format(data_model))
if 'figs' not in os.listdir(data_model):
os.mkdir('{}/figs'.format(data_model))
print('Loading dataset...')
dataset = ImageData(args.dataset_name)
model = ImageModel(args.model_name, args.dataset_name, train = False, load = True)
if args.dataset_name == 'cifar10':
X_train, Y_train, X_test, Y_test = split_data(dataset.x_val,
dataset.y_val, model, num_classes = 10,
split_rate = 0.8, sample_per_class = 1000)
print('Sanity checking...')
data_sample = X_test
print('data_sample.shape', data_sample.shape)
print('X_train.shape', X_train.shape)
pred_test = model.predict(dataset.x_val)
def cross_entropy(predictions, targets, epsilon=1e-12):
    # clipping already keeps predictions strictly inside (0, 1), so no extra
    # constant is needed inside the log
    predictions = np.clip(predictions, epsilon, 1. - epsilon)
    N = predictions.shape[0]
    ce = -np.sum(targets * np.log(predictions)) / N
    return ce
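# Quick self-check of the helper above (added sketch): confident, correct
# predictions should yield (near-)zero cross entropy.
_demo = np.eye(10)[[3, 7]]
assert abs(cross_entropy(_demo, _demo)) < 1e-6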
ce = cross_entropy(pred_test, dataset.y_val, epsilon=1e-12)
acc = np.mean(np.argmax(pred_test, axis = 1) == np.argmax(dataset.y_val, axis = 1))
print('The accuracy is {}. The cross entropy is {}.'.format(acc, ce))
if args.attack == 'cw':
if args.dataset_name in ['cifar10']:
if args.model_name == 'resnet':
attack_model = CW(
KerasModelWrapper(model.model),
model.input_ph,
model.num_classes,
source_samples = 100,
binary_search_steps = 5,
cw_learning_rate = 1e-2,
confidence = 0,
attack_iterations = 100,
attack_initial_const = 1e-2,
)
elif args.attack == "bim":
if args.dataset_name in ['cifar10']:
if args.model_name == 'resnet':
attack_model = BIM(
KerasModelWrapper(model.model),
model.sess,
model.input_ph,
model.num_classes,
attack_iterations = 100,
epsilon=0.03,
learning_rate=2.5 * 0.03 / 100,
random_init=True
)
elif args.attack == "bim2":
if args.dataset_name in ['cifar10']:
if args.model_name == 'resnet':
attack_model = BIM(
KerasModelWrapper(model.model),
model.sess,
model.input_ph,
model.num_classes,
attack_iterations = 10,
epsilon=0.03,
learning_rate=2.5 * 0.03 / 10,
random_init=True
)
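# Note (added): both BIM configurations above use the common PGD step-size
# heuristic learning_rate = 2.5 * epsilon / n_steps, so the iterate can
# traverse the full epsilon-ball within the given number of steps.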
###################################################
# filter data samples: keep those the model predicts correctly and that the attack flips successfully
###################################################
data_types = ['train', 'test']
data = {'train': (X_train, Y_train), 'test': (X_test, Y_test)}
if args.data_sample == 'x_val200':
num_samples = {'train': 800, 'test': 200}
for data_type in data_types:
x, y = data[data_type]
print('x.shape', x.shape)
print('y.shape', y.shape)
num_successes = 0
oris = []
perturbeds = []
batch_size = int(np.minimum(100, num_samples[data_type]))
cur_batch = 0
conf = 15
epsilon = 0
while num_successes < num_samples[data_type]:
batch_x, batch_y = x[cur_batch * batch_size:(cur_batch+1) * batch_size], y[cur_batch * batch_size:(cur_batch+1) * batch_size]
print('batch_x', batch_x.shape)
x_adv = attack_model.attack(batch_x)
print('x_adv', x_adv.shape)
if x_adv.shape[0] == 0:
continue
x_adv_labels = np.argmax(model.predict(x_adv), axis = -1)
index_filter = (x_adv_labels != np.argmax(batch_y, axis = 1))
ori = batch_x[index_filter]
perturbed = x_adv[index_filter]
print('Success rate', perturbed.shape[0] / len(x_adv))
oris.append(ori)
perturbeds.append(perturbed)
cur_batch += 1
num_successes += len(ori)
print('Number of successful samples is {}'.format(num_successes))
oris = np.concatenate(oris, axis = 0)
perturbeds = np.concatenate(perturbeds, axis = 0)
oris = oris[:num_samples[data_type]]
perturbeds = perturbeds[:num_samples[data_type]]
print('oris.shape', oris.shape)
print('perturbeds.shape', perturbeds.shape)
np.save('{}/data/{}{}_{}_{}.npy'.format(
data_model,
args.data_sample,
'' if data_type == 'test' else '_train',
args.attack,
'ori'),
oris)
np.save('{}/data/{}{}_adv_{}_{}.npy'.format(
data_model,
args.data_sample,
'' if data_type == 'test' else '_train',
args.attack,
'ori'),
perturbeds)
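# Added helper sketch: reload the clean/adversarial pairs saved above. The
# path layout mirrors the np.save calls (note that both files keep the
# trailing 'ori' tag, matching the format strings used when saving).
def load_attack_pairs(data_model, data_sample, attack, data_type='test'):
    suffix = '' if data_type == 'test' else '_train'
    oris = np.load('{}/data/{}{}_{}_{}.npy'.format(
        data_model, data_sample, suffix, attack, 'ori'))
    perturbeds = np.load('{}/data/{}{}_adv_{}_{}.npy'.format(
        data_model, data_sample, suffix, attack, 'ori'))
    return oris, perturbeds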
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
from tensorflow.keras.layers import Layer
from tensorflow.keras import backend as K
import tensorflow as tf
import numpy as np
class Clipper(Layer):
"""clips input to lie wihin valid pixel range
Only active at training time since it is a regularization layer.
# Arguments
attenuation: how much to attenuate the input
# Input shape
Arbitrary.
# Output shape
Same as the input shape.
"""
def __init__(self, **kwargs):
super(Clipper, self).__init__(**kwargs)
self.supports_masking = True
def call(self, inputs, training=None):
def augmented():
return tf.clip_by_value(inputs,-0.5,0.5)
return K.in_train_phase(augmented, augmented, training=training)
def get_config(self):
config = {}
base_config = super(Clipper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
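# Usage sketch (added, hypothetical model): Clipper drops into a Keras model
# like any other layer; inputs are assumed to be centered to [-0.5, 0.5].
if __name__ == "__main__":
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Conv2D
    demo = Sequential([
        Conv2D(8, (3, 3), padding='same', input_shape=(32, 32, 3)),
        Clipper(),  # clamps activations back into the valid pixel range
    ])
    print(demo.predict(np.zeros((1, 32, 32, 3), dtype='float32')).shape)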
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
from tensorflow.keras.layers import Layer
from tensorflow.keras import backend as K
import tensorflow as tf
import numpy as np
class Grayscaler(Layer):
"""Converts input to grayscale
Only active at training time since it is a regularization layer.
# Arguments
attenuation: how much to attenuate the input
# Input shape
Arbitrary.
# Output shape
Same as the input shape.
"""
def __init__(self, **kwargs):
super(Grayscaler, self).__init__(**kwargs)
self.supports_masking = True
def call(self, inputs, training=None):
def augmented():
return tf.image.rgb_to_grayscale(inputs)
return K.in_train_phase(augmented, augmented, training=training)
def get_config(self):
config = {}
base_config = super(Grayscaler, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
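# Shape check (added sketch): unlike Clipper above, Grayscaler changes the
# output shape by collapsing the RGB channels into a single luminance channel.
if __name__ == "__main__":
    from tensorflow.keras.models import Sequential
    demo = Sequential([Grayscaler(input_shape=(32, 32, 3))])
    # prints (2, 32, 32, 1)
    print(demo.predict(np.zeros((2, 32, 32, 3), dtype='float32')).shape)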
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Run this to attack a trained model via TrainModel.
Use the "loadFullModel" method to load an already trained model (trained via TrainModel).
The entry point is main(), which runs an (optionally adaptive) PGD attack on the loaded model.
"""
import logging
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
from cleverhans.attacks import ProjectedGradientDescent
from Model_Implementations import Model_Softmax_Baseline, \
Model_Logistic_Baseline, Model_Logistic_Ensemble, Model_Tanh_Ensemble, \
Model_Tanh_Baseline
from tensorflow.keras.datasets import cifar10
from tensorflow.keras import backend
import numpy as np
import scipy.linalg
model_path = 'checkpoints/ECOC/tanh32/checkpoints'  # path with saved model parameters
def setup_model_and_data(adaptive_attack=False):
print("Modifying model for adaptive attack:", adaptive_attack)
# Dataset-specific parameters - should be the same as those used in TrainModel
DATA_DESC = 'CIFAR10'
(X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
epochs = None
weight_save_freq = None
num_classes = 10  # how many classes (categories) are in this dataset?
Y_train = np.squeeze(Y_train)
Y_test = np.squeeze(Y_test)
num_filters_std = [32, 64, 128]
num_filters_ens = [32, 64, 128]
num_filters_ens_2 = 16
dropout_rate_std = 0.0
dropout_rate_ens = 0.0
weight_decay = 0
model_rep_baseline = 2
model_rep_ens = 2
DATA_AUGMENTATION_FLAG = 1
BATCH_NORMALIZATION_FLAG = 1
num_channels = 3
inp_shape = (32, 32, 3)
lr = 1e-4
batch_size = 80
noise_stddev = 0.032
blend_factor = 0.032
# DATA PRE-PROCESSING
X_train = (X_train / 255).astype(np.float32)
X_test = (X_test / 255).astype(np.float32)
# reshape (add explicit channel axis)
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], X_train.shape[2], num_channels)
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2], num_channels)
X_valid = X_test[1000:2000]
Y_valid = Y_test[1000:2000]  # validation data, used to attack the model
## ENSEMBLE TANH 32 MODEL DEFINITION
name = 'tanh_32_diverse' + '_' + DATA_DESC
seed = 59
code_length = 32
num_codes = code_length
num_chunks = 4
base_model = None
def output_activation(x):
if adaptive_attack:
return x
else:
return tf.nn.tanh(x)
M = scipy.linalg.hadamard(code_length).astype(np.float32)
# Replace the first column, which in scipy's Hadamard construction is always 1
# and hence not a useful classifier. This change still ensures all codewords
# have pairwise dot product <= 0; since the decoder ignores negative
# correlations anyway, it has no net effect on probability estimation.
M[np.arange(0, num_codes, 2), 0] = -1
np.random.seed(seed)
np.random.shuffle(M)
idx = np.random.permutation(code_length)
M = M[0:num_codes, idx[0:code_length]]
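# Added sanity check: after the first-column flip, row shuffle, and column
# permutation, distinct codewords still correlate non-positively.
_gram = np.dot(M, M.T)
assert np.all(_gram - np.diag(np.diag(_gram)) <= 0)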
params_dict = {'BATCH_NORMALIZATION_FLAG': BATCH_NORMALIZATION_FLAG,
'DATA_AUGMENTATION_FLAG': DATA_AUGMENTATION_FLAG, 'M': M,
'base_model': base_model, 'num_chunks': num_chunks,
'model_rep': model_rep_ens,
'output_activation': output_activation,
'num_filters_ens': num_filters_ens,
'num_filters_ens_2': num_filters_ens_2, 'batch_size': batch_size,
'epochs': epochs, 'dropout_rate': dropout_rate_ens, 'lr': lr,
'blend_factor': blend_factor, 'inp_shape': inp_shape,
'noise_stddev': noise_stddev,
'weight_save_freq': weight_save_freq, 'name': name,
'model_path': model_path,
'zero_one_input': True,
'adaptive_attack': adaptive_attack
}
m5 = Model_Tanh_Ensemble({}, params_dict)
m5.loadFullModel()  # load the saved model, which should already have been trained via TrainModel
m5.legend = 'TEns32'
model = m5
return model, (X_valid, Y_valid), (X_test, Y_test)
def wbAttack(sess, model, x_ph, x_adv_op, X, Y, batch_size=500, verbose=True):
n_correct = 0
n_total = 0
all_logits = []
all_x_adv = []
import tqdm
pbar = np.arange(0, X.shape[0], batch_size)
if verbose:
pbar = tqdm.tqdm(pbar)
for start_idx in pbar:
x = X[start_idx:start_idx + batch_size]
y = Y[start_idx:start_idx + batch_size]
x_adv = sess.run(x_adv_op, {x_ph: x})
logits = sess.run(model.logits, {model.input: x_adv})
preds = np.argmax(logits, -1)
n_correct += np.sum(np.equal(preds, y))
n_total += len(x)
all_logits.append(logits)
all_x_adv.append(x_adv)
all_x_adv = np.concatenate(all_x_adv, 0)
all_logits = np.concatenate(all_logits, 0)
adv_acc = n_correct / n_total
return adv_acc, all_logits, all_x_adv
def patch_pgd_loss():
import cleverhans
def fgm(x,
logits,
y=None,
eps=0.3,
ord=np.inf,
clip_min=None,
clip_max=None,
targeted=False,
sanity_checks=True):
asserts = []
# If a data range was specified, check that the input was in that range
if clip_min is not None:
asserts.append(cleverhans.utils_tf.assert_greater_equal(
x, tf.cast(clip_min, x.dtype)))
if clip_max is not None:
asserts.append(cleverhans.utils_tf.assert_less_equal(x, tf.cast(clip_max, x.dtype)))
# Make sure the caller has not passed probs by accident
assert logits.op.type != 'Softmax'
if y is None:
# Using model predictions as ground truth to avoid label leaking
preds_max = tf.reduce_max(logits, 1, keepdims=True)
y = tf.to_float(tf.equal(logits, preds_max))
y = tf.stop_gradient(y)
y = y / tf.reduce_sum(y, 1, keepdims=True)
# Compute loss
loss = loss_fn(labels=y, logits=logits)
if targeted:
loss = -loss
# loss = tf.Print(loss, [loss])
# Define gradient of loss wrt input
grad, = tf.gradients(loss, x)
optimal_perturbation = cleverhans.attacks.optimize_linear(grad, eps, ord)
# Add perturbation to original example to obtain adversarial example
adv_x = x + optimal_perturbation
# If clipping is needed, reset all values outside of [clip_min, clip_max]
if (clip_min is not None) or (clip_max is not None):
# We don't currently support one-sided clipping
assert clip_min is not None and clip_max is not None
adv_x = cleverhans.utils_tf.clip_by_value(adv_x, clip_min, clip_max)
if sanity_checks:
with tf.control_dependencies(asserts):
adv_x = tf.identity(adv_x)
return adv_x
def loss_fn(sentinel=None,
labels=None,
logits=None,
dim=-1):
"""
Wrapper around tf.nn.softmax_cross_entropy_with_logits_v2 to handle
deprecated warning
"""
# Make sure that all arguments were passed as named arguments.
if sentinel is not None:
name = "softmax_cross_entropy_with_logits"
raise ValueError("Only call `%s` with "
"named arguments (labels=..., logits=..., ...)"
% name)
if labels is None or logits is None:
raise ValueError("Both labels and logits must be provided.")
labels = tf.stop_gradient(labels)
# modified from
# https://github.com/carlini/nn_robust_attacks/blob/master/li_attack.py
real = tf.reduce_sum(labels * logits, -1)
other = tf.reduce_max((1-labels) * logits - (labels*10000), -1)
loss = other - real
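    # The quantity above is the Carlini-Wagner margin: it is negative while
    # the true-class logit still dominates, so ascending it with PGD pushes
    # the prediction toward misclassification.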
# loss = tf.Print(loss, [loss])
return loss
cleverhans.attacks.fgm = fgm
def main():
sess = backend.get_session()
backend.set_learning_phase(0)  # needed to get CleverHans to work with batchnorm
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--eps", type=float, default=8, help="in 0-255")
parser.add_argument("--pgd-n-steps", default=200, type=int)
parser.add_argument("--pgd-step-size", type=float, default=2 / 3 * 8, help="in 0-255")
parser.add_argument("--n-samples", type=int, default=512)
parser.add_argument("--adaptive-attack", action="store_true")
args = parser.parse_args()
model, (X_valid, Y_valid), (X_test, Y_test) = setup_model_and_data(adaptive_attack=args.adaptive_attack)
test_indices = list(range(len(X_test)))
np.random.shuffle(test_indices)
X_test, Y_test = X_test[test_indices], Y_test[test_indices]
X_test, Y_test = X_test[:args.n_samples], Y_test[:args.n_samples]
model_ch = model.modelCH()
attack = ProjectedGradientDescent(model_ch, sess=sess)
att_params = {'clip_min': 0.0, 'clip_max': 1.0,
'eps': args.eps / 255.0, 'eps_iter': args.pgd_step_size / 255.0,
'nb_iter': args.pgd_n_steps, 'ord': np.inf,
}
if args.adaptive_attack:
patch_pgd_loss()
x_ph = tf.placeholder(shape=model.input.shape, dtype=tf.float32)
x_adv_op = attack.generate(x_ph, **att_params)
adv_acc, all_logits, all_x_adv = wbAttack(sess, model,
x_ph, x_adv_op,
X_test, Y_test, batch_size=512)
print("Robust accuracy:", adv_acc)
if __name__ == "__main__":
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Full implementation of all methods of the abstract class "Model".
"""
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Dropout, Multiply, Lambda, Input, Dense, Conv2D, MaxPooling2D, Flatten, Activation, UpSampling2D, Concatenate, GaussianNoise
from tensorflow.keras.utils import plot_model
from tensorflow.keras import metrics, regularizers, optimizers
from tensorflow.keras.models import Model as KerasModel
from Model import Model
from tensorflow.keras import losses, metrics
from ClassBlender import ClassBlender
from DataAugmenter import DataAugmenter
# Full architectural definition for all "baseline" models used in the paper
def defineModelBaseline(self):
outputs=[]
self.penultimate = []
self.penultimate2 = []
x = self.input
x = GaussianNoise(self.params_dict['noise_stddev'], input_shape=self.params_dict['inp_shape'])(x)
if (self.TRAIN_FLAG==1):
if self.params_dict['DATA_AUGMENTATION_FLAG']>0:
x = DataAugmenter(self.params_dict['batch_size'])(x)
x = ClassBlender(self.params_dict['blend_factor'], self.params_dict['batch_size'])(x)
x = Lambda(lambda x: tf.clip_by_value(x,-0.5,0.5))(x)
for rep in np.arange(self.params_dict['model_rep']):
x = Conv2D(self.params_dict['num_filters_std'][0], (5,5), activation='elu', padding='same', kernel_regularizer=regularizers.l2(self.params_dict['weight_decay']))(x)
if self.params_dict['BATCH_NORMALIZATION_FLAG']>0:
x = BatchNormalization()(x)
x = Conv2D(self.params_dict['num_filters_std'][0], (3,3), strides=(2,2), activation='elu', padding='same')(x)
for rep in np.arange(self.params_dict['model_rep']):
x = Conv2D(self.params_dict['num_filters_std'][1], (3, 3), activation='elu', padding='same', kernel_regularizer=regularizers.l2(self.params_dict['weight_decay']))(x)
if self.params_dict['BATCH_NORMALIZATION_FLAG']>0:
x = BatchNormalization()(x)
x = Conv2D(self.params_dict['num_filters_std'][1], (3,3), strides=(2,2), activation='elu', padding='same')(x)
x_=x
for rep in np.arange(self.params_dict['model_rep']):
x_ = Conv2D(self.params_dict['num_filters_std'][2], (3, 3), activation='elu', padding='same', kernel_regularizer=regularizers.l2(self.params_dict['weight_decay']))(x_)
if self.params_dict['BATCH_NORMALIZATION_FLAG']>0:
x_ = BatchNormalization()(x_)
x_ = Conv2D(self.params_dict['num_filters_std'][2], (3,3), strides=(2,2), activation='elu', padding='same')(x_)
x_ = Flatten()(x_)
x_ = Dense(128, activation='elu')(x_)
x_ = Dense(64, activation='elu')(x_)
x0 = Dense(64, activation='linear')(x_)
x1 = Dense(self.params_dict['M'].shape[1], activation='linear', kernel_regularizer=regularizers.l2(0.0))(x0)
outputs = [x1]
self.model = KerasModel(inputs=self.input, outputs=outputs)
self.model.summary()  # summary() already prints; wrapping it in print() would emit an extra "None"
plot_model(self.model, to_file=self.params_dict['model_path'] + '/' + self.params_dict['name'] + '.png')
return outputs
class Model_Softmax_Baseline(Model):
def __init__(self, data_dict, params_dict):
super(Model_Softmax_Baseline, self).__init__(data_dict, params_dict)
def defineModel(self):
return defineModelBaseline(self)
def defineLoss(self, idx):
def loss_fn(y_true, y_pred):
loss = tf.keras.backend.categorical_crossentropy(y_true, y_pred, from_logits=True)
return loss
return loss_fn
def defineMetric(self):
return [metrics.categorical_accuracy]
class Model_Logistic_Baseline(Model):
def __init__(self, data_dict, params_dict):
super(Model_Logistic_Baseline, self).__init__(data_dict, params_dict)
def defineModel(self):
return defineModelBaseline(self)
def defineLoss(self, idx):
def loss_fn(y_true, y_pred):
loss = tf.keras.backend.binary_crossentropy(y_true, y_pred, from_logits=True)
return loss
return loss_fn
def defineMetric(self):
def sigmoid_pred(y_true, y_pred):
corr = tf.to_float((y_pred*(2*y_true-1))>0)
return tf.reduce_mean(corr)
return [sigmoid_pred]
class Model_Tanh_Baseline(Model):
def __init__(self, data_dict, params_dict):
super(Model_Tanh_Baseline, self).__init__(data_dict, params_dict)
def defineModel(self):
return defineModelBaseline(self)
def defineLoss(self, idx):
def hinge_loss(y_true, y_pred):
loss = tf.reduce_mean(tf.maximum(1.0-y_true*y_pred, 0))
return loss
return hinge_loss
def defineMetric(self):
def tanh_pred(y_true, y_pred):
corr = tf.to_float((y_pred*y_true)>0)
return tf.reduce_mean(corr)
return [tanh_pred]
class Model_Logistic_Ensemble(Model):
def __init__(self, data_dict, params_dict):
super(Model_Logistic_Ensemble, self).__init__(data_dict, params_dict)
def defineLoss(self, idx):
def loss_fn(y_true, y_pred):
loss = tf.keras.backend.binary_crossentropy(y_true, y_pred, from_logits=True)
return loss
return loss_fn
def defineMetric(self):
def sigmoid_pred(y_true, y_pred):
corr = tf.to_float((y_pred*(2*y_true-1))>0)
return tf.reduce_mean(corr)
return [sigmoid_pred]
class Model_Tanh_Ensemble(Model):
def __init__(self, data_dict, params_dict):
super(Model_Tanh_Ensemble, self).__init__(data_dict, params_dict)
def defineLoss(self, idx):
def hinge_loss(y_true, y_pred):
loss = tf.reduce_mean(tf.maximum(1.0-y_true*y_pred, 0))
return loss
return hinge_loss
def defineMetric(self):
def hinge_pred(y_true, y_pred):
corr = tf.to_float((y_pred*y_true)>0)
return tf.reduce_mean(corr)
return [hinge_pred]
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This code blends two classes together as a convex combination; a type of simple data augmentation
"""
from tensorflow.keras.layers import Layer
from tensorflow.keras import backend as K
import tensorflow as tf
import numpy as np
class ClassBlender(Layer):
"""Only active at training time since it is a regularization layer.
# Arguments
attenuation: how much to attenuate the input
# Input shape
Arbitrary.
# Output shape
Same as the input shape.
"""
def __init__(self, attenuation, batch_size, **kwargs):
super(ClassBlender, self).__init__(**kwargs)
self.supports_masking = True
self.attenuation = attenuation
self.batch_size = batch_size
def call(self, inputs, training=None):
def blended():
inputs_permuted = tf.random_shuffle(inputs)
angles = (180*(2*np.random.rand(self.batch_size)-1))*np.pi/180
shifts = 4*(2*np.random.rand(self.batch_size, 2)-1)
inputs_permuted_translated = tf.contrib.image.translate(inputs_permuted, shifts)
inputs_permuted_translated_rotated = tf.contrib.image.rotate(inputs_permuted_translated,angles)
inputs_adjusted = inputs_permuted_translated_rotated
inputs_adjusted = tf.clip_by_value(inputs_adjusted,-0.5,0.5)
return (1.0-self.attenuation)*inputs + self.attenuation*inputs_adjusted
return K.in_train_phase(blended, inputs, training=training)
def get_config(self):
config = {'attenuation': self.attenuation, 'batch_size':self.batch_size}
base_config = super(ClassBlender, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
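# Behavior sketch (added, assumes a TF 1.x runtime where tf.contrib exists):
# ClassBlender is the identity at inference and only blends each input with a
# randomly transformed, permuted batch member during training; the other
# augmentation layers here use the same K.in_train_phase pattern.
if __name__ == "__main__":
    x = K.constant(np.random.rand(8, 32, 32, 3).astype('float32') - 0.5)
    layer = ClassBlender(attenuation=0.1, batch_size=8)
    y_infer = layer(x, training=False)  # graph op that returns inputs unchanged
    y_train = layer(x, training=True)   # graph op for the convex blend
    print(y_infer.shape, y_train.shape)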
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
from tensorflow.keras.layers import Layer
from tensorflow.keras import backend as K
import tensorflow as tf
import numpy as np
class DataAugmenter(Layer):
"""Shifts and scales input
Only active at training time since it is a regularization layer.
# Arguments
attenuation: how much to attenuate the input
# Input shape
Arbitrary.
# Output shape
Same as the input shape.
"""
def __init__(self, batch_size, **kwargs):
super(DataAugmenter, self).__init__(**kwargs)
self.supports_masking = True
self.batch_size = batch_size
def call(self, inputs, training=None):
def augmented():
angles = (15*(2*np.random.rand(self.batch_size)-1))*np.pi/180
shifts = 4*(2*np.random.rand(self.batch_size, 2)-1)
inputs_shifted = tf.contrib.image.translate(inputs, shifts)
inputs_shifted_rotated = tf.contrib.image.rotate(inputs_shifted,angles)
random_number = tf.random_uniform([self.batch_size])
inputs_shifted_rotated_flipped = tf.where(random_number<0.5, tf.image.flip_left_right(inputs_shifted_rotated), inputs_shifted_rotated)
            # modification by zimmerrol to make sure keras shape computation works
inputs_shifted_rotated_flipped = tf.ensure_shape(inputs_shifted_rotated_flipped, inputs.shape)
return inputs_shifted_rotated_flipped
return K.in_train_phase(augmented, inputs, training=training)
def get_config(self):
config = {}
config['batch_size'] = self.batch_size
base_config = super(DataAugmenter, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|