# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .wideresnet import *
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes,
out_planes,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
        self.convShortcut = None if self.equalInOut else nn.Conv2d(
            in_planes,
            out_planes,
            kernel_size=1,
            stride=stride,
            padding=0,
            bias=False)
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
def __init__(self,
nb_layers,
in_planes,
out_planes,
block,
stride,
dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers,
stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride,
dropRate):
layers = []
for i in range(int(nb_layers)):
layers.append(
                block(in_planes if i == 0 else out_planes, out_planes,
                      stride if i == 0 else 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [
16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor
]
assert ((depth - 4) % 6 == 0)
        n = (depth - 4) // 6
block = BasicBlock
self.conv1 = nn.Conv2d(3,
nChannels[0],
kernel_size=3,
stride=1,
padding=1,
bias=False)
# block 1
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1,
dropRate)
# block 2
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2,
dropRate)
# block 3
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2,
dropRate)
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x, mode='logits', features_only=False, features_and_logits=False):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
if features_only:
return out.view(x.size(0), -1)
if features_and_logits:
return out.view(x.size(0), -1), self.fc(out)
if mode.lower() == 'logits':
return self.fc(out)
elif mode.lower() == 'feature':
return out.view(x.size(0), -1)
else:
            raise ValueError('unsupported mode specified: ' + str(mode))
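# A minimal usage sketch, assuming CIFAR-sized 32x32 inputs (which the fixed
# 8x8 average pooling in forward() implies): build a WideResNet-28-10 and run
# a dummy batch through it.
if __name__ == '__main__':
    net = WideResNet(depth=28, num_classes=10, widen_factor=10)
    dummy = torch.randn(2, 3, 32, 32)
    logits = net(dummy, mode='logits')      # shape: (2, 10)
    feats = net(dummy, features_only=True)  # shape: (2, 640)
    print(logits.shape, feats.shape)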
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, argparse, time, subprocess, io, shlex
import pandas as pd
import tqdm
parser = argparse.ArgumentParser(description='ImageNet Validation')
parser.add_argument('--in_path', required=True,
help='path to ImageNet folder that contains val folder')
parser.add_argument('--batch_size', default=128, type=int,
help='size of batch for validation')
parser.add_argument('--workers', default=20, type=int,
                    help='number of data loading workers')
parser.add_argument('--ngpus', default=1, type=int,
help='number of GPUs to use; 0 if you want to run on CPU')
parser.add_argument('--model_arch', choices=['alexnet', 'resnet50', 'resnet50_at', 'cornets'], default='resnet50',
help='back-end model architecture to load')
FLAGS, FIRE_FLAGS = parser.parse_known_args()
def set_gpus(n=2):
"""
Finds all GPUs on the system and restricts to n of them that have the most
free memory.
"""
if n > 0:
gpus = subprocess.run(shlex.split(
'nvidia-smi --query-gpu=index,memory.free,memory.total --format=csv,nounits'), check=True,
stdout=subprocess.PIPE).stdout
gpus = pd.read_csv(io.BytesIO(gpus), sep=', ', engine='python')
gpus = gpus[gpus['memory.total [MiB]'] > 10000] # only above 10 GB
if os.environ.get('CUDA_VISIBLE_DEVICES') is not None:
visible = [int(i)
for i in os.environ['CUDA_VISIBLE_DEVICES'].split(',')]
gpus = gpus[gpus['index'].isin(visible)]
gpus = gpus.sort_values(by='memory.free [MiB]', ascending=False)
        os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'  # make sure GPUs are numbered the same way as in nvidia-smi
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
[str(i) for i in gpus['index'].iloc[:n]])
else:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
set_gpus(FLAGS.ngpus)
import torch
import torch.nn as nn
import torchvision
from vonenet import get_model
device = torch.device("cuda" if FLAGS.ngpus > 0 else "cpu")
def val():
model = get_model(model_arch=FLAGS.model_arch, pretrained=True)
if FLAGS.ngpus == 0:
print('Running on CPU')
if FLAGS.ngpus > 0 and torch.cuda.device_count() > 1:
print('Running on multiple GPUs')
model = model.to(device)
    elif FLAGS.ngpus > 0 and torch.cuda.device_count() == 1:
print('Running on single GPU')
model = model.to(device)
else:
print('No GPU detected!')
model = model.module
validator = ImageNetVal(model)
record = validator()
print(record['top1'])
print(record['top5'])
return
class ImageNetVal(object):
def __init__(self, model):
self.name = 'val'
self.model = model
self.data_loader = self.data()
        self.loss = nn.CrossEntropyLoss(reduction='sum')  # summed here, averaged over the dataset below
self.loss = self.loss.to(device)
def data(self):
dataset = torchvision.datasets.ImageFolder(
os.path.join(FLAGS.in_path, 'val'),
torchvision.transforms.Compose([
torchvision.transforms.Resize(256),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5]),
]))
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=FLAGS.batch_size,
shuffle=False,
num_workers=FLAGS.workers,
pin_memory=True)
return data_loader
def __call__(self):
self.model.eval()
start = time.time()
record = {'loss': 0, 'top1': 0, 'top5': 0}
with torch.no_grad():
for (inp, target) in tqdm.tqdm(self.data_loader, desc=self.name):
target = target.to(device)
output = self.model(inp)
record['loss'] += self.loss(output, target).item()
p1, p5 = accuracy(output, target, topk=(1, 5))
record['top1'] += p1
record['top5'] += p5
for key in record:
record[key] /= len(self.data_loader.dataset.samples)
record['dur'] = (time.time() - start) / len(self.data_loader)
return record
def accuracy(output, target, topk=(1,)):
with torch.no_grad():
_, pred = output.topk(max(topk), dim=1, largest=True, sorted=True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = [correct[:k].sum().item() for k in topk]
return res
if __name__ == '__main__':
val()
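# Example invocation (file name and paths are hypothetical):
#   python val.py --in_path /data/imagenet --batch_size 128 --ngpus 1 --model_arch resnet50
# <in_path>/val must contain the standard per-class ImageFolder subdirectories.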
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, argparse, time
import tqdm
import numpy as np
import attacks.pgd as pgd
import attacks.autopgd as autopgd
parser = argparse.ArgumentParser(description='ImageNet Adversarial Validation')
parser.add_argument('--in-path', required=True,
help='path to ImageNet folder that contains val folder')
parser.add_argument('--batch-size', default=128, type=int,
help='size of batch for validation')
parser.add_argument('--workers', default=20, type=int,
                    help='number of data loading workers')
parser.add_argument('--model-arch',
choices=['alexnet', 'resnet50', 'resnet50_at', 'cornets'],
default='resnet50',
help='back-end model architecture to load')
parser.add_argument("--n-samples", type=int, default=50000)
parser.add_argument("--epsilon", default=1, help="in X/255", type=int)
parser.add_argument("--attack", choices=("pgd", "apgd"), default="pgd")
parser.add_argument("--n-steps", type=int, default=64)
parser.add_argument("--step-size", default=0.1, help="in X/255", type=float)
parser.add_argument("--ensemble-size", type=int, default=1)
parser.add_argument("--deterministic-replacement", action="store_true")
parser.add_argument("--differentiable-replacement", action="store_true")
parser.add_argument("--stable-gradients", action="store_true")
FLAGS = parser.parse_args()
import torch
import torch.nn as nn
import torchvision
from vonenet import get_model
device = "cuda" if torch.cuda.is_available() else "cpu"
def val():
model = get_model(model_arch=FLAGS.model_arch, pretrained=True)
model = model.to(device)
if FLAGS.attack == "pgd":
attack_fn = lambda m, x, y: \
pgd.pgd(m, x, y, FLAGS.n_steps, FLAGS.step_size / 255.0,
FLAGS.epsilon / 255.0, "linf",
n_averaging_steps=FLAGS.ensemble_size)[0]
else:
attack_fn = lambda m, x, y: \
autopgd.auto_pgd(m, x, y, FLAGS.n_steps, FLAGS.step_size / 255.0,
FLAGS.epsilon / 255.0, "linf",
n_averaging_steps=FLAGS.ensemble_size)[0]
validator = ImageNetAdversarialVal(model, attack_fn=attack_fn,
n_samples=FLAGS.n_samples)
record = validator()
print("Top 1:", record['top1'])
print("Top 5:", record['top5'])
return
class ImageNetAdversarialVal(object):
def __init__(self, model, attack_fn, n_samples=50000):
self.name = 'val'
self.model = model
self.data_loader = self.data()
        self.loss = nn.CrossEntropyLoss(reduction='sum')
self.loss = self.loss.to(device)
self.attack_fn = attack_fn
self.n_samples = n_samples
def data(self):
dataset = torchvision.datasets.ImageFolder(
os.path.join(FLAGS.in_path, 'val'),
torchvision.transforms.Compose([
torchvision.transforms.Resize(256),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5]),
]))
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=FLAGS.batch_size,
shuffle=True,
num_workers=FLAGS.workers,
pin_memory=True)
return data_loader
def __call__(self):
self.model.eval()
start = time.time()
record = {'loss': 0, 'top1': 0, 'top5': 0}
n_samples = 0
n_batches = 0
with tqdm.tqdm(
total=int(np.ceil(self.n_samples / self.data_loader.batch_size)),
desc=self.name) as pbar:
for (inp, target) in self.data_loader:
target = target.to(device)
with torch.autograd.set_detect_anomaly(True):
if FLAGS.stable_gradients:
self.model.module.vone_block.stable_gabor_f = True
self.model.module.vone_block.deterministic = FLAGS.deterministic_replacement
if FLAGS.differentiable_replacement:
self.model.module.vone_block.simple = nn.ReLU(inplace=False)
inp_adv = self.attack_fn(self.model, inp, target)
# make model stochastic again etc.
self.model.module.vone_block.deterministic = False
self.model.module.vone_block.stable_gabor_f = False
self.model.module.vone_block.simple = nn.ReLU(inplace=True)
with torch.no_grad():
output = self.model(inp_adv)
record['loss'] += self.loss(output, target).item()
p1, p5 = accuracy(output, target, topk=(1, 5))
record['top1'] += p1
record['top5'] += p5
n_samples += len(inp)
n_batches += 1
pbar.update(1)
if n_samples >= self.n_samples:
break
for key in record:
record[key] /= n_samples
record['dur'] = (time.time() - start) / n_batches
return record
def accuracy(output, target, topk=(1,)):
with torch.no_grad():
_, pred = output.topk(max(topk), dim=1, largest=True, sorted=True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = [correct[:k].sum().item() for k in topk]
return res
if __name__ == '__main__':
val()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
with open('README.md') as readme_file:
readme = readme_file.read()
requirements = [
"torch>=0.4.0+",
"torchvision",
"numpy",
"pandas",
"scipy",
"tqdm",
"fire",
"requests",
]
setup(
name='vonenet',
version='0.1.0',
description="CNNs with a Primary Visual Cortex Front-End ",
long_description=readme,
author="Tiago Marques, Joel Dapello",
author_email='[email protected], [email protected]',
url='https://github.com/dicarlolab/vonenet',
packages=['vonenet'],
include_package_data=True,
install_requires=requirements,
license="GNU GPL v3",
zip_safe=False,
keywords='VOneNet, Robustness, Primary Visual Cortex',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU GPL v3',
'Natural Language :: English',
'Programming Language :: Python :: 3.6'
],
)
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, argparse, time, subprocess, io, shlex, pickle, pprint
import pandas as pd
import numpy as np
import tqdm
import fire
parser = argparse.ArgumentParser(description='ImageNet Training')
## General parameters
parser.add_argument('--in_path', required=True,
help='path to ImageNet folder that contains train and val folders')
parser.add_argument('-o', '--output_path', default=None,
                    help='path for storing checkpoints and results')
parser.add_argument('-restore_epoch', '--restore_epoch', default=0, type=int,
                    help='epoch number for restoring model training')
parser.add_argument('-restore_path', '--restore_path', default=None, type=str,
                    help='path of folder containing specific epoch file for restoring model training')
## Training parameters
parser.add_argument('--ngpus', default=0, type=int,
help='number of GPUs to use; 0 if you want to run on CPU')
parser.add_argument('-j', '--workers', default=20, type=int,
help='number of data loading workers')
parser.add_argument('--epochs', default=70, type=int,
help='number of total epochs to run')
parser.add_argument('--batch_size', default=256, type=int,
help='mini-batch size')
parser.add_argument('--optimizer', choices=['stepLR', 'plateauLR'], default='stepLR',
help='Optimizer')
parser.add_argument('--lr', '--learning_rate', default=.1, type=float,
help='initial learning rate')
parser.add_argument('--step_size', default=20, type=int,
help='after how many epochs learning rate should be decreased by step_factor')
parser.add_argument('--step_factor', default=0.1, type=float,
help='factor by which to decrease the learning rate')
parser.add_argument('--momentum', default=.9, type=float, help='momentum')
parser.add_argument('--weight_decay', default=1e-4, type=float,
                    help='weight decay')
## Model parameters
parser.add_argument('--torch_seed', default=0, type=int,
help='seed for weights initializations and torch RNG')
parser.add_argument('--model_arch', choices=['alexnet', 'resnet50', 'resnet50_at', 'cornets'], default='resnet50',
help='back-end model architecture to load')
parser.add_argument('--normalization', choices=['vonenet', 'imagenet'], default='vonenet',
help='image normalization to apply to models')
parser.add_argument('--visual_degrees', default=8, type=float,
help='Field-of-View of the model in visual degrees')
## VOneBlock parameters
# Gabor filter bank
parser.add_argument('--stride', default=4, type=int,
help='stride for the first convolution (Gabor Filter Bank)')
parser.add_argument('--ksize', default=25, type=int,
help='kernel size for the first convolution (Gabor Filter Bank)')
parser.add_argument('--simple_channels', default=256, type=int,
help='number of simple channels in V1 block')
parser.add_argument('--complex_channels', default=256, type=int,
help='number of complex channels in V1 block')
parser.add_argument('--gabor_seed', default=0, type=int,
help='seed for gabor initialization')
parser.add_argument('--sf_corr', default=0.75, type=float,
                    help='correlation used when jointly sampling spatial frequency and size of the Gabors')
parser.add_argument('--sf_max', default=6, type=float,
                    help='maximum spatial frequency of the Gabor filter bank (cycles/degree)')
parser.add_argument('--sf_min', default=0, type=float,
                    help='minimum spatial frequency of the Gabor filter bank (cycles/degree)')
parser.add_argument('--rand_param', action='store_true', default=False,
                    help='sample Gabor parameters from uniform distributions instead of neuronal ones')
parser.add_argument('--k_exc', default=25, type=float,
                    help='excitatory gain applied to the V1 block activations')
# Noise layer
parser.add_argument('--noise_mode', choices=['gaussian', 'neuronal', None],
default=None,
help='noise distribution')
parser.add_argument('--noise_scale', default=1, type=float,
help='noise scale factor')
parser.add_argument('--noise_level', default=1, type=float,
help='noise level')
FLAGS, FIRE_FLAGS = parser.parse_known_args()
def set_gpus(n=2):
"""
Finds all GPUs on the system and restricts to n of them that have the most
free memory.
"""
if n > 0:
gpus = subprocess.run(shlex.split(
'nvidia-smi --query-gpu=index,memory.free,memory.total --format=csv,nounits'), check=True,
stdout=subprocess.PIPE).stdout
gpus = pd.read_csv(io.BytesIO(gpus), sep=', ', engine='python')
gpus = gpus[gpus['memory.total [MiB]'] > 10000] # only above 10 GB
if os.environ.get('CUDA_VISIBLE_DEVICES') is not None:
visible = [int(i)
for i in os.environ['CUDA_VISIBLE_DEVICES'].split(',')]
gpus = gpus[gpus['index'].isin(visible)]
gpus = gpus.sort_values(by='memory.free [MiB]', ascending=False)
        os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'  # make sure GPUs are numbered the same way as in nvidia-smi
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
[str(i) for i in gpus['index'].iloc[:n]])
else:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
if FLAGS.ngpus > 0:
set_gpus(FLAGS.ngpus)
import torch
import torch.nn as nn
import torch.utils.model_zoo
import torchvision
from vonenet import get_model
torch.manual_seed(FLAGS.torch_seed)
torch.backends.cudnn.benchmark = True
if FLAGS.ngpus > 0:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
device = 'cpu'
if FLAGS.normalization == 'vonenet':
print('VOneNet normalization')
norm_mean = [0.5, 0.5, 0.5]
norm_std = [0.5, 0.5, 0.5]
elif FLAGS.normalization == 'imagenet':
print('Imagenet standard normalization')
norm_mean = [0.485, 0.456, 0.406]
norm_std = [0.229, 0.224, 0.225]
def load_model():
map_location = None if FLAGS.ngpus > 0 else 'cpu'
print('Getting VOneNet')
model = get_model(map_location=map_location, model_arch=FLAGS.model_arch, pretrained=False,
visual_degrees=FLAGS.visual_degrees, stride=FLAGS.stride, ksize=FLAGS.ksize,
sf_corr=FLAGS.sf_corr, sf_max=FLAGS.sf_max, sf_min=FLAGS.sf_min, rand_param=FLAGS.rand_param,
gabor_seed=FLAGS.gabor_seed, simple_channels=FLAGS.simple_channels,
                      complex_channels=FLAGS.complex_channels, noise_mode=FLAGS.noise_mode,
noise_scale=FLAGS.noise_scale, noise_level=FLAGS.noise_level, k_exc=FLAGS.k_exc)
if FLAGS.ngpus > 0 and torch.cuda.device_count() > 1:
print('We have multiple GPUs detected')
model = model.to(device)
    elif FLAGS.ngpus > 0 and torch.cuda.device_count() == 1:
print('We run on GPU')
model = model.to(device)
else:
print('No GPU detected!')
model = model.module
return model
def train(save_train_epochs=.2, # how often save output during training
save_val_epochs=.5, # how often save output during validation
save_model_epochs=1, # how often save model weights
save_model_secs=720 * 10 # how often save model (in sec)
):
model = load_model()
trainer = ImageNetTrain(model)
validator = ImageNetVal(model)
start_epoch = 0
records = []
if FLAGS.restore_epoch > 0:
print('Restoring from previous...')
ckpt_data = torch.load(os.path.join(FLAGS.restore_path, f'epoch_{FLAGS.restore_epoch:02d}.pth.tar'))
start_epoch = ckpt_data['epoch']
print('Loaded epoch: '+str(start_epoch))
model.load_state_dict(ckpt_data['state_dict'])
trainer.optimizer.load_state_dict(ckpt_data['optimizer'])
results_old = pickle.load(open(os.path.join(FLAGS.restore_path, 'results.pkl'), 'rb'))
for result in results_old:
records.append(result)
results = {'meta': {'step_in_epoch': 0,
'epoch': start_epoch,
'wall_time': time.time()}
}
# records = []
recent_time = time.time()
nsteps = len(trainer.data_loader)
    save_train_steps = save_val_steps = save_model_steps = None
    if save_train_epochs is not None:
save_train_steps = (np.arange(0, FLAGS.epochs + 1,
save_train_epochs) * nsteps).astype(int)
if save_val_epochs is not None:
save_val_steps = (np.arange(0, FLAGS.epochs + 1,
save_val_epochs) * nsteps).astype(int)
if save_model_epochs is not None:
save_model_steps = (np.arange(0, FLAGS.epochs + 1,
save_model_epochs) * nsteps).astype(int)
for epoch in tqdm.trange(start_epoch, FLAGS.epochs + 1, initial=0, desc='epoch'):
print(epoch)
data_load_start = np.nan
data_loader_iter = trainer.data_loader
for step, data in enumerate(tqdm.tqdm(data_loader_iter, desc=trainer.name)):
data_load_time = time.time() - data_load_start
global_step = epoch * nsteps + step
if save_val_steps is not None:
if global_step in save_val_steps:
results[validator.name] = validator()
if FLAGS.optimizer == 'plateauLR' and step == 0:
trainer.lr.step(results[validator.name]['loss'])
trainer.model.train()
print('LR: ', trainer.optimizer.param_groups[0]["lr"])
if FLAGS.output_path is not None:
if not (os.path.isdir(FLAGS.output_path)):
os.mkdir(FLAGS.output_path)
records.append(results)
if len(results) > 1:
pickle.dump(records, open(os.path.join(FLAGS.output_path, 'results.pkl'), 'wb'))
ckpt_data = {}
ckpt_data['flags'] = FLAGS.__dict__.copy()
ckpt_data['epoch'] = epoch
ckpt_data['state_dict'] = model.state_dict()
ckpt_data['optimizer'] = trainer.optimizer.state_dict()
if save_model_secs is not None:
if time.time() - recent_time > save_model_secs:
torch.save(ckpt_data, os.path.join(FLAGS.output_path,
'latest_checkpoint.pth.tar'))
recent_time = time.time()
if save_model_steps is not None:
if global_step in save_model_steps:
torch.save(ckpt_data, os.path.join(FLAGS.output_path,
f'epoch_{epoch:02d}.pth.tar'))
else:
if len(results) > 1:
pprint.pprint(results)
if epoch < FLAGS.epochs:
frac_epoch = (global_step + 1) / nsteps
record = trainer(frac_epoch, *data)
record['data_load_dur'] = data_load_time
results = {'meta': {'step_in_epoch': step + 1,
'epoch': frac_epoch,
'wall_time': time.time()}
}
if save_train_steps is not None:
if step in save_train_steps:
results[trainer.name] = record
data_load_start = time.time()
class ImageNetTrain(object):
def __init__(self, model):
self.name = 'train'
self.model = model
self.data_loader = self.data()
self.optimizer = torch.optim.SGD(self.model.parameters(), FLAGS.lr, momentum=FLAGS.momentum,
weight_decay=FLAGS.weight_decay)
if FLAGS.optimizer == 'stepLR':
self.lr = torch.optim.lr_scheduler.StepLR(self.optimizer, gamma=FLAGS.step_factor,
step_size=FLAGS.step_size)
elif FLAGS.optimizer == 'plateauLR':
self.lr = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, factor=FLAGS.step_factor,
patience=FLAGS.step_size-1, threshold=0.01)
self.loss = nn.CrossEntropyLoss()
if FLAGS.ngpus > 0:
self.loss = self.loss.cuda()
def data(self):
dataset = torchvision.datasets.ImageFolder(
os.path.join(FLAGS.in_path, 'train'),
torchvision.transforms.Compose([
torchvision.transforms.RandomResizedCrop(224),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=norm_mean, std=norm_std)
]))
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=FLAGS.batch_size,
shuffle=True,
num_workers=FLAGS.workers,
pin_memory=True)
return data_loader
def __call__(self, frac_epoch, inp, target):
start = time.time()
if FLAGS.optimizer == 'stepLR':
self.lr.step(epoch=frac_epoch)
target = target.to(device)
output = self.model(inp)
record = {}
loss = self.loss(output, target)
record['loss'] = loss.item()
record['top1'], record['top5'] = accuracy(output, target, topk=(1, 5))
record['top1'] /= len(output)
record['top5'] /= len(output)
# record['learning_rate'] = self.lr.get_lr()[0]
record['learning_rate'] = self.optimizer.param_groups[0]["lr"]
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
record['dur'] = time.time() - start
return record
class ImageNetVal(object):
def __init__(self, model):
self.name = 'val'
self.model = model
self.data_loader = self.data()
        self.loss = nn.CrossEntropyLoss(reduction='sum')
self.loss = self.loss.to(device)
def data(self):
dataset = torchvision.datasets.ImageFolder(
os.path.join(FLAGS.in_path, 'val'),
torchvision.transforms.Compose([
torchvision.transforms.Resize(256),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=norm_mean, std=norm_std),
]))
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=FLAGS.batch_size,
shuffle=False,
num_workers=FLAGS.workers,
pin_memory=True)
return data_loader
def __call__(self):
self.model.eval()
start = time.time()
record = {'loss': 0, 'top1': 0, 'top5': 0}
with torch.no_grad():
for (inp, target) in tqdm.tqdm(self.data_loader, desc=self.name):
target = target.to(device)
output = self.model(inp)
record['loss'] += self.loss(output, target).item()
p1, p5 = accuracy(output, target, topk=(1, 5))
record['top1'] += p1
record['top5'] += p5
for key in record:
record[key] /= len(self.data_loader.dataset.samples)
record['dur'] = (time.time() - start) / len(self.data_loader)
return record
def accuracy(output, target, topk=(1,)):
with torch.no_grad():
_, pred = output.topk(max(topk), dim=1, largest=True, sorted=True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = [correct[:k].sum().item() for k in topk]
return res
if __name__ == '__main__':
fire.Fire(command=FIRE_FLAGS)
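# Example invocation (file name and paths are hypothetical). The trailing
# `train` is left over in FIRE_FLAGS by parse_known_args, and fire.Fire
# dispatches it to the train() function:
#   python train.py --in_path /data/imagenet --output_path ./ckpts --ngpus 1 train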
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, argparse
from typing import Tuple
import numpy as np
import attacks.pgd as pgd
import attacks.autopgd as autopgd
from active_tests.decision_boundary_binarization import format_result
from active_tests.decision_boundary_binarization import \
interior_boundary_discrimination_attack
from argparse_utils import DecisionBoundaryBinarizationSettings
parser = argparse.ArgumentParser(description='ImageNet Binarization Test')
parser.add_argument('--in-path', required=True,
help='path to ImageNet folder that contains val folder')
parser.add_argument('--batch-size', default=128, type=int,
help='size of batch for validation')
parser.add_argument('--workers', default=20, type=int,
                    help='number of data loading workers')
parser.add_argument('--model-arch',
choices=['alexnet', 'resnet50', 'resnet50_at', 'cornets'],
default='resnet50',
help='back-end model architecture to load')
parser.add_argument("--n-boundary-points", type=int, default=1)
parser.add_argument("--n-inner-points", type=int, default=999)
parser.add_argument("--n-samples", type=int, default=50000)
parser.add_argument("--epsilon", default=1, help="in X/255", type=float)
parser.add_argument("--attack", choices=("pgd", "apgd"), default="pgd")
parser.add_argument("--n-steps", type=int, default=64)
parser.add_argument("--step-size", default=0.1, help="in X/255", type=float)
parser.add_argument("--ensemble-size", type=int, default=1)
parser.add_argument("--deterministic-replacement", action="store_true")
parser.add_argument("--differentiable-replacement", action="store_true")
parser.add_argument("--stable-gradients", action="store_true")
parser.add_argument("--anomaly-detection", action="store_true")
FLAGS = parser.parse_args()
import torch
import torch.nn as nn
import torchvision
from vonenet import get_model
device = "cuda" if torch.cuda.is_available() else "cpu"
def val():
model = get_model(model_arch=FLAGS.model_arch, pretrained=True)
model = model.to(device)
if FLAGS.attack == "pgd":
attack_fn = lambda m, x, y: \
pgd.pgd(m, x, y, FLAGS.n_steps, FLAGS.step_size / 255.0,
FLAGS.epsilon / 255.0, "linf",
n_averaging_steps=FLAGS.ensemble_size)[0]
else:
attack_fn = lambda m, x, y: \
autopgd.auto_pgd(m, x, y, FLAGS.n_steps, FLAGS.step_size / 255.0,
FLAGS.epsilon / 255.0, "linf",
n_averaging_steps=FLAGS.ensemble_size)[0]
validator = ImageNetAdversarialVal(model, attack_fn=attack_fn,
n_samples=FLAGS.n_samples)
validator()
class ImageNetAdversarialVal(object):
def __init__(self, model, attack_fn, n_samples=50000):
self.name = 'val'
self.model = model
self.data_loader = self.data()
        self.loss = nn.CrossEntropyLoss(reduction='sum')
self.loss = self.loss.to(device)
self.attack_fn = attack_fn
self.n_samples = n_samples
def data(self):
dataset = torchvision.datasets.ImageFolder(
os.path.join(FLAGS.in_path, 'val'),
torchvision.transforms.Compose([
torchvision.transforms.Resize(256),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5]),
]))
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=FLAGS.batch_size,
shuffle=True,
num_workers=FLAGS.workers,
pin_memory=True)
return data_loader
def __call__(self):
self.model.eval()
def attack_model(m, l, attack_kwargs) -> Tuple[
np.ndarray, Tuple[torch.Tensor, torch.Tensor]]:
for inp, target in l:
target = target.to(device)
with torch.autograd.set_detect_anomaly(FLAGS.anomaly_detection):
if FLAGS.stable_gradients:
self.model.module.vone_block.stable_gabor_f = True
self.model.module.vone_block.deterministic = FLAGS.deterministic_replacement
if FLAGS.differentiable_replacement:
self.model.module.vone_block.simple = nn.ReLU(inplace=False)
inp_adv = self.attack_fn(m, inp, target)
# make model stochastic again etc.
self.model.module.vone_block.deterministic = False
self.model.module.vone_block.stable_gabor_f = False
self.model.module.vone_block.simple = nn.ReLU(inplace=True)
with torch.no_grad():
output = m(inp_adv)
                is_adv = (output.argmax(-1) != target).cpu().numpy()
return is_adv, (inp_adv, output.cpu())
additional_settings = dict(
n_boundary_points=FLAGS.n_boundary_points,
n_far_off_boundary_points=0,
n_far_off_adversarial_points=0,
)
scores_logit_differences_and_validation_accuracies = \
interior_boundary_discrimination_attack(
self.model,
self.data_loader,
attack_fn=attack_model,
linearization_settings=DecisionBoundaryBinarizationSettings(
epsilon=FLAGS.epsilon / 255.0,
norm="linf",
lr=10000,
adversarial_attack_settings=None,
optimizer="sklearn",
n_inner_points=FLAGS.n_inner_points,
**additional_settings
),
n_samples=FLAGS.n_samples,
device=device,
batch_size=FLAGS.batch_size,
n_samples_evaluation=200,
n_samples_asr_evaluation=200,
verify_valid_boundary_training_data_fn=None,
get_boundary_adversarials_fn=None,
verify_valid_inner_training_data_fn=None,
verify_valid_input_validation_data_fn=None,
fill_batches_for_verification=True
)
print(format_result(scores_logit_differences_and_validation_accuracies,
FLAGS.n_samples))
if __name__ == '__main__':
val()
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .utils import sample_dist
import scipy.stats as stats
def generate_gabor_param(features, seed=0, rand_flag=False, sf_corr=0, sf_max=9, sf_min=0):
# Generates random sample
np.random.seed(seed)
phase_bins = np.array([0, 360])
phase_dist = np.array([1])
if rand_flag:
print('Uniform gabor parameters')
ori_bins = np.array([0, 180])
ori_dist = np.array([1])
nx_bins = np.array([0.1, 10**0.2])
nx_dist = np.array([1])
ny_bins = np.array([0.1, 10**0.2])
ny_dist = np.array([1])
# sf_bins = np.array([0.5, 8])
# sf_dist = np.array([1])
sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8])
sf_dist = np.array([1, 1, 1, 1, 1, 1, 1, 1])
sfmax_ind = np.where(sf_bins < sf_max)[0][-1]
sfmin_ind = np.where(sf_bins >= sf_min)[0][0]
sf_bins = sf_bins[sfmin_ind:sfmax_ind+1]
sf_dist = sf_dist[sfmin_ind:sfmax_ind]
sf_dist = sf_dist / sf_dist.sum()
else:
print('Neuronal distributions gabor parameters')
# DeValois 1982a
ori_bins = np.array([-22.5, 22.5, 67.5, 112.5, 157.5])
ori_dist = np.array([66, 49, 77, 54])
ori_dist = ori_dist / ori_dist.sum()
# Schiller 1976
cov_mat = np.array([[1, sf_corr], [sf_corr, 1]])
# Ringach 2002b
nx_bins = np.logspace(-1, 0.2, 6, base=10)
ny_bins = np.logspace(-1, 0.2, 6, base=10)
n_joint_dist = np.array([[2., 0., 1., 0., 0.],
[8., 9., 4., 1., 0.],
[1., 2., 19., 17., 3.],
[0., 0., 1., 7., 4.],
[0., 0., 0., 0., 0.]])
n_joint_dist = n_joint_dist / n_joint_dist.sum()
nx_dist = n_joint_dist.sum(axis=1)
nx_dist = nx_dist / nx_dist.sum()
ny_dist_marg = n_joint_dist / n_joint_dist.sum(axis=1, keepdims=True)
# DeValois 1982b
sf_bins = np.array([0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8])
sf_dist = np.array([4, 4, 8, 25, 32, 26, 28, 12])
sfmax_ind = np.where(sf_bins <= sf_max)[0][-1]
sfmin_ind = np.where(sf_bins >= sf_min)[0][0]
sf_bins = sf_bins[sfmin_ind:sfmax_ind+1]
sf_dist = sf_dist[sfmin_ind:sfmax_ind]
sf_dist = sf_dist / sf_dist.sum()
phase = sample_dist(phase_dist, phase_bins, features)
ori = sample_dist(ori_dist, ori_bins, features)
ori[ori < 0] = ori[ori < 0] + 180
if rand_flag:
sf = sample_dist(sf_dist, sf_bins, features, scale='log2')
nx = sample_dist(nx_dist, nx_bins, features, scale='log10')
ny = sample_dist(ny_dist, ny_bins, features, scale='log10')
else:
samps = np.random.multivariate_normal([0, 0], cov_mat, features)
samps_cdf = stats.norm.cdf(samps)
nx = np.interp(samps_cdf[:,0], np.hstack(([0], nx_dist.cumsum())), np.log10(nx_bins))
nx = 10**nx
ny_samp = np.random.rand(features)
ny = np.zeros(features)
for samp_ind, nx_samp in enumerate(nx):
bin_id = np.argwhere(nx_bins < nx_samp)[-1]
ny[samp_ind] = np.interp(ny_samp[samp_ind], np.hstack(([0], ny_dist_marg[bin_id, :].cumsum())),
np.log10(ny_bins))
ny = 10**ny
sf = np.interp(samps_cdf[:,1], np.hstack(([0], sf_dist.cumsum())), np.log2(sf_bins))
sf = 2**sf
return sf, ori, phase, nx, ny
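# A minimal usage sketch, assuming the package is importable (the relative
# import of sample_dist means this module runs via `python -m`, e.g.
# `python -m vonenet.params`): draw Gabor parameters for 512 channels from the
# neuronal distributions and inspect their ranges.
if __name__ == '__main__':
    sf, ori, phase, nx, ny = generate_gabor_param(512, seed=0, rand_flag=False,
                                                  sf_corr=0.75, sf_max=9, sf_min=0)
    print('sf (cycles/deg):', sf.min(), 'to', sf.max())
    print('orientation (deg):', ori.min(), 'to', ori.max())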
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch import nn
from collections import OrderedDict
# AlexNet Back-End architecture
# Based on Torchvision implementation in
# https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py
class AlexNetBackEnd(nn.Module):
def __init__(self, num_classes=1000):
super().__init__()
self.features = nn.Sequential(
nn.Conv2d(64, 192, kernel_size=5, stride=2, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 7 * 7, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
# ResNet Back-End architecture
# Based on Torchvision implementation in
# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True) #
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True) # inplace=True
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNetBackEnd(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNetBackEnd, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x, features_only=False):
# See note [TorchScript super()]
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
if not features_only:
x = self.fc(x)
return x
def forward(self, x, **kwargs):
return self._forward_impl(x, **kwargs)
# CORnet-S Back-End architecture
# Based on CORnet code in
# https://github.com/dicarlolab/CORnet
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class Identity(nn.Module):
def forward(self, x):
return x
class CORblock_S(nn.Module):
scale = 4 # scale of the bottleneck convolution channels
def __init__(self, in_channels, out_channels, times=1):
super().__init__()
self.times = times
self.conv_input = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self.skip = nn.Conv2d(out_channels, out_channels,
kernel_size=1, stride=2, bias=False)
self.norm_skip = nn.BatchNorm2d(out_channels)
self.conv1 = nn.Conv2d(out_channels, out_channels * self.scale,
kernel_size=1, bias=False)
self.nonlin1 = nn.ReLU(inplace=True) #
self.conv2 = nn.Conv2d(out_channels * self.scale, out_channels * self.scale,
kernel_size=3, stride=2, padding=1, bias=False)
self.nonlin2 = nn.ReLU(inplace=True) #
self.conv3 = nn.Conv2d(out_channels * self.scale, out_channels,
kernel_size=1, bias=False)
self.nonlin3 = nn.ReLU(inplace=True) #
self.output = Identity() # for an easy access to this block's output
# need BatchNorm for each time step for training to work well
for t in range(self.times):
setattr(self, f'norm1_{t}', nn.BatchNorm2d(out_channels * self.scale))
setattr(self, f'norm2_{t}', nn.BatchNorm2d(out_channels * self.scale))
setattr(self, f'norm3_{t}', nn.BatchNorm2d(out_channels))
def forward(self, inp):
x = self.conv_input(inp)
for t in range(self.times):
if t == 0:
skip = self.norm_skip(self.skip(x))
self.conv2.stride = (2, 2)
else:
skip = x
self.conv2.stride = (1, 1)
x = self.conv1(x)
x = getattr(self, f'norm1_{t}')(x)
x = self.nonlin1(x)
x = self.conv2(x)
x = getattr(self, f'norm2_{t}')(x)
x = self.nonlin2(x)
x = self.conv3(x)
x = getattr(self, f'norm3_{t}')(x)
x += skip
x = self.nonlin3(x)
output = self.output(x)
return output
class CORnetSBackEnd(nn.Module):
def __init__(self, num_classes=1000):
super(CORnetSBackEnd, self).__init__()
self.V2 = CORblock_S(64, 128, times=2)
self.V4 = CORblock_S(128, 256, times=4)
self.IT = CORblock_S(256, 512, times=2)
self.decoder = nn.Sequential(OrderedDict([
('avgpool', nn.AdaptiveAvgPool2d(1)),
('flatten', Flatten()),
('linear', nn.Linear(512, num_classes)),
('output', Identity())
]))
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, np.sqrt(2. / n))
# nn.Linear is missing here because I originally forgot
# to add it during the training of this network
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
x = self.V2(x)
x = self.V4(x)
x = self.IT(x)
x = self.decoder(x)
return x
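# A minimal usage sketch: in VOneNet, ResNetBackEnd replaces the stem of a
# standard ResNet50, so it starts at layer1 and expects the 64-channel output
# of the VOneBlock bottleneck (56x56 for 224px inputs with stride 4).
if __name__ == '__main__':
    backend = ResNetBackEnd(block=Bottleneck, layers=[3, 4, 6, 3])
    x = torch.randn(1, 64, 56, 56)
    print(backend(x).shape)  # torch.Size([1, 1000])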
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import os
import requests
from .vonenet import VOneNet
from torch.nn import Module
FILE_WEIGHTS = {'alexnet': 'vonealexnet_e70.pth.tar', 'resnet50': 'voneresnet50_e70.pth.tar',
'resnet50_at': 'voneresnet50_at_e96.pth.tar', 'cornets': 'vonecornets_e70.pth.tar',
'resnet50_ns': 'voneresnet50_ns_e70.pth.tar'}
class Wrapper(Module):
def __init__(self, model):
super(Wrapper, self).__init__()
self.module = model
def get_model(model_arch='resnet50', pretrained=True, map_location='cpu', **kwargs):
"""
Returns a VOneNet model.
Select pretrained=True for returning one of the 3 pretrained models.
model_arch: string with identifier to choose the architecture of the back-end (resnet50, cornets, alexnet)
"""
if pretrained and model_arch:
url = f'https://vonenet-models.s3.us-east-2.amazonaws.com/{FILE_WEIGHTS[model_arch.lower()]}'
        home_dir = os.path.expanduser('~')  # portable alternative to os.environ['HOME']
vonenet_dir = os.path.join(home_dir, '.vonenet')
weightsdir_path = os.path.join(vonenet_dir, FILE_WEIGHTS[model_arch.lower()])
if not os.path.exists(vonenet_dir):
os.makedirs(vonenet_dir)
if not os.path.exists(weightsdir_path):
print('Downloading model weights to ', weightsdir_path)
r = requests.get(url, allow_redirects=True)
            with open(weightsdir_path, 'wb') as f:
                f.write(r.content)
ckpt_data = torch.load(weightsdir_path, map_location=map_location)
stride = ckpt_data['flags']['stride']
simple_channels = ckpt_data['flags']['simple_channels']
complex_channels = ckpt_data['flags']['complex_channels']
k_exc = ckpt_data['flags']['k_exc']
noise_mode = ckpt_data['flags']['noise_mode']
noise_scale = ckpt_data['flags']['noise_scale']
noise_level = ckpt_data['flags']['noise_level']
model_id = ckpt_data['flags']['arch'].replace('_','').lower()
        model = VOneNet(model_arch=model_id, stride=stride, k_exc=k_exc,
simple_channels=simple_channels, complex_channels=complex_channels,
noise_mode=noise_mode, noise_scale=noise_scale, noise_level=noise_level)
if model_arch.lower() == 'resnet50_at':
ckpt_data['state_dict'].pop('vone_block.div_u.weight')
ckpt_data['state_dict'].pop('vone_block.div_t.weight')
model.load_state_dict(ckpt_data['state_dict'])
else:
model = Wrapper(model)
model.load_state_dict(ckpt_data['state_dict'])
model = model.module
model = nn.DataParallel(model)
else:
        model = VOneNet(model_arch=model_arch, **kwargs)
model = nn.DataParallel(model)
model.to(map_location)
return model
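# Usage sketch: `get_model(model_arch='resnet50', pretrained=True)` downloads
# the weights to ~/.vonenet on first use and returns the network wrapped in
# nn.DataParallel, so the V1 front-end is reachable as model.module.vone_block.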
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
def gabor_kernel(frequency, sigma_x, sigma_y, theta=0, offset=0, ks=61):
w = ks // 2
grid_val = torch.arange(-w, w+1, dtype=torch.float)
x, y = torch.meshgrid(grid_val, grid_val)
rotx = x * np.cos(theta) + y * np.sin(theta)
roty = -x * np.sin(theta) + y * np.cos(theta)
g = torch.zeros(y.shape)
g[:] = torch.exp(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2))
g /= 2 * np.pi * sigma_x * sigma_y
g *= torch.cos(2 * np.pi * frequency * rotx + offset)
return g
def sample_dist(hist, bins, ns, scale='linear'):
rand_sample = np.random.rand(ns)
if scale == 'linear':
rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), bins)
elif scale == 'log2':
rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), np.log2(bins))
rand_sample = 2**rand_sample
elif scale == 'log10':
rand_sample = np.interp(rand_sample, np.hstack(([0], hist.cumsum())), np.log10(bins))
rand_sample = 10**rand_sample
return rand_sample
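# A minimal sketch: build a single 25x25 Gabor kernel. Here `frequency` is in
# cycles/pixel (VOneNet converts cycles/degree using pixels-per-degree before
# calling this), and `theta`/`offset` are in radians.
if __name__ == '__main__':
    k = gabor_kernel(frequency=0.1, sigma_x=3.0, sigma_y=3.0,
                     theta=np.pi / 4, offset=0.0, ks=25)
    print(k.shape)  # torch.Size([25, 25])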
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from torch import nn
from .modules import VOneBlock
from .back_ends import ResNetBackEnd, Bottleneck, AlexNetBackEnd, CORnetSBackEnd
from .params import generate_gabor_param
import numpy as np
class LastLayerKwargsSequential(nn.Sequential):
def forward(self, x, **last_layer_kwargs):
for i, module in enumerate(self):
if i == (len(self) - 1):
x = module(x, **last_layer_kwargs)
else:
x = module(x)
return x
def VOneNet(sf_corr=0.75, sf_max=9, sf_min=0, rand_param=False, gabor_seed=0,
simple_channels=256, complex_channels=256,
noise_mode='neuronal', noise_scale=0.35, noise_level=0.07, k_exc=25,
model_arch='resnet50', image_size=224, visual_degrees=8, ksize=25, stride=4):
out_channels = simple_channels + complex_channels
sf, theta, phase, nx, ny = generate_gabor_param(out_channels, gabor_seed, rand_param, sf_corr, sf_max, sf_min)
gabor_params = {'simple_channels': simple_channels, 'complex_channels': complex_channels, 'rand_param': rand_param,
'gabor_seed': gabor_seed, 'sf_max': sf_max, 'sf_corr': sf_corr, 'sf': sf.copy(),
'theta': theta.copy(), 'phase': phase.copy(), 'nx': nx.copy(), 'ny': ny.copy()}
arch_params = {'k_exc': k_exc, 'arch': model_arch, 'ksize': ksize, 'stride': stride}
# Conversions
ppd = image_size / visual_degrees
sf = sf / ppd
sigx = nx / sf
sigy = ny / sf
theta = theta/180 * np.pi
phase = phase / 180 * np.pi
vone_block = VOneBlock(sf=sf, theta=theta, sigx=sigx, sigy=sigy, phase=phase,
k_exc=k_exc, noise_mode=noise_mode, noise_scale=noise_scale, noise_level=noise_level,
simple_channels=simple_channels, complex_channels=complex_channels,
ksize=ksize, stride=stride, input_size=image_size)
if model_arch:
bottleneck = nn.Conv2d(out_channels, 64, kernel_size=1, stride=1, bias=False)
nn.init.kaiming_normal_(bottleneck.weight, mode='fan_out', nonlinearity='relu')
if model_arch.lower() == 'resnet50':
print('Model: ', 'VOneResnet50')
model_back_end = ResNetBackEnd(block=Bottleneck, layers=[3, 4, 6, 3])
elif model_arch.lower() == 'alexnet':
print('Model: ', 'VOneAlexNet')
model_back_end = AlexNetBackEnd()
elif model_arch.lower() == 'cornets':
print('Model: ', 'VOneCORnet-S')
model_back_end = CORnetSBackEnd()
model = LastLayerKwargsSequential(OrderedDict([
('vone_block', vone_block),
('bottleneck', bottleneck),
('model', model_back_end),
]))
else:
print('Model: ', 'VOneNet')
model = vone_block
model.image_size = image_size
model.visual_degrees = visual_degrees
model.gabor_params = gabor_params
model.arch_params = arch_params
return model
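# A minimal usage sketch (relative imports make this module runnable only via
# `python -m`, assuming the package layout): build an untrained VOneResNet50
# and check that a 224x224 batch maps to 1000-way logits.
if __name__ == '__main__':
    import torch
    model = VOneNet(model_arch='resnet50', noise_mode=None)
    print(model(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 1000])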
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from .utils import gabor_kernel
device = "cuda" if torch.cuda.is_available() else "cpu"
class Identity(nn.Module):
def forward(self, x):
return x
class GFB(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=4):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = (kernel_size, kernel_size)
self.stride = (stride, stride)
self.padding = (kernel_size // 2, kernel_size // 2)
# Param instatiations
self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
def forward(self, x):
return F.conv2d(x, self.weight, None, self.stride, self.padding)
def initialize(self, sf, theta, sigx, sigy, phase):
random_channel = torch.randint(0, self.in_channels, (self.out_channels,))
for i in range(self.out_channels):
self.weight[i, random_channel[i]] = gabor_kernel(frequency=sf[i], sigma_x=sigx[i], sigma_y=sigy[i],
theta=theta[i], offset=phase[i], ks=self.kernel_size[0])
self.weight = nn.Parameter(self.weight, requires_grad=False)
class VOneBlock(nn.Module):
def __init__(self, sf, theta, sigx, sigy, phase,
k_exc=25, noise_mode=None, noise_scale=1, noise_level=1,
simple_channels=128, complex_channels=128, ksize=25, stride=4, input_size=224,
stable_gabor_f=False, deterministic=False):
super().__init__()
self.in_channels = 3
self.simple_channels = simple_channels
self.complex_channels = complex_channels
self.out_channels = simple_channels + complex_channels
self.stride = stride
self.input_size = input_size
self.sf = sf
self.theta = theta
self.sigx = sigx
self.sigy = sigy
self.phase = phase
self.k_exc = k_exc
self.set_noise_mode(noise_mode, noise_scale, noise_level)
self.fixed_noise = None
self.simple_conv_q0 = GFB(self.in_channels, self.out_channels, ksize, stride)
self.simple_conv_q1 = GFB(self.in_channels, self.out_channels, ksize, stride)
self.simple_conv_q0.initialize(sf=self.sf, theta=self.theta, sigx=self.sigx, sigy=self.sigy,
phase=self.phase)
self.simple_conv_q1.initialize(sf=self.sf, theta=self.theta, sigx=self.sigx, sigy=self.sigy,
phase=self.phase + np.pi / 2)
self.simple = nn.ReLU(inplace=True)
self.complex = Identity()
self.gabors = Identity()
self.noise = nn.ReLU(inplace=True)
self.output = Identity()
self.stable_gabor_f = stable_gabor_f
self.deterministic = deterministic
def forward(self, x):
# Gabor activations [Batch, out_channels, H/stride, W/stride]
x = self.gabors_f(x)
# Noise [Batch, out_channels, H/stride, W/stride]
if not self.deterministic:
x = self.noise_f(x)
# V1 Block output: (Batch, out_channels, H/stride, W/stride)
x = self.output(x)
return x
def gabors_f(self, x):
s_q0 = self.simple_conv_q0(x)
s_q1 = self.simple_conv_q1(x)
if self.stable_gabor_f:
c = self.complex(torch.sqrt(1e-12 + s_q0[:, self.simple_channels:, :, :] ** 2 +
s_q1[:, self.simple_channels:, :, :] ** 2) / np.sqrt(2))
else:
c = self.complex(torch.sqrt(s_q0[:, self.simple_channels:, :, :] ** 2 +
s_q1[:, self.simple_channels:, :, :] ** 2) / np.sqrt(2))
s = self.simple(s_q0[:, 0:self.simple_channels, :, :])
return self.gabors(self.k_exc * torch.cat((s, c), 1))
def noise_f(self, x):
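        # 'neuronal': Poisson-like variability whose std grows with the square
        # root of the (shifted and scaled) rectified response; 'gaussian':
        # additive i.i.d. noise with std noise_scale. If fixed_noise is set,
        # the same frozen noise sample is reused across calls.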
if self.noise_mode == 'neuronal':
            eps = 1e-4
x *= self.noise_scale
x += self.noise_level
if self.fixed_noise is not None:
x += self.fixed_noise * torch.sqrt(F.relu(x.clone()) + eps)
else:
x += torch.distributions.normal.Normal(torch.zeros_like(x), scale=1).rsample() * \
torch.sqrt(F.relu(x.clone()) + eps)
x -= self.noise_level
x /= self.noise_scale
if self.noise_mode == 'gaussian':
if self.fixed_noise is not None:
x += self.fixed_noise * self.noise_scale
else:
x += torch.distributions.normal.Normal(torch.zeros_like(x), scale=1).rsample() * self.noise_scale
return self.noise(x)
def set_noise_mode(self, noise_mode=None, noise_scale=1, noise_level=1):
self.noise_mode = noise_mode
self.noise_scale = noise_scale
self.noise_level = noise_level
def fix_noise(self, batch_size=256, seed=None):
noise_mean = torch.zeros(batch_size, self.out_channels, int(self.input_size/self.stride),
int(self.input_size/self.stride))
if seed:
torch.manual_seed(seed)
if self.noise_mode:
self.fixed_noise = torch.distributions.normal.Normal(noise_mean, scale=1).rsample().to(device)
def unfix_noise(self):
self.fixed_noise = None
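# Minimal usage sketch (illustrative; not part of the original module). The
# parameter values below are arbitrary placeholders, not the fitted V1
# statistics the model factory samples. Because gabor_kernel is imported
# relatively, this helper only works when the module is used as a package.
def _demo_vone_block():
    n = 16  # simple_channels + complex_channels
    rng = np.random.RandomState(0)
    block = VOneBlock(
        sf=rng.uniform(0.05, 0.25, n),   # spatial frequency, cycles/pixel
        theta=rng.uniform(0, np.pi, n),  # orientation in radians
        sigx=rng.uniform(2.0, 6.0, n),
        sigy=rng.uniform(2.0, 6.0, n),
        phase=rng.uniform(0, 2 * np.pi, n),
        simple_channels=8, complex_channels=8,
        ksize=25, stride=4, input_size=64)
    out = block(torch.rand(1, 3, 64, 64))
    return out.shape  # torch.Size([1, 16, 16, 16]); spatial dims = 64 / stride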
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
class TorchAlarm(torch.nn.Module):
def __init__(self):
super().__init__()
self.layers = torch.nn.ModuleList([
torch.nn.Linear(28682, 112),
torch.nn.ReLU(),
torch.nn.Linear(112, 100),
torch.nn.ReLU(),
torch.nn.Linear(100, 300),
torch.nn.ReLU(),
torch.nn.Linear(300, 200),
torch.nn.ReLU(),
torch.nn.Linear(200, 77),
torch.nn.ReLU(),
torch.nn.Linear(77, 1),
])
def __call__(self, x, training=False):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=torch.float32)
for layer in self.layers:
x = layer(x)
return x
class resnet_layer_torch(torch.nn.Module):
def __init__(self,
prior_filters=16,
num_filters=16,
kernel_size=3,
strides=1):
super().__init__()
self.a = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=kernel_size, padding=1)
self.b = torch.nn.BatchNorm2d(num_filters, eps=.000)
self.c = torch.nn.Conv2d(num_filters, num_filters, kernel_size=kernel_size, padding=1)
self.d = torch.nn.BatchNorm2d(num_filters, eps=.000)
self.layers = [self.a, self.b, self.c, self.d]
def forward(self, inputs):
x1 = self.a(inputs)
x2 = self.b(x1)
x3 = torch.nn.ReLU()(x2)
x4 = self.c(x3)
x5 = self.d(x4)
x6 = x5 + inputs
return x6, x2, x5
class resnet_layer2_torch(torch.nn.Module):
def __init__(self,
prior_filters=16,
num_filters=16,
kernel_size=3,
strides=1):
super().__init__()
self.a = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=kernel_size, padding=0, stride=(2,2))
self.b = torch.nn.BatchNorm2d(num_filters, eps=.000)
self.c = torch.nn.Conv2d(num_filters, num_filters, kernel_size=kernel_size, padding=1)
self.c2 = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=1, padding=0, stride=(2,2))
self.d = torch.nn.BatchNorm2d(num_filters, eps=.000)
self.layers = [self.a, self.b, self.c, self.c2, self.d]
def forward(self, x):
xp = torch.nn.functional.pad(x, (0, 1, 0, 1), "constant", 0)
y = self.a(xp)
y = self.b(y)
y = torch.nn.ReLU()(y)
y = self.c(y)
z = self.c2(x)
y = self.d(y)
x = z+y
return x
class TorchModel(torch.nn.Module):
def __init__(self):
super().__init__()
class Transpose(torch.nn.Module):
def forward(self, x):
return x.permute((0, 2, 3, 1))
self.layers = torch.nn.ModuleList([
torch.nn.Conv2d(3, 16, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(16, eps=.000),
torch.nn.ReLU(),
# AAA
resnet_layer_torch(16, 16),
torch.nn.ReLU(),
resnet_layer_torch(16, 16),
torch.nn.ReLU(),
resnet_layer_torch(16, 16),
torch.nn.ReLU(),
resnet_layer2_torch(16, 32),
torch.nn.ReLU(),
resnet_layer_torch(32, 32),
torch.nn.ReLU(),
resnet_layer_torch(32, 32),
torch.nn.ReLU(),
resnet_layer2_torch(32, 64),
torch.nn.ReLU(),
resnet_layer_torch(64, 64),
torch.nn.ReLU(),
resnet_layer_torch(64, 64),
torch.nn.ReLU(),
torch.nn.AvgPool2d(8),
#
Transpose(),
torch.nn.Flatten(),
torch.nn.Linear(64, 10),
])
def __call__(self, x, training=False):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=torch.float32)
extra = []
for i,layer in enumerate(self.layers):
if isinstance(layer, resnet_layer_torch):
x,y,z = layer(x)
if i == 11:
extra.append(y)
if i == 19:
extra.append(z)
else:
x = layer(x)
if i == 1:
extra.append(x)
extra = torch.cat([x.permute((0, 2, 3, 1)).reshape((x.shape[0], -1)) for x in extra] + [x], axis=1)
return x, extra
class TorchWithDetect:
def __init__(self, model, alarm):
self.model = model
self.alarm = alarm
def __call__(self, x):
out, hidden = self.model(x)
is_ok = self.alarm(hidden)
return out, is_ok
#def load_model(path_model_weights='checkpoints/dla/dla_cifar_classifier.h5',
# path_detector_weights='checkpoints/dla/dla_cifar_detector.h5', device=None):
def load_model(path_model_weights='/home/AUTHOR/dla/dla/cifar_model.h5',
path_detector_weights='/home/AUTHOR/dla/dla/cifar_alarm.h5', device=None):
torch_model = TorchModel()
torch_model.load_state_dict(
torch.load(path_model_weights))
torch_model.eval().to(device)
torch_alarm = TorchAlarm()
torch_alarm.load_state_dict(
torch.load(path_detector_weights))
torch_alarm.eval().to(device)
return TorchWithDetect(torch_model, torch_alarm), \
lambda x: torch_model(x)[0], \
lambda x, how=None: torch_alarm(torch_model(x)[1]).flatten()
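# Shape sanity sketch (illustrative; not part of the original file). With
# randomly initialized weights, the classifier emits 10 logits plus a
# 28682-dim hidden vector: the three tapped activations contribute
# 16*32*32 + 32*16*16 + 64*8*8 = 28672 features, concatenated with the 10
# logits, which matches the alarm's first Linear layer.
def _demo_shapes():
    model, alarm = TorchModel(), TorchAlarm()
    logits, hidden = model(torch.rand(2, 3, 32, 32))
    return logits.shape, hidden.shape, alarm(hidden).shape  # (2, 10), (2, 28682), (2, 1)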
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import warnings
warnings.filterwarnings("ignore")
import argparse
import torch
import numpy as np
import defense_v3
import defense_v2
import defense
from cifar import CIFAR10
import pgd_attack
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--batch-size", type=int, default=512)
parser.add_argument("--n-samples", type=int, default=512)
parser.add_argument("--adversarial-attack",
choices=(None, "pgd", "selective-pgd", "joined-pgd"),
default=None)
parser.add_argument("--epsilon", type=float, default=0.31)
parser.add_argument("--n-steps", type=int, default=100)
parser.add_argument("--step-size", type=float, default=0.001)
parser.add_argument("--threshold", type=float, default=None)
parser.add_argument("--fpr-threshold", type=float, default=0.05)
args = parser.parse_args()
assert args.n_samples < 5000
if args.epsilon > 0 or args.n_steps > 0 or args.step_size > 0:
assert args.adversarial_attack is not None
device = "cuda" if torch.cuda.is_available() else "cpu"
dataset = CIFAR10(tf_mode=True)
classifier_and_detector, classifier, detector = defense_v2.load_model(
device=device)
n_batches = int(np.ceil(args.n_samples / args.batch_size))
is_adv = []
adv_detector_scores = []
detector_scores = []
for batch_idx in range(n_batches):
x_batch = dataset.test_data[batch_idx*args.batch_size :
(batch_idx+1)*args.batch_size]
y_batch = dataset.test_labels[batch_idx*args.batch_size :
(batch_idx+1)*args.batch_size]
x_batch = x_batch.transpose((0, 3, 1, 2))
x_batch = torch.tensor(x_batch, dtype=torch.float32).to(device)
y_batch = torch.tensor(y_batch, dtype=torch.long).to(device)
if args.adversarial_attack is not None:
x_adv_batch = pgd_attack.attack(
x_batch, y_batch, classifier, classifier_and_detector,
args.adversarial_attack, args.n_steps, args.step_size, args.epsilon)
with torch.no_grad():
logits, adv_detector_scores_batch = classifier_and_detector(x_adv_batch)
adv_detector_scores_batch = adv_detector_scores_batch.cpu().numpy()
adv_predictions_batch = logits.argmax(1)
detector_scores_batch = detector(x_batch).cpu().numpy()
is_adv_batch = adv_predictions_batch != y_batch
is_adv_batch = is_adv_batch.cpu().numpy()
is_adv.append(is_adv_batch)
detector_scores.append(detector_scores_batch)
adv_detector_scores.append(adv_detector_scores_batch)
is_adv = np.concatenate(is_adv, 0)
is_adv = is_adv[:args.n_samples]
detector_scores = np.concatenate(detector_scores, 0)
detector_scores = detector_scores[:args.n_samples]
adv_detector_scores = np.concatenate(adv_detector_scores, 0)
adv_detector_scores = adv_detector_scores[:args.n_samples]
if args.threshold is None:
detector_threshold = np.sort(detector_scores)[
-int(len(detector_scores) * args.fpr_threshold)]
print("Threshold for FPR", args.fpr_threshold, "=", detector_threshold)
else:
detector_threshold = args.threshold
adv_is_detected = adv_detector_scores > detector_threshold
is_detected = detector_scores > detector_threshold
# true positive: detected + adversarial example
# true negative: not detected + normal example
# false positive: detected + normal example
# false negative: not detected + adversarial example
tpr = np.mean(adv_is_detected)
fnr = np.mean(~adv_is_detected)
tnr = np.mean(~is_detected)
fpr = np.mean(is_detected)
tp = np.sum(adv_is_detected)
fn = np.sum(~adv_is_detected)
fp = np.sum(is_detected)
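    # F1 = 2PR / (P + R) = tp / (tp + 0.5 * (fp + fn)), the harmonic mean of
    # the detector's precision and recall.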
f1 = tp / (tp + 0.5 * (fp + fn))
print("TPR", tpr)
print("FPR", fpr)
print("TNR", tnr)
print("FNR", fnr)
print("F1 ", f1)
is_adv_and_not_detected = np.logical_and(is_adv, ~adv_is_detected)
print("Attack Success Rate (w/o detector):", np.mean(is_adv))
print("Attack Success Rate (w/ detector):", np.mean(is_adv_and_not_detected))
if __name__ == "__main__":
main() |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import defense
if __name__ == '__main__':
device = "cuda"
classifier_and_detector, classifier, detector = defense.load_model(device=device)
data = defense.CIFAR10(seed=43)
# ### PGD
num_images = 2000
images = torch.tensor(data.test_data[:num_images].transpose((0, 3, 1, 2)), dtype=torch.float32, requires_grad=False)
targets = torch.tensor(data.test_labels[:num_images], dtype=torch.int64)
outs, is_ok = classifier_and_detector(images.to(device))
outs = outs.cpu()
is_ok = is_ok.cpu()
correct = data.test_labels[:num_images] == outs.argmax(1).detach().numpy()
is_ok = is_ok.detach().numpy()
print('acc', correct.mean())
print('fraction bad', (is_ok > 0).mean())
print("successful attacks", ((~correct) & (is_ok < 0)).mean())
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
import attacks.pgd
def attack(x_batch, y_batch, classifier, classifier_and_detector,
adversarial_attack, n_steps, step_size, epsilon):
if adversarial_attack == "pgd":
loss_fn = lambda x, y: -F.cross_entropy(classifier(x), y)
def is_adversarial_fn(x, y):
with torch.no_grad():
return classifier(x).argmax(-1) != y
elif adversarial_attack == "joined-pgd":
def loss_fn(x, y):
l, k = classifier_and_detector(x)
return -F.cross_entropy(l, y) - F.binary_cross_entropy_with_logits(
k, torch.ones_like(k))
def is_adversarial_fn(x, y):
with torch.no_grad():
l, k = classifier_and_detector(x)
yc = l.argmax(1) != y
yd = k < 0
return torch.logical_and(yc, yd)
elif adversarial_attack == "selective-pgd":
def loss_fn(x, y):
l, k = classifier_and_detector(x)
mc = (l.argmax(1) == y).float().detach()
md = (k > 0).float().detach()
return -torch.mean(
mc * F.cross_entropy(l, y, reduction="none") +
md * F.binary_cross_entropy_with_logits(
k, torch.ones_like(k), reduction="none")
)
def is_adversarial_fn(x, y):
with torch.no_grad():
l, k = classifier_and_detector(x)
yc = l.argmax(1) != y
yd = k < 0
return torch.logical_and(yc, yd)
elif adversarial_attack == "orthogonal-pgd":
raise ValueError("not implemented")
x_batch = attacks.pgd.general_pgd(loss_fn, is_adversarial_fn,
x_batch, y_batch, n_steps,
step_size, epsilon, "linf",
early_stopping=True,
random_start=False
)[0]
return x_batch
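# Usage sketch (illustrative; not part of the original file). "pgd" attacks the
# classifier alone, "joined-pgd" jointly maximizes misclassification and pushes
# the detector logit negative (undetected), and "selective-pgd" only keeps
# gradient terms for samples that are still correctly classified or still
# detected. The linear stand-in model below is a placeholder, and the sketch
# assumes attacks.pgd.general_pgd behaves as invoked above.
def _demo_attack():
    clf = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 8 * 8, 10))
    x = torch.rand(4, 3, 8, 8)
    y = torch.randint(0, 10, (4,))
    x_adv = attack(x, y, clf, None, "pgd",
                   n_steps=10, step_size=0.01, epsilon=0.03)
    return (x_adv - x).abs().max()  # bounded by epsilon in the linf norm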
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
class TorchResnetAlarm(torch.nn.Module):
def __init__(self):
super().__init__()
self.layers = torch.nn.ModuleList([
torch.nn.Linear(28682, 112),
torch.nn.ReLU(),
torch.nn.Linear(112, 100),
torch.nn.ReLU(),
torch.nn.Linear(100, 300),
torch.nn.ReLU(),
torch.nn.Linear(300, 200),
torch.nn.ReLU(),
torch.nn.Linear(200, 77),
torch.nn.ReLU(),
torch.nn.Linear(77, 1),
])
def __call__(self, x, training=False):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=torch.float32)
for layer in self.layers:
x = layer(x)
return x
class resnet_layer_torch(torch.nn.Module):
def __init__(self,
prior_filters=16,
num_filters=16,
kernel_size=3,
strides=1):
super().__init__()
self.a = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=kernel_size, padding=1)
self.b = torch.nn.BatchNorm2d(num_filters, eps=.000)
self.c = torch.nn.Conv2d(num_filters, num_filters, kernel_size=kernel_size, padding=1)
self.d = torch.nn.BatchNorm2d(num_filters, eps=.000)
self.layers = [self.a, self.b, self.c, self.d]
def forward(self, inputs):
x1 = self.a(inputs)
x2 = self.b(x1)
x3 = torch.nn.ReLU()(x2)
x4 = self.c(x3)
x5 = self.d(x4)
x6 = x5 + inputs
return x6, x2, x5
class resnet_layer2_torch(torch.nn.Module):
def __init__(self,
prior_filters=16,
num_filters=16,
kernel_size=3,
strides=1):
super().__init__()
self.a = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=kernel_size, padding=0, stride=(2,2))
self.b = torch.nn.BatchNorm2d(num_filters, eps=.000)
self.c = torch.nn.Conv2d(num_filters, num_filters, kernel_size=kernel_size, padding=1)
self.c2 = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=1, padding=0, stride=(2,2))
self.d = torch.nn.BatchNorm2d(num_filters, eps=.000)
self.layers = [self.a, self.b, self.c, self.c2, self.d]
def forward(self, x):
xp = torch.nn.functional.pad(x, (0, 1, 0, 1), "constant", 0)
y = self.a(xp)
y = self.b(y)
y = torch.nn.ReLU()(y)
y = self.c(y)
z = self.c2(x)
y = self.d(y)
x = z+y
return x
class TorchResnetModel(torch.nn.Module):
def __init__(self):
super().__init__()
class Transpose(torch.nn.Module):
def forward(self, x):
return x.permute((0, 2, 3, 1))
self.layers = torch.nn.ModuleList([
torch.nn.Conv2d(3, 16, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(16, eps=.000),
torch.nn.ReLU(),
# AAA
resnet_layer_torch(16, 16),
torch.nn.ReLU(),
resnet_layer_torch(16, 16),
torch.nn.ReLU(),
resnet_layer_torch(16, 16),
torch.nn.ReLU(),
resnet_layer2_torch(16, 32),
torch.nn.ReLU(),
resnet_layer_torch(32, 32),
torch.nn.ReLU(),
resnet_layer_torch(32, 32),
torch.nn.ReLU(),
resnet_layer2_torch(32, 64),
torch.nn.ReLU(),
resnet_layer_torch(64, 64),
torch.nn.ReLU(),
resnet_layer_torch(64, 64),
torch.nn.ReLU(),
torch.nn.AvgPool2d(8),
#
Transpose(),
torch.nn.Flatten(),
torch.nn.Linear(64, 10),
])
def __call__(self, x, training=False, return_features=False):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=torch.float32)
extra = []
for i,layer in enumerate(self.layers):
if isinstance(layer, resnet_layer_torch):
x,y,z = layer(x)
if i == 11:
extra.append(y)
if i == 19:
extra.append(z)
else:
x = layer(x)
if i == 1:
extra.append(x)
if isinstance(layer, torch.nn.Flatten):
features = x
extra = torch.cat([x.permute((0, 2, 3, 1)).reshape((x.shape[0], -1)) for x in extra] + [x], axis=1)
if return_features:
return x, extra, features
else:
return x, extra
class TorchModel(torch.nn.Module):
def __init__(self):
super().__init__()
class Transpose(torch.nn.Module):
def forward(self, x):
return x.permute((0, 2, 3, 1))
self.layers = torch.nn.ModuleList([
torch.nn.Conv2d(3, 32, kernel_size=3, padding=1),
torch.nn.ReLU(),
torch.nn.Conv2d(32, 32, kernel_size=3),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, 2),
torch.nn.Conv2d(32, 64, kernel_size=3, padding=1),
torch.nn.ReLU(),
torch.nn.Conv2d(64, 64, kernel_size=3),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, 2),
Transpose(),
torch.nn.Flatten(),
torch.nn.Linear(2304, 512),
torch.nn.ReLU(),
torch.nn.Linear(512, 10)])
def __call__(self, x, training=False, return_features=False):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=torch.float32)
for i,layer in enumerate(self.layers):
#print('l',layer)
#print('x',x.shape)
x = layer(x)
if i == 13:
#print("Have", x)
sav = x
if isinstance(layer, torch.nn.Flatten):
features = x
#print('aaa', sav)
if return_features:
return x, torch.cat([sav, x], axis=1), features
else:
return x, torch.cat([sav, x], axis=1)
class TorchAlarm(torch.nn.Module):
def __init__(self):
super().__init__()
self.layers = torch.nn.ModuleList([
torch.nn.Linear(522, 100),
# modification by AUTHOR
#torch.nn.SELU(),
torch.nn.Linear(100, 50),
# modification by AUTHOR
#torch.nn.SELU(),
torch.nn.Linear(50, 10),
# modification by AUTHOR
#torch.nn.SELU(),
torch.nn.Linear(10, 1)
])
def __call__(self, x, training=False):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=torch.float32)
for i, layer in enumerate(self.layers):
x = layer(x)
# modification by AUTHOR
if i != len(self.layers) - 1:
x = torch.relu(x)
return x
class TorchWithDetect:
def __init__(self, model, alarm):
self.model = model
self.alarm = alarm
def __call__(self, x):
out, hidden = self.model(x)
is_ok = self.alarm(hidden)
return out, is_ok
def load_model(path_model_weights='/home/AUTHOR/dla/more_dla/CIFAR10_keras_cnn.torchmodel',
path_detector_weights='/home/AUTHOR/dla/more_dla/CIFAR10_keras_cnn_LinfPGD_alarm_DLA.torchmodel', device=None):
torch_model = TorchModel()
torch_model.load_state_dict(
torch.load(path_model_weights))
torch_model.eval().to(device)
torch_alarm = TorchAlarm()
torch_alarm.load_state_dict(
torch.load(path_detector_weights))
torch_alarm.eval().to(device)
return TorchWithDetect(torch_model, torch_alarm), \
lambda x: torch_model(x)[0], \
lambda x, how=None: torch_alarm(torch_model(x)[1]).flatten() |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class CIFAR10:
def __init__(self, seed = 43, tf_mode=False):
if tf_mode:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.compat.v1.disable_eager_execution()
(train_data, train_labels),(self.test_data, self.test_labels) = tf.keras.datasets.cifar10.load_data()
else:
import torchvision
train_dataset = torchvision.datasets.CIFAR10("data", train=True)
test_dataset = torchvision.datasets.CIFAR10("data", train=False)
train_data, train_labels = train_dataset.data, np.array(train_dataset.targets, dtype=int)
self.test_data, self.test_labels = test_dataset.data, np.array(test_dataset.targets, dtype=int)
train_data = train_data/255.
self.test_data = self.test_data/255.
VALIDATION_SIZE = 5000
np.random.seed(seed)
shuffled_indices = np.arange(len(train_data))
np.random.shuffle(shuffled_indices)
train_data = train_data[shuffled_indices]
train_labels = train_labels[shuffled_indices]
shuffled_indices = np.arange(len(self.test_data))
np.random.shuffle(shuffled_indices)
self.test_data = self.test_data[shuffled_indices]
self.test_labels = self.test_labels[shuffled_indices].flatten()
self.validation_data = train_data[:VALIDATION_SIZE, :, :, :]
self.validation_labels = train_labels[:VALIDATION_SIZE]
self.train_data = train_data[VALIDATION_SIZE:, :, :, :]
self.train_labels = train_labels[VALIDATION_SIZE:] |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
# Note: this resnet classifier/alarm pair is superseded by the CNN TorchModel
# and TorchAlarm defined further down; renamed here so the later definitions
# (which load_model actually uses) no longer shadow these.
class TorchResnetAlarm(torch.nn.Module):
def __init__(self):
super().__init__()
self.layers = torch.nn.ModuleList([
torch.nn.Linear(28682, 112),
torch.nn.ReLU(),
torch.nn.Linear(112, 100),
torch.nn.ReLU(),
torch.nn.Linear(100, 300),
torch.nn.ReLU(),
torch.nn.Linear(300, 200),
torch.nn.ReLU(),
torch.nn.Linear(200, 77),
torch.nn.ReLU(),
torch.nn.Linear(77, 1),
])
def __call__(self, x, training=False):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=torch.float32)
for layer in self.layers:
x = layer(x)
return x
class resnet_layer_torch(torch.nn.Module):
def __init__(self,
prior_filters=16,
num_filters=16,
kernel_size=3,
strides=1):
super().__init__()
self.a = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=kernel_size, padding=1)
self.b = torch.nn.BatchNorm2d(num_filters, eps=.000)
self.c = torch.nn.Conv2d(num_filters, num_filters, kernel_size=kernel_size, padding=1)
self.d = torch.nn.BatchNorm2d(num_filters, eps=.000)
self.layers = [self.a, self.b, self.c, self.d]
def forward(self, inputs):
x1 = self.a(inputs)
x2 = self.b(x1)
x3 = torch.nn.ReLU()(x2)
x4 = self.c(x3)
x5 = self.d(x4)
x6 = x5 + inputs
return x6, x2, x5
class resnet_layer2_torch(torch.nn.Module):
def __init__(self,
prior_filters=16,
num_filters=16,
kernel_size=3,
strides=1):
super().__init__()
self.a = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=kernel_size, padding=0, stride=(2,2))
self.b = torch.nn.BatchNorm2d(num_filters, eps=.000)
self.c = torch.nn.Conv2d(num_filters, num_filters, kernel_size=kernel_size, padding=1)
self.c2 = torch.nn.Conv2d(prior_filters, num_filters, kernel_size=1, padding=0, stride=(2,2))
self.d = torch.nn.BatchNorm2d(num_filters, eps=.000)
self.layers = [self.a, self.b, self.c, self.c2, self.d]
def forward(self, x):
xp = torch.nn.functional.pad(x, (0, 1, 0, 1), "constant", 0)
y = self.a(xp)
y = self.b(y)
y = torch.nn.ReLU()(y)
y = self.c(y)
z = self.c2(x)
y = self.d(y)
x = z+y
return x
class TorchResnetModel(torch.nn.Module):
def __init__(self):
super().__init__()
class Transpose(torch.nn.Module):
def forward(self, x):
return x.permute((0, 2, 3, 1))
self.layers = torch.nn.ModuleList([
torch.nn.Conv2d(3, 16, kernel_size=3, padding=1),
torch.nn.BatchNorm2d(16, eps=.000),
torch.nn.ReLU(),
# AAA
resnet_layer_torch(16, 16),
torch.nn.ReLU(),
resnet_layer_torch(16, 16),
torch.nn.ReLU(),
resnet_layer_torch(16, 16),
torch.nn.ReLU(),
resnet_layer2_torch(16, 32),
torch.nn.ReLU(),
resnet_layer_torch(32, 32),
torch.nn.ReLU(),
resnet_layer_torch(32, 32),
torch.nn.ReLU(),
resnet_layer2_torch(32, 64),
torch.nn.ReLU(),
resnet_layer_torch(64, 64),
torch.nn.ReLU(),
resnet_layer_torch(64, 64),
torch.nn.ReLU(),
torch.nn.AvgPool2d(8),
#
Transpose(),
torch.nn.Flatten(),
torch.nn.Linear(64, 10),
])
def __call__(self, x, training=False, return_features=False):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=torch.float32)
extra = []
for i,layer in enumerate(self.layers):
if isinstance(layer, resnet_layer_torch):
x,y,z = layer(x)
if i == 11:
extra.append(y)
if i == 19:
extra.append(z)
else:
x = layer(x)
if i == 1:
extra.append(x)
if isinstance(layer, torch.nn.Flatten):
features = x
extra = torch.cat([x.permute((0, 2, 3, 1)).reshape((x.shape[0], -1)) for x in extra] + [x], axis=1)
if return_features:
return x, extra, features
else:
return x, extra
class TorchModel(torch.nn.Module):
def __init__(self):
super().__init__()
class Transpose(torch.nn.Module):
def forward(self, x):
return x.permute((0, 2, 3, 1))
self.layers = torch.nn.ModuleList([
torch.nn.Conv2d(3, 32, kernel_size=3, padding=1),
torch.nn.ReLU(),
torch.nn.Conv2d(32, 32, kernel_size=3),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, 2),
torch.nn.Conv2d(32, 64, kernel_size=3, padding=1),
torch.nn.ReLU(),
torch.nn.Conv2d(64, 64, kernel_size=3),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, 2),
Transpose(),
torch.nn.Flatten(),
torch.nn.Linear(2304, 512),
torch.nn.ReLU(),
torch.nn.Linear(512, 10)])
def __call__(self, x, training=False, return_features=False):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=torch.float32)
for i,layer in enumerate(self.layers):
#print('l',layer)
#print('x',x.shape)
x = layer(x)
if i == 13:
#print("Have", x)
sav = x
if isinstance(layer, torch.nn.Flatten):
features = x
#print('aaa', sav)
if return_features:
return x, torch.cat([sav, x], axis=1), features
else:
return x, torch.cat([sav, x], axis=1)
class TorchAlarm(torch.nn.Module):
def __init__(self):
super().__init__()
self.layers = torch.nn.ModuleList([
torch.nn.Linear(522, 100),
torch.nn.SELU(),
torch.nn.Linear(100, 50),
torch.nn.SELU(),
torch.nn.Linear(50, 10),
torch.nn.SELU(),
torch.nn.Linear(10, 1)
])
def __call__(self, x, training=False):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=torch.float32)
for layer in self.layers:
x = layer(x)
return x
class TorchWithDetect:
def __init__(self, model, alarm):
self.model = model
self.alarm = alarm
def __call__(self, x):
out, hidden = self.model(x)
is_ok = self.alarm(hidden)
return out, is_ok
def load_model(path_model_weights='checkpoints/dla/dla_cifar_classifier_v2.h5',
path_detector_weights='checkpoints/dla/dla_cifar_detector_v2.h5', device=None):
torch_model = TorchModel()
torch_model.load_state_dict(
torch.load(path_model_weights))
torch_model.eval().to(device)
torch_alarm = TorchAlarm()
torch_alarm.load_state_dict(
torch.load(path_detector_weights))
torch_alarm.eval().to(device)
return TorchWithDetect(torch_model, torch_alarm), \
lambda x: torch_model(x)[0], \
lambda x, how=None: torch_alarm(torch_model(x)[1]).flatten() |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import warnings
from active_tests.decision_boundary_binarization import format_result
from active_tests.decision_boundary_binarization import \
interior_boundary_discrimination_attack
from argparse_utils import DecisionBoundaryBinarizationSettings
logging.getLogger('tensorflow').setLevel(logging.FATAL)
# import warnings
# warnings.filterwarnings("ignore")
import argparse
import torch
import numpy as np
import utils as ut
import defense_v2
from cifar import CIFAR10
import pgd_attack
class TorchWithDetectAndOtherReadout(torch.nn.Module):
def __init__(self, model, alarm, other_readout):
super().__init__()
self.model = model
self.alarm = alarm
self.other_readout = other_readout
def forward(self, x):
_, hidden, features = self.model(x, return_features=True)
is_ok = self.alarm(hidden)
out = self.other_readout(features)
return out, is_ok
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--batch-size", type=int, default=512)
parser.add_argument("--n-samples", type=int, default=512)
parser.add_argument("--adversarial-attack",
choices=("pgd", "selective-pgd", "joined-pgd"),
required=True)
parser.add_argument("--epsilon", type=float, default=0)
parser.add_argument("--n-steps", type=int, default=0)
parser.add_argument("--step-size", type=float, default=0)
parser.add_argument("--n-boundary-points", default=49, type=int)
parser.add_argument("--n-inner-points", default=10, type=int)
# parser.add_argument("--dont-verify-training-data", action="store_true")
# parser.add_argument("--use-boundary-adverarials", action="store_true")
parser.add_argument("--inverted-test", action="store_true")
args = parser.parse_args()
assert args.n_samples < 5000
if args.epsilon > 0 or args.n_steps > 0 or args.step_size > 0:
assert args.adversarial_attack is not None
if args.inverted_test:
print("Running inverted test")
else:
print("Running normal/non-inverted test")
device = "cuda" if torch.cuda.is_available() else "cpu"
dataset = CIFAR10(tf_mode=True)
classifier_and_detector, classifier, detector = defense_v2.load_model(
device=device)
def verify_valid_input_data(x_set: torch.Tensor) -> np.ndarray:
"""Returns True if something is not detected as an adversarial example."""
n_batches = math.ceil(x_set.shape[0] / args.batch_size)
with torch.no_grad():
return np.concatenate(
[(detector(
x_set[b * args.batch_size:(b + 1) * args.batch_size]
) < 0).cpu().numpy() for b in range(n_batches)])
def get_boundary_adversarials(x, y, n_samples, epsilon):
"""Generate adversarial examples for the base classifier."""
assert len(x.shape) == 3
x = x.unsqueeze(0)
x = torch.repeat_interleave(x, n_samples, dim=0)
y = y.unsqueeze(0)
y = torch.repeat_interleave(y, n_samples, dim=0)
for _ in range(25):
x_adv = pgd_attack.attack(
x, y, classifier, classifier_and_detector,
"pgd", args.n_steps, args.step_size, epsilon)
# project adversarials to the max norm boundary
x_adv = ut.clipping_aware_rescaling(x, x_adv - x, args.epsilon,
norm="linf")
is_valid = verify_valid_input_data(x_adv)
is_invalid = ~is_valid
            if np.all(is_invalid):
                # stop once every generated example is flagged by the detector
                break
else:
raise RuntimeError("Could not generate adversarial example that gets "
"detected after 25 trials.")
return x_adv
def run_attack(m, l, attack_kwargs):
modified_classifier_and_detector = TorchWithDetectAndOtherReadout(
classifier_and_detector.model,
(lambda *args, **kwargs: -classifier_and_detector.alarm(
*args,
**kwargs)) if args.inverted_test else classifier_and_detector.alarm,
list(m.children())[-1])
for x, y in l:
x, y = x.to(device), y.to(device)
x_adv = pgd_attack.attack(
x, y, m, modified_classifier_and_detector,
args.adversarial_attack, args.n_steps, args.step_size, args.epsilon)
with torch.no_grad():
logits = m(x_adv)
is_adv = (logits.argmax(1) != y).cpu().numpy()
if args.inverted_test:
undetected = (detector(x_adv) > 0).cpu().numpy()
else:
undetected = (detector(x_adv) < 0).cpu().numpy()
is_adv = np.logical_and(is_adv, undetected)
return is_adv, (x_adv, logits)
class FeatureExtractor(torch.nn.Module):
def __init__(self, classifier_and_detector):
super().__init__()
self.classifier = classifier_and_detector.model
def forward(self, x, features_only=True):
if features_only:
_, _, f = self.classifier(x, return_features=True)
return f
else:
return self.classifier(x)
feature_extractor = FeatureExtractor(classifier_and_detector)
# select clean data samples which don't get rejected by the detector
test_data_x = []
test_data_y = []
batch_idx = 0
n_samples = 0
with torch.no_grad():
while n_samples < args.n_samples:
x_batch = dataset.test_data[batch_idx * args.batch_size:
(batch_idx + 1) * args.batch_size]
y_batch = dataset.test_labels[batch_idx * args.batch_size:
(batch_idx + 1) * args.batch_size]
x_batch = x_batch.transpose((0, 3, 1, 2))
x_batch = torch.tensor(x_batch, dtype=torch.float32)
y_batch = torch.tensor(y_batch, dtype=torch.long)
mask = verify_valid_input_data(x_batch.to(device))
x_batch = x_batch[mask].numpy()
y_batch = y_batch[mask].numpy()
test_data_x.append(x_batch)
test_data_y.append(y_batch)
n_samples += len(x_batch)
test_data_x = np.concatenate(test_data_x, 0)
test_data_y = np.concatenate(test_data_y, 0)
test_data_x = test_data_x[:args.n_samples]
test_data_y = test_data_y[:args.n_samples]
del batch_idx, n_samples
test_loader = ut.build_dataloader_from_arrays(
test_data_x, test_data_y)
if args.inverted_test:
additional_settings = dict(
n_boundary_points=args.n_boundary_points,
n_boundary_adversarial_points=1,
n_far_off_boundary_points=1,
n_far_off_adversarial_points=1,
)
else:
additional_settings = dict(
n_boundary_points=args.n_boundary_points,
n_boundary_adversarial_points=args.n_boundary_points - 1,
n_far_off_boundary_points=1,
n_far_off_adversarial_points=0,
)
far_off_distance = 1.75
scores_logit_differences_and_validation_accuracies = \
interior_boundary_discrimination_attack(
feature_extractor,
test_loader,
attack_fn=lambda m, l, attack_kwargs: run_attack(m, l, attack_kwargs),
linearization_settings=DecisionBoundaryBinarizationSettings(
epsilon=args.epsilon,
norm="linf",
lr=10000,
adversarial_attack_settings=None,
optimizer="sklearn",
n_inner_points=args.n_inner_points,
**additional_settings
),
n_samples=args.n_samples,
device=device,
batch_size=args.batch_size,
n_samples_evaluation=200,
n_samples_asr_evaluation=200,
verify_valid_boundary_training_data_fn=verify_valid_input_data,
get_boundary_adversarials_fn=get_boundary_adversarials,
verify_valid_inner_training_data_fn=None,
verify_valid_input_validation_data_fn=None,
fill_batches_for_verification=False,
far_off_distance=far_off_distance
)
print(format_result(scores_logit_differences_and_validation_accuracies,
args.n_samples))
if __name__ == "__main__":
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import subprocess
directory = "models/modelGTP_cifar10"
subprocess.run("python PGD_attack.py -d cifar10 --data_path datasets/cifar10 --attack_name fgsm --save_eval_log --num_steps 1 --no-random_start --step_size 8 --model_dir {} ; python run_attack.py -d cifar10 --data_path datasets/cifar10 --attack_name fgsm --save_eval_log --model_dir {} ; python PGD_attack.py -d cifar10 --data_path datasets/cifar10 --attack_name pgds5 --save_eval_log --num_steps 5 --model_dir {} ; python run_attack.py -d cifar10 --data_path datasets/cifar10 --attack_name pgds5 --save_eval_log --num_steps 5 --model_dir {} ; python PGD_attack.py -d cifar10 --data_path datasets/cifar10 --attack_name pgds10 --save_eval_log --num_steps 10 --model_dir {} ; python run_attack.py -d cifar10 --data_path datasets/cifar10 --attack_name pgds10 --save_eval_log --num_steps 10 --model_dir {} ; python PGD_attack.py -d cifar10 --data_path datasets/cifar10 --attack_name pgds20 --save_eval_log --num_steps 20 --model_dir {} ; python run_attack.py -d cifar10 --data_path datasets/cifar10 --attack_name pgds20 --save_eval_log --num_steps 20 --model_dir {}".format(directory, directory, directory, directory, directory, directory, directory, directory, directory, directory), shell=True)
print("{}: Ended evaluation on fgsm and pgd attacks".format(datetime.now()))
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for importing the CIFAR10 dataset.
Each image in the dataset is a numpy array of shape (32, 32, 3), with the values
being unsigned integers (i.e., in the range 0,1,...,255).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import sys
import tensorflow as tf
import numpy as np
import re
version = sys.version_info
class CIFAR10Data(object):
"""
Unpickles the CIFAR10 dataset from a specified folder containing a pickled
version following the format of Krizhevsky which can be found
[here](https://www.cs.toronto.edu/~kriz/cifar.html).
Inputs to constructor
=====================
- path: path to the pickled dataset. The training data must be pickled
into five files named data_batch_i for i = 1, ..., 5, containing 10,000
examples each, the test data
must be pickled into a single file called test_batch containing 10,000
examples, and the 10 class names must be
pickled into a file called batches.meta. The pickled examples should
be stored as a tuple of two objects: an array of 10,000 32x32x3-shaped
arrays, and an array of their 10,000 true labels.
"""
def __init__(self, path, batch_start = 0, init_shuffle=True, train_size_ratio=1):
num_classes = 10
path = CIFAR10Data.rec_search(path)
train_filenames = ['data_batch_{}'.format(ii + 1) for ii in range(5)]
eval_filename = 'test_batch'
metadata_filename = 'batches.meta'
train_images = np.zeros((50000, 32, 32, 3), dtype='uint8')
train_labels = np.zeros(50000, dtype='int32')
for ii, fname in enumerate(train_filenames):
cur_images, cur_labels = self._load_datafile(os.path.join(path, fname))
train_images[ii * 10000: (ii + 1) * 10000, ...] = cur_images
train_labels[ii * 10000: (ii + 1) * 10000, ...] = cur_labels
eval_images, eval_labels = self._load_datafile(
os.path.join(path, eval_filename))
with open(os.path.join(path, metadata_filename), 'rb') as fo:
if version.major == 3:
data_dict = pickle.load(fo, encoding='bytes')
else:
data_dict = pickle.load(fo)
self.label_names = data_dict[b'label_names']
for ii in range(len(self.label_names)):
self.label_names[ii] = self.label_names[ii].decode('utf-8')
if train_size_ratio < 1:
new_train_images = []
new_train_labels = []
for class_ind in range(num_classes):
current_class_train_images = train_images[train_labels == class_ind]
num_train_per_class = int(current_class_train_images.shape[0] * train_size_ratio)
new_train_images.append(current_class_train_images[:num_train_per_class])
new_train_labels.append(np.full(num_train_per_class, class_ind, dtype='int32'))
train_images = np.concatenate(new_train_images, axis=0)
train_labels = np.concatenate(new_train_labels)
self.train_data = DataSubset(train_images, train_labels, batch_start = batch_start, init_shuffle=init_shuffle)
self.eval_data = DataSubset(eval_images, eval_labels, batch_start = batch_start, init_shuffle=init_shuffle)
@staticmethod
def rec_search(original_path):
rx = re.compile(r'data_batch_[0-9]+')
r = []
for path, _, file_names in os.walk(original_path):
r.extend([os.path.join(path, x) for x in file_names if rx.search(x)])
        if len(r) == 0:  # no data_batch files found below this path
return original_path
return os.path.dirname(r[0])
@staticmethod
def _load_datafile(filename):
with open(filename, 'rb') as fo:
if version.major == 3:
data_dict = pickle.load(fo, encoding='bytes')
else:
data_dict = pickle.load(fo)
assert data_dict[b'data'].dtype == np.uint8
image_data = data_dict[b'data']
image_data = image_data.reshape((10000, 3, 32, 32)).transpose(0, 2, 3, 1)
return image_data, np.array(data_dict[b'labels'])
class AugmentedCIFAR10Data(object):
"""
Data augmentation wrapper over a loaded dataset.
Inputs to constructor
=====================
- raw_cifar10data: the loaded CIFAR10 dataset, via the CIFAR10Data class
- sess: current tensorflow session
- model: current model (needed for input tensor)
"""
def __init__(self, raw_cifar10data, sess, model):
assert isinstance(raw_cifar10data, CIFAR10Data)
self.image_size = 32
# create augmentation computational graph
self.x_input_placeholder = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
padded = tf.map_fn(lambda img: tf.image.resize_image_with_crop_or_pad(
img, self.image_size + 4, self.image_size + 4),
self.x_input_placeholder)
cropped = tf.map_fn(lambda img: tf.random_crop(img, [self.image_size,
self.image_size,
3]), padded)
flipped = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), cropped)
self.augmented = flipped
self.train_data = AugmentedDataSubset(raw_cifar10data.train_data, sess,
self.x_input_placeholder,
self.augmented)
self.eval_data = AugmentedDataSubset(raw_cifar10data.eval_data, sess,
self.x_input_placeholder,
self.augmented)
self.label_names = raw_cifar10data.label_names
class DataSubset(object):
def __init__(self, xs, ys, batch_start = 0, init_shuffle=True):
self.xs = xs
self.n = xs.shape[0]
self.ys = ys
self.batch_start = batch_start
if init_shuffle:
self.cur_order = np.random.permutation(self.n)
else:
self.cur_order = np.arange(self.n)
def get_next_batch(self, batch_size, multiple_passes=False, reshuffle_after_pass=True):
if self.n < batch_size:
raise ValueError('Batch size can be at most the dataset size')
if not multiple_passes:
actual_batch_size = min(batch_size, self.n - self.batch_start)
if actual_batch_size <= 0:
raise ValueError('Pass through the dataset is complete.')
batch_end = self.batch_start + actual_batch_size
batch_xs = self.xs[self.cur_order[self.batch_start: batch_end], ...]
batch_ys = self.ys[self.cur_order[self.batch_start: batch_end], ...]
self.batch_start += actual_batch_size
if actual_batch_size < batch_size:
print('actual_batch_size < batch_size, padding with zeros')
batch_xs_pad = np.zeros(shape=(batch_size - actual_batch_size, batch_xs.shape[1], batch_xs.shape[2], batch_xs.shape[3]), dtype=batch_xs.dtype)
batch_ys_pad = np.zeros(batch_size - actual_batch_size, dtype=batch_ys.dtype)
batch_xs = np.concatenate([batch_xs, batch_xs_pad], axis=0)
batch_ys = np.concatenate([batch_ys, batch_ys_pad], axis=0)
return batch_xs, batch_ys
actual_batch_size = min(batch_size, self.n - self.batch_start)
if actual_batch_size < batch_size:
if reshuffle_after_pass:
self.cur_order = np.random.permutation(self.n)
self.batch_start = 0
batch_end = self.batch_start + batch_size
batch_xs = self.xs[self.cur_order[self.batch_start: batch_end], ...]
batch_ys = self.ys[self.cur_order[self.batch_start: batch_end], ...]
self.batch_start += actual_batch_size
return batch_xs, batch_ys
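# Usage sketch (illustrative): with multiple_passes=True the subset cycles
# through the data indefinitely, reshuffling after each epoch; with
# multiple_passes=False it zero-pads a final short batch and raises once the
# single pass is exhausted.
#   subset = DataSubset(images, labels)  # images: (N, 32, 32, 3) uint8
#   batch_xs, batch_ys = subset.get_next_batch(128, multiple_passes=True)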
class AugmentedDataSubset(object):
def __init__(self, raw_datasubset, sess, x_input_placeholder,
augmented):
self.sess = sess
self.raw_datasubset = raw_datasubset
self.x_input_placeholder = x_input_placeholder
self.augmented = augmented
def get_next_batch(self, batch_size, multiple_passes=False, reshuffle_after_pass=True):
raw_batch = self.raw_datasubset.get_next_batch(batch_size, multiple_passes,
reshuffle_after_pass)
images = raw_batch[0].astype(np.float32)
return self.sess.run(self.augmented, feed_dict={self.x_input_placeholder:
raw_batch[0]}), raw_batch[1]
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configargparse
import pdb
def pair(arg):
return [float(x) for x in arg.split(',')]
def get_args():
parser = configargparse.ArgParser(default_config_files=[])
parser.add("--model_dir", type=str, default="checkpoints/tf_curriculum_at/modelGTP_cifar10/", help="Path to save/load the checkpoints, default=models/model")
parser.add("--data_path", type=str, default="data/cifar-10-batches-py/", help="Path to dataset, default=datasets/cifar10")
parser.add("--tf_seed", type=int, default=451760341, help="Random seed for initializing tensor-flow variables to rule out the effect of randomness in experiments, default=45160341")
parser.add("--np_seed", type=int, default=216105420, help="Random seed for initializing numpy variables to rule out the effect of randomness in experiments, default=216105420")
parser.add("--num_eval_examples", type=int, default=10000, help="Number of eval samples, default=10000")
parser.add("--eval_batch_size", type=int, default=512, help="Eval batch size, default=100")
parser.add("--epsilon", "-e", type=float, default=8.0, help="Epsilon (Lp Norm distance from the original image) for generating adversarial examples, default=8.0")
parser.add("--num_steps", type=int, default=10, help="Number of steps to PGD attack, default=10")
parser.add("--ckpt", type=int, default = 0, help = "Checkpoint number for midway evaluation, default = 0")
parser.add("--step_size", "-s", type=float, default=2.0, help="Step size in PGD attack for generating adversarial examples in each step, default=2.0")
parser.add("--random_start", dest="random_start", action="store_true", help="Random start for PGD attack default=True")
parser.add("--no-random_start", dest="random_start", action="store_false", help="No random start for PGD attack default=True")
parser.set_defaults(random_start=True)
parser.add("--loss_func", "-f", type=str, default="xent", choices=["logit-diff", "xent", "target_task_xent"], help="Loss function for the model, choices are [xent, target_task_xent], default=xent")
parser.add("--attack_norm", type=str, default="inf", choices=["", "inf", "2", "TRADES"], help="Lp norm type for attacks, choices are [inf, 2], default=inf")
parser.add("--dataset", "-d", type=str, default="cifar10", choices=["cifar10", "cifar100", "tinyimagenet"], help="Path to load dataset, default=cifar10")
parser.add("--store_adv_path", type=str, default=None, help="Path to save adversarial examples, default=None")
parser.add("--attack_name", type=str, default=None, help="Path to save adversarial examples, default=''")
parser.add("--save_eval_log", dest="save_eval_log", action="store_true", help="Save txt file for attack eval")
parser.add("--no-save_eval_log", dest="save_eval_log", action="store_false", help="Save txt file for attack eval")
parser.set_defaults(save_eval_log=False)
parser.add("--xfer_attack", dest="xfer_attack", action="store_true", help="Adversarial transfer attack")
parser.add("--no-xfer_attack", dest="xfer_attack", action="store_false", help="not adversarial transfer attack")
parser.set_defaults(xfer_attack=False)
parser.add("--custom_output_model_name", type=str, default=None, help="Custom model name, default=None")
# for binarization test
parser.add("--n_boundary_points", default=None, type=int)
parser.add("--n_inner_points", default=None, type=int)
parser.add("--sample-from-corners", action="store_true")
parser.add("--save_data_path", default=None, type=str)
parser.add("--inference_mode", default="train", choices=("train", "eval"), type=str)
args = parser.parse_args()
return args
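# Example invocation (flags as defined above; the paths are illustrative and
# match the defaults used elsewhere in this repository):
#   python PGD_attack.py -d cifar10 --data_path datasets/cifar10 \
#       --attack_name pgds10 --num_steps 10 --save_eval_log \
#       --model_dir models/modelGTP_cifar10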
if __name__ == "__main__":
print(get_args())
pdb.set_trace()
# TODO Default for model_dir
# TODO Need to update the helps
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import numpy as np
import cifar10_input
import config_attack
import sys
import math
from tqdm import tqdm
from case_studies.curriculum_at.PGD_attack import LinfPGDAttack
if __name__ == '__main__':
config = vars(config_attack.get_args())
tf.set_random_seed(config['tf_seed'])
np.random.seed(config['np_seed'])
model_file = tf.train.latest_checkpoint(config['model_dir'])
# print("config['model_dir']: ", config['model_dir'])
if model_file is None:
print('No model found')
sys.exit()
if 'GTP' in config['model_dir']:
from model_new import Model, ModelTinyImagnet
if config['dataset'] == 'cifar10' or config['dataset'] == 'cifar100':
# TODO: verify this with the authors
# ATTENTION: mode was "train" before
model = Model(mode=config["inference_mode"], dataset=config['dataset'],
train_batch_size=config['eval_batch_size'],
normalize_zero_mean=True)
else:
model = ModelTinyImagnet(mode='train', dataset=config['dataset'],
train_batch_size=config['eval_batch_size'],
normalize_zero_mean=True)
elif 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config[
'model_dir']:
print("finetuned tinyimagenet MODEL")
from model_new import ModelTinyImagenetSourceExtendedLogits
full_source_model_x_input = tf.placeholder(tf.float32,
shape=[None, 32, 32, 3])
upresized_full_source_model_x_input = tf.image.resize_images(
full_source_model_x_input, size=[64, 64])
if config['dataset'] == 'cifar10':
model = ModelTinyImagenetSourceExtendedLogits(mode='train',
dataset='tinyimagenet',
target_task_class_num=10,
train_batch_size=config[
'eval_batch_size'],
input_tensor=upresized_full_source_model_x_input)
elif config['dataset'] == 'cifar100':
model = ModelTinyImagenetSourceExtendedLogits(mode='train',
dataset='tinyimagenet',
target_task_class_num=100,
train_batch_size=config[
'eval_batch_size'],
input_tensor=upresized_full_source_model_x_input)
model.x_input = full_source_model_x_input
t_vars = tf.trainable_variables()
source_model_vars = [var for var in t_vars if (
'discriminator' not in var.name and 'classifier' not in var.name and 'target_task_logit' not in var.name)]
source_model_target_logit_vars = [var for var in t_vars if
'target_task_logit' in var.name]
source_model_saver = tf.train.Saver(var_list=source_model_vars)
finetuned_source_model_vars = source_model_vars + source_model_target_logit_vars
finetuned_source_model_saver = tf.train.Saver(
var_list=finetuned_source_model_vars)
elif 'finetuned_on_cifar100' in config['model_dir']:
raise NotImplementedError
print("finetuned MODEL")
from model_original_cifar_challenge import ModelExtendedLogits
model = ModelExtendedLogits(mode='train', target_task_class_num=100,
train_batch_size=config['eval_batch_size'])
t_vars = tf.trainable_variables()
source_model_vars = [var for var in t_vars if (
'discriminator' not in var.name and 'classifier' not in var.name and 'target_task_logit' not in var.name)]
source_model_target_logit_vars = [var for var in t_vars if
'target_task_logit' in var.name]
source_model_saver = tf.train.Saver(var_list=source_model_vars)
finetuned_source_model_vars = source_model_vars + source_model_target_logit_vars
finetuned_source_model_saver = tf.train.Saver(
var_list=finetuned_source_model_vars)
elif ('adv_trained' in config['model_dir'] or 'naturally_trained' in config[
'model_dir'] or 'a_very_robust_model' in config['model_dir']):
raise NotImplementedError
print("original challenge MODEL")
from free_model_original import Model
model = Model(mode='eval', dataset=config['dataset'],
train_batch_size=config['eval_batch_size'])
elif 'IGAM' in config['model_dir']:
print("IGAM MODEL")
from model_new import Model
model = Model(mode='train', dataset=config['dataset'],
train_batch_size=config['eval_batch_size'],
normalize_zero_mean=True)
else:
raise NotImplementedError
print("other MODEL")
from free_model import Model
model = Model(mode='eval', dataset=config['dataset'],
train_batch_size=config['eval_batch_size'])
attack = LinfPGDAttack(model,
config['epsilon'],
config['num_steps'],
config['step_size'],
config['random_start'],
config['loss_func'],
dataset=config['dataset'])
saver = tf.train.Saver()
data_path = config['data_path']
# print(data_path)
# x = input()
if config['dataset'] == 'cifar10':
# print("load cifar10 dataset")
cifar = cifar10_input.CIFAR10Data(data_path)
elif config['dataset'] == 'cifar100':
raise NotImplementedError
print("load cifar100 dataset")
cifar = cifar100_input.CIFAR100Data(data_path)
else:
raise NotImplementedError
print("load tinyimagenet dataset")
cifar = tinyimagenet_input.TinyImagenetData()
with tf.Session() as sess:
# Restore the checkpoint
if 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config[
'model_dir']:
sess.run(tf.global_variables_initializer())
source_model_file = tf.train.latest_checkpoint(
"models/model_AdvTrain-igamsource-IGAM-tinyimagenet_b16")
source_model_saver.restore(sess, source_model_file)
finetuned_source_model_file = tf.train.latest_checkpoint(
config['model_dir'])
finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
elif 'finetuned_on_cifar100' in config['model_dir']:
sess.run(tf.global_variables_initializer())
source_model_file = tf.train.latest_checkpoint("models/adv_trained")
source_model_saver.restore(sess, source_model_file)
finetuned_source_model_file = tf.train.latest_checkpoint(
config['model_dir'])
finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
else:
saver.restore(sess, model_file)
# Iterate over the samples batch-by-batch
num_eval_examples = config['num_eval_examples']
eval_batch_size = config['eval_batch_size']
num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
x_adv = [] # adv accumulator
x = []
y = []
y_p = []
y_adv = []
is_correct = []
# print('Iterating over {} batches'.format(num_batches))
for ibatch in tqdm(range(num_batches)):
bstart = ibatch * eval_batch_size
bend = min(bstart + eval_batch_size, num_eval_examples)
x_batch = cifar.eval_data.xs[bstart:bend, :]
y_batch = cifar.eval_data.ys[bstart:bend]
if config['attack_norm'] == 'inf':
x_batch_adv = attack.perturb(x_batch, y_batch, sess)
elif config['attack_norm'] == '2':
x_batch_adv = attack.perturb_l2(x_batch, y_batch, sess)
elif config['attack_norm'] == 'TRADES':
x_batch_adv = attack.perturb_TRADES(x_batch, y_batch, sess)
elif config['attack_norm'] == "":
x_batch_adv = x_batch
y_pred = sess.run(model.predictions, feed_dict={model.x_input: x_batch_adv})
y_pred_clean = sess.run(model.predictions, feed_dict={model.x_input: x_batch})
x_adv.append(x_batch_adv)
x.append(x_batch)
y.append(y_batch)
y_p.append(y_pred_clean)
y_adv.append(y_pred)
is_correct.append(y_pred == y_batch)
is_correct = np.concatenate(is_correct)
x_adv = np.concatenate(x_adv)
x = np.concatenate(x)
y = np.concatenate(y)
y_p = np.concatenate(y_p)
y_adv = np.concatenate(y_adv)
if config["save_data_path"] is not None:
x = x.astype(int)
x_adv = x_adv.astype(int)
np.savez(config["save_data_path"], x_a=x, x_b=x_adv, y_a=y_p, y_b=y_adv)
print(f"Robust accuracy: {np.mean(is_correct)}")
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implementation of attack methods. Running this file as a program will
apply the attack to the model specified by the config file and store
the examples in an .npy file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
class LinfPGDAttack:
def __init__(self, model, epsilon, num_steps, step_size, random_start, loss_func, dataset='cifar10'):
"""Attack parameter initialization. The attack performs k steps of
size a, while always staying within epsilon from the initial
point."""
self.model = model
self.epsilon = epsilon
self.num_steps = num_steps
self.step_size = step_size
self.rand = random_start
self.dataset = dataset
if loss_func == 'xent':
loss = model.xent
elif loss_func == "logit-diff":
loss = model.top2_logit_diff_loss
elif loss_func == 'target_task_xent':
      raise ValueError("Not implemented")
      # NOTE: the lines below are unreachable, and `label_mask` is never
      # defined in this scope; they are kept from an earlier CW-style loss.
      loss = model.target_task_mean_xent
      correct_logit = tf.reduce_sum(label_mask * model.pre_softmax, axis=1)
      wrong_logit = tf.reduce_max((1-label_mask) * model.pre_softmax - 1e4*label_mask, axis=1)
      loss = -tf.nn.relu(correct_logit - wrong_logit + 50)
else:
print('Unknown loss function. Defaulting to cross-entropy')
loss = model.xent
self.grad = tf.gradients(loss, model.x_input)[0]
self.loss = loss
# self.logit = tf.placeholder(tf.float32, shape=[None, 100])
# self.grad2 = tf.gradients(loss + tf.reduce_mean(tf.reduce_sum(tf.pow(tf.subtract(self.logit, model.pre_softmax), 2.0), keepdims=True)), model.x_input)[0]
def perturb(self, x_nat, y, sess, feed_dict={}):
"""Given a set of examples (x_nat, y), returns a set of adversarial
examples within epsilon of x_nat in l_infinity norm."""
if self.rand:
x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
x = np.clip(x, 0, 255) # ensure valid pixel range
else:
x = np.copy(x_nat)
for i in range(self.num_steps):
loss, grad = sess.run((self.loss, self.grad), feed_dict={self.model.x_input: x,
self.model.y_input: y,
**feed_dict})
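      # in-place update; casting='unsafe' allows writing the float step back
      # into x even when x_nat was uint8 (the update is then truncated to
      # integers)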
x = np.add(x, self.step_size * np.sign(grad), out=x, casting='unsafe')
x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
x = np.clip(x, 0, 255) # ensure valid pixel range
return x
def perturb_l2(self, x_nat, y, sess, feed_dict={}):
"""Given a set of examples (x_nat, y), returns a set of adversarial
examples within epsilon of x_nat in l_2 norm."""
if self.rand:
pert = np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
pert_norm = np.linalg.norm(pert)
pert = pert / max(1, pert_norm)
else:
pert = np.zeros(x_nat.shape)
for i in range(self.num_steps):
x = x_nat + pert
# x = np.clip(x, 0, 255)
grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
self.model.y_input: y,
**feed_dict})
normalized_grad = grad / np.linalg.norm(grad)
pert = np.add(pert, self.step_size * normalized_grad, out=pert, casting='unsafe')
      # project pert onto the epsilon L2 ball; note the norm is computed over
      # the whole batch tensor, not per example
pert_norm = np.linalg.norm(pert)
rescale_factor = pert_norm / self.epsilon
pert = pert / max(1, rescale_factor)
x = x_nat + pert
x = np.clip(x, 0, 255)
return x
# def perturb_TRADES(self, x_nat, y, sess):
# """Given a set of examples (x_nat, y), returns a set of adversarial
# examples within epsilon of x_nat in l_2 norm of TRADES Loss."""
# if self.rand:
# pert = np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
# pert_norm = np.linalg.norm(pert)
# pert = pert / max(1, pert_norm)
# else:
# pert = np.zeros(x_nat.shape)
# nat_logit = sess.run(model.pre_softmax, feed_dict={self.model.x_input: x_nat,
# self.model.y_input: y})
# for i in range(self.num_steps):
# x = x_nat + pert
# grad = sess.run(self.grad2, feed_dict={self.model.x_input: x,
# self.model.y_input: y, self.logit: nat_logit})
# normalized_grad = grad / np.linalg.norm(grad)
# pert = np.add(pert, self.step_size * normalized_grad, out=pert, casting='unsafe')
# pert_norm = np.linalg.norm(pert)
# rescale_factor = pert_norm / self.epsilon
# pert = pert / max(1, rescale_factor)
# #x = x_nat + pert
# x = np.clip(x, 0, 255)
# return x
def modified_perturb_l2(self, x_nat, y, feed_dict={}):
if self.rand:
pert = np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
pert_norm = np.linalg.norm(pert)
pert = pert / max(1, pert_norm)
else:
pert = np.zeros(x_nat.shape)
for i in range(self.num_steps):
x = x_nat + pert
# x = np.clip(x, 0, 255)
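      # NOTE: a fresh session is opened and all variables are re-initialized
      # at every step, so these gradients come from randomly initialized
      # weights rather than from a restored checkpoint.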
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
self.model.y_input: y,
**feed_dict})
normalized_grad = grad / np.linalg.norm(grad)
pert = np.add(pert, self.step_size * normalized_grad, out=pert, casting='unsafe')
# project pert to norm ball
pert_norm = np.linalg.norm(pert)
rescale_factor = pert_norm / self.epsilon
pert = pert / max(1, rescale_factor)
x = x_nat + pert
x = np.clip(x, 0, 255)
return (x - x_nat)
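# A minimal, self-contained usage sketch (not part of the original pipeline).
# It builds a toy linear "model" exposing just the attributes LinfPGDAttack
# needs for the 'xent' loss (x_input, y_input, xent) and runs the attack on
# random data; all shapes and hyperparameters here are illustrative.
if __name__ == '__main__':
  class _ToyModel(object):
    def __init__(self):
      self.x_input = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
      self.y_input = tf.placeholder(tf.int64, shape=[None])
      flat = tf.reshape(self.x_input, [tf.shape(self.x_input)[0], -1])
      w = tf.get_variable(
          'toy_w', [32 * 32 * 3, 10], tf.float32,
          initializer=tf.random_normal_initializer(stddev=0.01))
      logits = tf.matmul(flat, w)
      self.xent = tf.reduce_sum(
          tf.nn.sparse_softmax_cross_entropy_with_logits(
              logits=logits, labels=self.y_input))

  toy_model = _ToyModel()
  toy_attack = LinfPGDAttack(toy_model, epsilon=8.0, num_steps=10,
                             step_size=2.0, random_start=True,
                             loss_func='xent')
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    x_nat = np.random.uniform(0, 255, size=(4, 32, 32, 3)).astype(np.float32)
    y = np.random.randint(0, 10, size=(4,))
    x_adv = toy_attack.perturb(x_nat, y, sess)
    # the result stays within the epsilon ball and the valid pixel range
    assert np.amax(np.abs(x_adv - x_nat)) <= 8.0 + 1e-4
    assert x_adv.min() >= 0 and x_adv.max() <= 255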
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluates a model against examples from a .npy file as specified
in attack_config.json"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import json
import math
import os
import sys
import time
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import cifar10_input
# the cifar100 / tinyimagenet branches below reference these companion input
# modules, so import them alongside cifar10_input
import cifar100_input
import tinyimagenet_input
import config_attack
config = vars(config_attack.get_args())
data_path = config['data_path']
def run_attack(checkpoint, x_adv, epsilon):
if config['dataset'] == 'cifar10':
cifar = cifar10_input.CIFAR10Data(data_path)
elif config['dataset'] == 'cifar100':
cifar = cifar100_input.CIFAR100Data(data_path)
else:
cifar = tinyimagenet_input.TinyImagenetData()
if 'GTP' in config['model_dir']:
print("GTP MODEL")
from model_new import Model, ModelTinyImagnet
if config['dataset'] == 'cifar10' or config['dataset'] == 'cifar100':
model = Model(mode='train', dataset=config['dataset'], train_batch_size=config['eval_batch_size'], normalize_zero_mean=True)
else:
model = ModelTinyImagnet(mode='train', dataset=config['dataset'], train_batch_size=config['eval_batch_size'], normalize_zero_mean=True)
elif 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config['model_dir']:
print("finetuned tinyimagenet MODEL")
from model_new import ModelTinyImagenetSourceExtendedLogits
full_source_model_x_input = tf.placeholder(tf.float32, shape = [None, 32, 32, 3])
upresized_full_source_model_x_input = tf.image.resize_images(full_source_model_x_input, size=[64, 64])
if config['dataset'] == 'cifar10':
model = ModelTinyImagenetSourceExtendedLogits(mode='train', dataset='tinyimagenet', target_task_class_num=10, train_batch_size=config['eval_batch_size'], input_tensor=upresized_full_source_model_x_input)
elif config['dataset'] == 'cifar100':
model = ModelTinyImagenetSourceExtendedLogits(mode='train', dataset='tinyimagenet', target_task_class_num=100, train_batch_size=config['eval_batch_size'], input_tensor=upresized_full_source_model_x_input)
model.x_input = full_source_model_x_input
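    # The source model expects 64x64 tinyimagenet inputs, so the 32x32 CIFAR
    # placeholder is bilinearly upsampled before being fed in; re-pointing
    # model.x_input at the 32x32 placeholder lets the rest of this script
    # feed CIFAR-sized batches unchanged.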
t_vars = tf.trainable_variables()
source_model_vars = [var for var in t_vars if ('discriminator' not in var.name and 'classifier' not in var.name and 'target_task_logit' not in var.name)]
source_model_target_logit_vars = [var for var in t_vars if 'target_task_logit' in var.name]
source_model_saver = tf.train.Saver(var_list=source_model_vars)
finetuned_source_model_vars = source_model_vars + source_model_target_logit_vars
finetuned_source_model_saver = tf.train.Saver(var_list=finetuned_source_model_vars)
elif 'finetuned_on_cifar100' in config['model_dir']:
print("finetuned MODEL")
from model_original_cifar_challenge import ModelExtendedLogits
model = ModelExtendedLogits(mode='train', target_task_class_num=100, train_batch_size=config['eval_batch_size'])
t_vars = tf.trainable_variables()
source_model_vars = [var for var in t_vars if ('discriminator' not in var.name and 'classifier' not in var.name and 'target_task_logit' not in var.name)]
source_model_target_logit_vars = [var for var in t_vars if 'target_task_logit' in var.name]
source_model_saver = tf.train.Saver(var_list=source_model_vars)
finetuned_source_model_vars = source_model_vars + source_model_target_logit_vars
finetuned_source_model_saver = tf.train.Saver(var_list=finetuned_source_model_vars)
elif ('adv_trained' in config['model_dir'] or 'naturally_trained' in config['model_dir'] or 'a_very_robust_model' in config['model_dir']):
print("original challenge MODEL")
from free_model_original import Model
model = Model(mode='eval', dataset=config['dataset'], train_batch_size=config['eval_batch_size'])
elif 'IGAM' in config['model_dir']:
print("IGAM MODEL")
from model_new import Model
model = Model(mode='train', dataset=config['dataset'], train_batch_size=config['eval_batch_size'], normalize_zero_mean=True)
else:
print("other MODEL")
from free_model import Model
model = Model(mode='eval', dataset=config['dataset'], train_batch_size=config['eval_batch_size'])
saver = tf.train.Saver()
num_eval_examples = 10000
eval_batch_size = 100
num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
total_corr = 0
x_nat = cifar.eval_data.xs
l_inf = np.amax(np.abs(x_nat - x_adv))
if l_inf > epsilon + 0.0001:
print('maximum perturbation found: {}'.format(l_inf))
print('maximum perturbation allowed: {}'.format(epsilon))
return
y_pred = [] # label accumulator
with tf.Session() as sess:
# Restore the checkpoint
if 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config['model_dir']:
sess.run(tf.global_variables_initializer())
source_model_file = tf.train.latest_checkpoint("models/model_AdvTrain-igamsource-IGAM-tinyimagenet_b16")
source_model_saver.restore(sess, source_model_file)
finetuned_source_model_file = tf.train.latest_checkpoint(config['model_dir'])
finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
elif 'finetuned_on_cifar100' in config['model_dir']:
sess.run(tf.global_variables_initializer())
source_model_file = tf.train.latest_checkpoint("models/adv_trained")
source_model_saver.restore(sess, source_model_file)
finetuned_source_model_file = tf.train.latest_checkpoint(config['model_dir'])
finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
else:
saver.restore(sess, checkpoint)
# Iterate over the samples batch-by-batch
for ibatch in range(num_batches):
bstart = ibatch * eval_batch_size
bend = min(bstart + eval_batch_size, num_eval_examples)
x_batch = x_adv[bstart:bend, :]
y_batch = cifar.eval_data.ys[bstart:bend]
dict_adv = {model.x_input: x_batch,
model.y_input: y_batch}
if 'finetuned_on_cifar10' in config['model_dir'] or 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config['model_dir']:
cur_corr, y_pred_batch = sess.run([model.target_task_num_correct, model.target_task_predictions],
feed_dict=dict_adv)
else:
cur_corr, y_pred_batch = sess.run([model.num_correct, model.predictions],
feed_dict=dict_adv)
total_corr += cur_corr
y_pred.append(y_pred_batch)
accuracy = total_corr / num_eval_examples
print('Adv Accuracy: {:.2f}%'.format(100.0 * accuracy))
y_pred = np.concatenate(y_pred, axis=0)
store_adv_pred_path = "preds/" + adv_examples_path.split("/")[-1]
if not os.path.exists("preds/"):
os.makedirs("preds/")
np.save(store_adv_pred_path, y_pred)
print('Output saved at ', store_adv_pred_path)
if config['save_eval_log']:
date_str = datetime.now().strftime("%d_%b")
log_dir = "attack_log/" + date_str
if not os.path.exists(log_dir):
os.makedirs(log_dir)
log_filename = adv_examples_path.split("/")[-1].replace('.npy', '.txt')
model_name = config['model_dir'].split('/')[1]
log_file_path = os.path.join(log_dir, log_filename)
with open(log_file_path, "w") as f:
f.write('Model checkpoint: {} \n'.format(checkpoint))
f.write('Adv Accuracy: {:.2f}%'.format(100.0 * accuracy))
print('Results saved at ', log_file_path)
# full test evaluation
if config['dataset'] == 'cifar10':
raw_data = cifar10_input.CIFAR10Data(data_path)
elif config['dataset'] == 'cifar100':
raw_data = cifar100_input.CIFAR100Data(data_path)
else:
raw_data = tinyimagenet_input.TinyImagenetData()
data_size = raw_data.eval_data.n
if data_size % config['eval_batch_size'] == 0:
eval_steps = data_size // config['eval_batch_size']
else:
eval_steps = data_size // config['eval_batch_size'] + 1
total_num_correct = 0
for ii in tqdm(range(eval_steps)):
x_eval_batch, y_eval_batch = raw_data.eval_data.get_next_batch(config['eval_batch_size'], multiple_passes=False)
eval_dict = {model.x_input: x_eval_batch, model.y_input: y_eval_batch}
if 'finetuned_on_cifar10' in config['model_dir'] or 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config['model_dir']:
num_correct = sess.run(model.target_task_num_correct, feed_dict=eval_dict)
else:
num_correct = sess.run(model.num_correct, feed_dict=eval_dict)
total_num_correct += num_correct
eval_acc = total_num_correct / data_size
with open(log_file_path, "a+") as f:
f.write('\nClean Accuracy: {:.2f}%'.format(100.0 * eval_acc))
print('Clean Accuracy: {:.2f}%'.format(100.0 * eval_acc))
print('Results saved at ', log_file_path)
if __name__ == '__main__':
# with open('attack_config.json') as config_file:
# config = json.load(config_file)
model_dir = config['model_dir']
checkpoint = tf.train.latest_checkpoint(model_dir)
adv_examples_path = config['store_adv_path']
  if adv_examples_path is None:
model_name = config['model_dir'].split('/')[1]
    if config['attack_name'] is None:
if config['dataset'] == 'cifar10':
adv_examples_path = "attacks/{}_attack.npy".format(model_name)
elif config['dataset'] == 'cifar100':
adv_examples_path = "attacks/{}_c100attack.npy".format(model_name)
else:
adv_examples_path = "attacks/{}_tinyattack.npy".format(model_name)
else:
if config['dataset'] == 'cifar10':
adv_examples_path = "attacks/{}_{}_attack.npy".format(model_name, config['attack_name'])
elif config['dataset'] == 'cifar100':
adv_examples_path = "attacks/{}_{}_c100attack.npy".format(model_name, config['attack_name'])
else:
adv_examples_path = "attacks/{}_{}_tinyattack.npy".format(model_name, config['attack_name'])
if config['attack_norm'] == '2':
adv_examples_path = adv_examples_path.replace("attack.npy", "l2attack.npy")
x_adv = np.load(adv_examples_path)
tf.set_random_seed(config['tf_seed'])
np.random.seed(config['np_seed'])
if checkpoint is None:
print('No checkpoint found')
elif x_adv.shape != (10000, 32, 32, 3):
print('Invalid shape: expected (10000, 32, 32, 3), found {}'.format(x_adv.shape))
elif np.amax(x_adv) > 255.0001 or np.amin(x_adv) < -0.0001:
print('Invalid pixel range. Expected [0, 255], found [{}, {}]'.format(
np.amin(x_adv),
np.amax(x_adv)))
else:
print("adv_examples_path: ", adv_examples_path)
run_attack(checkpoint, x_adv, config['epsilon'])
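# A hypothetical invocation (the flag names are assumptions about what
# config_attack.get_args() defines; adjust to the actual flags):
#
#   python run_attack.py --model_dir models/adv_trained --dataset cifar10 \
#       --store_adv_path attacks/adv_trained_attack.npy --epsilon 8.0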
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# based on https://github.com/tensorflow/models/tree/master/resnet
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import json
from collections import OrderedDict
class Model(object):
"""ResNet model."""
def __init__(self, mode, dataset, train_batch_size=None, normalize_zero_mean=True, use_pert=False):
"""ResNet constructor.
Args:
mode: One of 'train' and 'eval'.
"""
self.neck = None
self.y_pred = None
self.mode = mode
self.dataset = dataset
    self.pert = bool(mode == 'train' and use_pert)
if dataset == "cifar10":
self.num_classes = 10
elif dataset == "cifar100":
self.num_classes = 100
elif dataset == "GTSRB":
self.num_classes = 43
else:
self.num_classes = 200
self.train_batch_size = train_batch_size
self.activations = []
self.normalize_zero_mean = normalize_zero_mean
self._build_model()
def add_internal_summaries(self):
pass
def _stride_arr(self, stride):
"""Map a stride scalar to the stride array for tf.nn.conv2d."""
return [1, stride, stride, 1]
def _build_model(self):
    """Build the core model within the graph."""
    assert self.mode == 'train' or self.mode == 'eval'
with tf.variable_scope('classifier'):
with tf.variable_scope('input'):
if self.dataset == 'cifar10' or self.dataset == 'cifar100' or self.dataset == 'GTSRB':
self.x_input = tf.placeholder(
tf.float32,
shape=[None, 32, 32, 3])
else:
self.x_input = tf.placeholder(
tf.float32,
shape=[None, 64, 64, 3])
self.y_input = tf.placeholder(tf.int64, shape=None)
if self.pert:
self.pert = tf.get_variable(name='instance_perturbation', initializer=tf.zeros_initializer,
shape=[self.train_batch_size, 32, 32, 3], dtype=tf.float32,
trainable=True)
self.final_input = self.x_input + self.pert
self.final_input = tf.clip_by_value(self.final_input, 0., 255.)
else:
self.final_input = self.x_input
        if self.normalize_zero_mean:
          # Subtract the per-image mean; keepdims yields shape [N, 1, 1, 1] so
          # broadcasting works for any spatial size (the old tf.tile hardcoded
          # 32x32 and would fail on the 64x64 tinyimagenet placeholder).
          final_input_mean = tf.reduce_mean(
              self.final_input, axis=[1, 2, 3], keepdims=True)
          zero_mean_final_input = self.final_input - final_input_mean
          self.input_standardized = tf.math.l2_normalize(
              zero_mean_final_input, axis=[1, 2, 3])
        else:
          self.input_standardized = tf.math.l2_normalize(
              self.final_input, axis=[1, 2, 3])
x = self._conv('init_conv', self.input_standardized, 3, 3, 16, self._stride_arr(1))
self.activations.append(x)
strides = [1, 2, 2]
activate_before_residual = [True, False, False]
res_func = self._residual
      # Filter widths for a w28-10 wide residual network
      # (https://arxiv.org/pdf/1605.07146v1.pdf), which is more memory
      # efficient than a very deep residual network with comparably good
      # performance.
      # filters = [16, 16, 32, 64]  # narrow configuration, useful for debugging
      filters = [16, 160, 320, 640]
with tf.variable_scope('unit_1_0'):
x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]),
activate_before_residual[0])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_1_%d' % i):
x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_2_0'):
x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]),
activate_before_residual[1])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_2_%d' % i):
x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_3_0'):
x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]),
activate_before_residual[2])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_3_%d' % i):
x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_last'):
x = self._batch_norm('final_bn', x)
x = self._relu(x, 0.1)
x = self._global_avg_pool(x)
self.neck = x
with tf.variable_scope('logit'):
self.pre_softmax = self._fully_connected(x, self.num_classes)
self.activations.append(self.pre_softmax)
self.softmax = tf.nn.softmax(self.pre_softmax)
sample_indices = tf.range(self.train_batch_size, dtype=tf.int64)
sample_indices = tf.expand_dims(sample_indices, axis=-1)
target_indices = tf.expand_dims(self.y_input, axis=-1)
self.gather_indices = tf.concat([sample_indices, target_indices], axis=-1)
self.target_softmax = tf.gather_nd(self.softmax, self.gather_indices, name="targetsoftmax")
        # the target logit is independent of the other class logits, whereas
        # the target softmax value depends on all of them through the
        # softmax normalization
self.target_logit = tf.gather_nd(self.pre_softmax, self.gather_indices, name="targetlogit")
self.predictions = tf.argmax(self.pre_softmax, 1)
self.y_pred = self.predictions
self.correct_prediction = tf.equal(self.predictions, self.y_input)
self.num_correct = tf.reduce_sum(tf.cast(self.correct_prediction, tf.int64))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.feature_grad = tf.gradients(self.neck, self.x_input)[0]
with tf.variable_scope('costs'):
self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.pre_softmax, labels=self.y_input)
self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
self.mean_xent = tf.reduce_mean(self.y_xent)
self.y_xent_dbp = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.pre_softmax, labels=self.y_input)
self.xent_dbp = tf.reduce_sum(self.y_xent_dbp, name='y_xent_dbp')
self.mean_xent_dbp = tf.reduce_mean(self.y_xent_dbp)
self.weight_decay_loss = self._decay()
self.temploss = tf.reduce_sum(-tf.multiply(tf.one_hot(self.y_input, self.num_classes), tf.log(tf.clip_by_value(self.softmax, 1e-10, 1.0))), axis = 1)
# for top-2 logit diff loss
self.label_mask = tf.one_hot(self.y_input,
self.num_classes,
on_value=1.0,
off_value=0.0,
dtype=tf.float32)
self.correct_logit = tf.reduce_sum(self.label_mask * self.pre_softmax, axis=1)
self.wrong_logit = tf.reduce_max((1-self.label_mask) * self.pre_softmax - 1e4*self.label_mask, axis=1)
self.top2_logit_diff_loss = -tf.nn.relu(self.correct_logit - self.wrong_logit + 50)
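        # For an attacker maximizing this loss, the optimum drives
        # relu(correct_logit - wrong_logit + 50) to zero, i.e. pushes the best
        # wrong-class logit at least 50 above the true-class logit (a CW-style
        # margin loss with confidence 50).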
def _batch_norm(self, name, x):
"""Batch normalization."""
with tf.name_scope(name):
return tf.contrib.layers.batch_norm(inputs=x, decay=.9, center=True, scale=True, activation_fn=None,
updates_collections=None, is_training=(self.mode == 'train'))
def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
"""Residual unit with 2 sub layers."""
if activate_before_residual:
with tf.variable_scope('shared_activation'):
x = self._batch_norm('init_bn', x)
x = self._relu(x, 0.1)
orig_x = x
else:
with tf.variable_scope('residual_only_activation'):
orig_x = x
x = self._batch_norm('init_bn', x)
x = self._relu(x, 0.1)
with tf.variable_scope('sub1'):
x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
with tf.variable_scope('sub2'):
x = self._batch_norm('bn2', x)
x = self._relu(x, 0.1)
x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
with tf.variable_scope('sub_add'):
if in_filter != out_filter:
orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
orig_x = tf.pad(
orig_x, [[0, 0], [0, 0], [0, 0],
[(out_filter - in_filter) // 2, (out_filter - in_filter) // 2]])
x += orig_x
tf.logging.debug('image after unit %s', x.get_shape())
return x
def _decay(self):
"""L2 weight decay loss."""
costs = []
for var in tf.trainable_variables():
if var.op.name.find('DW') > 0:
costs.append(tf.nn.l2_loss(var))
return tf.add_n(costs)
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
"""Convolution."""
with tf.variable_scope(name):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
'DW', [filter_size, filter_size, in_filters, out_filters],
tf.float32, initializer=tf.random_normal_initializer(
stddev=np.sqrt(2.0 / n)))
return tf.nn.conv2d(x, kernel, strides, padding='SAME')
def _relu(self, x, leakiness=0.0):
"""Relu, with optional leaky support."""
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _fully_connected(self, x, out_dim):
"""FullyConnected layer for final output."""
num_non_batch_dimensions = len(x.shape)
prod_non_batch_dimensions = 1
for ii in range(num_non_batch_dimensions - 1):
prod_non_batch_dimensions *= int(x.shape[ii + 1])
x = tf.reshape(x, [tf.shape(x)[0], -1])
w = tf.get_variable(
'DW', [prod_non_batch_dimensions, out_dim],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable('biases', [out_dim],
initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
def _global_avg_pool(self, x):
assert x.get_shape().ndims == 4
return tf.reduce_mean(x, [1, 2])
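# A minimal usage sketch (illustrative, not part of the original pipeline;
# assumes TF1.x, a checkpoint under models/adv_trained, and numpy arrays
# `images` / `labels` of shapes [N, 32, 32, 3] and [N]):
#
#   model = Model(mode='eval', dataset='cifar10', train_batch_size=100)
#   saver = tf.train.Saver()
#   with tf.Session() as sess:
#     saver.restore(sess, tf.train.latest_checkpoint('models/adv_trained'))
#     acc = sess.run(model.accuracy,
#                    feed_dict={model.x_input: images, model.y_input: labels})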
class ModelTinyImagnet(object):
"""ResNet model."""
def __init__(self, mode, dataset, train_batch_size=None, normalize_zero_mean=False, use_pert=False):
"""ResNet constructor.
Args:
mode: One of 'train' and 'eval'.
"""
self.neck = None
self.y_pred = None
self.mode = mode
    self.pert = bool(mode == 'train' and use_pert)
if dataset == 'tinyimagenet':
self.num_classes = 200
self.input_size = 64
elif dataset == 'cifar100':
self.num_classes = 100
self.input_size = 32
else:
self.num_classes = 10
self.input_size = 32
self.train_batch_size = train_batch_size
self.activations = []
self.normalize_zero_mean = normalize_zero_mean
self._build_model()
def add_internal_summaries(self):
pass
def _stride_arr(self, stride):
"""Map a stride scalar to the stride array for tf.nn.conv2d."""
return [1, stride, stride, 1]
def _build_model(self):
    """Build the core model within the graph."""
    assert self.mode == 'train' or self.mode == 'eval'
with tf.variable_scope('classifier'):
with tf.variable_scope('input'):
self.x_input = tf.placeholder(
tf.float32,
shape=[None, self.input_size, self.input_size, 3])
self.y_input = tf.placeholder(tf.int64, shape=None)
if self.pert:
self.pert = tf.get_variable(name='instance_perturbation', initializer=tf.zeros_initializer,
shape=[self.train_batch_size, self.input_size, self.input_size, 3], dtype=tf.float32,
trainable=True)
self.final_input = self.x_input + self.pert
self.final_input = tf.clip_by_value(self.final_input, 0., 255.)
else:
self.final_input = self.x_input
if self.normalize_zero_mean:
final_input_mean = tf.reduce_mean(self.final_input, axis=[1,2,3])
for i in range(3):
final_input_mean = tf.expand_dims(final_input_mean, axis=-1)
final_input_mean = tf.tile(final_input_mean, [1,self.input_size,self.input_size,3])
zero_mean_final_input = self.final_input - final_input_mean
self.input_standardized = tf.math.l2_normalize(zero_mean_final_input, axis=[1,2,3])
else:
self.input_standardized = tf.math.l2_normalize(self.final_input, axis=[1,2,3])
x = self._conv('init_conv', self.input_standardized, 3, 3, 16, self._stride_arr(1))
self.activations.append(x)
strides = [1, 2, 2]
activate_before_residual = [True, False, False]
res_func = self._residual
      # Filter widths for a w28-10 wide residual network
      # (https://arxiv.org/pdf/1605.07146v1.pdf), which is more memory
      # efficient than a very deep residual network with comparably good
      # performance.
      # filters = [16, 16, 32, 64]  # narrow configuration, useful for debugging
      filters = [16, 160, 320, 640]
with tf.variable_scope('unit_1_0'):
x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]),
activate_before_residual[0])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_1_%d' % i):
x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_2_0'):
x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]),
activate_before_residual[1])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_2_%d' % i):
x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_3_0'):
x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]),
activate_before_residual[2])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_3_%d' % i):
x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_last'):
x = self._batch_norm('final_bn', x)
x = self._relu(x, 0.1)
x = self._global_avg_pool(x)
self.neck = x
with tf.variable_scope('logit'):
self.pre_softmax = self._fully_connected(x, self.num_classes)
self.activations.append(self.pre_softmax)
self.softmax = tf.nn.softmax(self.pre_softmax)
# y_one_hot = tf.one_hot(self.y_input, self.num_classes)
# self.target_softmax = self.softmax * y_one_hot
sample_indices = tf.range(self.train_batch_size, dtype=tf.int64)
sample_indices = tf.expand_dims(sample_indices, axis=-1)
target_indices = tf.expand_dims(self.y_input, axis=-1)
self.gather_indices = tf.concat([sample_indices, target_indices], axis=-1)
self.target_softmax = tf.gather_nd(self.softmax, self.gather_indices, name="targetsoftmax")
        # the target logit is independent of the other class logits, whereas
        # the target softmax value depends on all of them through the
        # softmax normalization
self.target_logit = tf.gather_nd(self.pre_softmax, self.gather_indices, name="targetlogit")
self.predictions = tf.argmax(self.pre_softmax, 1)
self.y_pred = self.predictions
self.correct_prediction = tf.equal(self.predictions, self.y_input)
self.num_correct = tf.reduce_sum(tf.cast(self.correct_prediction, tf.int64))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
with tf.variable_scope('costs'):
self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.pre_softmax, labels=self.y_input)
self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
self.mean_xent = tf.reduce_mean(self.y_xent)
self.weight_decay_loss = self._decay()
self.temploss = tf.reduce_sum(-tf.multiply(tf.one_hot(self.y_input, self.num_classes), tf.log(tf.clip_by_value(self.softmax, 1e-10, 1.0))), axis = 1)
# for top-2 logit diff loss
self.label_mask = tf.one_hot(self.y_input,
self.num_classes,
on_value=1.0,
off_value=0.0,
dtype=tf.float32)
self.correct_logit = tf.reduce_sum(self.label_mask * self.pre_softmax, axis=1)
self.wrong_logit = tf.reduce_max((1-self.label_mask) * self.pre_softmax - 1e4*self.label_mask, axis=1)
self.top2_logit_diff_loss = -tf.nn.relu(self.correct_logit - self.wrong_logit + 50)
def _batch_norm(self, name, x):
"""Batch normalization."""
with tf.name_scope(name):
return tf.contrib.layers.batch_norm(inputs=x, decay=.9, center=True, scale=True, activation_fn=None,
updates_collections=None, is_training=(self.mode == 'train'))
def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
"""Residual unit with 2 sub layers."""
if activate_before_residual:
with tf.variable_scope('shared_activation'):
x = self._batch_norm('init_bn', x)
x = self._relu(x, 0.1)
orig_x = x
else:
with tf.variable_scope('residual_only_activation'):
orig_x = x
x = self._batch_norm('init_bn', x)
x = self._relu(x, 0.1)
with tf.variable_scope('sub1'):
x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
with tf.variable_scope('sub2'):
x = self._batch_norm('bn2', x)
x = self._relu(x, 0.1)
x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
with tf.variable_scope('sub_add'):
if in_filter != out_filter:
orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
orig_x = tf.pad(
orig_x, [[0, 0], [0, 0], [0, 0],
[(out_filter - in_filter) // 2, (out_filter - in_filter) // 2]])
x += orig_x
tf.logging.debug('image after unit %s', x.get_shape())
return x
def _decay(self):
"""L2 weight decay loss."""
costs = []
for var in tf.trainable_variables():
if var.op.name.find('DW') > 0:
costs.append(tf.nn.l2_loss(var))
return tf.add_n(costs)
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
"""Convolution."""
with tf.variable_scope(name):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
'DW', [filter_size, filter_size, in_filters, out_filters],
tf.float32, initializer=tf.random_normal_initializer(
stddev=np.sqrt(2.0 / n)))
return tf.nn.conv2d(x, kernel, strides, padding='SAME')
def _relu(self, x, leakiness=0.0):
"""Relu, with optional leaky support."""
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _fully_connected(self, x, out_dim):
"""FullyConnected layer for final output."""
num_non_batch_dimensions = len(x.shape)
prod_non_batch_dimensions = 1
for ii in range(num_non_batch_dimensions - 1):
prod_non_batch_dimensions *= int(x.shape[ii + 1])
x = tf.reshape(x, [tf.shape(x)[0], -1])
w = tf.get_variable(
'DW', [prod_non_batch_dimensions, out_dim],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable('biases', [out_dim],
initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
def _global_avg_pool(self, x):
assert x.get_shape().ndims == 4
return tf.reduce_mean(x, [1, 2])
class ModelTinyImagenetSource(object):
"""ResNet model."""
def __init__(self, mode, dataset, train_batch_size=None, normalize_zero_mean=False, use_pert=False):
"""ResNet constructor.
Args:
mode: One of 'train' and 'eval'.
"""
self.neck = None
self.y_pred = None
self.mode = mode
    self.pert = bool(mode == 'train' and use_pert)
if dataset == 'tinyimagenet':
self.num_classes = 200
self.input_size = 64
elif dataset == 'cifar100':
self.num_classes = 100
self.input_size = 32
else:
self.num_classes = 10
self.input_size = 32
self.train_batch_size = train_batch_size
self.activations = []
self.normalize_zero_mean = normalize_zero_mean
self._build_model()
def add_internal_summaries(self):
pass
def _stride_arr(self, stride):
"""Map a stride scalar to the stride array for tf.nn.conv2d."""
return [1, stride, stride, 1]
def _build_model(self):
    """Build the core model within the graph."""
    assert self.mode == 'train' or self.mode == 'eval'
with tf.variable_scope('input'):
self.x_input = tf.placeholder(
tf.float32,
shape=[None, self.input_size, self.input_size, 3])
self.y_input = tf.placeholder(tf.int64, shape=None)
if self.pert:
self.pert = tf.get_variable(name='instance_perturbation', initializer=tf.zeros_initializer,
shape=[self.train_batch_size, self.input_size, self.input_size, 3], dtype=tf.float32,
trainable=True)
self.final_input = self.x_input + self.pert
self.final_input = tf.clip_by_value(self.final_input, 0., 255.)
else:
self.final_input = self.x_input
if self.normalize_zero_mean:
final_input_mean = tf.reduce_mean(self.final_input, axis=[1,2,3])
for i in range(3):
final_input_mean = tf.expand_dims(final_input_mean, axis=-1)
final_input_mean = tf.tile(final_input_mean, [1,self.input_size,self.input_size,3])
zero_mean_final_input = self.final_input - final_input_mean
self.input_standardized = tf.math.l2_normalize(zero_mean_final_input, axis=[1,2,3])
else:
self.input_standardized = tf.math.l2_normalize(self.final_input, axis=[1,2,3])
x = self._conv('init_conv', self.input_standardized, 3, 3, 16, self._stride_arr(1))
self.activations.append(x)
strides = [1, 2, 2]
activate_before_residual = [True, False, False]
res_func = self._residual
    # Filter widths for a w28-10 wide residual network
    # (https://arxiv.org/pdf/1605.07146v1.pdf), which is more memory
    # efficient than a very deep residual network with comparably good
    # performance.
    # filters = [16, 16, 32, 64]  # narrow configuration, useful for debugging
    filters = [16, 160, 320, 640]
with tf.variable_scope('unit_1_0'):
x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]),
activate_before_residual[0])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_1_%d' % i):
x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_2_0'):
x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]),
activate_before_residual[1])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_2_%d' % i):
x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_3_0'):
x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]),
activate_before_residual[2])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_3_%d' % i):
x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_last'):
x = self._batch_norm('final_bn', x)
x = self._relu(x, 0.1)
x = self._global_avg_pool(x)
self.neck = x
with tf.variable_scope('logit'):
self.pre_softmax = self._fully_connected(x, self.num_classes)
self.activations.append(self.pre_softmax)
self.softmax = tf.nn.softmax(self.pre_softmax)
sample_indices = tf.range(self.train_batch_size, dtype=tf.int64)
sample_indices = tf.expand_dims(sample_indices, axis=-1)
target_indices = tf.expand_dims(self.y_input, axis=-1)
self.gather_indices = tf.concat([sample_indices, target_indices], axis=-1)
self.target_softmax = tf.gather_nd(self.softmax, self.gather_indices, name="targetsoftmax")
      # the target logit is independent of the other class logits, whereas
      # the target softmax value depends on all of them through the
      # softmax normalization
self.target_logit = tf.gather_nd(self.pre_softmax, self.gather_indices, name="targetlogit")
self.predictions = tf.argmax(self.pre_softmax, 1)
self.y_pred = self.predictions
self.correct_prediction = tf.equal(self.predictions, self.y_input)
self.num_correct = tf.reduce_sum(tf.cast(self.correct_prediction, tf.int64))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
with tf.variable_scope('costs'):
self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.pre_softmax, labels=self.y_input)
self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
self.mean_xent = tf.reduce_mean(self.y_xent)
self.weight_decay_loss = self._decay()
# for top-2 logit diff loss
self.label_mask = tf.one_hot(self.y_input,
self.num_classes,
on_value=1.0,
off_value=0.0,
dtype=tf.float32)
self.correct_logit = tf.reduce_sum(self.label_mask * self.pre_softmax, axis=1)
self.wrong_logit = tf.reduce_max((1-self.label_mask) * self.pre_softmax - 1e4*self.label_mask, axis=1)
self.top2_logit_diff_loss = -tf.nn.relu(self.correct_logit - self.wrong_logit + 50)
def _batch_norm(self, name, x):
"""Batch normalization."""
with tf.name_scope(name):
return tf.contrib.layers.batch_norm(inputs=x, decay=.9, center=True, scale=True, activation_fn=None,
updates_collections=None, is_training=(self.mode == 'train'))
def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
"""Residual unit with 2 sub layers."""
if activate_before_residual:
with tf.variable_scope('shared_activation'):
x = self._batch_norm('init_bn', x)
x = self._relu(x, 0.1)
orig_x = x
else:
with tf.variable_scope('residual_only_activation'):
orig_x = x
x = self._batch_norm('init_bn', x)
x = self._relu(x, 0.1)
with tf.variable_scope('sub1'):
x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
with tf.variable_scope('sub2'):
x = self._batch_norm('bn2', x)
x = self._relu(x, 0.1)
x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
with tf.variable_scope('sub_add'):
if in_filter != out_filter:
orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
orig_x = tf.pad(
orig_x, [[0, 0], [0, 0], [0, 0],
[(out_filter - in_filter) // 2, (out_filter - in_filter) // 2]])
x += orig_x
tf.logging.debug('image after unit %s', x.get_shape())
return x
def _decay(self):
"""L2 weight decay loss."""
costs = []
for var in tf.trainable_variables():
if var.op.name.find('DW') > 0:
costs.append(tf.nn.l2_loss(var))
return tf.add_n(costs)
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
"""Convolution."""
with tf.variable_scope(name):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
'DW', [filter_size, filter_size, in_filters, out_filters],
tf.float32, initializer=tf.random_normal_initializer(
stddev=np.sqrt(2.0 / n)))
return tf.nn.conv2d(x, kernel, strides, padding='SAME')
def _relu(self, x, leakiness=0.0):
"""Relu, with optional leaky support."""
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _fully_connected(self, x, out_dim):
"""FullyConnected layer for final output."""
num_non_batch_dimensions = len(x.shape)
prod_non_batch_dimensions = 1
for ii in range(num_non_batch_dimensions - 1):
prod_non_batch_dimensions *= int(x.shape[ii + 1])
x = tf.reshape(x, [tf.shape(x)[0], -1])
w = tf.get_variable(
'DW', [prod_non_batch_dimensions, out_dim],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable('biases', [out_dim],
initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
def _global_avg_pool(self, x):
assert x.get_shape().ndims == 4
return tf.reduce_mean(x, [1, 2])
class ModelTinyImagenetSourceExtendedLogits(object):
"""ResNet model."""
def __init__(self, mode, dataset, train_batch_size=None, normalize_zero_mean=False, use_pert=False, target_task_class_num=10, input_tensor=None):
"""ResNet constructor.
Args:
mode: One of 'train' and 'eval'.
"""
self.neck = None
self.y_pred = None
self.mode = mode
    self.pert = bool(mode == 'train' and use_pert)
if dataset == 'tinyimagenet':
self.num_classes = 200
self.input_size = 64
elif dataset == 'cifar100':
self.num_classes = 100
self.input_size = 32
else:
self.num_classes = 10
self.input_size = 32
self.train_batch_size = train_batch_size
self.activations = []
self.normalize_zero_mean = normalize_zero_mean
self.input_tensor = input_tensor
self.target_task_class_num = target_task_class_num
self._build_model()
def add_internal_summaries(self):
pass
def _stride_arr(self, stride):
"""Map a stride scalar to the stride array for tf.nn.conv2d."""
return [1, stride, stride, 1]
def _build_model(self):
    """Build the core model within the graph."""
    assert self.mode == 'train' or self.mode == 'eval'
with tf.variable_scope('input'):
      if self.input_tensor is None:
self.x_input = tf.placeholder(
tf.float32,
shape=[None, self.input_size, self.input_size, 3])
else:
self.x_input = self.input_tensor
self.y_input = tf.placeholder(tf.int64, shape=None)
if self.pert:
self.pert = tf.get_variable(name='instance_perturbation', initializer=tf.zeros_initializer,
shape=[self.train_batch_size, self.input_size, self.input_size, 3], dtype=tf.float32,
trainable=True)
self.final_input = self.x_input + self.pert
self.final_input = tf.clip_by_value(self.final_input, 0., 255.)
else:
self.final_input = self.x_input
if self.normalize_zero_mean:
final_input_mean = tf.reduce_mean(self.final_input, axis=[1,2,3])
for i in range(3):
final_input_mean = tf.expand_dims(final_input_mean, axis=-1)
final_input_mean = tf.tile(final_input_mean, [1,self.input_size,self.input_size,3])
zero_mean_final_input = self.final_input - final_input_mean
self.input_standardized = tf.math.l2_normalize(zero_mean_final_input, axis=[1,2,3])
else:
self.input_standardized = tf.math.l2_normalize(self.final_input, axis=[1,2,3])
x = self._conv('init_conv', self.input_standardized, 3, 3, 16, self._stride_arr(1))
self.activations.append(x)
strides = [1, 2, 2]
activate_before_residual = [True, False, False]
res_func = self._residual
    # Filter widths for a w28-10 wide residual network
    # (https://arxiv.org/pdf/1605.07146v1.pdf), which is more memory
    # efficient than a very deep residual network with comparably good
    # performance.
    # filters = [16, 16, 32, 64]  # narrow configuration, useful for debugging
    filters = [16, 160, 320, 640]
with tf.variable_scope('unit_1_0'):
x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]),
activate_before_residual[0])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_1_%d' % i):
x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_2_0'):
x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]),
activate_before_residual[1])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_2_%d' % i):
x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_3_0'):
x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]),
activate_before_residual[2])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_3_%d' % i):
x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_last'):
x = self._batch_norm('final_bn', x)
x = self._relu(x, 0.1)
x = self._global_avg_pool(x)
self.neck = x
with tf.variable_scope('logit'):
self.pre_softmax = self._fully_connected(x, self.num_classes)
self.activations.append(self.pre_softmax)
self.softmax = tf.nn.softmax(self.pre_softmax)
sample_indices = tf.range(self.train_batch_size, dtype=tf.int64)
sample_indices = tf.expand_dims(sample_indices, axis=-1)
target_indices = tf.expand_dims(self.y_input, axis=-1)
self.gather_indices = tf.concat([sample_indices, target_indices], axis=-1)
self.target_softmax = tf.gather_nd(self.softmax, self.gather_indices, name="targetsoftmax")
      # the target logit is independent of the other class logits, whereas
      # the target softmax value depends on all of them through the
      # softmax normalization
self.target_logit = tf.gather_nd(self.pre_softmax, self.gather_indices, name="targetlogit")
self.predictions = tf.argmax(self.pre_softmax, 1)
self.y_pred = self.predictions
self.correct_prediction = tf.equal(self.predictions, self.y_input)
self.num_correct = tf.reduce_sum(tf.cast(self.correct_prediction, tf.int64))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
with tf.variable_scope('costs'):
self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.pre_softmax, labels=self.y_input)
self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
self.mean_xent = tf.reduce_mean(self.y_xent)
self.weight_decay_loss = self._decay()
# for top-2 logit diff loss
self.label_mask = tf.one_hot(self.y_input,
self.num_classes,
on_value=1.0,
off_value=0.0,
dtype=tf.float32)
self.correct_logit = tf.reduce_sum(self.label_mask * self.pre_softmax, axis=1)
self.wrong_logit = tf.reduce_max((1-self.label_mask) * self.pre_softmax - 1e4*self.label_mask, axis=1)
self.top2_logit_diff_loss = -tf.nn.relu(self.correct_logit - self.wrong_logit + 50)
with tf.variable_scope('target_task_logit'):
self.target_task_pre_softmax = self._fully_connected(x, self.target_task_class_num)
self.target_task_softmax = tf.nn.softmax(self.target_task_pre_softmax)
sample_indices = tf.range(self.train_batch_size, dtype=tf.int64)
sample_indices = tf.expand_dims(sample_indices, axis=-1)
target_indices = tf.expand_dims(self.y_input, axis=-1)
self.gather_indices = tf.concat([sample_indices, target_indices], axis=-1)
self.target_softmax = tf.gather_nd(self.target_task_softmax, self.gather_indices, name="targetsoftmax")
self.target_task_predictions = tf.argmax(self.target_task_pre_softmax, 1)
self.target_task_correct_prediction = tf.equal(self.target_task_predictions, self.y_input)
self.target_task_num_correct = tf.reduce_sum(
tf.cast(self.target_task_correct_prediction, tf.int64))
self.target_task_accuracy = tf.reduce_mean(
tf.cast(self.target_task_correct_prediction, tf.float32))
with tf.variable_scope('target_task_costs'):
self.target_task_y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.target_task_pre_softmax, labels=self.y_input)
self.target_task_xent = tf.reduce_sum(self.target_task_y_xent, name='target_task_y_xent')
self.target_task_mean_xent = tf.reduce_mean(self.target_task_y_xent)
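    # The target_task_logit scope adds a second classification head on the
    # shared backbone; the run scripts restore the backbone from the source
    # checkpoint and this head from the finetuned checkpoint using separate
    # tf.train.Saver variable lists.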
def _batch_norm(self, name, x):
"""Batch normalization."""
with tf.name_scope(name):
return tf.contrib.layers.batch_norm(inputs=x, decay=.9, center=True, scale=True, activation_fn=None,
updates_collections=None, is_training=(self.mode == 'train'))
def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
"""Residual unit with 2 sub layers."""
if activate_before_residual:
with tf.variable_scope('shared_activation'):
x = self._batch_norm('init_bn', x)
x = self._relu(x, 0.1)
orig_x = x
else:
with tf.variable_scope('residual_only_activation'):
orig_x = x
x = self._batch_norm('init_bn', x)
x = self._relu(x, 0.1)
with tf.variable_scope('sub1'):
x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
with tf.variable_scope('sub2'):
x = self._batch_norm('bn2', x)
x = self._relu(x, 0.1)
x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
with tf.variable_scope('sub_add'):
if in_filter != out_filter:
orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
orig_x = tf.pad(
orig_x, [[0, 0], [0, 0], [0, 0],
[(out_filter - in_filter) // 2, (out_filter - in_filter) // 2]])
x += orig_x
tf.logging.debug('image after unit %s', x.get_shape())
return x
def _decay(self):
"""L2 weight decay loss."""
costs = []
for var in tf.trainable_variables():
if var.op.name.find('DW') > 0:
costs.append(tf.nn.l2_loss(var))
return tf.add_n(costs)
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
"""Convolution."""
with tf.variable_scope(name):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
'DW', [filter_size, filter_size, in_filters, out_filters],
tf.float32, initializer=tf.random_normal_initializer(
stddev=np.sqrt(2.0 / n)))
return tf.nn.conv2d(x, kernel, strides, padding='SAME')
def _relu(self, x, leakiness=0.0):
"""Relu, with optional leaky support."""
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _fully_connected(self, x, out_dim):
"""FullyConnected layer for final output."""
num_non_batch_dimensions = len(x.shape)
prod_non_batch_dimensions = 1
for ii in range(num_non_batch_dimensions - 1):
prod_non_batch_dimensions *= int(x.shape[ii + 1])
x = tf.reshape(x, [tf.shape(x)[0], -1])
w = tf.get_variable(
'DW', [prod_non_batch_dimensions, out_dim],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable('biases', [out_dim],
initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
def _global_avg_pool(self, x):
assert x.get_shape().ndims == 4
return tf.reduce_mean(x, [1, 2])
class ModelExtendedLogitsC2I(object):
"""ResNet model."""
def __init__(self, mode, target_task_class_num=200, train_batch_size=None, input_tensor=None, source_task="cifar10"):
"""ResNet constructor.
Args:
mode: One of 'train' and 'eval'.
"""
self.mode = mode
self.activations = []
self.target_task_class_num = target_task_class_num
self.train_batch_size = train_batch_size
self.input_tensor = input_tensor
self.source_task = source_task
self._build_model()
def add_internal_summaries(self):
pass
def _stride_arr(self, stride):
"""Map a stride scalar to the stride array for tf.nn.conv2d."""
return [1, stride, stride, 1]
def _build_model(self):
    """Build the core model within the graph."""
    assert self.mode == 'train' or self.mode == 'eval'
with tf.variable_scope('input'):
      if self.input_tensor is None:
self.x_input = tf.placeholder(
tf.float32,
shape=[None, 32, 32, 3])
else:
self.x_input = self.input_tensor
self.y_input = tf.placeholder(tf.int64, shape=None)
input_standardized = tf.map_fn(lambda img: tf.image.per_image_standardization(img),
self.x_input)
x = self._conv('init_conv', input_standardized, 3, 3, 16, self._stride_arr(1))
self.activations.append(x)
strides = [1, 2, 2]
activate_before_residual = [True, False, False]
res_func = self._residual
    # Filter widths for a w28-10 wide residual network
    # (https://arxiv.org/pdf/1605.07146v1.pdf), which is more memory
    # efficient than a very deep residual network with comparably good
    # performance.
    filters = [16, 160, 320, 640]
with tf.variable_scope('unit_1_0'):
x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]),
activate_before_residual[0])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_1_%d' % i):
x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_2_0'):
x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]),
activate_before_residual[1])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_2_%d' % i):
x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_3_0'):
x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]),
activate_before_residual[2])
self.activations.append(x)
for i in range(1, 5):
with tf.variable_scope('unit_3_%d' % i):
x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
self.activations.append(x)
with tf.variable_scope('unit_last'):
x = self._batch_norm('final_bn', x)
x = self._relu(x, 0.1)
x = self._global_avg_pool(x)
with tf.variable_scope('logit'):
      if self.source_task == "cifar10":
        self.pre_softmax = self._fully_connected(x, 10)
      elif self.source_task == "cifar100":
        self.pre_softmax = self._fully_connected(x, 100)
      else:
        raise ValueError('unsupported source_task: {}'.format(self.source_task))
self.activations.append(self.pre_softmax)
self.predictions = tf.argmax(self.pre_softmax, 1)
self.correct_prediction = tf.equal(self.predictions, self.y_input)
self.num_correct = tf.reduce_sum(
tf.cast(self.correct_prediction, tf.int64))
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_prediction, tf.float32))
with tf.variable_scope('costs'):
self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.pre_softmax, labels=self.y_input)
self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
self.mean_xent = tf.reduce_mean(self.y_xent)
self.weight_decay_loss = self._decay()
with tf.variable_scope('target_task_logit'):
self.target_task_pre_softmax = self._fully_connected(x, self.target_task_class_num)
self.target_task_softmax = tf.nn.softmax(self.target_task_pre_softmax)
sample_indices = tf.range(self.train_batch_size, dtype=tf.int64)
sample_indices = tf.expand_dims(sample_indices, axis=-1)
target_indices = tf.expand_dims(self.y_input, axis=-1)
self.gather_indices = tf.concat([sample_indices, target_indices], axis=-1)
self.target_softmax = tf.gather_nd(self.target_task_softmax, self.gather_indices, name="targetsoftmax")
# self.target_task_pre_softmax = self._named_fully_connected('target_task_logit', x, self.target_task_class_num)
self.target_task_predictions = tf.argmax(self.target_task_pre_softmax, 1)
self.target_task_correct_prediction = tf.equal(self.target_task_predictions, self.y_input)
self.target_task_num_correct = tf.reduce_sum(
tf.cast(self.target_task_correct_prediction, tf.int64))
self.target_task_accuracy = tf.reduce_mean(
tf.cast(self.target_task_correct_prediction, tf.float32))
with tf.variable_scope('target_task_costs'):
self.target_task_y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.target_task_pre_softmax, labels=self.y_input)
self.target_task_xent = tf.reduce_sum(self.target_task_y_xent, name='target_task_y_xent')
self.target_task_mean_xent = tf.reduce_mean(self.target_task_y_xent)
# self.weight_decay_loss = self._decay()
def _batch_norm(self, name, x):
"""Batch normalization."""
with tf.name_scope(name):
return tf.contrib.layers.batch_norm(
inputs=x,
decay=.9,
center=True,
scale=True,
activation_fn=None,
updates_collections=None,
is_training=(self.mode == 'train'))
def _residual(self, x, in_filter, out_filter, stride,
activate_before_residual=False):
"""Residual unit with 2 sub layers."""
if activate_before_residual:
with tf.variable_scope('shared_activation'):
x = self._batch_norm('init_bn', x)
x = self._relu(x, 0.1)
orig_x = x
else:
with tf.variable_scope('residual_only_activation'):
orig_x = x
x = self._batch_norm('init_bn', x)
x = self._relu(x, 0.1)
with tf.variable_scope('sub1'):
x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
with tf.variable_scope('sub2'):
x = self._batch_norm('bn2', x)
x = self._relu(x, 0.1)
x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
with tf.variable_scope('sub_add'):
if in_filter != out_filter:
orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
orig_x = tf.pad(
orig_x, [[0, 0], [0, 0], [0, 0],
[(out_filter-in_filter)//2, (out_filter-in_filter)//2]])
x += orig_x
tf.logging.debug('image after unit %s', x.get_shape())
return x
def _decay(self):
"""L2 weight decay loss."""
costs = []
for var in tf.trainable_variables():
if var.op.name.find('DW') > 0:
costs.append(tf.nn.l2_loss(var))
return tf.add_n(costs)
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
"""Convolution."""
with tf.variable_scope(name):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
'DW', [filter_size, filter_size, in_filters, out_filters],
tf.float32, initializer=tf.random_normal_initializer(
stddev=np.sqrt(2.0/n)))
return tf.nn.conv2d(x, kernel, strides, padding='SAME')
def _relu(self, x, leakiness=0.0):
"""Relu, with optional leaky support."""
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _fully_connected(self, x, out_dim):
"""FullyConnected layer for final output."""
num_non_batch_dimensions = len(x.shape)
prod_non_batch_dimensions = 1
for ii in range(num_non_batch_dimensions - 1):
prod_non_batch_dimensions *= int(x.shape[ii + 1])
x = tf.reshape(x, [tf.shape(x)[0], -1])
w = tf.get_variable(
'DW', [prod_non_batch_dimensions, out_dim],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable('biases', [out_dim],
initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
def _named_fully_connected(self, name, x, out_dim):
"""FullyConnected layer for final output."""
with tf.variable_scope(name):
num_non_batch_dimensions = len(x.shape)
prod_non_batch_dimensions = 1
for ii in range(num_non_batch_dimensions - 1):
prod_non_batch_dimensions *= int(x.shape[ii + 1])
x = tf.reshape(x, [tf.shape(x)[0], -1])
w = tf.get_variable(
'DW', [prod_non_batch_dimensions, out_dim],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable('biases', [out_dim],
initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
def _global_avg_pool(self, x):
assert x.get_shape().ndims == 4
return tf.reduce_mean(x, [1, 2])
class IgamConvDiscriminatorModel(object):
"""Simple conv discriminator model."""
# based on https://github.com/tensorflow/models/blob/d361076952b73706c5c7ddf9c940bf42c27a3213/research/slim/nets/dcgan.py#L41
def __init__(self, mode, dataset, train_batch_size=None, num_conv_layers=5, base_num_channels=16, x_modelgrad_input_tensor=None, y_modelgrad_input_tensor=None, x_source_modelgrad_input_tensor=None,
y_source_modelgrad_input_tensor=None, normalize_zero_mean=False, only_fully_connected=False, num_fc_layers=3, image_size=32, cropped_input_size=None, crop_pad_x_tensor=None, crop_pad_y_tensor=None, avg_pool_hw=False):
"""conv disc constructor.
Args:
mode: One of 'train' and 'eval'.
"""
self.neck = None
self.y_pred = None
self.mode = mode
self.pert = False
self.num_classes = 2 # grad from model or rand init grad
self.train_batch_size = train_batch_size
self.num_conv_layers = num_conv_layers
self.num_fc_layers = num_fc_layers
self.base_num_channels = base_num_channels
self.x_modelgrad_input_tensor = x_modelgrad_input_tensor
self.y_modelgrad_input_tensor = y_modelgrad_input_tensor
self.x_source_modelgrad_input_tensor = x_source_modelgrad_input_tensor
self.y_source_modelgrad_input_tensor = y_source_modelgrad_input_tensor
self.normalize_zero_mean = normalize_zero_mean
self.only_fully_connected = only_fully_connected
self.image_size = image_size
self.cropped_input_size = cropped_input_size
self.crop_pad_x_tensor = crop_pad_x_tensor
self.crop_pad_y_tensor = crop_pad_y_tensor
self.avg_pool_hw = avg_pool_hw
self._build_model()
def add_internal_summaries(self):
pass
def _stride_arr(self, stride):
"""Map a stride scalar to the stride array for tf.nn.conv2d."""
return [1, stride, stride, 1]
def _build_model(self):
    """Build the core model within the graph."""
    assert self.mode == 'train' or self.mode == 'eval'
with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE):
with tf.variable_scope('input'):
        if self.x_modelgrad_input_tensor is None:
# for assign to work
self.x_modelgrad_input = tf.get_variable(name='x_modelgrad_input', initializer=tf.zeros_initializer,
shape=[self.train_batch_size, self.image_size, self.image_size, 3], dtype=tf.float32)
self.x_source_modelgrad_input = tf.placeholder(
tf.float32,
shape=[None, self.image_size, self.image_size, 3])
else:
self.x_modelgrad_input = self.x_modelgrad_input_tensor
self.x_source_modelgrad_input = self.x_source_modelgrad_input_tensor
        if self.cropped_input_size is not None:
          if self.crop_pad_x_tensor is None:
crop_pad = (self.image_size - self.cropped_input_size) // 2
cropped_x_modelgrad_input = tf.slice(self.x_modelgrad_input, [0, crop_pad, crop_pad, 0], [-1, self.cropped_input_size, self.cropped_input_size, -1])
cropped_x_source_modelgrad_input = tf.slice(self.x_source_modelgrad_input, [0, crop_pad, crop_pad, 0], [-1, self.cropped_input_size, self.cropped_input_size, -1])
self.x_input = tf.concat([cropped_x_modelgrad_input, cropped_x_source_modelgrad_input], axis=0)
else:
cropped_x_modelgrad_input = tf.slice(self.x_modelgrad_input, [0, self.crop_pad_x_tensor, self.crop_pad_y_tensor, 0], [-1, self.cropped_input_size, self.cropped_input_size, -1])
cropped_x_source_modelgrad_input = tf.slice(self.x_source_modelgrad_input, [0, self.crop_pad_x_tensor, self.crop_pad_y_tensor, 0], [-1, self.cropped_input_size, self.cropped_input_size, -1])
self.x_input = tf.concat([cropped_x_modelgrad_input, cropped_x_source_modelgrad_input], axis=0)
else:
self.x_input = tf.concat([self.x_modelgrad_input, self.x_source_modelgrad_input], axis=0)
self.cropped_input_size = self.image_size
        if self.y_modelgrad_input_tensor is None:
# for assign to work
self.y_modelgrad_input = tf.get_variable(name='y_modelgrad_input', initializer=tf.zeros_initializer,
shape=self.train_batch_size, dtype=tf.int64)
self.y_source_modelgrad_input = tf.placeholder(tf.int64, shape=None)
else:
self.y_modelgrad_input = self.y_modelgrad_input_tensor
self.y_source_modelgrad_input = self.y_source_modelgrad_input_tensor
self.y_input = tf.concat([self.y_modelgrad_input, self.y_source_modelgrad_input], axis=0)
if self.pert:
self.pert = tf.get_variable(name='instance_perturbation', initializer=tf.zeros_initializer,
shape=[self.train_batch_size, self.cropped_input_size, self.cropped_input_size, 3], dtype=tf.float32,
trainable=True)
self.final_input = self.x_input + self.pert
self.final_input = tf.clip_by_value(self.final_input, 0., 255.)
else:
self.final_input = self.x_input
if self.normalize_zero_mean:
final_input_mean = tf.reduce_mean(self.final_input, axis=[1,2,3])
for i in range(3):
final_input_mean = tf.expand_dims(final_input_mean, axis=-1)
final_input_mean = tf.tile(final_input_mean, [1, self.cropped_input_size, self.cropped_input_size,3])
zero_mean_final_input = self.final_input - final_input_mean
self.input_standardized = tf.math.l2_normalize(zero_mean_final_input, axis=[1,2,3])
else:
self.input_standardized = tf.math.l2_normalize(self.final_input, axis=[1,2,3])
x = self.input_standardized
base_num_channels = self.base_num_channels
      if not self.only_fully_connected:
for i in range(self.num_conv_layers):
output_num_channels = base_num_channels * 2**i
if i == 0:
x = self._conv('conv{}'.format(i), x, 4, 3, output_num_channels, self._stride_arr(2), bias=True)
x = self._batch_norm('bn{}'.format(i), x)
x = self._relu(x, 0.1)
else:
x = self._conv('conv{}'.format(i), x, 4, output_num_channels // 2, output_num_channels, self._stride_arr(2), bias=True)
x = self._batch_norm('bn{}'.format(i), x)
x = self._relu(x, 0.1)
else:
for i in range(self.num_fc_layers):
if i == self.num_fc_layers -1:
x = self._fully_connected(x, base_num_channels//2, name='fc{}'.format(i))
else:
x = self._fully_connected(x, base_num_channels, name='fc{}'.format(i))
x = self._batch_norm('bn{}'.format(i), x)
x = self._relu(x, 0.1)
with tf.variable_scope('logit'):
if self.avg_pool_hw:
x = self._global_avg_pool(x)
self.pre_softmax = self._fully_connected(x, self.num_classes)
self.predictions = tf.argmax(self.pre_softmax, 1)
self.y_pred = self.predictions
self.correct_prediction = tf.equal(self.predictions, self.y_input)
self.num_correct = tf.reduce_sum(tf.cast(self.correct_prediction, tf.int64))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
with tf.variable_scope('costs'):
self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.pre_softmax, labels=self.y_input)
self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
self.mean_xent = tf.reduce_mean(self.y_xent)
self.weight_decay_loss = self._decay()
def _batch_norm(self, name, x):
"""Batch normalization."""
with tf.name_scope(name):
return tf.contrib.layers.batch_norm(inputs=x, decay=.9, center=True, scale=True, activation_fn=None,
updates_collections=None, is_training=(self.mode == 'train'))
def _decay(self):
"""L2 weight decay loss."""
costs = []
for var in tf.trainable_variables():
if var.op.name.find('DW') > 0:
costs.append(tf.nn.l2_loss(var))
return tf.add_n(costs)
def _conv(self, name, x, filter_size, in_filters, out_filters, strides, bias=False, padding='SAME'):
"""Convolution."""
with tf.variable_scope(name):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
'DW', [filter_size, filter_size, in_filters, out_filters],
tf.float32, initializer=tf.random_normal_initializer(
stddev=np.sqrt(2.0 / n)))
      if bias:
b = tf.get_variable('biases', [out_filters],
initializer=tf.constant_initializer())
conv_out = tf.nn.conv2d(x, kernel, strides, padding=padding)
conv_out_b = tf.nn.bias_add(conv_out, b)
return conv_out_b
else:
return tf.nn.conv2d(x, kernel, strides, padding=padding)
def _relu(self, x, leakiness=0.0):
"""Relu, with optional leaky support."""
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _fully_connected(self, x, out_dim, name=None):
"""FullyConnected layer for final output."""
    if name is None:
num_non_batch_dimensions = len(x.shape)
prod_non_batch_dimensions = 1
for ii in range(num_non_batch_dimensions - 1):
prod_non_batch_dimensions *= int(x.shape[ii + 1])
x = tf.reshape(x, [tf.shape(x)[0], -1])
w = tf.get_variable(
'DW', [prod_non_batch_dimensions, out_dim],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable('biases', [out_dim],
initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
else:
with tf.variable_scope(name):
num_non_batch_dimensions = len(x.shape)
prod_non_batch_dimensions = 1
for ii in range(num_non_batch_dimensions - 1):
prod_non_batch_dimensions *= int(x.shape[ii + 1])
x = tf.reshape(x, [tf.shape(x)[0], -1])
w = tf.get_variable(
'DW', [prod_non_batch_dimensions, out_dim],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable('biases', [out_dim],
initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
def _global_avg_pool(self, x):
assert x.get_shape().ndims == 4
return tf.reduce_mean(x, [1, 2])
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import torch
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import numpy as np
import cifar10_input
import config_attack
import sys
import math
from tqdm import tqdm
import tqdm_utils
from PGD_attack import LinfPGDAttack
from active_tests.decision_boundary_binarization import interior_boundary_discrimination_attack
if __name__ == '__main__':
config = vars(config_attack.get_args())
tf.set_random_seed(config['tf_seed'])
np.random.seed(config['np_seed'])
model_file = tf.train.latest_checkpoint(config['model_dir'])
# print("config['model_dir']: ", config['model_dir'])
if model_file is None:
print('No model found')
sys.exit()
if 'GTP' in config['model_dir']:
from model_new import Model, ModelTinyImagnet
if config['dataset'] == 'cifar10' or config['dataset'] == 'cifar100':
# TODO: verify this with the authors
# ATTENTION: mode was "train" before
model = Model(mode=config["inference_mode"], dataset=config['dataset'],
train_batch_size=config['eval_batch_size'],
normalize_zero_mean=True)
else:
model = ModelTinyImagnet(mode='train', dataset=config['dataset'],
train_batch_size=config['eval_batch_size'],
normalize_zero_mean=True)
elif 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config[
'model_dir']:
print("finetuned tinyimagenet MODEL")
from model_new import ModelTinyImagenetSourceExtendedLogits
full_source_model_x_input = tf.placeholder(tf.float32,
shape=[None, 32, 32, 3])
upresized_full_source_model_x_input = tf.image.resize_images(
full_source_model_x_input, size=[64, 64])
if config['dataset'] == 'cifar10':
model = ModelTinyImagenetSourceExtendedLogits(mode='train',
dataset='tinyimagenet',
target_task_class_num=10,
train_batch_size=config[
'eval_batch_size'],
input_tensor=upresized_full_source_model_x_input)
elif config['dataset'] == 'cifar100':
model = ModelTinyImagenetSourceExtendedLogits(mode='train',
dataset='tinyimagenet',
target_task_class_num=100,
train_batch_size=config[
'eval_batch_size'],
input_tensor=upresized_full_source_model_x_input)
model.x_input = full_source_model_x_input
t_vars = tf.trainable_variables()
source_model_vars = [var for var in t_vars if (
'discriminator' not in var.name and 'classifier' not in var.name and 'target_task_logit' not in var.name)]
source_model_target_logit_vars = [var for var in t_vars if
'target_task_logit' in var.name]
source_model_saver = tf.train.Saver(var_list=source_model_vars)
finetuned_source_model_vars = source_model_vars + source_model_target_logit_vars
finetuned_source_model_saver = tf.train.Saver(
var_list=finetuned_source_model_vars)
elif 'finetuned_on_cifar100' in config['model_dir']:
raise NotImplementedError
print("finetuned MODEL")
from model_original_cifar_challenge import ModelExtendedLogits
model = ModelExtendedLogits(mode='train', target_task_class_num=100,
train_batch_size=config['eval_batch_size'])
t_vars = tf.trainable_variables()
source_model_vars = [var for var in t_vars if (
'discriminator' not in var.name and 'classifier' not in var.name and 'target_task_logit' not in var.name)]
source_model_target_logit_vars = [var for var in t_vars if
'target_task_logit' in var.name]
source_model_saver = tf.train.Saver(var_list=source_model_vars)
finetuned_source_model_vars = source_model_vars + source_model_target_logit_vars
finetuned_source_model_saver = tf.train.Saver(
var_list=finetuned_source_model_vars)
elif ('adv_trained' in config['model_dir'] or 'naturally_trained' in config[
'model_dir'] or 'a_very_robust_model' in config['model_dir']):
raise NotImplementedError
print("original challenge MODEL")
from free_model_original import Model
model = Model(mode='eval', dataset=config['dataset'],
train_batch_size=config['eval_batch_size'])
elif 'IGAM' in config['model_dir']:
print("IGAM MODEL")
from model_new import Model
model = Model(mode='train', dataset=config['dataset'],
train_batch_size=config['eval_batch_size'],
normalize_zero_mean=True)
else:
raise NotImplementedError
print("other MODEL")
from free_model import Model
model = Model(mode='eval', dataset=config['dataset'],
train_batch_size=config['eval_batch_size'])
saver = tf.train.Saver()
data_path = config['data_path']
# print(data_path)
# x = input()
if config['dataset'] == 'cifar10':
# print("load cifar10 dataset")
cifar = cifar10_input.CIFAR10Data(data_path)
elif config['dataset'] == 'cifar100':
raise NotImplementedError
print("load cifar100 dataset")
cifar = cifar100_input.CIFAR100Data(data_path)
else:
raise NotImplementedError
print("load tinyimagenet dataset")
cifar = tinyimagenet_input.TinyImagenetData()
with tf.Session() as sess:
# Restore the checkpoint
if 'adv_trained_tinyimagenet_finetuned_on_c10_upresize' in config[
'model_dir']:
sess.run(tf.global_variables_initializer())
source_model_file = tf.train.latest_checkpoint(
"models/model_AdvTrain-igamsource-IGAM-tinyimagenet_b16")
source_model_saver.restore(sess, source_model_file)
finetuned_source_model_file = tf.train.latest_checkpoint(
config['model_dir'])
finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
elif 'finetuned_on_cifar100' in config['model_dir']:
sess.run(tf.global_variables_initializer())
source_model_file = tf.train.latest_checkpoint("models/adv_trained")
source_model_saver.restore(sess, source_model_file)
finetuned_source_model_file = tf.train.latest_checkpoint(
config['model_dir'])
finetuned_source_model_saver.restore(sess, finetuned_source_model_file)
else:
saver.restore(sess, model_file)
# Iterate over the samples batch-by-batch
num_eval_examples = config['num_eval_examples']
eval_batch_size = config['eval_batch_size']
num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
class ModelWrapper:
def __init__(self, model, weight_shape, bias_shape, num_classes=2):
self.weight = tf.placeholder(dtype=tf.float32, shape=weight_shape)
self.bias = tf.placeholder(dtype=tf.float32, shape=bias_shape)
y = model.neck
# TODO: check whether we need a separate placeholder for the binary label
self.y_input = model.y_input
self.x_input = model.x_input
self.logits = y @ tf.transpose(self.weight) + tf.reshape(self.bias, (1, -1))
self.predictions = tf.argmax(self.logits, 1)
self.pre_softmax = self.logits
# define losses
self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.pre_softmax, labels=self.y_input)
self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
# for top-2 logit diff loss
self.label_mask = tf.one_hot(self.y_input,
num_classes,
on_value=1.0,
off_value=0.0,
dtype=tf.float32)
self.correct_logit = tf.reduce_sum(self.label_mask * self.pre_softmax, axis=1)
self.wrong_logit = tf.reduce_max((1-self.label_mask) * self.pre_softmax - 1e4*self.label_mask, axis=1)
# TODO: why the plus 50?
# self.top2_logit_diff_loss = -tf.nn.relu(self.correct_logit - self.wrong_logit + 50)
self.top2_logit_diff_loss = -self.correct_logit + self.wrong_logit
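    # ModelWrapper re-routes the frozen network's penultimate features
    # (model.neck) through an externally supplied binary readout (the weight
    # and bias placeholders), so the PGD attack below targets the binarized
    # two-class classifier produced by the decision-boundary binarization
    # test rather than the original 10-way head.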
wrapped_model = ModelWrapper(model, (2, 640), (2,))
attack = LinfPGDAttack(wrapped_model,
config['epsilon'],
config['num_steps'],
config['step_size'],
config['random_start'],
config['loss_func'],
dataset=config['dataset'])
def run_attack(m, l):
linear_layer = m[-1]
del m
# initialize an attack (it's a white box attack, and it's allowed to look
# at the internals of the model in any way it wants)
# attack = BPDA(sess, model, epsilon=model.threat_model.epsilon, debug=args.debug)
# m = PyTorchToTensorFlow1Wrapper(m, "cpu")
weights_feed_dict = {
wrapped_model.weight: linear_layer.weight.data.numpy(),
wrapped_model.bias: linear_layer.bias.data.numpy()
}
assert len(l) == 1, len(l)
for x, y in l:
x_batch = x.numpy().transpose((0, 2, 3, 1)) * 255.0
y_batch = y.numpy()
if config['attack_norm'] == 'inf':
x_batch_adv = attack.perturb(x_batch, y_batch, sess, weights_feed_dict)
elif config['attack_norm'] == '2':
x_batch_adv = attack.perturb_l2(x_batch, y_batch, sess, weights_feed_dict)
elif config['attack_norm'] == 'TRADES':
x_batch_adv = attack.perturb_TRADES(x_batch, y_batch, sess, weights_feed_dict)
logits, y_pred = sess.run((wrapped_model.logits, wrapped_model.predictions),
feed_dict={model.x_input: x_batch_adv,
**weights_feed_dict})
is_adv = y_pred != y_batch
return is_adv, (torch.Tensor(x_batch_adv) / 255.0, torch.Tensor(logits))
random_indices = list(range(len(cifar.eval_data.xs)))
np.random.shuffle(random_indices)
x_batch = []
y_batch = []
for j in range(config['num_eval_examples']):
x_ = cifar.eval_data.xs[random_indices[j]]
y_ = cifar.eval_data.ys[random_indices[j]]
x_batch.append(x_)
y_batch.append(y_)
x_batch = np.array(x_batch).transpose((0, 3, 1, 2)) / 255.0
y_batch = np.array(y_batch)
from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper, PyTorchToTensorFlow1Wrapper
from utils import build_dataloader_from_arrays
test_loader = build_dataloader_from_arrays(x_batch, y_batch, batch_size=32)
    def _model_forward_pass(x, features_and_logits: bool = False, features_only: bool = False):
      if features_and_logits:
        assert not features_only, "Only one of the flags must be set."
        return sess.run(
            (model.neck, model.pre_softmax),
            feed_dict={model.x_input: x.transpose(0, 2, 3, 1) * 255.0})
      elif features_only:
        return sess.run(
            model.neck,
            feed_dict={model.x_input: x.transpose(0, 2, 3, 1) * 255.0})
      else:
        raise ValueError("Either features_and_logits or features_only must be set.")
feature_extractor = TensorFlow1ToPyTorchWrapper(
logit_forward_pass=_model_forward_pass,
logit_forward_and_backward_pass=lambda x, **kwargs: sess.run(
model.feature_grad,
feed_dict={model.x_input: x.transpose(0, 2, 3, 1) * 255.0}) / 255.0
)
assert config["n_boundary_points"] is not None
assert config["n_inner_points"] is not None
from argparse_utils import DecisionBoundaryBinarizationSettings
with tqdm_utils.tqdm_print():
scores_logit_differences_and_validation_accuracies = \
interior_boundary_discrimination_attack(
feature_extractor,
test_loader,
attack_fn=lambda m, l, kwargs: run_attack(m, l),
linearization_settings=DecisionBoundaryBinarizationSettings(
epsilon=config["epsilon"]/255.0,
norm="linf",
lr=10000,
n_boundary_points=config["n_boundary_points"],
n_inner_points=config["n_inner_points"],
adversarial_attack_settings=None,
optimizer="sklearn"
),
n_samples=config['num_eval_examples'],
device="cpu",
n_samples_evaluation=200,
n_samples_asr_evaluation=200,
rescale_logits="adaptive",
sample_training_data_from_corners=config["sample_from_corners"],
decision_boundary_closeness=0.99999
#args.num_samples_test * 10
)
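    # Each entry of the result list is a tuple: position 0 holds the attack
    # success score, positions 1 and 2 hold the logit difference obtained by
    # the attack and the reference logit difference it is normalized against,
    # and position 3 holds the validation accuracies (inner vs. boundary
    # points). They are unpacked accordingly below.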
scores = [it[0] for it in scores_logit_differences_and_validation_accuracies]
validation_scores = [it[3] for it in scores_logit_differences_and_validation_accuracies]
if validation_scores[0] is None:
validation_scores = (np.nan, np.nan)
else:
validation_scores = np.array(validation_scores)
validation_scores = tuple(np.mean(validation_scores, 0))
logit_differences = [(it[1], it[2]) for it in
scores_logit_differences_and_validation_accuracies]
logit_differences = np.array(logit_differences)
relative_performance = (logit_differences[:, 0] - logit_differences[:,
1]) / logit_differences[:,
1]
test_result = (np.mean(scores), np.mean(relative_performance),
np.std(relative_performance), validation_scores)
print("\tinterior-vs-boundary discrimination (ce loss), ASR: {0}\n”, "
"\t\tNormalized Logit-Difference-Improvement: {1} +- {2}\n"
"\t\tValidation Accuracy (inner, boundary): {3}".format(
*test_result))
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilty functions for discretizing image tensors in various ways.
For the discretization, we may either use uniform buckets or supply our own
custom buckets. One way to compute custom buckets is to use percentile
information from the data distribution. The final discretized representation
can either be a one-hot or a thermometer encoding. A thermometer encoding
is of the form (1, 1, 1,..,1, 0, .., 0) with the transition from 1 to 0
signifying which bucket it belongs to. To reduce the dimension, one may
project back by convolving with a fixed random or trainable matrix.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def flatten_last(x):
"""Flatten the last two dimensions of a tensor into one.
Args:
x: Discretized input tensor of shape [-1, height, width, channels, levels]
to flatten.
Returns:
Flattened version of x, of shape [-1, height, width, channels * levels].
"""
shape = x.get_shape().as_list()
new_shape = shape[:-1]
new_shape[-1] *= shape[-1]
new_shape[0] = tf.shape(x)[0]
flattened_x = tf.reshape(x, new_shape)
return flattened_x
def unflatten_last(x, levels):
"""Unflatten input tensor by separating the last two dimensions.
Args:
x: Discretized input tensor of shape [-1, height, width, channels * levels]
to unflatten.
levels: Number of levels the tensor has been discretized into.
Returns:
Unflattened version of x, of shape [-1, height, width, channels, levels].
"""
shape = x.get_shape().as_list()
  shape[-1] //= levels
shape.append(levels)
shape[0] = tf.shape(x)[0]
unflattened_x = tf.reshape(x, shape)
return unflattened_x
def discretize_uniform(x, levels, thermometer=False):
"""Discretize input into levels using uniformly distributed buckets.
Args:
x: Input tensor to discretize, assumed to be between (0, 1).
levels: Number of levels to discretize into.
thermometer: Whether to encode the discretized tensor in thermometer encoding
(Default: False).
Returns:
Discretized version of x of shape [-1, height, width, channels * levels].
"""
clipped_x = tf.clip_by_value(x, 0., 1.)
int_x = tf.to_int32((.99999 * clipped_x) * levels)
one_hot = tf.one_hot(
int_x, depth=levels, on_value=1., off_value=0., dtype=tf.float32, axis=-1)
# Check to see if we are encoding in thermometer
discretized_x = one_hot
if thermometer:
discretized_x = one_hot_to_thermometer(one_hot, levels, flattened=False)
# Reshape x to [-1, height, width, channels * levels]
discretized_x = flatten_last(discretized_x)
return discretized_x
def get_centroids_by_percentile(x, levels):
"""Get the custom centroids by percentiles of the per-pixel distribution of x.
Args:
x: Input data set of shape [-1, height, width, channels]
whose centroids we wish to compute.
levels: Number of centroids to compute.
Returns:
Custom centroids as a tensor.
"""
def quantile(q):
return tf.contrib.distributions.percentile(x, q=q, axis=None)
start = 0.
end = 100.
quantile_range = tf.lin_space(start, end, levels)
centroids = tf.map_fn(quantile, quantile_range)
return centroids
def discretize_centroids(x, levels, centroids, thermometer=False):
"""Discretize input into levels using custom centroids.
Args:
x: Input tensor to discretize, assumed to be between (0, 1).
levels: Number of levels to discretize into.
centroids: Custom centroids into which the input is to be discretized.
thermometer: Whether to encode the discretized tensor in thermometer encoding
(Default: False).
Returns:
Discretized version of x of shape [-1, height, width, channels * levels]
using supplied centroids.
"""
x_stacked = tf.stack(levels * [x], axis=-1)
dist = tf.to_float(tf.squared_difference(x_stacked, centroids))
idx = tf.argmin(dist, axis=-1)
one_hot = tf.one_hot(idx, depth=levels, on_value=1., off_value=0.)
# Check to see if we are encoding in thermometer
discretized_x = one_hot
if thermometer:
discretized_x = one_hot_to_thermometer(one_hot, levels, flattened=False)
# Reshape x to [-1, height, width, channels * levels]
discretized_x = flatten_last(discretized_x)
return discretized_x
def undiscretize_uniform(x, levels, flattened=False, thermometer=False):
"""Undiscretize a discretized tensor.
Args:
x: Input tensor in discretized form.
levels: Number of levels the input has been discretized into.
flattened: True if x is of the form [-1, height, width, channels * levels]
else it is of shape [-1, height, width, channels, levels].
(Default: False).
thermometer: Determines if we are using one-hot or thermometer encoding
(Default: False).
Returns:
Undiscretized version of x.
"""
# Unflatten if flattened, so that x has shape
# [-1, height, width, channels, levels]
if flattened:
x = unflatten_last(x, levels)
if thermometer:
int_x = tf.reduce_sum(x, -1) - 1
else:
int_x = tf.argmax(x, -1)
out = tf.to_float(int_x) / (levels - 1)
return out
def undiscretize_centroids(x,
levels,
centroids,
flattened=False,
thermometer=False):
"""Undiscretize a tensor that has been discretized using custom centroids.
Args:
x: Input tensor in discretized form.
levels: Number of levels the input has been discretized into.
centroids: The custom centroids used to discretize.
flattened: True if x is of the form [-1, height, width, channels * levels]
else it is of shape [-1, height, width, channels, levels].
(Default: False).
thermometer: Determines if we are using one-hot or thermometer encoding
(Default: False).
Returns:
Undiscretized version of x.
"""
# Unflatten if flattened, so that x has shape
# [-1, height, width, channels, levels]
if flattened:
x = unflatten_last(x, levels)
if thermometer:
x = thermometer_to_one_hot(x, levels, flattened=False)
out = tf.reduce_sum(tf.multiply(x, centroids), axis=-1)
return out
def one_hot_to_thermometer(x, levels, flattened=False):
"""Convert one hot to thermometer code.
Args:
x: Input tensor in one hot encoding to convert to thermometer.
levels: Number of levels the input has been discretized into.
flattened: True if x is of the form [-1, height, width, channels * levels]
else it is of shape [-1, height, width, channels, levels].
(Default: False).
Returns:
Thermometer encoding of x.
"""
# Unflatten if flattened, so that x has shape
# [-1, height, width, channels, levels]
if flattened:
x = unflatten_last(x, levels)
thermometer = tf.cumsum(x, axis=-1, reverse=True)
# Flatten back if original input was flattened
if flattened:
thermometer = flatten_last(thermometer)
return thermometer
def thermometer_to_one_hot(x, levels, flattened=False):
"""Convert thermometer to one hot code.
Args:
x: Input tensor in thermometer encoding to convert to one-hot. Input is
assumed to be
of shape [-1, height, width, channels, levels].
levels: Number of levels the input has been discretized into.
flattened: True if x is of the form [-1, height, width, channels * levels]
else it is of shape [-1, height, width, channels, levels].
(Default: False).
Returns:
One hot encoding of x.
"""
# Unflatten if flattened, so that x has shape
# [-1, height, width, channels, levels]
if flattened:
x = unflatten_last(x, levels)
int_x = tf.to_int32(tf.reduce_sum(x, axis=-1)) - 1
one_hot = tf.one_hot(
int_x, depth=levels, on_value=1., off_value=0., dtype=tf.float32, axis=-1)
# Flatten back if input was flattened
if flattened:
one_hot = flatten_last(one_hot)
return one_hot
def random_convolution(x,
projection_dim,
levels,
flattened=True,
trainable=False):
"""Reduce dimension by random convolutions using a standard Gaussian.
Args:
x: Discretized input tensor in one hot or thermometer encoding to project.
projection_dim: Dimension to project the output tensor to.
levels: Number of levels the input has been discretized into.
flattened: True if x is of the form [-1, height, width, channels * levels]
      else it is of shape [-1, height, width, channels, levels].
      (Default: True).
trainable: If True then the weights for projection are learned (Default:
False).
Returns:
Projection of x using a fixed random convolution.
Raises:
ValueError: If projection dimension is higher than the number of levels.
"""
if projection_dim > levels:
raise ValueError('Projection dimension higher than the number of levels')
# Unflatten first to get number of channels
if flattened:
x = unflatten_last(x, levels)
channels = x.get_shape().as_list()[3]
# Flatten so that x has shape [-1, height, width, channels * levels]
x = flatten_last(x)
scope = 'projection'
if trainable:
scope = 'trainable_projection'
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
kernel = tf.get_variable(
'conv_projection', [1, 1, channels * levels, channels * projection_dim],
trainable=trainable)
x_proj = tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='SAME')
# Unflatten back if input was not flattened
if not flattened:
x_proj = unflatten_last(x_proj, levels)
return x_proj
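# Minimal usage sketch (our addition, not part of the original module): encode
# a single pixel value into 4 uniform levels and print both codes. With
# x = 0.34 and 4 levels the bucket index is floor(0.99999 * 0.34 * 4) = 1,
# i.e. one-hot (0, 1, 0, 0) and thermometer (1, 1, 0, 0).
if __name__ == '__main__':
  import numpy as np
  x = tf.constant(np.full((1, 1, 1, 1), 0.34, dtype=np.float32))
  one_hot = discretize_uniform(x, levels=4, thermometer=False)
  therm = discretize_uniform(x, levels=4, thermometer=True)
  with tf.Session() as sess:
    print('one-hot:     ', sess.run(one_hot)[0, 0, 0])
    print('thermometer: ', sess.run(therm)[0, 0, 0])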
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for importing the CIFAR10 dataset.
Each image in the dataset is a numpy array of shape (32, 32, 3), with the values
being unsigned integers (i.e., in the range 0,1,...,255).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import sys
import tensorflow as tf
version = sys.version_info
import numpy as np
class CIFAR10Data(object):
"""
Unpickles the CIFAR10 dataset from a specified folder containing a pickled
version following the format of Krizhevsky which can be found
[here](https://www.cs.toronto.edu/~kriz/cifar.html).
Inputs to constructor
=====================
- path: path to the pickled dataset. The training data must be pickled
into five files named data_batch_i for i = 1, ..., 5, containing 10,000
examples each, the test data
must be pickled into a single file called test_batch containing 10,000
examples, and the 10 class names must be
pickled into a file called batches.meta. The pickled examples should
be stored as a tuple of two objects: an array of 10,000 32x32x3-shaped
arrays, and an array of their 10,000 true labels.
"""
def __init__(self, path="../cifar10_data"):
train_filenames = ['data_batch_{}'.format(ii + 1) for ii in range(5)]
eval_filename = 'test_batch'
metadata_filename = 'batches.meta'
train_images = np.zeros((50000, 32, 32, 3), dtype='uint8')
train_labels = np.zeros(50000, dtype='int32')
for ii, fname in enumerate(train_filenames):
cur_images, cur_labels = self._load_datafile(os.path.join(path, fname))
train_images[ii * 10000 : (ii+1) * 10000, ...] = cur_images
train_labels[ii * 10000 : (ii+1) * 10000, ...] = cur_labels
eval_images, eval_labels = self._load_datafile(
os.path.join(path, eval_filename))
with open(os.path.join(path, metadata_filename), 'rb') as fo:
if version.major == 3:
data_dict = pickle.load(fo, encoding='bytes')
else:
data_dict = pickle.load(fo)
self.label_names = data_dict[b'label_names']
for ii in range(len(self.label_names)):
self.label_names[ii] = self.label_names[ii].decode('utf-8')
self.train_data = DataSubset(train_images, train_labels)
self.eval_data = DataSubset(eval_images, eval_labels)
@staticmethod
def _load_datafile(filename):
with open(filename, 'rb') as fo:
if version.major == 3:
data_dict = pickle.load(fo, encoding='bytes')
else:
data_dict = pickle.load(fo)
assert data_dict[b'data'].dtype == np.uint8
image_data = data_dict[b'data']
image_data = image_data.reshape((10000, 3, 32, 32)).transpose(0, 2, 3, 1)
return image_data, np.array(data_dict[b'labels'])
class AugmentedCIFAR10Data(object):
"""
Data augmentation wrapper over a loaded dataset.
Inputs to constructor
=====================
- raw_cifar10data: the loaded CIFAR10 dataset, via the CIFAR10Data class
- sess: current tensorflow session
- model: current model (needed for input tensor)
"""
def __init__(self, raw_cifar10data, sess, model):
assert isinstance(raw_cifar10data, CIFAR10Data)
self.image_size = 32
# create augmentation computational graph
self.x_input_placeholder = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
padded = tf.map_fn(lambda img: tf.image.resize_image_with_crop_or_pad(
img, self.image_size + 4, self.image_size + 4),
self.x_input_placeholder)
cropped = tf.map_fn(lambda img: tf.random_crop(img, [self.image_size,
self.image_size,
3]), padded)
flipped = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), cropped)
self.augmented = flipped
self.train_data = AugmentedDataSubset(raw_cifar10data.train_data, sess,
self.x_input_placeholder,
self.augmented)
self.eval_data = AugmentedDataSubset(raw_cifar10data.eval_data, sess,
self.x_input_placeholder,
self.augmented)
self.label_names = raw_cifar10data.label_names
class DataSubset(object):
def __init__(self, xs, ys):
self.xs = xs
self.n = xs.shape[0]
self.ys = ys
self.batch_start = 0
self.cur_order = np.random.permutation(self.n)
def get_next_batch(self, batch_size, multiple_passes=False, reshuffle_after_pass=True):
if self.n < batch_size:
raise ValueError('Batch size can be at most the dataset size')
if not multiple_passes:
actual_batch_size = min(batch_size, self.n - self.batch_start)
if actual_batch_size <= 0:
raise ValueError('Pass through the dataset is complete.')
batch_end = self.batch_start + actual_batch_size
batch_xs = self.xs[self.cur_order[self.batch_start : batch_end], ...]
batch_ys = self.ys[self.cur_order[self.batch_start : batch_end], ...]
self.batch_start += actual_batch_size
return batch_xs, batch_ys
actual_batch_size = min(batch_size, self.n - self.batch_start)
if actual_batch_size < batch_size:
if reshuffle_after_pass:
self.cur_order = np.random.permutation(self.n)
self.batch_start = 0
batch_end = self.batch_start + batch_size
batch_xs = self.xs[self.cur_order[self.batch_start : batch_end], ...]
batch_ys = self.ys[self.cur_order[self.batch_start : batch_end], ...]
self.batch_start += actual_batch_size
return batch_xs, batch_ys
class AugmentedDataSubset(object):
def __init__(self, raw_datasubset, sess, x_input_placeholder,
augmented):
self.sess = sess
self.raw_datasubset = raw_datasubset
self.x_input_placeholder = x_input_placeholder
self.augmented = augmented
def get_next_batch(self, batch_size, multiple_passes=False, reshuffle_after_pass=True):
raw_batch = self.raw_datasubset.get_next_batch(batch_size, multiple_passes,
reshuffle_after_pass)
    images = raw_batch[0].astype(np.float32)
    return self.sess.run(self.augmented, feed_dict={self.x_input_placeholder:
                                                        images}), raw_batch[1]
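# Minimal usage sketch (our addition, not part of the original module): load
# the raw dataset from the default path and draw one training batch. Assumes
# the pickled batches are present under ../cifar10_data.
if __name__ == '__main__':
  data = CIFAR10Data('../cifar10_data')
  batch_xs, batch_ys = data.train_data.get_next_batch(8, multiple_passes=True)
  print(batch_xs.shape, batch_ys.shape)  # -> (8, 32, 32, 3) (8,)
  print([data.label_names[y] for y in batch_ys])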
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# based on https://github.com/tensorflow/models/tree/master/resnet
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class Model(object):
"""ResNet model."""
def __init__(self, restore=None, sess=None, tiny=True,
thermometer=True, levels=8, mode='eval'):
"""ResNet constructor.
Args:
mode: One of 'train' and 'eval'.
"""
self.mode = mode
self.tiny = tiny
self.thermometer = thermometer
self.levels = levels
with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
# print("Called")
self.first = True
self._build_model()
self.first = False
if restore:
path = tf.train.latest_checkpoint(restore)
saver = tf.train.Saver()
saver.restore(sess, path)
# print("restored")
def __call__(self, xs, **kwargs):
with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
return self._build_model(xs, **kwargs)
def add_internal_summaries(self):
pass
def _stride_arr(self, stride):
"""Map a stride scalar to the stride array for tf.nn.conv2d."""
return [1, stride, stride, 1]
  def _build_model(self, x_input=None, features_only=False, **kwargs):
    """Build the core model within the graph."""
    assert self.mode == 'train' or self.mode == 'eval'
with tf.variable_scope('input'):
      if x_input is None:
assert self.first
ch = 3
if self.thermometer:
ch = self.levels*3
x_input = self.x_input = tf.placeholder(
tf.float32,
shape=[None, 32, 32, ch], name='x_input_model')
else:
assert not self.first
if self.first:
self.y_input = tf.placeholder(tf.int64, shape=None, name='y_input_model')
input_standardized = tf.map_fn(lambda img: tf.image.per_image_standardization(img),
x_input)
ch = x_input.get_shape().as_list()[3]
x = self._conv('init_conv', input_standardized, 3, ch, 16, self._stride_arr(1))
strides = [1, 2, 2]
activate_before_residual = [True, False, False]
res_func = self._residual
    # The non-tiny configuration below uses a wide residual network
    # (https://arxiv.org/pdf/1605.07146v1.pdf), which is more memory
    # efficient than a very deep residual network and has comparably good
    # performance.
if self.tiny:
filters = [16, 16, 32, 64]
layers = 2
else:
filters = [16, 160, 320, 640]
layers = 5
    # The number of residual units per stage is given by 'layers' (set above
    # from self.tiny: 2 for the tiny network, 5 otherwise).
with tf.variable_scope('unit_1_0'):
x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]),
activate_before_residual[0])
for i in range(1, layers):
with tf.variable_scope('unit_1_%d' % i):
x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
with tf.variable_scope('unit_2_0'):
x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]),
activate_before_residual[1])
for i in range(1, layers):
with tf.variable_scope('unit_2_%d' % i):
x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
with tf.variable_scope('unit_3_0'):
x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]),
activate_before_residual[2])
for i in range(1, layers):
with tf.variable_scope('unit_3_%d' % i):
x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
with tf.variable_scope('unit_last'):
x = self._batch_norm('final_bn', x)
x = self._relu(x, 0.1)
x = self._global_avg_pool(x)
if features_only:
return x
if self.first:
self.features = x
if self.first:
with tf.variable_scope('logit'):
self.pre_softmax = self._fully_connected(x, 10)
self.predictions = tf.argmax(self.pre_softmax, 1)
self.correct_prediction = tf.equal(self.predictions, self.y_input)
self.num_correct = tf.reduce_sum(
tf.cast(self.correct_prediction, tf.int64))
self.accuracy = tf.reduce_mean(
tf.cast(self.correct_prediction, tf.float32))
with tf.variable_scope('costs'):
self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.pre_softmax, labels=self.y_input)
self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
self.mean_xent = tf.reduce_mean(self.y_xent)
self.weight_decay_loss = self._decay()
return self.pre_softmax
else:
with tf.variable_scope('logit'):
return self._fully_connected(x, 10)
def _batch_norm(self, name, x):
"""Batch normalization."""
with tf.name_scope(name):
return tf.contrib.layers.batch_norm(
inputs=x,
decay=.9,
center=True,
scale=True,
activation_fn=None,
updates_collections=None,
is_training=(self.mode == 'train'))
def _residual(self, x, in_filter, out_filter, stride,
activate_before_residual=False):
"""Residual unit with 2 sub layers."""
if activate_before_residual:
with tf.variable_scope('shared_activation'):
x = self._batch_norm('init_bn', x)
x = self._relu(x, 0.1)
orig_x = x
else:
with tf.variable_scope('residual_only_activation'):
orig_x = x
x = self._batch_norm('init_bn', x)
x = self._relu(x, 0.1)
with tf.variable_scope('sub1'):
x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
with tf.variable_scope('sub2'):
x = self._batch_norm('bn2', x)
x = self._relu(x, 0.1)
x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
with tf.variable_scope('sub_add'):
if in_filter != out_filter:
orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
orig_x = tf.pad(
orig_x, [[0, 0], [0, 0], [0, 0],
[(out_filter-in_filter)//2, (out_filter-in_filter)//2]])
x += orig_x
tf.logging.debug('image after unit %s', x.get_shape())
return x
def _decay(self):
"""L2 weight decay loss."""
costs = []
for var in tf.trainable_variables():
if var.op.name.find('DW') > 0:
costs.append(tf.nn.l2_loss(var))
return tf.add_n(costs)
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
"""Convolution."""
with tf.variable_scope(name):
n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
'DW', [filter_size, filter_size, in_filters, out_filters],
tf.float32, initializer=tf.random_normal_initializer(
stddev=np.sqrt(2.0/n)))
return tf.nn.conv2d(x, kernel, strides, padding='SAME')
def _relu(self, x, leakiness=0.0):
"""Relu, with optional leaky support."""
return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
def _fully_connected(self, x, out_dim):
"""FullyConnected layer for final output."""
num_non_batch_dimensions = len(x.shape)
prod_non_batch_dimensions = 1
for ii in range(num_non_batch_dimensions - 1):
prod_non_batch_dimensions *= int(x.shape[ii + 1])
x = tf.reshape(x, [tf.shape(x)[0], -1])
w = tf.get_variable(
'DW', [prod_non_batch_dimensions, out_dim],
initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable('biases', [out_dim],
initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
def _global_avg_pool(self, x):
assert x.get_shape().ndims == 4
return tf.reduce_mean(x, [1, 2])
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import robustml
import tensorflow as tf
from discretization_utils import discretize_uniform
import numpy as np
from cifar_model import Model
LEVELS = 16
class Thermometer(robustml.model.Model):
def __init__(self, sess, epsilon):
self._sess = sess
self._x = tf.placeholder(tf.float32, (None, 32, 32, 3))
self._encode = discretize_uniform(self._x/255.0, levels=LEVELS, thermometer=True)
self._model = Model(
'checkpoints/original_thermometer_wrn/thermometer_advtrain/',
sess,
tiny=False,
mode='eval',
thermometer=True,
levels=LEVELS
)
self._dataset = robustml.dataset.CIFAR10()
self._threat_model = robustml.threat_model.Linf(epsilon=epsilon/255.0)
@property
def dataset(self):
return self._dataset
@property
def threat_model(self):
return self._threat_model
def classify(self, x, skip_encoding=False):
x = x * 255.0
if not skip_encoding:
# first encode the input, then classify it
x = self.encode(x)
return self._sess.run(self._model.predictions, {self._model.x_input: x})
def get_features_and_gradients(self, x):
x = x * 255.0
x = self.encode(x)
grad = tf.gradients(self._model.features, self._model.x_input)[0]
return self._sess.run((self._model.features, grad),
{self._model.x_input: x})
def get_features(self, x):
x = x * 255.0
x = self.encode(x)
return self._sess.run(self._model.features,
{self._model.x_input: x})
def get_features_and_logits(self, x):
x = x * 255.0
x = self.encode(x)
return self._sess.run((self._model.features, self._model.pre_softmax),
{self._model.x_input: x})
# expose internals for white box attacks
@property
def model(self):
return self._model
# x should be in [0, 255]
def encode(self, x):
return self._sess.run(self._encode, {self._x: x})
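# Minimal usage sketch (our addition): classify a batch of CIFAR-10 images
# given in [0, 1]. Assumes the checkpoint directory hard-coded above exists;
# the random inputs here only illustrate the expected shapes.
if __name__ == '__main__':
  sess = tf.Session()
  model = Thermometer(sess, epsilon=8)
  x = np.random.uniform(size=(4, 32, 32, 3)).astype(np.float32)
  print(model.classify(x))  # four predicted class labels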
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import robustml
from robustml_model import Thermometer
import sys
import argparse
import numpy as np
from robustml_attack import LSPGDAttack, Attack
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--cifar-path', type=str, required=True,
help='path to the test_batch file from http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz')
parser.add_argument('--start', type=int, default=0)
parser.add_argument('--end', type=int, default=100)
parser.add_argument('--debug', action='store_true')
parser.add_argument("--attack", default="adaptive", choices=("original", "adaptive", "modified", "modified2"))
parser.add_argument("--batch-size", default=256, type=int)
parser.add_argument("--epsilon", type=int, default=8)
args = parser.parse_args()
# set up TensorFlow session
sess = tf.Session()
# initialize a model
model = Thermometer(sess, args.epsilon)
batch_size = args.batch_size
# initialize an attack (it's a white box attack, and it's allowed to look
# at the internals of the model in any way it wants)
# attack = BPDA(sess, model, epsilon=model.threat_model.epsilon, debug=args.debug)
# ATTENTION: Original attack did _not_ use the labels
use_labels=True
if args.attack == "adaptive":
attack = Attack(sess, model.model, epsilon=model.threat_model.epsilon, batch_size=batch_size, n_classes=10)
elif args.attack == "original":
attack = LSPGDAttack(sess, model.model, epsilon=model.threat_model.epsilon, use_labels=use_labels)
elif args.attack == "modified":
attack = LSPGDAttack(sess, model.model, epsilon=model.threat_model.epsilon, num_steps=50, step_size=0.25, use_labels=use_labels)
elif args.attack == "modified2":
attack = LSPGDAttack(sess, model.model, epsilon=model.threat_model.epsilon, num_steps=100, step_size=0.1, use_labels=use_labels)
else:
raise ValueError("invalid attack mode")
# initialize a data provider for CIFAR-10 images
provider = robustml.provider.CIFAR10(args.cifar_path)
success = 0
total = 0
random_indices = list(range(len(provider)))
if args.end == -1:
args.end = int(len(random_indices) / batch_size)
assert args.end <= len(random_indices) / batch_size
assert args.start <= len(random_indices) / batch_size
"""
print("using robustml...")
success_rate = robustml.evaluate.evaluate(
model,
attack,
provider,
start=args.start,
end=args.end,
deterministic=True,
debug=args.debug,
)
print('attack success rate: %.2f%% (over %d data points)' % (success_rate*100, args.end-args.start))
print("now using own eval...")
"""
np.random.shuffle(random_indices)
for i in range(args.start, args.end):
print('evaluating batch %d of [%d, %d)' % (i, args.start, args.end), file=sys.stderr)
x_batch = []
y_batch = []
for j in range(batch_size):
x_, y_ = provider[random_indices[i*batch_size + j]]
x_batch.append(x_)
y_batch.append(y_)
x_batch = np.array(x_batch)
y_batch = np.array(y_batch)
total += len(x_batch)
assert len(x_batch) == batch_size
x_batch_adv = attack.run(x_batch, y_batch, None)
y_batch_adv = model.classify(x_batch_adv, skip_encoding=args.attack in ("original", "modified", "modified2"))
# adv_acc = (y_batch_adv == y_batch).mean()
success += (y_batch_adv != y_batch).sum()
success_rate = success / total
  print('attack success rate: %.2f%%, robust accuracy: %.2f%% (over %d data points)' % (success_rate*100, 100-success_rate*100, total))
if __name__ == '__main__':
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
from robustml_model import LEVELS
from discretization_utils import discretize_uniform
import numpy as np
from discretization_attacks import adv_lspga
class Attack:
def __init__(self, sess, model, epsilon, num_steps=30, step_size=1, batch_size=1, n_classes=10):
self._sess = sess
self.model = model
self.num_steps = num_steps
self.step_size = step_size
self.xs = tf.Variable(np.zeros((batch_size, 32, 32, 3), dtype=np.float32),
name='modifier')
self.orig_xs = tf.placeholder(tf.float32, [None, 32, 32, 3])
self.ys = tf.placeholder(tf.int32, [None])
self.epsilon = epsilon * 255
delta = tf.clip_by_value(self.xs, 0, 255) - self.orig_xs
delta = tf.clip_by_value(delta, -self.epsilon, self.epsilon)
self.do_clip_xs = tf.assign(self.xs, self.orig_xs+delta)
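    # Piecewise-linear relaxation of the thermometer encoding: each level's
    # activation ramps from 0 to 1 as the pixel value crosses that level's
    # boundary, so self.therm is differentiable with respect to self.xs
    # (unlike the hard encoding computed in self.uniform below).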
compare = tf.constant((256.0/LEVELS)*np.arange(-1,LEVELS-1).reshape((1,1,1,1,LEVELS)),
dtype=tf.float32)
inner = tf.reshape(self.xs,(-1, 32, 32, 3, 1)) - compare
inner = tf.maximum(tf.minimum(inner/(256.0/LEVELS), 1.0), 0.0)
self.therm = tf.reshape(inner, (-1, 32, 32, LEVELS*3))
self.logits = logits = model(self.therm)
self.uniform = discretize_uniform(self.xs/255.0, levels=LEVELS, thermometer=True)
self.real_logits = model(self.uniform)
label_mask = tf.one_hot(self.ys, n_classes)
correct_logit = tf.reduce_sum(label_mask * logits, axis=1)
wrong_logit = tf.reduce_max((1-label_mask) * logits - 1e4*label_mask, axis=1)
self.loss = (correct_logit - wrong_logit)
start_vars = set(x.name for x in tf.global_variables())
optimizer = tf.train.AdamOptimizer(step_size*1)
self.grad = tf.sign(tf.gradients(self.loss, self.xs)[0])
grad,var = optimizer.compute_gradients(self.loss, [self.xs])[0]
self.train = optimizer.apply_gradients([(tf.sign(grad),var)])
end_vars = tf.global_variables()
self.new_vars = [x for x in end_vars if x.name not in start_vars]
#@profile
def perturb(self, x, y, sess, feed_dict={}):
sess.run(tf.variables_initializer(self.new_vars))
sess.run(self.xs.initializer)
sess.run(self.do_clip_xs,
{self.orig_xs: x})
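    # Each step feeds the *hard* thermometer encoding (self.uniform) in place
    # of self.therm for the forward pass, while the gradient of the loss
    # flows back to self.xs through the soft relaxation defined in __init__;
    # in effect a BPDA-style straight-through update.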
for i in range(self.num_steps):
t = sess.run(self.uniform)
sess.run(self.train, feed_dict={self.ys: y,
self.therm: t,
**feed_dict})
sess.run(self.do_clip_xs,
{self.orig_xs: x})
x_batch_adv = sess.run(self.xs)
return x_batch_adv
def run(self, x, y, target, feed_dict={}):
if len(x.shape) == 3:
x = np.array([x])
y = np.array([y])
if target is not None:
raise NotImplementedError
return self.perturb(x * 255.0, y, self._sess, feed_dict) / 255.0
class LSPGDAttack:
def __init__(self, sess, model, epsilon, num_steps=7, step_size=0.1,
use_labels=True, n_classes=10):
# ATTENTION: use_labels is a modification from AUTHOR
self._sess = sess
self.model = model
self.xin = tf.placeholder(tf.float32, (None, 32, 32, 3))
if use_labels:
self.yin = tf.placeholder(tf.int64, shape=None)
self.y_filled = tf.one_hot(
self.yin,
n_classes)
else:
self.yin = None
steps = num_steps
eps = epsilon
attack_step = step_size
projection_fn = tf.identity
self.attack = adv_lspga(self.xin, model, discretize_uniform,
projection_fn, 16, tf.constant(False), steps, eps,
attack_step, thermometer=True, noisy_grads=False,
y=self.y_filled)
def perturb(self, x, y, sess, feed_dict={}):
if self.yin is None:
x_batch_adv = sess.run(self.attack,
{self.xin: x/255.0, **feed_dict})
else:
x_batch_adv = sess.run(self.attack,
{self.xin: x/255.0, **feed_dict,
self.yin: y})
return x_batch_adv
def run(self, x, y, target, feed_dict={}):
if len(x.shape) == 3:
x = np.array([x])
y = np.array([y])
if target is not None:
raise NotImplementedError
return self.perturb(x * 255.0, y, self._sess, feed_dict) / 255.0
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import scipy.misc
from datetime import datetime
import json
import os
import shutil
import sys
from timeit import default_timer as timer
from discretization_utils import discretize_uniform
from discretization_attacks import adv_lspga
import tensorflow as tf
import numpy as np
from cifar_model import Model
import cifar10_input
with open('config.json') as config_file:
config = json.load(config_file)
# seeding randomness
tf.set_random_seed(config['tf_random_seed'])
np.random.seed(config['np_random_seed'])
# Setting up training parameters
max_num_training_steps = config['max_num_training_steps']
num_output_steps = config['num_output_steps']
num_summary_steps = config['num_summary_steps']
num_checkpoint_steps = config['num_checkpoint_steps']
step_size_schedule = config['step_size_schedule']
weight_decay = config['weight_decay']
data_path = config['data_path']
momentum = config['momentum']
batch_size = config['training_batch_size']
levels = 16
# Setting up the data and the model
raw_cifar = cifar10_input.CIFAR10Data(data_path)
global_step = tf.contrib.framework.get_or_create_global_step()
model = Model(mode='train', tiny=False,
thermometer=True, levels=levels)
# Setting up the optimizer
boundaries = [int(sss[0]) for sss in step_size_schedule]
boundaries = boundaries[1:]
values = [sss[1] for sss in step_size_schedule]
learning_rate = tf.train.piecewise_constant(
tf.cast(global_step, tf.int32),
boundaries,
values)
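# Example (illustrative values only): a step_size_schedule of
# [[0, 0.1], [40000, 0.01], [60000, 0.001]] yields boundaries [40000, 60000]
# and values [0.1, 0.01, 0.001], i.e. the learning rate drops by 10x at each
# boundary step.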
total_loss = model.mean_xent + weight_decay * model.weight_decay_loss
train_step = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(
total_loss,
global_step=global_step)
xin = tf.placeholder(tf.float32, (None, 32, 32, 3))
steps = 7
eps = 0.031
attack_step = 0.01
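# eps = 0.031 corresponds to the common L-infinity budget of ~8/255 on
# [0, 1]-scaled inputs.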
projection_fn = tf.identity
attack = adv_lspga(xin, model, discretize_uniform,
projection_fn, levels, tf.constant(True), steps, eps,
attack_step, thermometer=True, noisy_grads=False)
thermometerize = discretize_uniform(xin, levels=levels, thermometer=True)
# Setting up the Tensorboard and checkpoint outputs
model_dir = config['model_dir']
if not os.path.exists(model_dir):
os.makedirs(model_dir)
# We add accuracy and xent twice so we can easily make three types of
# comparisons in Tensorboard:
# - train vs eval (for a single run)
# - train of different runs
# - eval of different runs
saver = tf.train.Saver(max_to_keep=3)
tf.summary.scalar('accuracy adv train', model.accuracy)
tf.summary.scalar('accuracy adv', model.accuracy)
tf.summary.scalar('xent adv train', model.xent / batch_size)
tf.summary.scalar('xent adv', model.xent / batch_size)
#tf.summary.image('images adv train', model.x_input)
merged_summaries = tf.summary.merge_all()
# keep the configuration file with the model for reproducibility
shutil.copy('config.json', model_dir)
with tf.Session() as sess:
# initialize data augmentation
cifar = cifar10_input.AugmentedCIFAR10Data(raw_cifar, sess, model)
# Initialize the summary writer, global variables, and our time counter.
summary_writer = tf.summary.FileWriter(model_dir, sess.graph)
sess.run(tf.global_variables_initializer())
training_time = 0.0
attack_time = 0.0
if sys.argv[-1] == 'restore':
saver.restore(sess,
os.path.join("models/adv_train_fixed/", 'checkpoint-105000'))
# Main training loop
for ii in range(max_num_training_steps):
x_batch, y_batch = cifar.train_data.get_next_batch(batch_size,
multiple_passes=True)
# Compute Adversarial Perturbations
start = timer()
x_batch, x_batch_adv = sess.run((thermometerize, attack),
{xin: x_batch/255.0})
end = timer()
attack_time += end - start
nat_dict = {model.x_input: x_batch,
model.y_input: y_batch}
adv_dict = {model.x_input: x_batch_adv,
model.y_input: y_batch}
# Output to stdout
if ii % (num_output_steps) == 0:
nat_acc = sess.run(model.accuracy, feed_dict=nat_dict)
adv_acc = sess.run(model.accuracy, feed_dict=adv_dict)
print('Step {}: ({})'.format(ii, datetime.now()))
print(' training nat accuracy {:.4}%'.format(nat_acc * 100))
print(' training adv accuracy {:.4}%'.format(adv_acc * 100))
if ii != 0:
print(' {} examples per second (train)'.format(
num_output_steps * batch_size / (training_time)))
print(' {} examples per second (attack)'.format(
num_output_steps * batch_size / (attack_time)))
attack_time = training_time = 0.0
# Tensorboard summaries
if ii % num_summary_steps == 0:
summary = sess.run(merged_summaries, feed_dict=adv_dict)
summary_writer.add_summary(summary, global_step.eval(sess))
# Write a checkpoint
if ii % num_checkpoint_steps == 0:
saver.save(sess,
os.path.join(model_dir, 'checkpoint'),
global_step=global_step)
# Actual training step
start = timer()
sess.run(train_step, feed_dict=adv_dict)
end = timer()
training_time += end - start
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains all the attacks on discretized inputs.
The attacks implemented are Discrete Gradient Ascent (DGA) and
Logit Space-Projected Gradient Ascent (LS-PGA).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import discretization_utils
def discretize_range(discretize_fn, levels, low, high, thermometer=False):
"""Get range of discretized values for in the interval (low, high).
For example, assume discretize_fn uniformly discretizes the values
between 0 and 1 into 10 bins each represented by either a one hot encoding
or a thermometer encoding. Then discretize_range(discretize_fn, .3, .7)
would return [0., 0., 0., 1., 1., 1., 1., 0., 0., 0.]. Note that it's output
is independent of the encoding used.
Args:
discretize_fn: Discretization function used to discretize input.
levels: Number of levels to discretize the input into.
low: Minimum value in the interval.
high: Maximum value in the interval.
thermometer: If True, then the discretize_fn returns thermometer codes,
else it returns one hot codes. (Default: False).
Returns:
Mask of 1's over the interval.
"""
low = tf.clip_by_value(low, 0., 1.)
high = tf.clip_by_value(high, 0., 1.)
out = 0.
for alpha in np.linspace(0., 1., levels):
q = discretize_fn(alpha * low + (1. - alpha) * high, levels, thermometer)
# Convert into one hot encoding if q is in thermometer encoding
if thermometer:
q = discretization_utils.thermometer_to_one_hot(q, levels, flattened=True)
out += q
return tf.to_float(tf.greater(out, 0.))
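# Illustrative NumPy sketch of what discretize_range computes above, assuming
# a uniform discretize_fn over [0, 1] (the helper name is hypothetical; the
# real implementation works for any discretize_fn and stays in TF):
def _discretize_range_reference(low, high, levels=10):
  out = np.zeros(levels)
  for alpha in np.linspace(0., 1., levels):
    v = np.clip(alpha * low + (1. - alpha) * high, 0., 1.)
    out[min(int(v * levels), levels - 1)] = 1.  # mark the bucket v falls in
  return out  # ones over the buckets hit by `levels` samples spanning the interval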
def adv_dga(x, model, discretize_fn, projection_fn, levels, phase,
steps, eps, thermometer=False, noisy_grads=True, y=None):
"""Compute adversarial examples for discretized input using DGA.
Args:
x: Input image of shape [-1, height, width, channels] to attack.
model: Model function which given input returns logits.
discretize_fn: Function used to discretize the input into one-hot or thermometer
encoding.
projection_fn: Function used to project the input before feeding to the
model (can be identity).
levels: Number of levels the input has been discretized into.
phase: Learning phase of the model, corresponding to train and test time.
steps: Number of steps to iterate when creating adversarial examples.
eps: Eps ball within which the perturbed image must stay.
thermometer: Whether the discretized input is in thermometer encoding or one
hot encoding. (Default: False).
noisy_grads: If True then compute attack over noisy input.
y: Optional argument to provide the true labels as opposed to model
predictions to compute the loss. (Default: None).
Returns:
Adversarial image for discretized inputs. The output
is in the same form of discretization as the input.
"""
# Add noise
noise = 0
if noisy_grads:
noise = tf.random_uniform(
shape=tf.shape(x), minval=-eps, maxval=eps, dtype=tf.float32)
x_noisy = x + noise
# Clip so that x_noisy is in [0, 1]
x_noisy = tf.clip_by_value(x_noisy, 0., 1.)
# Compute the mask over the bits that we are allowed to attack
mask = discretize_range(
discretize_fn, levels, x - eps, x + eps, thermometer=thermometer)
cur_x_discretized = discretize_fn(x_noisy)
for i in range(steps):
# Compute one hot representation if input is in thermometer encoding.
cur_x_one_hot = cur_x_discretized
if thermometer:
cur_x_one_hot = discretization_utils.thermometer_to_one_hot(
cur_x_discretized, levels, flattened=True)
logits_discretized = model(projection_fn(cur_x_discretized),
is_training=phase)
if i == 0 and y is None:
# Get one hot version from predictions
y = tf.one_hot(
tf.argmax(logits_discretized, 1),
tf.shape(logits_discretized)[1])
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=y, logits=logits_discretized)
# compute the gradients wrt to current input
grad, = tf.gradients(loss, cur_x_discretized)
# The harm done by choosing a particular bit to be active
harm = grad * (1. + cur_x_one_hot - 2 * cur_x_discretized)
    # With thermometer encoding the harm accumulates over levels, so take a
    # reverse cumulative sum
if thermometer:
harm_r = discretization_utils.unflatten_last(harm, levels)
harm_r = tf.cumsum(harm_r, axis=-1, reverse=True)
harm = discretization_utils.flatten_last(harm_r)
    # Push bits outside the global mask to -1000 so they can never win the
    # argmax below
    harm = harm * mask - (1. - mask) * 1000.0
harm_r = discretization_utils.unflatten_last(harm, levels)
bit_to_activate = tf.argmax(harm_r, axis=-1)
one_hot = tf.one_hot(
bit_to_activate,
depth=levels,
on_value=1.,
off_value=0.,
dtype=tf.float32,
axis=-1)
# Convert into thermometer if we are doing thermometer encodings
inp = one_hot
if thermometer:
inp = discretization_utils.one_hot_to_thermometer(
one_hot, levels, flattened=False)
flattened_inp = discretization_utils.flatten_last(inp)
flattened_inp.mask = mask
flattened_inp = tf.stop_gradient(flattened_inp)
cur_x_discretized = flattened_inp
return flattened_inp
#@profile
def adv_lspga(x, model, discretize_fn, projection_fn, levels, phase,
steps, eps, attack_step=1., thermometer=False,
noisy_grads=True, y=None, inv_temp=1., anneal_rate=1.2):
"""Compute adversarial examples for discretized input by LS-PGA.
Args:
x: Input image of shape [-1, height, width, channels] to attack.
model: Model function which given input returns logits.
discretize_fn: Function used to discretize the input into one-hot or thermometer
encoding.
projection_fn: Function used to project the input before feeding to the
model (can be identity).
levels: Number of levels the input has been discretized into.
phase: Learning phase of the model, corresponding to train and test time.
steps: Number of steps to iterate when creating adversarial examples.
eps: Eps ball within which the perturbed image must stay.
attack_step: Attack step for one iteration of the iterative attack.
thermometer: Whether the discretized input is in thermometer encoding or one
hot encoding. (Default: False).
noisy_grads: If True then compute attack over noisy input.
y: True labels corresponding to x. If it is None, then use model predictions
to compute loss, else use true labels. (Default: None).
inv_temp: Inverse of the temperature parameter for softmax.
anneal_rate: Rate for annealing the temperature after every iteration of
attack.
Returns:
Adversarial image for discretized inputs. The output
is in the same form of discretization as the input.
"""
# Compute the mask over the bits that we are allowed to attack
flat_mask = discretize_range(
discretize_fn, levels, x - eps, x + eps, thermometer=thermometer)
mask = discretization_utils.unflatten_last(flat_mask, levels)
if noisy_grads:
activation_logits = tf.random_normal(tf.shape(mask))
else:
activation_logits = tf.zeros_like(mask)
for i in range(steps):
print("Preparing step", i)
    # Turn the activation logits into a soft one-hot over levels, masking out
    # bits that fall outside the eps ball.
    activation_probs = tf.nn.softmax(
        inv_temp * (activation_logits * mask - 999999. * (1. - mask)))
if thermometer:
activation_probs = tf.cumsum(activation_probs, axis=-1, reverse=True)
logits_discretized = model(
projection_fn(discretization_utils.flatten_last(activation_probs)),
is_training=phase)
if i == 0 and y is None:
# Get one hot version from model predictions
y = tf.one_hot(
tf.argmax(logits_discretized, 1),
tf.shape(logits_discretized)[1])
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=y, logits=logits_discretized)
# compute the gradients wrt to current logits
grad, = tf.gradients(loss, activation_logits)
    # Take the sign of the gradient and stop gradients from flowing through it
    signed_grad = tf.stop_gradient(tf.sign(grad))
# Modify activation logits
activation_logits += attack_step * signed_grad
# Anneal temperature
inv_temp *= anneal_rate
# Convert from logits to actual one-hot image
final_al = activation_logits * mask - 999999. * (1. - mask)
bit_to_activate = tf.argmax(final_al, axis=-1)
one_hot = tf.one_hot(
bit_to_activate,
depth=levels,
on_value=1.,
off_value=0.,
dtype=tf.float32,
axis=-1)
# Convert into thermometer if we are doing thermometer encodings
inp = one_hot
if thermometer:
inp = discretization_utils.one_hot_to_thermometer(
one_hot, levels, flattened=False)
flattened_inp = discretization_utils.flatten_last(inp)
flattened_inp.mask = mask
flattened_inp = tf.stop_gradient(flattened_inp)
print("Attack set up.")
return flattened_inp
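# Hypothetical usage sketch (not part of the original code): build an LS-PGA
# attack graph against a toy linear readout on thermometer-encoded CIFAR-sized
# inputs. All names and hyper-parameters below are illustrative.
def _example_lspga_graph(levels=16):
  x = tf.placeholder(tf.float32, (None, 32, 32, 3))
  feat_dim = 32 * 32 * levels * 3
  w = tf.get_variable('toy_readout_w', (feat_dim, 10))  # shared across steps
  def toy_model(inp, is_training=False):
    # inp arrives as the flattened encoding of shape (batch, 32, 32, levels*3)
    return tf.matmul(tf.reshape(inp, (-1, feat_dim)), w)
  return adv_lspga(x, toy_model, discretization_utils.discretize_uniform,
                   tf.identity, levels, tf.constant(False), steps=7,
                   eps=8. / 255., attack_step=0.1, thermometer=True)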
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import torch
import robustml
from robustml_model import Thermometer
import sys
import argparse
import numpy as np
from robustml_attack import LSPGDAttack, Attack
from active_tests.decision_boundary_binarization import interior_boundary_discrimination_attack, format_result
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--cifar-path', type=str, required=True,
help='path to the test_batch file from http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz')
parser.add_argument('--debug', action='store_true')
parser.add_argument("--attack", default="adaptive", choices=("original", "adaptive", "modified", "modified2"))
parser.add_argument("--n-samples", default=512, type=int)
parser.add_argument("--n-boundary-points", default=49, type=int)
parser.add_argument("--n-inner-points", default=10, type=int)
parser.add_argument("--epsilon", default=8, type=int)
parser.add_argument("--decision-boundary-closeness", type=float, default=None)
parser.add_argument("--sample-from-corners", action="store_true")
args = parser.parse_args()
# set up TensorFlow session
sess = tf.Session()
# initialize a model
model = Thermometer(sess, epsilon=args.epsilon)
# initialize a data provider for CIFAR-10 images
provider = robustml.provider.CIFAR10(args.cifar_path)
random_indices = list(range(len(provider)))
np.random.shuffle(random_indices)
x_batch = []
y_batch = []
for j in range(args.n_samples):
x_, y_ = provider[random_indices[j]]
x_batch.append(x_)
y_batch.append(y_)
x_batch = np.array(x_batch).transpose((0, 3, 1, 2))
y_batch = np.array(y_batch)
from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper, PyTorchToTensorFlow1Wrapper
from utils import build_dataloader_from_arrays
test_loader = build_dataloader_from_arrays(x_batch, y_batch, batch_size=32)
def _model_forward_pass(x, features_and_logits: bool = False, features_only: bool = False):
if features_and_logits:
assert not features_only, "Only one of the flags must be set."
if features_and_logits:
return model.get_features_and_logits(x.transpose(0, 2, 3, 1))
elif features_only:
return model.get_features(x.transpose(0, 2, 3, 1))
else:
raise ValueError
feature_extractor = TensorFlow1ToPyTorchWrapper(
logit_forward_pass=_model_forward_pass,
logit_forward_and_backward_pass=lambda x: model.get_features_and_gradients(x.transpose(0, 2, 3, 1))
)
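    # ModelWrapper replaces the network's readout with a binary linear layer
    # whose weight/bias are fed at run time; the (2, 640) shape passed below
    # matches the feature dimension of the wrapped model.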
class ModelWrapper:
def __init__(self, model, weight_shape, bias_shape):
self.weight = tf.placeholder(dtype=tf.float32, shape=weight_shape)
self.bias = tf.placeholder(dtype=tf.float32, shape=bias_shape)
self.model = model
self.first = True
def __call__(self, x, **kwargs):
y = self.model(x, features_only=True, **kwargs)
logits = y @ tf.transpose(self.weight) + tf.reshape(self.bias, (1, -1))
return logits
def logits_and_predictions(self, x = None):
if x == None: assert not self.first
if self.first:
self.logits = self(x)
self.predictions = tf.argmax(self.logits, 1)
self.first = False
return self.logits, self.predictions
wrapped_model = ModelWrapper(model.model, (2, 640), (2,))
if args.attack == "adaptive":
attack = Attack(sess, wrapped_model, epsilon=model.threat_model.epsilon, batch_size=1, n_classes=2)
elif args.attack == "original":
attack = LSPGDAttack(sess, wrapped_model, epsilon=model.threat_model.epsilon, n_classes=2)
elif args.attack == "modified":
attack = LSPGDAttack(sess, wrapped_model, epsilon=model.threat_model.epsilon, num_steps=50, step_size=0.25, n_classes=2)
elif args.attack == "modified2":
attack = LSPGDAttack(sess, wrapped_model, epsilon=model.threat_model.epsilon, num_steps=100, step_size=0.1, n_classes=2)
else:
raise ValueError("invalid attack mode")
#@profile
def run_attack(m, l, epsilon):
linear_layer = m[-1]
del m
# initialize an attack (it's a white box attack, and it's allowed to look
# at the internals of the model in any way it wants)
# attack = BPDA(sess, model, epsilon=model.threat_model.epsilon, debug=args.debug)
# m = PyTorchToTensorFlow1Wrapper(m, "cpu")
weights_feed_dict = {
wrapped_model.weight: linear_layer.weight.data.numpy(),
wrapped_model.bias: linear_layer.bias.data.numpy()
}
for x, y in l:
x = x.numpy().transpose((0, 2, 3, 1))
y = y.numpy()
x_adv = attack.run(x, y, None, weights_feed_dict)
x_adv = x_adv * 255.0
if not args.attack in ("original", "modified", "modified2"):
# first encode the input, then classify it
x_adv = model.encode(x_adv)
logits, y_adv = model._sess.run(
wrapped_model.logits_and_predictions(model._model.x_input),
{
model._model.x_input: x_adv,
**weights_feed_dict
}
)
is_adv = (y_adv != y).mean()
return is_adv, (torch.Tensor(x_adv), torch.Tensor(logits))
from argparse_utils import DecisionBoundaryBinarizationSettings
scores_logit_differences_and_validation_accuracies = \
interior_boundary_discrimination_attack(
feature_extractor,
test_loader,
attack_fn=lambda m, l, kw: run_attack(m, l, args.epsilon/255.0),
linearization_settings=DecisionBoundaryBinarizationSettings(
epsilon=args.epsilon/255.0,
norm="linf",
lr=10000,
n_boundary_points=args.n_boundary_points,
n_inner_points=args.n_inner_points,
adversarial_attack_settings=None,
optimizer="sklearn"
),
n_samples=args.n_samples,
device="cpu",
n_samples_evaluation=200,
n_samples_asr_evaluation=200,
decision_boundary_closeness=args.decision_boundary_closeness,
rescale_logits="adaptive",
sample_training_data_from_corners=args.sample_from_corners
#args.num_samples_test * 10
)
print(format_result(scores_logit_differences_and_validation_accuracies,
args.n_samples))
if __name__ == '__main__':
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import PIL
import PIL.Image
from io import BytesIO
import numpy as np
import tensorflow as tf
def defend_reduce(arr, depth=3):
arr = (arr * 255.0).astype(np.uint8)
shift = 8 - depth
arr = (arr >> shift) << shift
arr = arr.astype(np.float32)/255.0
return arr
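# Example: with depth=3 (shift=5) a pixel value of 200 becomes
# (200 >> 5) << 5 = 192, i.e. only the top three bits survive.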
batched_defend_reduce = defend_reduce
def defend_jpeg(input_array):
pil_image = PIL.Image.fromarray((input_array*255.0).astype(np.uint8))
f = BytesIO()
pil_image.save(f, format='jpeg', quality=75) # quality level specified in paper
jpeg_image = np.asarray(PIL.Image.open(f)).astype(np.float32)/255.0
return jpeg_image
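# Note: the JPEG round-trip through PIL is non-differentiable, so white-box
# attacks approximate its gradient (e.g. with the identity, as in BPDA).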
def batched_defend_jpeg(xs):
return np.stack([defend_jpeg(x) for x in xs], 0)
# based on https://github.com/scikit-image/scikit-image/blob/master/skimage/restoration/_denoise_cy.pyx
# super slow since this is implemented in pure python :'(
def bregman(image, mask, weight, eps=1e-3, max_iter=100):
rows, cols, dims = image.shape
rows2 = rows + 2
cols2 = cols + 2
total = rows * cols * dims
shape_ext = (rows2, cols2, dims)
u = np.zeros(shape_ext)
dx = np.zeros(shape_ext)
dy = np.zeros(shape_ext)
bx = np.zeros(shape_ext)
by = np.zeros(shape_ext)
u[1:-1, 1:-1] = image
# reflect image
u[0, 1:-1] = image[1, :]
u[1:-1, 0] = image[:, 1]
u[-1, 1:-1] = image[-2, :]
u[1:-1, -1] = image[:, -2]
i = 0
rmse = np.inf
lam = 2 * weight
norm = (weight + 4 * lam)
while i < max_iter and rmse > eps:
rmse = 0
for k in range(dims):
for r in range(1, rows+1):
for c in range(1, cols+1):
uprev = u[r, c, k]
# forward derivatives
ux = u[r, c+1, k] - uprev
uy = u[r+1, c, k] - uprev
# Gauss-Seidel method
if mask[r-1, c-1]:
unew = (lam * (u[r+1, c, k] +
u[r-1, c, k] +
u[r, c+1, k] +
u[r, c-1, k] +
dx[r, c-1, k] -
dx[r, c, k] +
dy[r-1, c, k] -
dy[r, c, k] -
bx[r, c-1, k] +
bx[r, c, k] -
by[r-1, c, k] +
by[r, c, k]
) + weight * image[r-1, c-1, k]
) / norm
else:
# similar to the update step above, except we take
# lim_{weight->0} of the update step, effectively
# ignoring the l2 loss
unew = (u[r+1, c, k] +
u[r-1, c, k] +
u[r, c+1, k] +
u[r, c-1, k] +
dx[r, c-1, k] -
dx[r, c, k] +
dy[r-1, c, k] -
dy[r, c, k] -
bx[r, c-1, k] +
bx[r, c, k] -
by[r-1, c, k] +
by[r, c, k]
) / 4.0
u[r, c, k] = unew
# update rms error
rmse += (unew - uprev)**2
bxx = bx[r, c, k]
byy = by[r, c, k]
# d_subproblem
s = ux + bxx
if s > 1/lam:
dxx = s - 1/lam
elif s < -1/lam:
dxx = s + 1/lam
else:
dxx = 0
s = uy + byy
if s > 1/lam:
dyy = s - 1/lam
elif s < -1/lam:
dyy = s + 1/lam
else:
dyy = 0
dx[r, c, k] = dxx
dy[r, c, k] = dyy
bx[r, c, k] += ux - dxx
by[r, c, k] += uy - dyy
rmse = np.sqrt(rmse / total)
i += 1
return np.squeeze(np.asarray(u[1:-1, 1:-1]))
def defend_tv(input_array, keep_prob=0.5, lambda_tv=0.03):
mask = np.random.uniform(size=input_array.shape[:2])
mask = mask < keep_prob
return bregman(input_array, mask, weight=2.0/lambda_tv)
def batched_defend_tv(xs):
return np.stack([defend_tv(x) for x in xs], 0)
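# Note: defend_tv is randomized (a fresh Bernoulli(keep_prob) pixel mask per
# call) and, like the JPEG defense, non-differentiable end to end.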
def make_defend_quilt(sess):
# setup for quilting
quilt_db = np.load('checkpoints/inputtransformations_inceptionv3/quilt_db.npy')
quilt_db_reshaped = quilt_db.reshape(1000000, -1)
TILE_SIZE = 5
TILE_OVERLAP = 2
tile_skip = TILE_SIZE - TILE_OVERLAP
K = 10
db_tensor = tf.placeholder(tf.float32, quilt_db_reshaped.shape)
query_imgs = tf.placeholder(tf.float32, (TILE_SIZE * TILE_SIZE * 3, None))
norms = tf.reduce_sum(tf.square(db_tensor), axis=1)[:, tf.newaxis] \
- 2*tf.matmul(db_tensor, query_imgs)
_, topk_indices = tf.nn.top_k(-tf.transpose(norms), k=K, sorted=False)
def min_error_table(arr, direction):
assert direction in ('horizontal', 'vertical')
y, x = arr.shape
cum = np.zeros_like(arr)
if direction == 'horizontal':
cum[:, -1] = arr[:, -1]
for ix in range(x-2, -1, -1):
for iy in range(y):
m = arr[iy, ix+1]
if iy > 0:
m = min(m, arr[iy-1, ix+1])
if iy < y - 1:
m = min(m, arr[iy+1, ix+1])
cum[iy, ix] = arr[iy, ix] + m
elif direction == 'vertical':
cum[-1, :] = arr[-1, :]
for iy in range(y-2, -1, -1):
for ix in range(x):
m = arr[iy+1, ix]
if ix > 0:
m = min(m, arr[iy+1, ix-1])
if ix < x - 1:
m = min(m, arr[iy+1, ix+1])
cum[iy, ix] = arr[iy, ix] + m
return cum
def index_exists(arr, index):
if arr.ndim != len(index):
return False
        return all(i >= 0 for i in index) and all(index[i] < arr.shape[i] for i in range(arr.ndim))
def assign_block(ix, iy, tile, synth):
posx = tile_skip * ix
posy = tile_skip * iy
if ix == 0 and iy == 0:
synth[posy:posy+TILE_SIZE, posx:posx+TILE_SIZE, :] = tile
elif iy == 0:
# first row, only have horizontal overlap of the block
tile_left = tile[:, :TILE_OVERLAP, :]
synth_right = synth[:TILE_SIZE, posx:posx+TILE_OVERLAP, :]
errors = np.sum(np.square(tile_left - synth_right), axis=2)
table = min_error_table(errors, direction='vertical')
# copy row by row into synth
xoff = np.argmin(table[0, :])
synth[posy, posx+xoff:posx+TILE_SIZE] = tile[0, xoff:]
for yoff in range(1, TILE_SIZE):
# explore nearby xoffs
candidates = [(yoff, xoff), (yoff, xoff-1), (yoff, xoff+1)]
index = min((i for i in candidates if index_exists(table, i)), key=lambda i: table[i])
xoff = index[1]
synth[posy+yoff, posx+xoff:posx+TILE_SIZE] = tile[yoff, xoff:]
elif ix == 0:
# first column, only have vertical overlap of the block
tile_up = tile[:TILE_OVERLAP, :, :]
synth_bottom = synth[posy:posy+TILE_OVERLAP, :TILE_SIZE, :]
errors = np.sum(np.square(tile_up - synth_bottom), axis=2)
table = min_error_table(errors, direction='horizontal')
# copy column by column into synth
yoff = np.argmin(table[:, 0])
synth[posy+yoff:posy+TILE_SIZE, posx] = tile[yoff:, 0]
for xoff in range(1, TILE_SIZE):
# explore nearby yoffs
candidates = [(yoff, xoff), (yoff-1, xoff), (yoff+1, xoff)]
index = min((i for i in candidates if index_exists(table, i)), key=lambda i: table[i])
yoff = index[0]
synth[posy+yoff:posy+TILE_SIZE, posx+xoff] = tile[yoff:, xoff]
else:
# glue cuts along diagonal
tile_up = tile[:TILE_OVERLAP, :, :]
synth_bottom = synth[posy:posy+TILE_OVERLAP, :TILE_SIZE, :]
errors_up = np.sum(np.square(tile_up - synth_bottom), axis=2)
table_up = min_error_table(errors_up, direction='horizontal')
tile_left = tile[:, :TILE_OVERLAP, :]
synth_right = synth[:TILE_SIZE, posx:posx+TILE_OVERLAP, :]
errors_left = np.sum(np.square(tile_left - synth_right), axis=2)
table_left = min_error_table(errors_left, direction='vertical')
glue_index = -1
glue_value = np.inf
for i in range(TILE_OVERLAP):
e = table_up[i, i] + table_left[i, i]
if e < glue_value:
glue_value = e
glue_index = i
# copy left part first, up to the overlap column
xoff = glue_index
synth[posy+glue_index, posx+xoff:posx+TILE_OVERLAP] = tile[glue_index, xoff:TILE_OVERLAP]
for yoff in range(glue_index+1, TILE_SIZE):
# explore nearby xoffs
candidates = [(yoff, xoff), (yoff, xoff-1), (yoff, xoff+1)]
index = min((i for i in candidates if index_exists(table_left, i)), key=lambda i: table_left[i])
xoff = index[1]
synth[posy+yoff, posx+xoff:posx+TILE_OVERLAP] = tile[yoff, xoff:TILE_OVERLAP]
# copy right part, down to overlap row
yoff = glue_index
synth[posy+yoff:posy+TILE_OVERLAP, posx+glue_index] = tile[yoff:TILE_OVERLAP, glue_index]
for xoff in range(glue_index+1, TILE_SIZE):
# explore nearby yoffs
candidates = [(yoff, xoff), (yoff-1, xoff), (yoff+1, xoff)]
index = min((i for i in candidates if index_exists(table_up, i)), key=lambda i: table_up[i])
yoff = index[0]
synth[posy+yoff:posy+TILE_OVERLAP, posx+xoff] = tile[yoff:TILE_OVERLAP, xoff]
# copy rest of image
synth[posy+TILE_OVERLAP:posy+TILE_SIZE, posx+TILE_OVERLAP:posx+TILE_SIZE] = tile[TILE_OVERLAP:, TILE_OVERLAP:]
KNN_MAX_BATCH = 1000
def quilt(arr, graphcut=True):
h, w, c = arr.shape
assert (h - TILE_SIZE) % tile_skip == 0
assert (w - TILE_SIZE) % tile_skip == 0
horiz_blocks = (w - TILE_SIZE) // tile_skip + 1
vert_blocks = (h - TILE_SIZE) // tile_skip + 1
num_patches = horiz_blocks * vert_blocks
patches = np.zeros((TILE_SIZE * TILE_SIZE * 3, num_patches))
idx = 0
for iy in range(vert_blocks):
for ix in range(horiz_blocks):
posx = tile_skip*ix
posy = tile_skip*iy
patches[:, idx] = arr[posy:posy+TILE_SIZE, posx:posx+TILE_SIZE, :].ravel()
idx += 1
ind = []
for chunk in range(num_patches // KNN_MAX_BATCH + (1 if num_patches % KNN_MAX_BATCH != 0 else 0)):
start = KNN_MAX_BATCH * chunk
end = start + KNN_MAX_BATCH
# for some reason, the code below is 10x slower when run in a Jupyter notebook
# not sure why...
indices_ = sess.run(topk_indices, {db_tensor: quilt_db_reshaped, query_imgs: patches[:, start:end]})
for i in indices_:
ind.append(np.random.choice(i))
synth = np.zeros((299, 299, 3))
idx = 0
for iy in range(vert_blocks):
for ix in range(horiz_blocks):
posx = tile_skip*ix
posy = tile_skip*iy
tile = quilt_db[ind[idx]]
if not graphcut:
synth[posy:posy+TILE_SIZE, posx:posx+TILE_SIZE, :] = tile
else:
assign_block(ix, iy, tile, synth)
idx += 1
return synth
return quilt
def batched_make_defend_quilt(sess):
quilt = make_defend_quilt(sess)
def inner(xs, *args, **kwargs):
return np.stack([quilt(x, *args, **kwargs) for x in xs], 0)
return inner
# x is a square image (3-tensor)
def defend_crop(x, crop_size=90, ensemble_size=30):
x_size = tf.to_float(x.shape[1])
frac = crop_size/x_size
start_fraction_max = (x_size - crop_size)/x_size
def randomizing_crop(x):
start_x = tf.random_uniform((), 0, start_fraction_max)
start_y = tf.random_uniform((), 0, start_fraction_max)
return tf.image.crop_and_resize([x], boxes=[[start_y, start_x, start_y+frac, start_x+frac]],
box_ind=[0], crop_size=[crop_size, crop_size])
    return tf.concat([randomizing_crop(x) for _ in range(ensemble_size)], axis=0)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tensorflow as tf
import torch
import torchvision
tf.logging.set_verbosity(tf.logging.ERROR)
from robustml_model import InputTransformations
from robustml_attack import BPDA
import argparse
import numpy as np
import tensorflow as tf
import robustml
import torch.utils.data
import sys
import tqdm
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--imagenet-path', type=str, required=True,
help='directory containing `val.txt` and `val/` folder')
parser.add_argument('--defense', type=str, required=True,
help='bitdepth | jpeg | crop | quilt | tv')
parser.add_argument('--n-samples', type=int, default=100)
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument("--epsilon", default=0.05, type=float)
parser.add_argument("--pgd-steps", type=int, default=100)
args = parser.parse_args()
# set up TensorFlow session
sess = tf.Session()
# initialize a model
model = InputTransformations(sess, args.defense)
# initialize an attack (it's a white box attack, and it's allowed to look
# at the internals of the model in any way it wants)
# XXX restore
# TODO: use the distance conversion from original code; I think there is a
# factor sqrt(3) missing here
attack = BPDA(sess, model, args.epsilon * 299, debug=False, max_steps=args.pgd_steps)
# initialize a data provider for ImageNet images
provider = robustml.provider.ImageNet(args.imagenet_path, model.dataset.shape)
dataset = torchvision.datasets.ImageFolder(
os.path.join(args.imagenet_path, 'val'),
torchvision.transforms.Compose([
torchvision.transforms.Resize(299),
torchvision.transforms.CenterCrop(299),
torchvision.transforms.ToTensor(),
]))
random_indices = list(range(len(provider)))
if args.n_samples == -1:
args.n_samples = len(random_indices)
np.random.shuffle(random_indices)
random_indices = random_indices[:args.n_samples]
dataset = torch.utils.data.Subset(dataset, random_indices)
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=args.batch_size,
shuffle=False,
pin_memory=False)
success = 0
total = 0
for x_batch, y_batch in tqdm.tqdm(data_loader):
x_batch = x_batch.numpy().transpose((0, 2, 3, 1))
y_batch = y_batch.numpy()
total += len(x_batch)
x_batch_adv = attack.run(x_batch, y_batch, None)
y_batch_adv = model.classify(x_batch_adv)
# adv_acc = (y_batch_adv == y_batch).mean()
success += (y_batch_adv != y_batch).sum()
success_rate = success / total
print('attack success rate: %.2f%% (over %d data points)' % (success_rate*100, total))
if __name__ == '__main__':
    main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import robustml
from defense import *
from inceptionv3 import model as inceptionv3_model
import tensorflow as tf
class InputTransformations(robustml.model.Model):
def __init__(self, sess, defense):
self._sess = sess
self._input = tf.placeholder(tf.float32, (None, 299, 299, 3))
#self._input_single = tf.placeholder(tf.float32, (299, 299, 3))
#input_expanded = tf.expand_dims(self._input, axis=0)
if defense == 'crop':
raise NotImplementedError("crop transformation not properly "
"implemented yet")
cropped_xs = defend_crop(self._input)
self._logits, _ = inceptionv3_model(sess, cropped_xs)
self._probs = tf.reduce_mean(tf.nn.softmax(self._logits), axis=0, keepdims=True)
else:
self._logits, _ = inceptionv3_model(sess, self._input)
self._probs = tf.nn.softmax(self._logits)
self._predictions = tf.argmax(self._probs, 1)
if defense == 'bitdepth':
self._defend = batched_defend_reduce
elif defense == 'jpeg':
self._defend = batched_defend_jpeg
elif defense == 'crop':
raise NotImplementedError("crop transformation not properly "
"implemented yet")
self._defend = lambda x: x # implemented as part of model so it's differentiable
elif defense == 'quilt':
            self._defend = batched_make_defend_quilt(sess)
elif defense == 'tv':
self._defend = batched_defend_tv
        elif defense is None:
self._defend = lambda x: x
else:
raise ValueError('invalid defense: %s' % defense)
        self._dataset = robustml.dataset.ImageNet((299, 299, 3))
        self._threat_model = robustml.threat_model.L2(epsilon=0.05*299)  # 0.05 * sqrt(299*299)
        # TODO: I think there is a factor sqrt(3) missing here
@property
def dataset(self):
return self._dataset
@property
def threat_model(self):
return self._threat_model
def classify(self, x):
x_defended = self.defend(x)
return self._sess.run(self._predictions, {self._input: x_defended})
# expose internals for white box attacks
def defend(self, x):
return self._defend(x)
@property
def input(self):
return self._input
@property
def logits(self):
return self._logits
@property
def predictions(self):
return self._predictions
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import robustml
import sys
import tensorflow as tf
import numpy as np
class BPDA(robustml.attack.Attack):
def __init__(self, sess, model, epsilon, max_steps=1000, learning_rate=0.1, lam=1e-6, debug=False):
self._sess = sess
self._model = model
self._input = model.input
        self._l2_input = tf.placeholder(tf.float32, self._input.shape, name="l2_input")  # BPDA: the l2 distance is measured on the undefended iterate, not the defended input
self._original = tf.placeholder(tf.float32, self._input.shape, name="original")
self._label = tf.placeholder(tf.int32, (None,), name="label")
one_hot = tf.one_hot(self._label, 1000)
#ensemble_labels = tf.tile(one_hot, (model.logits.shape[0], 1))
self._l2 = tf.sqrt(2*tf.nn.l2_loss(self._l2_input - self._original))
self._xent = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=model.logits, labels=one_hot))
self._loss = lam * tf.maximum(self._l2 - epsilon, 0) + self._xent
self._grad, = tf.gradients(self._loss, self._input)
self._epsilon = epsilon
self._max_steps = max_steps
self._learning_rate = learning_rate
self._debug = debug
def run(self, x, y, target):
if target is not None:
raise NotImplementedError
adv = np.copy(x)
for i in range(self._max_steps):
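            # BPDA: run the (possibly non-differentiable) defense in NumPy,
            # evaluate the network and its gradient at the defended input, and
            # apply the update to the undefended iterate `adv` below, i.e. the
            # defense is treated as the identity in the backward pass.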
adv_def = self._model.defend(adv)
p, ll2, lxent, g = self._sess.run(
[self._model.predictions, self._l2, self._xent, self._grad],
{self._input: adv_def, self._label: y, self._l2_input: adv, self._original: x}
)
if self._debug:
print(
'attack: step %d/%d, xent loss = %g, l2 loss = %g (max %g), (true %d, predicted %s)' % (
i+1,
self._max_steps,
lxent,
ll2,
self._epsilon,
y,
p
),
file=sys.stderr
)
is_adv = np.logical_and(y != p, ll2 < self._epsilon)
            if self._debug:
                print(is_adv.sum(), file=sys.stderr)
            if np.all(is_adv):
                # every example in the batch is adversarial; we're done
                print('returning early', file=sys.stderr)
                break
g *= (~is_adv).astype(int).reshape(-1, 1, 1, 1)
adv += self._learning_rate * g
adv = np.clip(adv, 0, 1)
adv_l2 = np.sqrt(((adv - x)**2).sum((1, 2, 3), keepdims=True))
factor = self._epsilon / adv_l2
factor = np.minimum(factor, np.ones_like(factor))
diff = adv - x
adv = diff*factor + x
return adv
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.python.framework import ops
import numpy as np
import PIL.Image
from imagenet_labels import label_to_name
import matplotlib.pyplot as plt
def one_hot(index, total):
arr = np.zeros((total))
arr[index] = 1.0
return arr
def optimistic_restore(session, save_file):
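    # Restores only those checkpoint variables whose name and shape match the
    # current graph, silently skipping the rest.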
reader = tf.train.NewCheckpointReader(save_file)
saved_shapes = reader.get_variable_to_shape_map()
var_names = sorted([(var.name, var.name.split(':')[0]) for var in tf.global_variables()
if var.name.split(':')[0] in saved_shapes])
restore_vars = []
with tf.variable_scope('', reuse=True):
for var_name, saved_var_name in var_names:
curr_var = tf.get_variable(saved_var_name)
var_shape = curr_var.get_shape().as_list()
if var_shape == saved_shapes[saved_var_name]:
restore_vars.append(curr_var)
saver = tf.train.Saver(restore_vars)
saver.restore(session, save_file)
def load_image(path):
return (np.asarray(PIL.Image.open(path).resize((299, 299)))/255.0).astype(np.float32)
def make_classify(sess, input_, probs):
def classify(img, correct_class=None, target_class=None):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 8))
fig.sca(ax1)
p = sess.run(probs, feed_dict={input_: img})[0]
ax1.imshow(img)
fig.sca(ax1)
topk = list(p.argsort()[-10:][::-1])
topprobs = p[topk]
barlist = ax2.bar(range(10), topprobs)
if target_class in topk:
barlist[topk.index(target_class)].set_color('r')
if correct_class in topk:
barlist[topk.index(correct_class)].set_color('g')
plt.sca(ax2)
plt.ylim([0, 1.1])
plt.xticks(range(10),
[label_to_name(i)[:15] for i in topk],
rotation='vertical')
fig.subplots_adjust(bottom=0.2)
plt.show()
    return classify
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from utils import optimistic_restore
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
import functools
INCEPTION_CHECKPOINT_PATH = 'checkpoints/inputtransformations_inceptionv3/inception_v3.ckpt'
def _get_model(reuse):
arg_scope = nets.inception.inception_v3_arg_scope(weight_decay=0.0)
func = nets.inception.inception_v3
@functools.wraps(func)
def network_fn(images):
with slim.arg_scope(arg_scope):
return func(images, 1001, is_training=False, reuse=reuse)
if hasattr(func, 'default_image_size'):
network_fn.default_image_size = func.default_image_size
return network_fn
def _preprocess(image, height, width, scope=None):
with tf.name_scope(scope, 'eval_image', [image, height, width]):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
# input is [batch, ?, ?, 3], pixels in [0, 1]
# it's rescaled to [batch, 299, 299, 3] and shifted to [-1, 1]
# output is [batch, 1000] (imagenet classes)
_inception_initialized = False
def model(sess, image):
global _inception_initialized
network_fn = _get_model(reuse=_inception_initialized)
size = network_fn.default_image_size
preprocessed = _preprocess(image, size, size)
logits, _ = network_fn(preprocessed)
logits = logits[:,1:] # ignore background class
predictions = tf.argmax(logits, 1)
if not _inception_initialized:
optimistic_restore(sess, INCEPTION_CHECKPOINT_PATH)
_inception_initialized = True
return logits, predictions
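# Hypothetical usage sketch (illustrative only):
#   sess = tf.Session()
#   images = tf.placeholder(tf.float32, (None, 299, 299, 3))  # pixels in [0, 1]
#   logits, preds = model(sess, images)  # restores the checkpoint on first call
#   predictions = sess.run(preds, {images: batch})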
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_lut = [
'tench, Tinca tinca',
'goldfish, Carassius auratus',
'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias',
'tiger shark, Galeocerdo cuvieri',
'hammerhead, hammerhead shark',
'electric ray, crampfish, numbfish, torpedo',
'stingray',
'cock',
'hen',
'ostrich, Struthio camelus',
'brambling, Fringilla montifringilla',
'goldfinch, Carduelis carduelis',
'house finch, linnet, Carpodacus mexicanus',
'junco, snowbird',
'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
'robin, American robin, Turdus migratorius',
'bulbul',
'jay',
'magpie',
'chickadee',
'water ouzel, dipper',
'kite',
'bald eagle, American eagle, Haliaeetus leucocephalus',
'vulture',
'great grey owl, great gray owl, Strix nebulosa',
'European fire salamander, Salamandra salamandra',
'common newt, Triturus vulgaris',
'eft',
'spotted salamander, Ambystoma maculatum',
'axolotl, mud puppy, Ambystoma mexicanum',
'bullfrog, Rana catesbeiana',
'tree frog, tree-frog',
'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
'loggerhead, loggerhead turtle, Caretta caretta',
'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea',
'mud turtle',
'terrapin',
'box turtle, box tortoise',
'banded gecko',
'common iguana, iguana, Iguana iguana',
'American chameleon, anole, Anolis carolinensis',
'whiptail, whiptail lizard',
'agama',
'frilled lizard, Chlamydosaurus kingi',
'alligator lizard',
'Gila monster, Heloderma suspectum',
'green lizard, Lacerta viridis',
'African chameleon, Chamaeleo chamaeleon',
'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis',
'African crocodile, Nile crocodile, Crocodylus niloticus',
'American alligator, Alligator mississipiensis',
'triceratops',
'thunder snake, worm snake, Carphophis amoenus',
'ringneck snake, ring-necked snake, ring snake',
'hognose snake, puff adder, sand viper',
'green snake, grass snake',
'king snake, kingsnake',
'garter snake, grass snake',
'water snake',
'vine snake',
'night snake, Hypsiglena torquata',
'boa constrictor, Constrictor constrictor',
'rock python, rock snake, Python sebae',
'Indian cobra, Naja naja',
'green mamba',
'sea snake',
'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
'diamondback, diamondback rattlesnake, Crotalus adamanteus',
'sidewinder, horned rattlesnake, Crotalus cerastes',
'trilobite',
'harvestman, daddy longlegs, Phalangium opilio',
'scorpion',
'black and gold garden spider, Argiope aurantia',
'barn spider, Araneus cavaticus',
'garden spider, Aranea diademata',
'black widow, Latrodectus mactans',
'tarantula',
'wolf spider, hunting spider',
'tick',
'centipede',
'black grouse',
'ptarmigan',
'ruffed grouse, partridge, Bonasa umbellus',
'prairie chicken, prairie grouse, prairie fowl',
'peacock',
'quail',
'partridge',
'African grey, African gray, Psittacus erithacus',
'macaw',
'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
'lorikeet',
'coucal',
'bee eater',
'hornbill',
'hummingbird',
'jacamar',
'toucan',
'drake',
'red-breasted merganser, Mergus serrator',
'goose',
'black swan, Cygnus atratus',
'tusker',
'echidna, spiny anteater, anteater',
'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus',
'wallaby, brush kangaroo',
'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus',
'wombat',
'jellyfish',
'sea anemone, anemone',
'brain coral',
'flatworm, platyhelminth',
'nematode, nematode worm, roundworm',
'conch',
'snail',
'slug',
'sea slug, nudibranch',
'chiton, coat-of-mail shell, sea cradle, polyplacophore',
'chambered nautilus, pearly nautilus, nautilus',
'Dungeness crab, Cancer magister',
'rock crab, Cancer irroratus',
'fiddler crab',
'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica',
'American lobster, Northern lobster, Maine lobster, Homarus americanus',
'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish',
'crayfish, crawfish, crawdad, crawdaddy',
'hermit crab',
'isopod',
'white stork, Ciconia ciconia',
'black stork, Ciconia nigra',
'spoonbill',
'flamingo',
'little blue heron, Egretta caerulea',
'American egret, great white heron, Egretta albus',
'bittern',
'crane',
'limpkin, Aramus pictus',
'European gallinule, Porphyrio porphyrio',
'American coot, marsh hen, mud hen, water hen, Fulica americana',
'bustard',
'ruddy turnstone, Arenaria interpres',
'red-backed sandpiper, dunlin, Erolia alpina',
'redshank, Tringa totanus',
'dowitcher',
'oystercatcher, oyster catcher',
'pelican',
'king penguin, Aptenodytes patagonica',
'albatross, mollymawk',
'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus',
'killer whale, killer, orca, grampus, sea wolf, Orcinus orca',
'dugong, Dugong dugon',
'sea lion',
'Chihuahua',
'Japanese spaniel',
'Maltese dog, Maltese terrier, Maltese',
'Pekinese, Pekingese, Peke',
'Shih-Tzu',
'Blenheim spaniel',
'papillon',
'toy terrier',
'Rhodesian ridgeback',
'Afghan hound, Afghan',
'basset, basset hound',
'beagle',
'bloodhound, sleuthhound',
'bluetick',
'black-and-tan coonhound',
'Walker hound, Walker foxhound',
'English foxhound',
'redbone',
'borzoi, Russian wolfhound',
'Irish wolfhound',
'Italian greyhound',
'whippet',
'Ibizan hound, Ibizan Podenco',
'Norwegian elkhound, elkhound',
'otterhound, otter hound',
'Saluki, gazelle hound',
'Scottish deerhound, deerhound',
'Weimaraner',
'Staffordshire bullterrier, Staffordshire bull terrier',
'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier',
'Bedlington terrier',
'Border terrier',
'Kerry blue terrier',
'Irish terrier',
'Norfolk terrier',
'Norwich terrier',
'Yorkshire terrier',
'wire-haired fox terrier',
'Lakeland terrier',
'Sealyham terrier, Sealyham',
'Airedale, Airedale terrier',
'cairn, cairn terrier',
'Australian terrier',
'Dandie Dinmont, Dandie Dinmont terrier',
'Boston bull, Boston terrier',
'miniature schnauzer',
'giant schnauzer',
'standard schnauzer',
'Scotch terrier, Scottish terrier, Scottie',
'Tibetan terrier, chrysanthemum dog',
'silky terrier, Sydney silky',
'soft-coated wheaten terrier',
'West Highland white terrier',
'Lhasa, Lhasa apso',
'flat-coated retriever',
'curly-coated retriever',
'golden retriever',
'Labrador retriever',
'Chesapeake Bay retriever',
'German short-haired pointer',
'vizsla, Hungarian pointer',
'English setter',
'Irish setter, red setter',
'Gordon setter',
'Brittany spaniel',
'clumber, clumber spaniel',
'English springer, English springer spaniel',
'Welsh springer spaniel',
'cocker spaniel, English cocker spaniel, cocker',
'Sussex spaniel',
'Irish water spaniel',
'kuvasz',
'schipperke',
'groenendael',
'malinois',
'briard',
'kelpie',
'komondor',
'Old English sheepdog, bobtail',
'Shetland sheepdog, Shetland sheep dog, Shetland',
'collie',
'Border collie',
'Bouvier des Flandres, Bouviers des Flandres',
'Rottweiler',
'German shepherd, German shepherd dog, German police dog, alsatian',
'Doberman, Doberman pinscher',
'miniature pinscher',
'Greater Swiss Mountain dog',
'Bernese mountain dog',
'Appenzeller',
'EntleBucher',
'boxer',
'bull mastiff',
'Tibetan mastiff',
'French bulldog',
'Great Dane',
'Saint Bernard, St Bernard',
'Eskimo dog, husky',
'malamute, malemute, Alaskan malamute',
'Siberian husky',
'dalmatian, coach dog, carriage dog',
'affenpinscher, monkey pinscher, monkey dog',
'basenji',
'pug, pug-dog',
'Leonberg',
'Newfoundland, Newfoundland dog',
'Great Pyrenees',
'Samoyed, Samoyede',
'Pomeranian',
'chow, chow chow',
'keeshond',
'Brabancon griffon',
'Pembroke, Pembroke Welsh corgi',
'Cardigan, Cardigan Welsh corgi',
'toy poodle',
'miniature poodle',
'standard poodle',
'Mexican hairless',
'timber wolf, grey wolf, gray wolf, Canis lupus',
'white wolf, Arctic wolf, Canis lupus tundrarum',
'red wolf, maned wolf, Canis rufus, Canis niger',
'coyote, prairie wolf, brush wolf, Canis latrans',
'dingo, warrigal, warragal, Canis dingo',
'dhole, Cuon alpinus',
'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus',
'hyena, hyaena',
'red fox, Vulpes vulpes',
'kit fox, Vulpes macrotis',
'Arctic fox, white fox, Alopex lagopus',
'grey fox, gray fox, Urocyon cinereoargenteus',
'tabby, tabby cat',
'tiger cat',
'Persian cat',
'Siamese cat, Siamese',
'Egyptian cat',
'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor',
'lynx, catamount',
'leopard, Panthera pardus',
'snow leopard, ounce, Panthera uncia',
'jaguar, panther, Panthera onca, Felis onca',
'lion, king of beasts, Panthera leo',
'tiger, Panthera tigris',
'cheetah, chetah, Acinonyx jubatus',
'brown bear, bruin, Ursus arctos',
'American black bear, black bear, Ursus americanus, Euarctos americanus',
'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus',
'sloth bear, Melursus ursinus, Ursus ursinus',
'mongoose',
'meerkat, mierkat',
'tiger beetle',
'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle',
'ground beetle, carabid beetle',
'long-horned beetle, longicorn, longicorn beetle',
'leaf beetle, chrysomelid',
'dung beetle',
'rhinoceros beetle',
'weevil',
'fly',
'bee',
'ant, emmet, pismire',
'grasshopper, hopper',
'cricket',
'walking stick, walkingstick, stick insect',
'cockroach, roach',
'mantis, mantid',
'cicada, cicala',
'leafhopper',
'lacewing, lacewing fly',
"dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
'damselfly',
'admiral',
'ringlet, ringlet butterfly',
'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
'cabbage butterfly',
'sulphur butterfly, sulfur butterfly',
'lycaenid, lycaenid butterfly',
'starfish, sea star',
'sea urchin',
'sea cucumber, holothurian',
'wood rabbit, cottontail, cottontail rabbit',
'hare',
'Angora, Angora rabbit',
'hamster',
'porcupine, hedgehog',
'fox squirrel, eastern fox squirrel, Sciurus niger',
'marmot',
'beaver',
'guinea pig, Cavia cobaya',
'sorrel',
'zebra',
'hog, pig, grunter, squealer, Sus scrofa',
'wild boar, boar, Sus scrofa',
'warthog',
'hippopotamus, hippo, river horse, Hippopotamus amphibius',
'ox',
'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
'bison',
'ram, tup',
'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis',
'ibex, Capra ibex',
'hartebeest',
'impala, Aepyceros melampus',
'gazelle',
'Arabian camel, dromedary, Camelus dromedarius',
'llama',
'weasel',
'mink',
'polecat, fitch, foulmart, foumart, Mustela putorius',
'black-footed ferret, ferret, Mustela nigripes',
'otter',
'skunk, polecat, wood pussy',
'badger',
'armadillo',
'three-toed sloth, ai, Bradypus tridactylus',
'orangutan, orang, orangutang, Pongo pygmaeus',
'gorilla, Gorilla gorilla',
'chimpanzee, chimp, Pan troglodytes',
'gibbon, Hylobates lar',
'siamang, Hylobates syndactylus, Symphalangus syndactylus',
'guenon, guenon monkey',
'patas, hussar monkey, Erythrocebus patas',
'baboon',
'macaque',
'langur',
'colobus, colobus monkey',
'proboscis monkey, Nasalis larvatus',
'marmoset',
'capuchin, ringtail, Cebus capucinus',
'howler monkey, howler',
'titi, titi monkey',
'spider monkey, Ateles geoffroyi',
'squirrel monkey, Saimiri sciureus',
'Madagascar cat, ring-tailed lemur, Lemur catta',
'indri, indris, Indri indri, Indri brevicaudatus',
'Indian elephant, Elephas maximus',
'African elephant, Loxodonta africana',
'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens',
'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca',
'barracouta, snoek',
'eel',
'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch',
'rock beauty, Holocanthus tricolor',
'anemone fish',
'sturgeon',
'gar, garfish, garpike, billfish, Lepisosteus osseus',
'lionfish',
'puffer, pufferfish, blowfish, globefish',
'abacus',
'abaya',
"academic gown, academic robe, judge's robe",
'accordion, piano accordion, squeeze box',
'acoustic guitar',
'aircraft carrier, carrier, flattop, attack aircraft carrier',
'airliner',
'airship, dirigible',
'altar',
'ambulance',
'amphibian, amphibious vehicle',
'analog clock',
'apiary, bee house',
'apron',
'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin',
'assault rifle, assault gun',
'backpack, back pack, knapsack, packsack, rucksack, haversack',
'bakery, bakeshop, bakehouse',
'balance beam, beam',
'balloon',
'ballpoint, ballpoint pen, ballpen, Biro',
'Band Aid',
'banjo',
'bannister, banister, balustrade, balusters, handrail',
'barbell',
'barber chair',
'barbershop',
'barn',
'barometer',
'barrel, cask',
'barrow, garden cart, lawn cart, wheelbarrow',
'baseball',
'basketball',
'bassinet',
'bassoon',
'bathing cap, swimming cap',
'bath towel',
'bathtub, bathing tub, bath, tub',
'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon',
'beacon, lighthouse, beacon light, pharos',
'beaker',
'bearskin, busby, shako',
'beer bottle',
'beer glass',
'bell cote, bell cot',
'bib',
'bicycle-built-for-two, tandem bicycle, tandem',
'bikini, two-piece',
'binder, ring-binder',
'binoculars, field glasses, opera glasses',
'birdhouse',
'boathouse',
'bobsled, bobsleigh, bob',
'bolo tie, bolo, bola tie, bola',
'bonnet, poke bonnet',
'bookcase',
'bookshop, bookstore, bookstall',
'bottlecap',
'bow',
'bow tie, bow-tie, bowtie',
'brass, memorial tablet, plaque',
'brassiere, bra, bandeau',
'breakwater, groin, groyne, mole, bulwark, seawall, jetty',
'breastplate, aegis, egis',
'broom',
'bucket, pail',
'buckle',
'bulletproof vest',
'bullet train, bullet',
'butcher shop, meat market',
'cab, hack, taxi, taxicab',
'caldron, cauldron',
'candle, taper, wax light',
'cannon',
'canoe',
'can opener, tin opener',
'cardigan',
'car mirror',
'carousel, carrousel, merry-go-round, roundabout, whirligig',
"carpenter's kit, tool kit",
'carton',
'car wheel',
'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM',
'cassette',
'cassette player',
'castle',
'catamaran',
'CD player',
'cello, violoncello',
'cellular telephone, cellular phone, cellphone, cell, mobile phone',
'chain',
'chainlink fence',
'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour',
'chain saw, chainsaw',
'chest',
'chiffonier, commode',
'chime, bell, gong',
'china cabinet, china closet',
'Christmas stocking',
'church, church building',
'cinema, movie theater, movie theatre, movie house, picture palace',
'cleaver, meat cleaver, chopper',
'cliff dwelling',
'cloak',
'clog, geta, patten, sabot',
'cocktail shaker',
'coffee mug',
'coffeepot',
'coil, spiral, volute, whorl, helix',
'combination lock',
'computer keyboard, keypad',
'confectionery, confectionary, candy store',
'container ship, containership, container vessel',
'convertible',
'corkscrew, bottle screw',
'cornet, horn, trumpet, trump',
'cowboy boot',
'cowboy hat, ten-gallon hat',
'cradle',
'crane',
'crash helmet',
'crate',
'crib, cot',
'Crock Pot',
'croquet ball',
'crutch',
'cuirass',
'dam, dike, dyke',
'desk',
'desktop computer',
'dial telephone, dial phone',
'diaper, nappy, napkin',
'digital clock',
'digital watch',
'dining table, board',
'dishrag, dishcloth',
'dishwasher, dish washer, dishwashing machine',
'disk brake, disc brake',
'dock, dockage, docking facility',
'dogsled, dog sled, dog sleigh',
'dome',
'doormat, welcome mat',
'drilling platform, offshore rig',
'drum, membranophone, tympan',
'drumstick',
'dumbbell',
'Dutch oven',
'electric fan, blower',
'electric guitar',
'electric locomotive',
'entertainment center',
'envelope',
'espresso maker',
'face powder',
'feather boa, boa',
'file, file cabinet, filing cabinet',
'fireboat',
'fire engine, fire truck',
'fire screen, fireguard',
'flagpole, flagstaff',
'flute, transverse flute',
'folding chair',
'football helmet',
'forklift',
'fountain',
'fountain pen',
'four-poster',
'freight car',
'French horn, horn',
'frying pan, frypan, skillet',
'fur coat',
'garbage truck, dustcart',
'gasmask, respirator, gas helmet',
'gas pump, gasoline pump, petrol pump, island dispenser',
'goblet',
'go-kart',
'golf ball',
'golfcart, golf cart',
'gondola',
'gong, tam-tam',
'gown',
'grand piano, grand',
'greenhouse, nursery, glasshouse',
'grille, radiator grille',
'grocery store, grocery, food market, market',
'guillotine',
'hair slide',
'hair spray',
'half track',
'hammer',
'hamper',
'hand blower, blow dryer, blow drier, hair dryer, hair drier',
'hand-held computer, hand-held microcomputer',
'handkerchief, hankie, hanky, hankey',
'hard disc, hard disk, fixed disk',
'harmonica, mouth organ, harp, mouth harp',
'harp',
'harvester, reaper',
'hatchet',
'holster',
'home theater, home theatre',
'honeycomb',
'hook, claw',
'hoopskirt, crinoline',
'horizontal bar, high bar',
'horse cart, horse-cart',
'hourglass',
'iPod',
'iron, smoothing iron',
"jack-o'-lantern",
'jean, blue jean, denim',
'jeep, landrover',
'jersey, T-shirt, tee shirt',
'jigsaw puzzle',
'jinrikisha, ricksha, rickshaw',
'joystick',
'kimono',
'knee pad',
'knot',
'lab coat, laboratory coat',
'ladle',
'lampshade, lamp shade',
'laptop, laptop computer',
'lawn mower, mower',
'lens cap, lens cover',
'letter opener, paper knife, paperknife',
'library',
'lifeboat',
'lighter, light, igniter, ignitor',
'limousine, limo',
'liner, ocean liner',
'lipstick, lip rouge',
'Loafer',
'lotion',
'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system',
"loupe, jeweler's loupe",
'lumbermill, sawmill',
'magnetic compass',
'mailbag, postbag',
'mailbox, letter box',
'maillot',
'maillot, tank suit',
'manhole cover',
'maraca',
'marimba, xylophone',
'mask',
'matchstick',
'maypole',
'maze, labyrinth',
'measuring cup',
'medicine chest, medicine cabinet',
'megalith, megalithic structure',
'microphone, mike',
'microwave, microwave oven',
'military uniform',
'milk can',
'minibus',
'miniskirt, mini',
'minivan',
'missile',
'mitten',
'mixing bowl',
'mobile home, manufactured home',
'Model T',
'modem',
'monastery',
'monitor',
'moped',
'mortar',
'mortarboard',
'mosque',
'mosquito net',
'motor scooter, scooter',
'mountain bike, all-terrain bike, off-roader',
'mountain tent',
'mouse, computer mouse',
'mousetrap',
'moving van',
'muzzle',
'nail',
'neck brace',
'necklace',
'nipple',
'notebook, notebook computer',
'obelisk',
'oboe, hautboy, hautbois',
'ocarina, sweet potato',
'odometer, hodometer, mileometer, milometer',
'oil filter',
'organ, pipe organ',
'oscilloscope, scope, cathode-ray oscilloscope, CRO',
'overskirt',
'oxcart',
'oxygen mask',
'packet',
'paddle, boat paddle',
'paddlewheel, paddle wheel',
'padlock',
'paintbrush',
"pajama, pyjama, pj's, jammies",
'palace',
'panpipe, pandean pipe, syrinx',
'paper towel',
'parachute, chute',
'parallel bars, bars',
'park bench',
'parking meter',
'passenger car, coach, carriage',
'patio, terrace',
'pay-phone, pay-station',
'pedestal, plinth, footstall',
'pencil box, pencil case',
'pencil sharpener',
'perfume, essence',
'Petri dish',
'photocopier',
'pick, plectrum, plectron',
'pickelhaube',
'picket fence, paling',
'pickup, pickup truck',
'pier',
'piggy bank, penny bank',
'pill bottle',
'pillow',
'ping-pong ball',
'pinwheel',
'pirate, pirate ship',
'pitcher, ewer',
"plane, carpenter's plane, woodworking plane",
'planetarium',
'plastic bag',
'plate rack',
'plow, plough',
"plunger, plumber's helper",
'Polaroid camera, Polaroid Land camera',
'pole',
'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria',
'poncho',
'pool table, billiard table, snooker table',
'pop bottle, soda bottle',
'pot, flowerpot',
"potter's wheel",
'power drill',
'prayer rug, prayer mat',
'printer',
'prison, prison house',
'projectile, missile',
'projector',
'puck, hockey puck',
'punching bag, punch bag, punching ball, punchball',
'purse',
'quill, quill pen',
'quilt, comforter, comfort, puff',
'racer, race car, racing car',
'racket, racquet',
'radiator',
'radio, wireless',
'radio telescope, radio reflector',
'rain barrel',
'recreational vehicle, RV, R.V.',
'reel',
'reflex camera',
'refrigerator, icebox',
'remote control, remote',
'restaurant, eating house, eating place, eatery',
'revolver, six-gun, six-shooter',
'rifle',
'rocking chair, rocker',
'rotisserie',
'rubber eraser, rubber, pencil eraser',
'rugby ball',
'rule, ruler',
'running shoe',
'safe',
'safety pin',
'saltshaker, salt shaker',
'sandal',
'sarong',
'sax, saxophone',
'scabbard',
'scale, weighing machine',
'school bus',
'schooner',
'scoreboard',
'screen, CRT screen',
'screw',
'screwdriver',
'seat belt, seatbelt',
'sewing machine',
'shield, buckler',
'shoe shop, shoe-shop, shoe store',
'shoji',
'shopping basket',
'shopping cart',
'shovel',
'shower cap',
'shower curtain',
'ski',
'ski mask',
'sleeping bag',
'slide rule, slipstick',
'sliding door',
'slot, one-armed bandit',
'snorkel',
'snowmobile',
'snowplow, snowplough',
'soap dispenser',
'soccer ball',
'sock',
'solar dish, solar collector, solar furnace',
'sombrero',
'soup bowl',
'space bar',
'space heater',
'space shuttle',
'spatula',
'speedboat',
"spider web, spider's web",
'spindle',
'sports car, sport car',
'spotlight, spot',
'stage',
'steam locomotive',
'steel arch bridge',
'steel drum',
'stethoscope',
'stole',
'stone wall',
'stopwatch, stop watch',
'stove',
'strainer',
'streetcar, tram, tramcar, trolley, trolley car',
'stretcher',
'studio couch, day bed',
'stupa, tope',
'submarine, pigboat, sub, U-boat',
'suit, suit of clothes',
'sundial',
'sunglass',
'sunglasses, dark glasses, shades',
'sunscreen, sunblock, sun blocker',
'suspension bridge',
'swab, swob, mop',
'sweatshirt',
'swimming trunks, bathing trunks',
'swing',
'switch, electric switch, electrical switch',
'syringe',
'table lamp',
'tank, army tank, armored combat vehicle, armoured combat vehicle',
'tape player',
'teapot',
'teddy, teddy bear',
'television, television system',
'tennis ball',
'thatch, thatched roof',
'theater curtain, theatre curtain',
'thimble',
'thresher, thrasher, threshing machine',
'throne',
'tile roof',
'toaster',
'tobacco shop, tobacconist shop, tobacconist',
'toilet seat',
'torch',
'totem pole',
'tow truck, tow car, wrecker',
'toyshop',
'tractor',
'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi',
'tray',
'trench coat',
'tricycle, trike, velocipede',
'trimaran',
'tripod',
'triumphal arch',
'trolleybus, trolley coach, trackless trolley',
'trombone',
'tub, vat',
'turnstile',
'typewriter keyboard',
'umbrella',
'unicycle, monocycle',
'upright, upright piano',
'vacuum, vacuum cleaner',
'vase',
'vault',
'velvet',
'vending machine',
'vestment',
'viaduct',
'violin, fiddle',
'volleyball',
'waffle iron',
'wall clock',
'wallet, billfold, notecase, pocketbook',
'wardrobe, closet, press',
'warplane, military plane',
'washbasin, handbasin, washbowl, lavabo, wash-hand basin',
'washer, automatic washer, washing machine',
'water bottle',
'water jug',
'water tower',
'whiskey jug',
'whistle',
'wig',
'window screen',
'window shade',
'Windsor tie',
'wine bottle',
'wing',
'wok',
'wooden spoon',
'wool, woolen, woollen',
'worm fence, snake fence, snake-rail fence, Virginia fence',
'wreck',
'yawl',
'yurt',
'web site, website, internet site, site',
'comic book',
'crossword puzzle, crossword',
'street sign',
'traffic light, traffic signal, stoplight',
'book jacket, dust cover, dust jacket, dust wrapper',
'menu',
'plate',
'guacamole',
'consomme',
'hot pot, hotpot',
'trifle',
'ice cream, icecream',
'ice lolly, lolly, lollipop, popsicle',
'French loaf',
'bagel, beigel',
'pretzel',
'cheeseburger',
'hotdog, hot dog, red hot',
'mashed potato',
'head cabbage',
'broccoli',
'cauliflower',
'zucchini, courgette',
'spaghetti squash',
'acorn squash',
'butternut squash',
'cucumber, cuke',
'artichoke, globe artichoke',
'bell pepper',
'cardoon',
'mushroom',
'Granny Smith',
'strawberry',
'orange',
'lemon',
'fig',
'pineapple, ananas',
'banana',
'jackfruit, jak, jack',
'custard apple',
'pomegranate',
'hay',
'carbonara',
'chocolate sauce, chocolate syrup',
'dough',
'meat loaf, meatloaf',
'pizza, pizza pie',
'potpie',
'burrito',
'red wine',
'espresso',
'cup',
'eggnog',
'alp',
'bubble',
'cliff, drop, drop-off',
'coral reef',
'geyser',
'lakeside, lakeshore',
'promontory, headland, head, foreland',
'sandbar, sand bar',
'seashore, coast, seacoast, sea-coast',
'valley, vale',
'volcano',
'ballplayer, baseball player',
'groom, bridegroom',
'scuba diver',
'rapeseed',
'daisy',
"yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum",
'corn',
'acorn',
'hip, rose hip, rosehip',
'buckeye, horse chestnut, conker',
'coral fungus',
'agaric',
'gyromitra',
'stinkhorn, carrion fungus',
'earthstar',
'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa',
'bolete',
'ear, spike, capitulum',
'toilet tissue, toilet paper, bathroom tissue'
]
def label_to_name(label):
global _lut
return _lut[label]
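# Usage sketch (assuming the table above covers the standard 1000 ImageNet
# classes in order, so the last entry is class 999):
#   label_to_name(999)  # -> 'toilet tissue, toilet paper, bathroom tissue'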
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import PIL
import PIL.Image
import numpy as np
import sys
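# Builds a patch database: samples SAMPLES random DIM x DIM crops from the
# ImageNet training set and stores them as float32 values in [0, 1] in
# OUTPUT_FILE. The resulting quilt_db.npy is presumably the patch bank for an
# image-quilting style preprocessing defense.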
SAMPLES = 1000000
DIM = 5
RESIZE = True
RESIZE_DIM = 300
OUTPUT_FILE = 'quilt_db.npy'
def main(argv):
imagenet_train_dir = argv[1]
assert SAMPLES % 1000 == 0
db = np.zeros((SAMPLES, DIM, DIM, 3), dtype=np.float32)
idx = 0
files = []
for d in os.listdir(imagenet_train_dir):
d = os.path.join(imagenet_train_dir, d)
files.extend(os.path.join(d, i) for i in os.listdir(d) if i.endswith('.JPEG'))
for f in random.sample(files, SAMPLES):
img = load_image(f)
h, w, _ = img.shape
h_start = random.randint(0, h - DIM)
w_start = random.randint(0, w - DIM)
crop = img[h_start:h_start+DIM, w_start:w_start+DIM, :]
db[idx, :, :, :] = crop
idx += 1
if idx % 100 == 0:
print('%.2f%% done' % (100 * (float(idx) / SAMPLES)))
np.save(OUTPUT_FILE, db)
def load_image(path):
image = PIL.Image.open(path)
if RESIZE:
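        # Resize so the longer side equals RESIZE_DIM while preserving the
        # aspect ratio; square images pass through unchanged.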
if image.height > image.width:
image = image.resize((int(float(image.width) / image.height * RESIZE_DIM), RESIZE_DIM))
elif image.width > image.height:
image = image.resize((RESIZE_DIM, int(float(image.height) / image.width * RESIZE_DIM)))
img = np.asarray(image).astype(np.float32) / 255.0
if img.ndim == 2:
img = np.repeat(img[:,:,np.newaxis], repeats=3, axis=2)
if img.shape[2] == 4:
# alpha channel
img = img[:,:,:3]
return img
if __name__ == '__main__':
main(sys.argv)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Wed Jan 23 10:15:27 2019
@author: aamir-mustafa
Implementation Part 2 of Paper:
"Adversarial Defense by Restricting the Hidden Space of Deep Neural Networks"
Here it is not necessary to save the best-performing model (in terms of clean accuracy); the model with the
highest robustness against adversarial attacks is chosen instead.
This code implements Adversarial Training using the FGSM Attack.
"""
#Essential Imports
import os
import sys
import argparse
import datetime
import time
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
from pcl_utils import AverageMeter, Logger
from proximity import Proximity
from contrastive_proximity import Con_Proximity
from resnet_model import * # Imports the ResNet Model
parser = argparse.ArgumentParser("Prototype Conformity Loss Implementation")
parser.add_argument('-j', '--workers', default=4, type=int,
help="number of data loading workers (default: 4)")
parser.add_argument('--train-batch', default=64, type=int, metavar='N',
help='train batchsize')
parser.add_argument('--test-batch', default=100, type=int, metavar='N',
help='test batchsize')
parser.add_argument('--schedule', type=int, nargs='+', default=[142, 230, 360],
help='Decrease learning rate at these epochs.')
parser.add_argument('--lr_model', type=float, default=0.01, help="learning rate for model")
parser.add_argument('--lr_prox', type=float, default=0.5, help="learning rate for Proximity Loss") # as per paper
parser.add_argument('--weight-prox', type=float, default=1, help="weight for Proximity Loss") # as per paper
parser.add_argument('--lr_conprox', type=float, default=0.00001, help="learning rate for Con-Proximity Loss") # as per paper
parser.add_argument('--weight-conprox', type=float, default=0.00001, help="weight for Con-Proximity Loss") # as per paper
parser.add_argument('--max-epoch', type=int, default=500)
parser.add_argument('--gamma', type=float, default=0.1, help="learning rate decay")
parser.add_argument('--eval-freq', type=int, default=10)
parser.add_argument('--print-freq', type=int, default=50)
parser.add_argument('--gpu', type=str, default='0')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--use-cpu', action='store_true')
parser.add_argument('--save-dir', type=str, default='log')
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
mean = [0.4914, 0.4822, 0.4465]
std = [0.2023, 0.1994, 0.2010]
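# In-place per-channel CIFAR-10 (de)normalization helpers, used to move batches
# between model input space and [0, 1] pixel space when crafting attacks.
# Note that both functions modify their argument in place.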
def normalize(t):
t[:, 0, :, :] = (t[:, 0, :, :] - mean[0])/std[0]
t[:, 1, :, :] = (t[:, 1, :, :] - mean[1])/std[1]
t[:, 2, :, :] = (t[:, 2, :, :] - mean[2])/std[2]
return t
def un_normalize(t):
t[:, 0, :, :] = (t[:, 0, :, :] * std[0]) + mean[0]
t[:, 1, :, :] = (t[:, 1, :, :] * std[1]) + mean[1]
t[:, 2, :, :] = (t[:, 2, :, :] * std[2]) + mean[2]
return t
def FGSM(model, criterion, img, label, eps):
adv = img.clone()
adv.requires_grad = True
_,_,_, out= model(adv)
loss = criterion(out, label)
loss.backward()
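    # FGSM step: map the (normalized) input back to [0, 1] pixel space, add
    # eps * sign(grad), and clamp; the caller re-normalizes before use.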
adv.data = un_normalize(adv.data) + eps * adv.grad.sign()
adv.data.clamp_(0.0, 1.0)
adv.grad.data.zero_()
return adv.detach()
def main():
torch.manual_seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
use_gpu = torch.cuda.is_available()
if args.use_cpu: use_gpu = False
sys.stdout = Logger(osp.join(args.save_dir, 'log_' + 'CIFAR-10_PC_Loss_FGSM_AdvTrain' + '.txt'))
if use_gpu:
print("Currently using GPU: {}".format(args.gpu))
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
else:
print("Currently using CPU")
# Data Load
num_classes=10
print('==> Preparing dataset')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
trainset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=True,
download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.train_batch, pin_memory=True,
shuffle=True, num_workers=args.workers)
testset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=False,
download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch, pin_memory=True,
shuffle=False, num_workers=args.workers)
# Loading the Model
model = resnet(num_classes=num_classes,depth=110)
    model = nn.DataParallel(model)
    if use_gpu:
        model = model.cuda()
criterion_xent = nn.CrossEntropyLoss()
criterion_prox_1024 = Proximity(num_classes=num_classes, feat_dim=1024, use_gpu=use_gpu)
criterion_prox_256 = Proximity(num_classes=num_classes, feat_dim=256, use_gpu=use_gpu)
criterion_conprox_1024 = Con_Proximity(num_classes=num_classes, feat_dim=1024, use_gpu=use_gpu)
criterion_conprox_256 = Con_Proximity(num_classes=num_classes, feat_dim=256, use_gpu=use_gpu)
optimizer_model = torch.optim.SGD(model.parameters(), lr=args.lr_model, weight_decay=1e-04, momentum=0.9)
optimizer_prox_1024 = torch.optim.SGD(criterion_prox_1024.parameters(), lr=args.lr_prox)
optimizer_prox_256 = torch.optim.SGD(criterion_prox_256.parameters(), lr=args.lr_prox)
optimizer_conprox_1024 = torch.optim.SGD(criterion_conprox_1024.parameters(), lr=args.lr_conprox)
optimizer_conprox_256 = torch.optim.SGD(criterion_conprox_256.parameters(), lr=args.lr_conprox)
filename= 'Models_Softmax/CIFAR10_Softmax.pth.tar'
checkpoint = torch.load(filename)
model.load_state_dict(checkpoint['state_dict'])
    optimizer_model.load_state_dict(checkpoint['optimizer_model'])
start_time = time.time()
for epoch in range(args.max_epoch):
adjust_learning_rate(optimizer_model, epoch)
adjust_learning_rate_prox(optimizer_prox_1024, epoch)
adjust_learning_rate_prox(optimizer_prox_256, epoch)
adjust_learning_rate_conprox(optimizer_conprox_1024, epoch)
adjust_learning_rate_conprox(optimizer_conprox_256, epoch)
print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
train(model, criterion_xent, criterion_prox_1024, criterion_prox_256,
criterion_conprox_1024, criterion_conprox_256,
optimizer_model, optimizer_prox_1024, optimizer_prox_256,
optimizer_conprox_1024, optimizer_conprox_256,
trainloader, use_gpu, num_classes, epoch)
if args.eval_freq > 0 and (epoch+1) % args.eval_freq == 0 or (epoch+1) == args.max_epoch:
print("==> Test") #Tests after every 10 epochs
acc, err = test(model, testloader, use_gpu, num_classes, epoch)
print("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))
state_ = {'epoch': epoch + 1, 'state_dict': model.state_dict(),
'optimizer_model': optimizer_model.state_dict(), 'optimizer_prox_1024': optimizer_prox_1024.state_dict(),
'optimizer_prox_256': optimizer_prox_256.state_dict(), 'optimizer_conprox_1024': optimizer_conprox_1024.state_dict(),
'optimizer_conprox_256': optimizer_conprox_256.state_dict(),}
torch.save(state_, 'Models_PCL_AdvTrain_FGSM/CIFAR10_PCL_AdvTrain_FGSM.pth.tar')
elapsed = round(time.time() - start_time)
elapsed = str(datetime.timedelta(seconds=elapsed))
print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
def train(model, criterion_xent, criterion_prox_1024, criterion_prox_256,
criterion_conprox_1024, criterion_conprox_256,
optimizer_model, optimizer_prox_1024, optimizer_prox_256,
optimizer_conprox_1024, optimizer_conprox_256,
trainloader, use_gpu, num_classes, epoch):
# model.train()
xent_losses = AverageMeter() #Computes and stores the average and current value
prox_losses_1024 = AverageMeter()
prox_losses_256= AverageMeter()
conprox_losses_1024 = AverageMeter()
conprox_losses_256= AverageMeter()
losses = AverageMeter()
#Batchwise training
for batch_idx, (data, labels) in enumerate(trainloader):
if use_gpu:
data, labels = data.cuda(), labels.cuda()
model.eval()
eps= np.random.uniform(0.02,0.05)
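        # Per-batch epsilon drawn uniformly from [0.02, 0.05] (roughly 5/255 to 13/255).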
adv = FGSM(model, criterion_xent, data, labels, eps=eps) # Generates Batch-wise Adv Images
        adv.requires_grad = False
        adv = normalize(adv)
        if use_gpu:
            adv = adv.cuda()
true_labels_adv= labels
data= torch.cat((data, adv),0)
labels= torch.cat((labels, true_labels_adv))
model.train()
feats128, feats256, feats1024, outputs = model(data)
loss_xent = criterion_xent(outputs, labels)
loss_prox_1024 = criterion_prox_1024(feats1024, labels)
loss_prox_256= criterion_prox_256(feats256, labels)
loss_conprox_1024 = criterion_conprox_1024(feats1024, labels)
loss_conprox_256= criterion_conprox_256(feats256, labels)
loss_prox_1024 *= args.weight_prox
loss_prox_256 *= args.weight_prox
loss_conprox_1024 *= args.weight_conprox
loss_conprox_256 *= args.weight_conprox
loss = loss_xent + loss_prox_1024 + loss_prox_256 - loss_conprox_1024 - loss_conprox_256 # total loss
optimizer_model.zero_grad()
optimizer_prox_1024.zero_grad()
optimizer_prox_256.zero_grad()
optimizer_conprox_1024.zero_grad()
optimizer_conprox_256.zero_grad()
loss.backward()
optimizer_model.step()
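        # The prox/conprox losses were scaled by their weights above; undo that
        # scaling on the center gradients so the center updates themselves use
        # the unweighted gradient (as in common center-loss implementations).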
for param in criterion_prox_1024.parameters():
param.grad.data *= (1. / args.weight_prox)
optimizer_prox_1024.step()
for param in criterion_prox_256.parameters():
param.grad.data *= (1. / args.weight_prox)
optimizer_prox_256.step()
for param in criterion_conprox_1024.parameters():
param.grad.data *= (1. / args.weight_conprox)
optimizer_conprox_1024.step()
for param in criterion_conprox_256.parameters():
param.grad.data *= (1. / args.weight_conprox)
optimizer_conprox_256.step()
losses.update(loss.item(), labels.size(0))
xent_losses.update(loss_xent.item(), labels.size(0))
prox_losses_1024.update(loss_prox_1024.item(), labels.size(0))
prox_losses_256.update(loss_prox_256.item(), labels.size(0))
conprox_losses_1024.update(loss_conprox_1024.item(), labels.size(0))
conprox_losses_256.update(loss_conprox_256.item(), labels.size(0))
if (batch_idx+1) % args.print_freq == 0:
print("Batch {}/{}\t Loss {:.6f} ({:.6f}) XentLoss {:.6f} ({:.6f}) ProxLoss_1024 {:.6f} ({:.6f}) ProxLoss_256 {:.6f} ({:.6f}) \n ConProxLoss_1024 {:.6f} ({:.6f}) ConProxLoss_256 {:.6f} ({:.6f}) " \
.format(batch_idx+1, len(trainloader), losses.val, losses.avg, xent_losses.val, xent_losses.avg,
prox_losses_1024.val, prox_losses_1024.avg, prox_losses_256.val, prox_losses_256.avg ,
conprox_losses_1024.val, conprox_losses_1024.avg, conprox_losses_256.val,
conprox_losses_256.avg ))
def test(model, testloader, use_gpu, num_classes, epoch):
model.eval()
correct, total = 0, 0
with torch.no_grad():
for data, labels in testloader:
            if use_gpu:
                data, labels = data.cuda(), labels.cuda()
feats128, feats256, feats1024, outputs = model(data)
predictions = outputs.data.max(1)[1]
total += labels.size(0)
correct += (predictions == labels.data).sum()
acc = correct * 100. / total
err = 100. - acc
return acc, err
def adjust_learning_rate(optimizer, epoch):
global state
if epoch in args.schedule:
state['lr_model'] *= args.gamma
for param_group in optimizer.param_groups:
            param_group['lr'] = state['lr_model']
def adjust_learning_rate_prox(optimizer, epoch):
global state
if epoch in args.schedule:
state['lr_prox'] *= args.gamma
for param_group in optimizer.param_groups:
            param_group['lr'] = state['lr_prox']
def adjust_learning_rate_conprox(optimizer, epoch):
global state
if epoch in args.schedule:
state['lr_conprox'] *= args.gamma
for param_group in optimizer.param_groups:
            param_group['lr'] = state['lr_conprox']
if __name__ == '__main__':
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import errno
import shutil
import os.path as osp
import torch
def mkdir_if_missing(directory):
if not osp.exists(directory):
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
class AverageMeter(object):
"""Computes and stores the average and current value.
Code imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
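# Usage sketch:
#   meter = AverageMeter()
#   meter.update(loss.item(), n=batch_size)  # record one batch
#   meter.val, meter.avg                     # last value, running average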
def save_checkpoint(state, is_best, fpath='checkpoint.pth.tar'):
mkdir_if_missing(osp.dirname(fpath))
torch.save(state, fpath)
if is_best:
shutil.copy(fpath, osp.join(osp.dirname(fpath), 'best_model.pth.tar'))
class Logger(object):
"""
Write console output to external text file.
Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py.
"""
def __init__(self, fpath=None):
self.console = sys.stdout
self.file = None
if fpath is not None:
mkdir_if_missing(os.path.dirname(fpath))
self.file = open(fpath, 'w')
def __del__(self):
self.close()
def __enter__(self):
        return self
def __exit__(self, *args):
self.close()
def write(self, msg):
self.console.write(msg)
if self.file is not None:
self.file.write(msg)
def flush(self):
self.console.flush()
if self.file is not None:
self.file.flush()
os.fsync(self.file.fileno())
def close(self):
self.console.close()
if self.file is not None:
self.file.close() |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Wed Jan 23 10:15:27 2019
@author: aamir-mustafa
Implementation Part 2 of Paper:
"Adversarial Defense by Restricting the Hidden Space of Deep Neural Networks"
Here it is not necessary to save the best-performing model (in terms of clean accuracy); the model with the
highest robustness against adversarial attacks is chosen instead.
This code implements Adversarial Training using the PGD Attack.
"""
#Essential Imports
import os
import sys
import argparse
import datetime
import time
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
from pcl_utils import AverageMeter, Logger
from proximity import Proximity
from contrastive_proximity import Con_Proximity
from resnet_model import * # Imports the ResNet Model
parser = argparse.ArgumentParser("Prototype Conformity Loss Implementation")
parser.add_argument('-j', '--workers', default=4, type=int,
help="number of data loading workers (default: 4)")
parser.add_argument('--train-batch', default=64, type=int, metavar='N',
help='train batchsize')
parser.add_argument('--test-batch', default=100, type=int, metavar='N',
help='test batchsize')
parser.add_argument('--schedule', type=int, nargs='+', default=[142, 230, 360],
help='Decrease learning rate at these epochs.')
parser.add_argument('--lr_model', type=float, default=0.01, help="learning rate for model")
parser.add_argument('--lr_prox', type=float, default=0.5, help="learning rate for Proximity Loss") # as per paper
parser.add_argument('--weight-prox', type=float, default=1, help="weight for Proximity Loss") # as per paper
parser.add_argument('--lr_conprox', type=float, default=0.00001, help="learning rate for Con-Proximity Loss") # as per paper
parser.add_argument('--weight-conprox', type=float, default=0.00001, help="weight for Con-Proximity Loss") # as per paper
parser.add_argument('--max-epoch', type=int, default=500)
parser.add_argument('--gamma', type=float, default=0.1, help="learning rate decay")
parser.add_argument('--eval-freq', type=int, default=10)
parser.add_argument('--print-freq', type=int, default=50)
parser.add_argument('--gpu', type=str, default='0')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--use-cpu', action='store_true')
parser.add_argument('--save-dir', type=str, default='log')
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
mean = [0.4914, 0.4822, 0.4465]
std = [0.2023, 0.1994, 0.2010]
def normalize(t):
t[:, 0, :, :] = (t[:, 0, :, :] - mean[0])/std[0]
t[:, 1, :, :] = (t[:, 1, :, :] - mean[1])/std[1]
t[:, 2, :, :] = (t[:, 2, :, :] - mean[2])/std[2]
return t
def un_normalize(t):
t[:, 0, :, :] = (t[:, 0, :, :] * std[0]) + mean[0]
t[:, 1, :, :] = (t[:, 1, :, :] * std[1]) + mean[1]
t[:, 2, :, :] = (t[:, 2, :, :] * std[2]) + mean[2]
return t
def attack(model, criterion, img, label, eps, attack_type, iters):
    adv = img.detach().clone()
    adv.requires_grad = True
    if attack_type == 'fgsm':
        iterations = 1
    else:
        iterations = iters
    if attack_type == 'pgd':
        step = 2 / 255
    else:
        step = eps / iterations
    noise = 0
    # Keep a pixel-space copy of the clean images for the eps-ball projection.
    orig = un_normalize(img.detach().clone())
    for j in range(iterations):
        _, _, _, out_adv = model(adv.clone())
        loss = criterion(out_adv, label)
        loss.backward()
        if attack_type == 'mim':
            # MIM: normalize the gradient by its mean absolute value and
            # accumulate it as momentum across iterations.
            adv_mean = torch.mean(torch.abs(adv.grad), dim=1, keepdim=True)
            adv_mean = torch.mean(torch.abs(adv_mean), dim=2, keepdim=True)
            adv_mean = torch.mean(torch.abs(adv_mean), dim=3, keepdim=True)
            adv.grad = adv.grad / adv_mean
            noise = noise + adv.grad
        else:
            noise = adv.grad
        # Gradient-sign step, taken in [0, 1] pixel space.
        adv.data = un_normalize(adv.data) + step * noise.sign()
        if attack_type == 'pgd':
            # Project back onto the L-inf eps-ball around the clean images.
            adv.data = torch.where(adv.data > orig + eps, orig + eps, adv.data)
            adv.data = torch.where(adv.data < orig - eps, orig - eps, adv.data)
        adv.data.clamp_(0.0, 1.0)
        # Re-normalize so the next forward pass sees model-space inputs.
        adv.data = normalize(adv.data)
        adv.grad.data.zero_()
    # Return the adversarial examples in pixel space; the caller re-normalizes.
    return un_normalize(adv.detach())
def main():
torch.manual_seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
use_gpu = torch.cuda.is_available()
if args.use_cpu: use_gpu = False
sys.stdout = Logger(osp.join(args.save_dir, 'log_' + 'CIFAR-10_PC_Loss_PGD_AdvTrain' + '.txt'))
if use_gpu:
print("Currently using GPU: {}".format(args.gpu))
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
else:
print("Currently using CPU")
# Data Load
num_classes=10
print('==> Preparing dataset')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
trainset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=True,
download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.train_batch, pin_memory=True,
shuffle=True, num_workers=args.workers)
testset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=False,
download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch, pin_memory=True,
shuffle=False, num_workers=args.workers)
# Loading the Model
model = resnet(num_classes=num_classes,depth=110)
    model = nn.DataParallel(model)
    if use_gpu:
        model = model.cuda()
criterion_xent = nn.CrossEntropyLoss()
criterion_prox_1024 = Proximity(num_classes=num_classes, feat_dim=1024, use_gpu=use_gpu)
criterion_prox_256 = Proximity(num_classes=num_classes, feat_dim=256, use_gpu=use_gpu)
criterion_conprox_1024 = Con_Proximity(num_classes=num_classes, feat_dim=1024, use_gpu=use_gpu)
criterion_conprox_256 = Con_Proximity(num_classes=num_classes, feat_dim=256, use_gpu=use_gpu)
optimizer_model = torch.optim.SGD(model.parameters(), lr=args.lr_model, weight_decay=1e-04, momentum=0.9)
optimizer_prox_1024 = torch.optim.SGD(criterion_prox_1024.parameters(), lr=args.lr_prox)
optimizer_prox_256 = torch.optim.SGD(criterion_prox_256.parameters(), lr=args.lr_prox)
optimizer_conprox_1024 = torch.optim.SGD(criterion_conprox_1024.parameters(), lr=args.lr_conprox)
optimizer_conprox_256 = torch.optim.SGD(criterion_conprox_256.parameters(), lr=args.lr_conprox)
filename= 'Models_Softmax/CIFAR10_Softmax.pth.tar'
checkpoint = torch.load(filename)
model.load_state_dict(checkpoint['state_dict'])
    optimizer_model.load_state_dict(checkpoint['optimizer_model'])
start_time = time.time()
for epoch in range(args.max_epoch):
adjust_learning_rate(optimizer_model, epoch)
adjust_learning_rate_prox(optimizer_prox_1024, epoch)
adjust_learning_rate_prox(optimizer_prox_256, epoch)
adjust_learning_rate_conprox(optimizer_conprox_1024, epoch)
adjust_learning_rate_conprox(optimizer_conprox_256, epoch)
print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
train(model, criterion_xent, criterion_prox_1024, criterion_prox_256,
criterion_conprox_1024, criterion_conprox_256,
optimizer_model, optimizer_prox_1024, optimizer_prox_256,
optimizer_conprox_1024, optimizer_conprox_256,
trainloader, use_gpu, num_classes, epoch)
if args.eval_freq > 0 and (epoch+1) % args.eval_freq == 0 or (epoch+1) == args.max_epoch:
print("==> Test") #Tests after every 10 epochs
acc, err = test(model, testloader, use_gpu, num_classes, epoch)
print("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))
state_ = {'epoch': epoch + 1, 'state_dict': model.state_dict(),
'optimizer_model': optimizer_model.state_dict(), 'optimizer_prox_1024': optimizer_prox_1024.state_dict(),
'optimizer_prox_256': optimizer_prox_256.state_dict(), 'optimizer_conprox_1024': optimizer_conprox_1024.state_dict(),
'optimizer_conprox_256': optimizer_conprox_256.state_dict(),}
torch.save(state_, 'Models_PCL_AdvTrain_PGD/CIFAR10_PCL_AdvTrain_PGD.pth.tar')
elapsed = round(time.time() - start_time)
elapsed = str(datetime.timedelta(seconds=elapsed))
print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
def train(model, criterion_xent, criterion_prox_1024, criterion_prox_256,
criterion_conprox_1024, criterion_conprox_256,
optimizer_model, optimizer_prox_1024, optimizer_prox_256,
optimizer_conprox_1024, optimizer_conprox_256,
trainloader, use_gpu, num_classes, epoch):
# model.train()
xent_losses = AverageMeter() #Computes and stores the average and current value
prox_losses_1024 = AverageMeter()
prox_losses_256= AverageMeter()
conprox_losses_1024 = AverageMeter()
conprox_losses_256= AverageMeter()
losses = AverageMeter()
#Batchwise training
for batch_idx, (data, labels) in enumerate(trainloader):
if use_gpu:
data, labels = data.cuda(), labels.cuda()
model.eval()
eps= np.random.uniform(0.02,0.05)
adv = attack(model, criterion_xent, data, labels, eps=eps, attack_type='pgd', iters= 10) # Generates Batch-wise Adv Images
        adv.requires_grad = False
        adv = normalize(adv)
        if use_gpu:
            adv = adv.cuda()
true_labels_adv= labels
data= torch.cat((data, adv),0)
labels= torch.cat((labels, true_labels_adv))
model.train()
feats128, feats256, feats1024, outputs = model(data)
loss_xent = criterion_xent(outputs, labels)
loss_prox_1024 = criterion_prox_1024(feats1024, labels)
loss_prox_256= criterion_prox_256(feats256, labels)
loss_conprox_1024 = criterion_conprox_1024(feats1024, labels)
loss_conprox_256= criterion_conprox_256(feats256, labels)
loss_prox_1024 *= args.weight_prox
loss_prox_256 *= args.weight_prox
loss_conprox_1024 *= args.weight_conprox
loss_conprox_256 *= args.weight_conprox
loss = loss_xent + loss_prox_1024 + loss_prox_256 - loss_conprox_1024 - loss_conprox_256 # total loss
optimizer_model.zero_grad()
optimizer_prox_1024.zero_grad()
optimizer_prox_256.zero_grad()
optimizer_conprox_1024.zero_grad()
optimizer_conprox_256.zero_grad()
loss.backward()
optimizer_model.step()
for param in criterion_prox_1024.parameters():
param.grad.data *= (1. / args.weight_prox)
optimizer_prox_1024.step()
for param in criterion_prox_256.parameters():
param.grad.data *= (1. / args.weight_prox)
optimizer_prox_256.step()
for param in criterion_conprox_1024.parameters():
param.grad.data *= (1. / args.weight_conprox)
optimizer_conprox_1024.step()
for param in criterion_conprox_256.parameters():
param.grad.data *= (1. / args.weight_conprox)
optimizer_conprox_256.step()
losses.update(loss.item(), labels.size(0))
xent_losses.update(loss_xent.item(), labels.size(0))
prox_losses_1024.update(loss_prox_1024.item(), labels.size(0))
prox_losses_256.update(loss_prox_256.item(), labels.size(0))
conprox_losses_1024.update(loss_conprox_1024.item(), labels.size(0))
conprox_losses_256.update(loss_conprox_256.item(), labels.size(0))
if (batch_idx+1) % args.print_freq == 0:
print("Batch {}/{}\t Loss {:.6f} ({:.6f}) XentLoss {:.6f} ({:.6f}) ProxLoss_1024 {:.6f} ({:.6f}) ProxLoss_256 {:.6f} ({:.6f}) \n ConProxLoss_1024 {:.6f} ({:.6f}) ConProxLoss_256 {:.6f} ({:.6f}) " \
.format(batch_idx+1, len(trainloader), losses.val, losses.avg, xent_losses.val, xent_losses.avg,
prox_losses_1024.val, prox_losses_1024.avg, prox_losses_256.val, prox_losses_256.avg ,
conprox_losses_1024.val, conprox_losses_1024.avg, conprox_losses_256.val,
conprox_losses_256.avg ))
def test(model, testloader, use_gpu, num_classes, epoch):
model.eval()
correct, total = 0, 0
with torch.no_grad():
for data, labels in testloader:
            if use_gpu:
                data, labels = data.cuda(), labels.cuda()
feats128, feats256, feats1024, outputs = model(data)
predictions = outputs.data.max(1)[1]
total += labels.size(0)
correct += (predictions == labels.data).sum()
acc = correct * 100. / total
err = 100. - acc
return acc, err
def adjust_learning_rate(optimizer, epoch):
global state
if epoch in args.schedule:
state['lr_model'] *= args.gamma
for param_group in optimizer.param_groups:
            param_group['lr'] = state['lr_model']
def adjust_learning_rate_prox(optimizer, epoch):
global state
if epoch in args.schedule:
state['lr_prox'] *= args.gamma
for param_group in optimizer.param_groups:
            param_group['lr'] = state['lr_prox']
def adjust_learning_rate_conprox(optimizer, epoch):
global state
if epoch in args.schedule:
state['lr_conprox'] *= args.gamma
for param_group in optimizer.param_groups:
            param_group['lr'] = state['lr_conprox']
if __name__ == '__main__':
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Wed Jan 23 10:15:27 2019
@author: aamir-mustafa
Implementation Part 2 of Paper:
"Adversarial Defense by Restricting the Hidden Space of Deep Neural Networks"
Here it is not necessary to save the best-performing model (in terms of clean accuracy); the model with the
highest robustness against adversarial attacks is chosen instead.
"""
#Essential Imports
import os
import sys
import argparse
import datetime
import time
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
from pcl_utils import AverageMeter, Logger
from proximity import Proximity
from contrastive_proximity import Con_Proximity
from resnet_model import * # Imports the ResNet Model
parser = argparse.ArgumentParser("Prototype Conformity Loss Implementation")
parser.add_argument('-j', '--workers', default=4, type=int,
help="number of data loading workers (default: 4)")
parser.add_argument('--train-batch', default=128, type=int, metavar='N',
help='train batchsize')
parser.add_argument('--test-batch', default=100, type=int, metavar='N',
help='test batchsize')
parser.add_argument('--schedule', type=int, nargs='+', default=[142, 230, 360],
help='Decrease learning rate at these epochs.')
parser.add_argument('--lr_model', type=float, default=0.01, help="learning rate for CE Loss")
parser.add_argument('--lr_prox', type=float, default=0.5, help="learning rate for Proximity Loss") # as per paper
parser.add_argument('--weight-prox', type=float, default=1, help="weight for Proximity Loss") # as per paper
parser.add_argument('--lr_conprox', type=float, default=0.0001, help="learning rate for Con-Proximity Loss") # as per paper
parser.add_argument('--weight-conprox', type=float, default=0.0001, help="weight for Con-Proximity Loss") # as per paper
parser.add_argument('--max-epoch', type=int, default=400)
parser.add_argument('--gamma', type=float, default=0.1, help="learning rate decay")
parser.add_argument('--eval-freq', type=int, default=10)
parser.add_argument('--print-freq', type=int, default=50)
parser.add_argument('--gpu', type=str, default='0')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--use-cpu', action='store_true')
parser.add_argument('--save-dir', type=str, default='log')
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
def main():
torch.manual_seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
use_gpu = torch.cuda.is_available()
if args.use_cpu: use_gpu = False
sys.stdout = Logger(osp.join(args.save_dir, 'log_' + 'CIFAR-10_PC_Loss' + '.txt'))
if use_gpu:
print("Currently using GPU: {}".format(args.gpu))
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
else:
print("Currently using CPU")
# Data Load
num_classes=10
print('==> Preparing dataset')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
trainset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=True,
download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.train_batch, pin_memory=True,
shuffle=True, num_workers=args.workers)
testset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=False,
download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch, pin_memory=True,
shuffle=False, num_workers=args.workers)
# Loading the Model
model = resnet(num_classes=num_classes,depth=110)
    model = nn.DataParallel(model)
    if use_gpu:
        model = model.cuda()
criterion_xent = nn.CrossEntropyLoss()
criterion_prox_1024 = Proximity(num_classes=num_classes, feat_dim=1024, use_gpu=use_gpu)
criterion_prox_256 = Proximity(num_classes=num_classes, feat_dim=256, use_gpu=use_gpu)
criterion_conprox_1024 = Con_Proximity(num_classes=num_classes, feat_dim=1024, use_gpu=use_gpu)
criterion_conprox_256 = Con_Proximity(num_classes=num_classes, feat_dim=256, use_gpu=use_gpu)
optimizer_model = torch.optim.SGD(model.parameters(), lr=args.lr_model, weight_decay=1e-04, momentum=0.9)
optimizer_prox_1024 = torch.optim.SGD(criterion_prox_1024.parameters(), lr=args.lr_prox)
optimizer_prox_256 = torch.optim.SGD(criterion_prox_256.parameters(), lr=args.lr_prox)
optimizer_conprox_1024 = torch.optim.SGD(criterion_conprox_1024.parameters(), lr=args.lr_conprox)
optimizer_conprox_256 = torch.optim.SGD(criterion_conprox_256.parameters(), lr=args.lr_conprox)
filename= 'Models_Softmax/CIFAR10_Softmax.pth.tar'
checkpoint = torch.load(filename)
model.load_state_dict(checkpoint['state_dict'])
    optimizer_model.load_state_dict(checkpoint['optimizer_model'])
start_time = time.time()
for epoch in range(args.max_epoch):
adjust_learning_rate(optimizer_model, epoch)
adjust_learning_rate_prox(optimizer_prox_1024, epoch)
adjust_learning_rate_prox(optimizer_prox_256, epoch)
adjust_learning_rate_conprox(optimizer_conprox_1024, epoch)
adjust_learning_rate_conprox(optimizer_conprox_256, epoch)
print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
train(model, criterion_xent, criterion_prox_1024, criterion_prox_256,
criterion_conprox_1024, criterion_conprox_256,
optimizer_model, optimizer_prox_1024, optimizer_prox_256,
optimizer_conprox_1024, optimizer_conprox_256,
trainloader, use_gpu, num_classes, epoch)
if args.eval_freq > 0 and (epoch+1) % args.eval_freq == 0 or (epoch+1) == args.max_epoch:
print("==> Test") #Tests after every 10 epochs
acc, err = test(model, testloader, use_gpu, num_classes, epoch)
print("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))
state_ = {'epoch': epoch + 1, 'state_dict': model.state_dict(),
'optimizer_model': optimizer_model.state_dict(), 'optimizer_prox_1024': optimizer_prox_1024.state_dict(),
'optimizer_prox_256': optimizer_prox_256.state_dict(), 'optimizer_conprox_1024': optimizer_conprox_1024.state_dict(),
'optimizer_conprox_256': optimizer_conprox_256.state_dict(),}
torch.save(state_, 'Models_PCL/CIFAR10_PCL.pth.tar')
elapsed = round(time.time() - start_time)
elapsed = str(datetime.timedelta(seconds=elapsed))
print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
def train(model, criterion_xent, criterion_prox_1024, criterion_prox_256,
criterion_conprox_1024, criterion_conprox_256,
optimizer_model, optimizer_prox_1024, optimizer_prox_256,
optimizer_conprox_1024, optimizer_conprox_256,
trainloader, use_gpu, num_classes, epoch):
model.train()
xent_losses = AverageMeter() #Computes and stores the average and current value
prox_losses_1024 = AverageMeter()
prox_losses_256= AverageMeter()
conprox_losses_1024 = AverageMeter()
conprox_losses_256= AverageMeter()
losses = AverageMeter()
#Batchwise training
for batch_idx, (data, labels) in enumerate(trainloader):
if use_gpu:
data, labels = data.cuda(), labels.cuda()
feats128, feats256, feats1024, outputs = model(data)
loss_xent = criterion_xent(outputs, labels)
loss_prox_1024 = criterion_prox_1024(feats1024, labels)
loss_prox_256= criterion_prox_256(feats256, labels)
loss_conprox_1024 = criterion_conprox_1024(feats1024, labels)
loss_conprox_256= criterion_conprox_256(feats256, labels)
loss_prox_1024 *= args.weight_prox
loss_prox_256 *= args.weight_prox
loss_conprox_1024 *= args.weight_conprox
loss_conprox_256 *= args.weight_conprox
loss = loss_xent + loss_prox_1024 + loss_prox_256 - loss_conprox_1024 - loss_conprox_256 # total loss
optimizer_model.zero_grad()
optimizer_prox_1024.zero_grad()
optimizer_prox_256.zero_grad()
optimizer_conprox_1024.zero_grad()
optimizer_conprox_256.zero_grad()
loss.backward()
optimizer_model.step()
for param in criterion_prox_1024.parameters():
param.grad.data *= (1. / args.weight_prox)
optimizer_prox_1024.step()
for param in criterion_prox_256.parameters():
param.grad.data *= (1. / args.weight_prox)
optimizer_prox_256.step()
for param in criterion_conprox_1024.parameters():
param.grad.data *= (1. / args.weight_conprox)
optimizer_conprox_1024.step()
for param in criterion_conprox_256.parameters():
param.grad.data *= (1. / args.weight_conprox)
optimizer_conprox_256.step()
losses.update(loss.item(), labels.size(0))
xent_losses.update(loss_xent.item(), labels.size(0))
prox_losses_1024.update(loss_prox_1024.item(), labels.size(0))
prox_losses_256.update(loss_prox_256.item(), labels.size(0))
conprox_losses_1024.update(loss_conprox_1024.item(), labels.size(0))
conprox_losses_256.update(loss_conprox_256.item(), labels.size(0))
if (batch_idx+1) % args.print_freq == 0:
print("Batch {}/{}\t Loss {:.6f} ({:.6f}) XentLoss {:.6f} ({:.6f}) ProxLoss_1024 {:.6f} ({:.6f}) ProxLoss_256 {:.6f} ({:.6f}) \n ConProxLoss_1024 {:.6f} ({:.6f}) ConProxLoss_256 {:.6f} ({:.6f}) " \
.format(batch_idx+1, len(trainloader), losses.val, losses.avg, xent_losses.val, xent_losses.avg,
prox_losses_1024.val, prox_losses_1024.avg, prox_losses_256.val, prox_losses_256.avg ,
conprox_losses_1024.val, conprox_losses_1024.avg, conprox_losses_256.val,
conprox_losses_256.avg ))
def test(model, testloader, use_gpu, num_classes, epoch):
model.eval()
correct, total = 0, 0
with torch.no_grad():
for data, labels in testloader:
            if use_gpu:
                data, labels = data.cuda(), labels.cuda()
feats128, feats256, feats1024, outputs = model(data)
predictions = outputs.data.max(1)[1]
total += labels.size(0)
correct += (predictions == labels.data).sum()
acc = correct * 100. / total
err = 100. - acc
return acc, err
def adjust_learning_rate(optimizer, epoch):
global state
if epoch in args.schedule:
state['lr_model'] *= args.gamma
for param_group in optimizer.param_groups:
            param_group['lr'] = state['lr_model']
def adjust_learning_rate_prox(optimizer, epoch):
global state
if epoch in args.schedule:
state['lr_prox'] *= args.gamma
for param_group in optimizer.param_groups:
            param_group['lr'] = state['lr_prox']
def adjust_learning_rate_conprox(optimizer, epoch):
global state
if epoch in args.schedule:
state['lr_conprox'] *= args.gamma
for param_group in optimizer.param_groups:
            param_group['lr'] = state['lr_conprox']
if __name__ == '__main__':
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Tue Apr 2 14:21:30 2019
@author: aamir-mustafa
"""
import torch.nn as nn
import math
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x): # (conv-bn-relu) x 3 times
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
        out += residual  # residual connection (residual is downsampled when shapes differ)
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, depth, num_classes=10):
super(ResNet, self).__init__()
# Model type specifies number of layers for CIFAR-10 model
assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
n = (depth - 2) // 6
block = Bottleneck if depth >=44 else BasicBlock
self.inplanes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 16, n)
self.layer2 = self._make_layer(block, 32, n, stride=2)
self.layer3 = self._make_layer(block, 64, n, stride=2)
self.avgpool = nn.AvgPool2d(8)
self.maxpool2= nn.MaxPool2d(16)
self.fc = nn.Linear(64 * block.expansion, 1024)
self.fcf = nn.Linear(1024,num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x, features_only=False, features_and_logits=False):
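        # Computes intermediate representations alongside the logits:
        # m (128-d, layer2 + max-pool), z (256-d, layer3 + avg-pool),
        # x (1024-d fc embedding), y (logits). By default all four are
        # returned; features_only yields just z, features_and_logits (z, y).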
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
m = self.maxpool2(x)
m = m.view(m.size(0), -1) # 128 dimensional
x = self.layer3(x)
x = self.avgpool(x)
z = x.view(x.size(0), -1) # 256 dimensional
x = self.fc(z) # 1024 dimensional
y = self.fcf(x) # num_classes dimensional
if features_only:
return z
elif features_and_logits:
return z, y
else:
return m, z, x, y
def resnet(**kwargs):
"""
Constructs a ResNet model.
"""
return ResNet(**kwargs) |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
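# Con_Proximity ("contrastive proximity") keeps a learnable center per class
# and returns the mean distance from each feature to the centers of all *other*
# classes. The training scripts subtract this term from the total loss, pushing
# features away from foreign class centers.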
class Con_Proximity(nn.Module):
def __init__(self, num_classes=100, feat_dim=1024, use_gpu=True):
super(Con_Proximity, self).__init__()
self.num_classes = num_classes
self.feat_dim = feat_dim
self.use_gpu = use_gpu
if self.use_gpu:
            self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda())  # (num_classes x feat_dim) learnable class centers
else:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))
def forward(self, x, labels):
batch_size = x.size(0)
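        # Squared Euclidean distances between features and centers via the
        # expansion ||x||^2 + ||c||^2 - 2 * x @ c^T.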
distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
        distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)
classes = torch.arange(self.num_classes).long()
if self.use_gpu: classes = classes.cuda()
labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
mask = labels.eq(classes.expand(batch_size, self.num_classes))
dist = []
        for i in range(batch_size):
            # Distances to the centers of all *other* classes
            # (complement of the class mask).
            kk = ~mask[i]
            value = distmat[i][kk]
            value = value.clamp(min=1e-12, max=1e+12)  # for numerical stability
            dist.append(value)
dist = torch.cat(dist)
loss = dist.mean()
return loss
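# Minimal usage sketch (illustrative only; batch size and labels are made up):
if __name__ == "__main__":
    _con_prox = Con_Proximity(num_classes=10, feat_dim=1024, use_gpu=False)
    _feats = torch.randn(8, 1024)
    _labels = torch.randint(0, 10, (8,))
    print(_con_prox(_feats, _labels))  # mean distance to all *other* class centers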
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Sun Mar 24 17:51:08 2019
@author: aamir-mustafa
"""
import inspect
import os
import sys
import warnings
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import utils
from resnet_model import * # Imports the ResNet Model
"""
Adversarial Attack Options: fgsm, bim, mim, pgd
"""
warnings.simplefilter('once', RuntimeWarning)
currentdir = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe())))
grandparentdir = os.path.dirname(os.path.dirname(currentdir))
sys.path.insert(0, grandparentdir)
import active_tests.decision_boundary_binarization
from attacks.autopgd import auto_pgd
from attacks.fab import fab
from functools import partial
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--attack", choices=("autopgd", "autopgd2", "fab", "fgsm", "bim", "mim", "pgd"), default="pgd")
parser.add_argument("--baseline", action="store_true")
parser.add_argument("--binarization-test", action="store_true")
parser.add_argument("--num-samples-test", type=int, default=512)
parser.add_argument('--n-inner-points',
default=49,
type=int)
parser.add_argument('--n-boundary-points',
default=10,
type=int)
parser.add_argument("--epsilon", type=int, default=8)
parser.add_argument("--use-autopgd-boundary-adversarials", action="store_true")
parser.add_argument("--use-autoattack", action="store_true")
parser.add_argument("--sample-from-corners", action="store_true")
parser.add_argument("--decision-boundary-closeness", type=float, default=0.999)
args = parser.parse_args()
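# Example invocation (illustrative; the script filename is a placeholder):
#   python pcl_eval.py --attack pgd --binarization-test --num-samples-test 512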
num_classes = 10
model = resnet(num_classes=num_classes, depth=110)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = nn.DataParallel(model).to(device)
# Loading Trained Model
if args.baseline:
filename = 'checkpoints/pcl_defense_rn110_softmax_baseline.pth.tar'
else:
filename = 'checkpoints/pcl_defense_rn110.pth.tar'
print(f"Loading checkpoint from: {filename}")
checkpoint = torch.load(filename)
model.load_state_dict(checkpoint['state_dict'])
model.eval()
# Loading Test Data (Un-normalized)
transform_test = transforms.Compose([transforms.ToTensor(), ])
unfiltered_testset = torchvision.datasets.CIFAR10(root='./data/', train=False,
download=True, transform=transform_test)
unfiltered_test_loader = torch.utils.data.DataLoader(unfiltered_testset, batch_size=256,
pin_memory=True,
shuffle=False)
# create test subset where model has perfect accuracy
xs, ys = [], []
n_checked = 0
for x, y in unfiltered_test_loader:
with torch.no_grad():
y_pred = model(x.to(device))[3].argmax(-1).to("cpu")
x = x[y_pred == y]
y = y[y_pred == y]
xs.append(x)
ys.append(y)
n_checked += len(x)
if n_checked >= args.num_samples_test:
break
xs = torch.cat(xs, 0)
ys = torch.cat(ys, 0)
filtered_testset = torch.utils.data.TensorDataset(xs, ys)
test_loader = torch.utils.data.DataLoader(filtered_testset, batch_size=256,
pin_memory=True,
shuffle=False)
# Mean and Standard Deviation of the Dataset
mean = torch.tensor([0.4914, 0.4822, 0.4465]).view((1, 3, 1, 1)).to(device)
std = torch.tensor([0.2023, 0.1994, 0.2010]).view((1, 3, 1, 1)).to(device)
def normalize(t):
    return (t - mean) / std
def un_normalize(t):
    return (t * std) + mean
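# Quick sanity check (illustrative only): un_normalize inverts normalize.
# t = torch.rand(4, 3, 32, 32).to(device)
# assert torch.allclose(un_normalize(normalize(t)), t, atol=1e-6)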
class ZeroOneStandardizedNetwork(nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, x, **kwargs):
return self.model(normalize(x), **kwargs)
model = ZeroOneStandardizedNetwork(model)
# Attacking Images batch-wise
def attack(model, criterion, img, label, eps, attack_type, iters):
adv = img.detach()
adv.requires_grad = True
if attack_type == 'fgsm':
iterations = 1
else:
iterations = iters
if attack_type == 'pgd':
step = 2 / 255
else:
step = eps / iterations
noise = 0
for j in range(iterations):
output = model(adv.clone())
if isinstance(output, tuple):
_, _, _, out_adv = output
else:
out_adv = output
loss = criterion(out_adv, label)
loss.backward()
if attack_type == 'mim':
adv_mean = torch.mean(torch.abs(adv.grad), dim=1, keepdim=True)
adv_mean = torch.mean(torch.abs(adv_mean), dim=2, keepdim=True)
adv_mean = torch.mean(torch.abs(adv_mean), dim=3, keepdim=True)
adv.grad = adv.grad / adv_mean
noise = noise + adv.grad
else:
noise = adv.grad
# Optimization step
adv.data = adv.data + step * noise.sign()
if attack_type == 'pgd':
adv.data = torch.where(adv.data > img.data + eps, img.data + eps,
adv.data)
adv.data = torch.where(adv.data < img.data - eps, img.data - eps,
adv.data)
adv.data.clamp_(0.0, 1.0)
adv.grad.data.zero_()
return adv.detach()
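# Example invocation (illustrative): craft one batch of PGD adversarials with
# the same settings adversarial_test() uses below.
# adv_batch = attack(model, criterion, img, label, eps=8 / 255,
#                    attack_type='pgd', iters=10)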
def get_boundary_adversarials(x, y, n_samples, epsilon, model):
"""Generate adversarial examples for the base classifier using AutoAttack."""
x_advs = []
for _ in range(n_samples):
x_adv = auto_pgd(model, x, y, 100, epsilon, "linf", n_classes=10,
n_restarts=5)[0]
x_advs.append(x_adv)
x_advs = torch.cat(x_advs, 0)
# replace examples for which no adversarials could be found with rnd. noise
    # per-sample check: did the attack fail to move the input at all?
    is_identical = (x_advs.flatten(1) - x.flatten(1)).abs().max(dim=1)[0] < 1e-6
    random_noise = 2 * torch.rand_like(x_advs) - 1.0
x_advs[is_identical] = random_noise[is_identical]
x_advs = utils.clipping_aware_rescaling(x, x_advs - x, epsilon, "linf")
return x_advs
def binarization_test(feature_extractor, attack_type, epsilon):
def run_attack(model, loader):
adv_acc = 0
n_total_samples = 0
x_adv = []
logits_adv = []
for i, (img, label) in enumerate(loader):
img, label = img.to(device), label.to(device)
if attack_type == "autopgd":
adv = auto_pgd(model, img, label, 200, epsilon, "linf",
n_restarts=5, n_classes=2)[0]
elif attack_type == "autopgd2":
adv = auto_pgd(model, img, label, 400, epsilon, "linf",
n_restarts=10, n_classes=2)[0]
elif attack_type == "fab":
adv = fab(model, img, label, 200, epsilon, "linf",
n_restarts=5, n_classes=2)[0]
else:
adv = attack(model, criterion, img, label, eps=epsilon, attack_type=attack_type,
iters=10)
with torch.no_grad():
outputs = model(adv.clone().detach())
adv_acc += torch.sum(
outputs.argmax(dim=-1) == label).item()
n_total_samples += len(img)
x_adv.append(adv.detach().cpu())
logits_adv.append(outputs.detach().cpu())
x_adv = torch.cat(x_adv, 0)
logits_adv = torch.cat(logits_adv, 0)
asr = 1.0 - adv_acc / n_total_samples
return asr, (x_adv, logits_adv)
from argparse_utils import DecisionBoundaryBinarizationSettings
scores_logit_differences_and_validation_accuracies = active_tests.decision_boundary_binarization.interior_boundary_discrimination_attack(
feature_extractor,
test_loader,
attack_fn=lambda m, l, kwargs: run_attack(m, l, **kwargs),
linearization_settings=DecisionBoundaryBinarizationSettings(
epsilon=epsilon,
norm="linf",
lr=10000,
n_boundary_points=args.n_boundary_points,
n_inner_points=args.n_inner_points,
adversarial_attack_settings=None,
optimizer="sklearn",
n_boundary_adversarial_points=1 if args.use_autopgd_boundary_adversarials else 0
),
n_samples=args.num_samples_test,
device=device,
n_samples_evaluation=200,
n_samples_asr_evaluation=200,
#args.num_samples_test * 10
decision_boundary_closeness=args.decision_boundary_closeness,
# TODO: activate this again
rescale_logits="adaptive",
get_boundary_adversarials_fn=partial(get_boundary_adversarials, model=lambda x: model(x)[3]) \
if args.use_autopgd_boundary_adversarials else None,
sample_training_data_from_corners=args.sample_from_corners
)
print(active_tests.decision_boundary_binarization.format_result(
scores_logit_differences_and_validation_accuracies,
args.num_samples_test
))
def adversarial_test():
adv_acc = 0
clean_acc = 0
for i, (img, label) in enumerate(test_loader):
img, label = img.to(device), label.to(device)
clean_acc += torch.sum(
model(img.clone().detach())[3].argmax(dim=-1) == label).item()
adv = attack(model, criterion, img, label, eps=eps, attack_type=attack_type,
iters=10)
adv_acc += torch.sum(
model(adv.clone().detach())[3].argmax(dim=-1) == label).item()
# print('Batch: {0}'.format(i))
    print('Clean accuracy: {0:.3%}\t Adversarial ({2}) accuracy: {1:.3%}'.format(
        clean_acc / len(filtered_testset), adv_acc / len(filtered_testset), attack_type))
# Loss Criteria
criterion = nn.CrossEntropyLoss()
eps = args.epsilon / 255 # Epsilon for Adversarial Attack
print("eps:", eps)
for attack_type in (args.attack,): # ("fgsm", "bim", "mim", "pgd"):
if args.binarization_test:
binarization_test(model, attack_type, eps)
else:
adversarial_test()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Wed Jan 23 10:15:27 2019
@author: aamir-mustafa
This is the Part 1 file for replicating the results of the paper:
"Adversarial Defense by Restricting the Hidden Space of Deep Neural Networks"
Here a ResNet model is trained with Softmax Loss for 164 epochs.
"""
# Essential Imports
import os
import sys
import argparse
import datetime
import time
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
from pcl_utils import AverageMeter, Logger
from resnet_model import * # Imports the ResNet Model
parser = argparse.ArgumentParser("Softmax Training for CIFAR-10 Dataset")
parser.add_argument('-j', '--workers', default=4, type=int,
help="number of data loading workers (default: 4)")
parser.add_argument('--train-batch', default=128, type=int, metavar='N',
help='train batchsize')
parser.add_argument('--test-batch', default=100, type=int, metavar='N',
help='test batchsize')
parser.add_argument('--lr', type=float, default=0.1, help="learning rate for model")
parser.add_argument('--schedule', type=int, nargs='+', default=[81, 122, 140],
help='Decrease learning rate at these epochs.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--max-epoch', type=int, default=164)
parser.add_argument('--eval-freq', type=int, default=10)
parser.add_argument('--print-freq', type=int, default=50)
parser.add_argument('--gpu', type=str, default='0') #gpu to be used
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--use-cpu', action='store_true')
parser.add_argument('--save-dir', type=str, default='log')
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
#%%
def main():
torch.manual_seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
use_gpu = torch.cuda.is_available()
if args.use_cpu: use_gpu = False
sys.stdout = Logger(osp.join(args.save_dir, 'log_' + 'CIFAR-10_OnlySoftmax' + '.txt'))
if use_gpu:
print("Currently using GPU: {}".format(args.gpu))
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
else:
print("Currently using CPU")
# Data Loading
num_classes=10
print('==> Preparing dataset ')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
trainset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=True,
download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.train_batch, pin_memory=True,
shuffle=True, num_workers=args.workers)
testset = torchvision.datasets.CIFAR10(root='./data/cifar10', train=False,
download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch, pin_memory=True,
shuffle=False, num_workers=args.workers)
# Loading the Model
    model = resnet(num_classes=num_classes, depth=110)
if use_gpu:
model = nn.DataParallel(model).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,
weight_decay=args.weight_decay)
start_time = time.time()
for epoch in range(args.max_epoch):
adjust_learning_rate(optimizer, epoch)
print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
print('LR: %f' % (state['lr']))
train(trainloader, model, criterion, optimizer, epoch, use_gpu, num_classes)
        if (args.eval_freq > 0 and (epoch + 1) % args.eval_freq == 0) or (epoch + 1) == args.max_epoch:
print("==> Test") #Tests after every 10 epochs
acc, err = test(model, testloader, use_gpu, num_classes, epoch)
print("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))
checkpoint = {'epoch': epoch + 1, 'state_dict': model.state_dict(),
'optimizer_model': optimizer.state_dict(), }
torch.save(checkpoint, 'Models_Softmax/CIFAR10_Softmax.pth.tar')
elapsed = round(time.time() - start_time)
elapsed = str(datetime.timedelta(seconds=elapsed))
print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
def train(trainloader, model, criterion, optimizer, epoch, use_gpu, num_classes):
model.train()
losses = AverageMeter()
#Batch-wise Training
for batch_idx, (data, labels) in enumerate(trainloader):
if use_gpu:
data, labels = data.cuda(), labels.cuda()
feats_128, feats_256, feats_1024, outputs = model(data)
loss_xent = criterion(outputs, labels) # cross-entropy loss calculation
optimizer.zero_grad()
loss_xent.backward()
optimizer.step()
        losses.update(loss_xent.item(), labels.size(0))  # update the running average loss
if (batch_idx+1) % args.print_freq == 0:
print("Batch {}/{}\t Loss {:.6f} ({:.6f}) " \
.format(batch_idx+1, len(trainloader), losses.val, losses.avg))
def test(model, testloader, use_gpu, num_classes, epoch):
model.eval()
correct, total = 0, 0
with torch.no_grad():
for data, labels in testloader:
if use_gpu:
data, labels = data.cuda(), labels.cuda()
feats_128, feats_256, feats_1024, outputs = model(data)
predictions = outputs.data.max(1)[1]
total += labels.size(0)
correct += (predictions == labels.data).sum()
acc = correct * 100. / total
err = 100. - acc
return acc, err
def adjust_learning_rate(optimizer, epoch):
global state
if epoch in args.schedule:
state['lr'] *= args.gamma
for param_group in optimizer.param_groups:
param_group['lr'] = state['lr']
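# With the defaults (lr=0.1, gamma=0.1, schedule=[81, 122, 140]) the learning
# rate steps as 0.1 -> 0.01 (epoch 81) -> 0.001 (epoch 122) -> 0.0001 (epoch 140).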
if __name__ == '__main__':
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
class Proximity(nn.Module):
def __init__(self, num_classes=100, feat_dim=1024, use_gpu=True):
super(Proximity, self).__init__()
self.num_classes = num_classes
self.feat_dim = feat_dim
self.use_gpu = use_gpu
if self.use_gpu:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda())
else:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))
def forward(self, x, labels):
batch_size = x.size(0)
        # squared Euclidean distances: ||x||^2 + ||c||^2 - 2 * x @ c^T
        distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
                  torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
        distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)  # keyword form; the positional addmm_(1, -2, ...) is deprecated
classes = torch.arange(self.num_classes).long()
if self.use_gpu: classes = classes.cuda()
labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
mask = labels.eq(classes.expand(batch_size, self.num_classes))
dist = []
for i in range(batch_size):
value = distmat[i][mask[i]]
value = value.clamp(min=1e-12, max=1e+12)
dist.append(value)
dist = torch.cat(dist)
loss = dist.mean()
return loss
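# Note: Proximity averages distances to the *true-class* centers, while its
# counterpart Con_Proximity (defined in a sibling module) averages distances to
# all *other* centers; in the PCL defense the former is minimized and the
# latter maximized, tightening clusters while pushing classes apart.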
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten, Lambda
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import mnist, cifar10, cifar100
import tensorflow as tf
import numpy as np
import os
from scipy.io import loadmat
import math
from utils.model import resnet_v1, resnet_v2
import cleverhans.attacks as attacks
from cleverhans.utils_tf import model_eval
from utils.keras_wraper_ensemble import KerasModelWrapper
from utils.utils_model_eval import model_eval_targetacc
FLAGS = tf.app.flags.FLAGS
#Common Flags for two models
tf.app.flags.DEFINE_integer('batch_size', 50, 'batch_size for attack')
tf.app.flags.DEFINE_string('optimizer', 'mom', '')
tf.app.flags.DEFINE_string('attack_method', 'FastGradientMethod', '')
tf.app.flags.DEFINE_integer('version', 2, '')
tf.app.flags.DEFINE_float('lr', 0.01, 'initial lr')
tf.app.flags.DEFINE_bool('target', True, 'is target attack or not')
tf.app.flags.DEFINE_integer('num_iter', 10, '')
tf.app.flags.DEFINE_string('dataset', 'cifar10', '')
tf.app.flags.DEFINE_bool('use_random', False, 'whether use random center or MMLDA center in the network')
tf.app.flags.DEFINE_bool('use_dense', True, 'whether use extra dense layer in the network')
tf.app.flags.DEFINE_bool('use_leaky', False, 'whether use leaky relu in the network')
tf.app.flags.DEFINE_integer('epoch', 180, 'the epoch of model to load')
tf.app.flags.DEFINE_bool('use_BN', True, 'whether use batch normalization in the network')
# SCE, MMC-10, MMC-100, AT-SCE, AT-MMC-10, AT-MMC-100
tf.app.flags.DEFINE_string('model_1', 'SCE', '')
tf.app.flags.DEFINE_string('model_2', 'MMC-10', '')
#Specific Flags for model 1
tf.app.flags.DEFINE_float('mean_var_1', 10, 'parameter in MMLDA')
tf.app.flags.DEFINE_string('attack_method_for_advtrain_1', 'FastGradientMethod', '')
tf.app.flags.DEFINE_bool('use_target_1', False, 'whether use target attack or untarget attack for adversarial training')
tf.app.flags.DEFINE_bool('use_ball_1', True, 'whether use ball loss or softmax')
tf.app.flags.DEFINE_bool('use_MMLDA_1', True, 'whether use MMLDA or softmax')
tf.app.flags.DEFINE_bool('use_advtrain_1', True, 'whether use advtraining or normal training')
tf.app.flags.DEFINE_float('adv_ratio_1', 1.0, 'the ratio of adversarial examples in each mini-batch')
tf.app.flags.DEFINE_bool('normalize_output_for_ball_1', True, 'whether apply softmax in the inference phase')
#Specific Flags for model 2
tf.app.flags.DEFINE_float('mean_var_2', 10, 'parameter in MMLDA')
tf.app.flags.DEFINE_string('attack_method_for_advtrain_2', 'FastGradientMethod', '')
tf.app.flags.DEFINE_bool('use_target_2', False, 'whether use target attack or untarget attack for adversarial training')
tf.app.flags.DEFINE_bool('use_ball_2', True, 'whether use ball loss or softmax')
tf.app.flags.DEFINE_bool('use_MMLDA_2', True, 'whether use MMLDA or softmax')
tf.app.flags.DEFINE_bool('use_advtrain_2', True, 'whether use advtraining or normal training')
tf.app.flags.DEFINE_float('adv_ratio_2', 1.0, 'the ratio of adversarial examples in each mini-batch')
tf.app.flags.DEFINE_bool('normalize_output_for_ball_2', True, 'whether apply softmax in the inference phase')
##### model 1 is the substitute model used to craft adversarial examples, model 2 is the original model used to classify these adversarial examples.
def return_paras(model_name):
if model_name == 'SCE':
return 0, None, False, False, False, False, 0.0, True
elif model_name == 'MMC-10':
return 10.0, None, False, True, True, False, 0.0, False
elif model_name == 'MMC-100':
return 100.0, None, False, True, True, False, 0.0, False
elif model_name == 'AT-SCE':
return 0, 'MadryEtAl', True, False, False, True, 1.0, True
elif model_name == 'AT-MMC-10':
return 10, 'MadryEtAl', True, True, True, True, 1.0, False
elif model_name == 'AT-MMC-100':
return 100, 'MadryEtAl', True, True, True, True, 1.0, False
else:
return None
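# Returned tuple order (matches the unpacking below): (mean_var,
# attack_method_for_advtrain, use_target, use_ball, use_MMLDA, use_advtrain,
# adv_ratio, normalize_output_for_ball)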
FLAGS.mean_var_1, FLAGS.attack_method_for_advtrain_1, FLAGS.use_target_1, FLAGS.use_ball_1, \
FLAGS.use_MMLDA_1, FLAGS.use_advtrain_1, FLAGS.adv_ratio_1, FLAGS.normalize_output_for_ball_1 = return_paras(FLAGS.model_1)
FLAGS.mean_var_2, FLAGS.attack_method_for_advtrain_2, FLAGS.use_target_2, FLAGS.use_ball_2, \
FLAGS.use_MMLDA_2, FLAGS.use_advtrain_2, FLAGS.adv_ratio_2, FLAGS.normalize_output_for_ball_2 = return_paras(FLAGS.model_2)
# Load the dataset
if FLAGS.dataset=='mnist':
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.expand_dims(x_train, axis=3)
x_test = np.expand_dims(x_test, axis=3)
epochs = 50
num_class = 10
epochs_inter = [30,40]
    x_place = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))  # single channel after expand_dims above
elif FLAGS.dataset=='cifar10':
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
epochs = 200
num_class = 10
epochs_inter = [100,150]
x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
elif FLAGS.dataset=='cifar100':
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
epochs = 200
num_class = 100
epochs_inter = [100,150]
x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
else:
print('Unknown dataset')
# These parameters are usually fixed
subtract_pixel_mean = True
version = FLAGS.version # Model version
n = 5 # n=5 for resnet-32 v1
# Computed depth from supplied model parameter n
if version == 1:
depth = n * 6 + 2
feature_dim = 64
elif version == 2:
depth = n * 9 + 2
feature_dim = 256
if FLAGS.use_BN==True:
BN_name = '_withBN'
print('Use BN in the model')
else:
BN_name = '_noBN'
print('Do not use BN in the model')
if FLAGS.use_random==True:
name_random = '_random'
else:
name_random = ''
if FLAGS.use_leaky==True:
name_leaky = '_withleaky'
else:
name_leaky = ''
if FLAGS.use_dense==True:
name_dense = ''
else:
name_dense = '_nodense'
#Load means in MMLDA
kernel_dict = loadmat('kernel_paras/meanvar1_featuredim'+str(feature_dim)+'_class'+str(num_class)+name_random+'.mat')
mean_logits = kernel_dict['mean_logits'] #num_class X num_dense
mean_logits_1 = FLAGS.mean_var_1 * tf.constant(mean_logits,dtype=tf.float32)
mean_logits_2 = FLAGS.mean_var_2 * tf.constant(mean_logits,dtype=tf.float32)
#MMLDA prediction function
def MMLDA_layer_1(x, means=mean_logits_1, num_class=num_class, use_ball=FLAGS.use_ball_1):
#x_shape = batch_size X num_dense
x_expand = tf.tile(tf.expand_dims(x,axis=1),[1,num_class,1]) #batch_size X num_class X num_dense
mean_expand = tf.expand_dims(means,axis=0) #1 X num_class X num_dense
logits = -tf.reduce_sum(tf.square(x_expand - mean_expand), axis=-1) #batch_size X num_class
if use_ball==True:
if FLAGS.normalize_output_for_ball_1==False:
return logits
else:
return tf.nn.softmax(logits, axis=-1)
else:
return tf.nn.softmax(logits, axis=-1)
def MMLDA_layer_2(x, means=mean_logits_2, num_class=num_class, use_ball=FLAGS.use_ball_2):
#x_shape = batch_size X num_dense
x_expand = tf.tile(tf.expand_dims(x,axis=1),[1,num_class,1]) #batch_size X num_class X num_dense
mean_expand = tf.expand_dims(means,axis=0) #1 X num_class X num_dense
logits = -tf.reduce_sum(tf.square(x_expand - mean_expand), axis=-1) #batch_size X num_class
if use_ball==True:
if FLAGS.normalize_output_for_ball_2==False:
return logits
else:
return tf.nn.softmax(logits, axis=-1)
else:
return tf.nn.softmax(logits, axis=-1)
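# Both MMLDA layers score a feature x by its negative squared distance to each
# class mean: logits_k = -||x - mu_k||^2. A minimal numpy sketch of the same
# computation (illustrative only; shapes as in the comments above):
#   x = np.random.randn(4, 256); mu = np.random.randn(10, 256)
#   logits = -((x[:, None, :] - mu[None, :, :]) ** 2).sum(-1)  # 4 x 10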
# Load the data.
y_test_target = np.zeros_like(y_test)
for i in range(y_test.shape[0]):
l = np.random.randint(num_class)
while l == y_test[i][0]:
l = np.random.randint(num_class)
y_test_target[i][0] = l
print('Finished crafting y_test_target')
# Input image dimensions.
input_shape = x_train.shape[1:]
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
clip_min = 0.0
clip_max = 1.0
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
clip_min -= x_train_mean
clip_max -= x_train_mean
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_class)
y_test = keras.utils.to_categorical(y_test, num_class)
y_test_target = keras.utils.to_categorical(y_test_target, num_class)
# Define input TF placeholder
y_place = tf.placeholder(tf.float32, shape=(None, num_class))
y_target = tf.placeholder(tf.float32, shape=(None, num_class))
sess = tf.Session()
keras.backend.set_session(sess)
model_input_1 = Input(shape=input_shape)
model_input_2 = Input(shape=input_shape)
#dim of logits is batchsize x dim_means
if version == 2:
original_model_1,_,_,_,final_features_1 = resnet_v2(input=model_input_1, depth=depth, num_classes=num_class, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
else:
original_model_1,_,_,_,final_features_1 = resnet_v1(input=model_input_1, depth=depth, num_classes=num_class, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
if version == 2:
original_model_2,_,_,_,final_features_2 = resnet_v2(input=model_input_2, depth=depth, num_classes=num_class, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
else:
original_model_2,_,_,_,final_features_2 = resnet_v1(input=model_input_2, depth=depth, num_classes=num_class, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
##### Load model 1 #####
#Whether use target attack for adversarial training
if FLAGS.use_target_1==False:
is_target_1 = ''
else:
is_target_1 = 'target'
if FLAGS.use_advtrain_1==True:
dirr_1 = 'advtrained_models/'+FLAGS.dataset+'/'
attack_method_for_advtrain_1 = '_'+is_target_1+FLAGS.attack_method_for_advtrain_1
adv_ratio_name_1 = '_advratio'+str(FLAGS.adv_ratio_1)
mean_var_1 = int(FLAGS.mean_var_1)
else:
dirr_1 = 'trained_models/'+FLAGS.dataset+'/'
attack_method_for_advtrain_1 = ''
adv_ratio_name_1 = ''
mean_var_1 = FLAGS.mean_var_1
if FLAGS.use_MMLDA_1==True:
print('Using MMLDA for model 1, the substitute model')
new_layer_1 = Lambda(MMLDA_layer_1)
predictions_1 = new_layer_1(final_features_1)
model_1 = Model(input=model_input_1, output=predictions_1)
use_ball_1=''
if FLAGS.use_ball_1==False:
print('Using softmax function for model 1')
use_ball_1='_softmax'
filepath_dir_1 = dirr_1+'resnet32v'+str(version)+'_meanvar'+str(mean_var_1) \
+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size) \
+attack_method_for_advtrain_1+adv_ratio_name_1+BN_name+name_leaky+name_dense+name_random+use_ball_1+'/' \
+'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
else:
print('Using softmax loss for model 1')
model_1 = original_model_1
filepath_dir_1 = dirr_1+'resnet32v'+str(version)+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size)+attack_method_for_advtrain_1+adv_ratio_name_1+BN_name+name_leaky+'/' \
+'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
wrap_ensemble_1 = KerasModelWrapper(model_1, num_class=num_class)
model_1.load_weights(filepath_dir_1)
##### Load model 2 #####
#Whether use target attack for adversarial training
if FLAGS.use_target_2==False:
is_target_2 = ''
else:
is_target_2 = 'target'
if FLAGS.use_advtrain_2==True:
dirr_2 = 'advtrained_models/'+FLAGS.dataset+'/'
attack_method_for_advtrain_2 = '_'+is_target_2+FLAGS.attack_method_for_advtrain_2
adv_ratio_name_2 = '_advratio'+str(FLAGS.adv_ratio_2)
mean_var_2 = int(FLAGS.mean_var_2)
else:
dirr_2 = 'trained_models/'+FLAGS.dataset+'/'
attack_method_for_advtrain_2 = ''
adv_ratio_name_2 = ''
mean_var_2 = FLAGS.mean_var_2
if FLAGS.use_MMLDA_2==True:
print('Using MMLDA for model 2, the original model')
new_layer_2 = Lambda(MMLDA_layer_2)
predictions_2 = new_layer_2(final_features_2)
model_2 = Model(input=model_input_2, output=predictions_2)
use_ball_2=''
if FLAGS.use_ball_2==False:
print('Using softmax function for model 2')
use_ball_2='_softmax'
filepath_dir_2 = dirr_2+'resnet32v'+str(version)+'_meanvar'+str(mean_var_2) \
+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size) \
+attack_method_for_advtrain_2+adv_ratio_name_2+BN_name+name_leaky+name_dense+name_random+use_ball_2+'/' \
+'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
else:
print('Using softmax loss for model 2')
model_2 = original_model_2
filepath_dir_2 = dirr_2+'resnet32v'+str(version)+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size)+attack_method_for_advtrain_2+adv_ratio_name_2+BN_name+name_leaky+'/' \
+'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
wrap_ensemble_2 = KerasModelWrapper(model_2, num_class=num_class)
model_2.load_weights(filepath_dir_2)
# Initialize the attack method
if FLAGS.attack_method == 'MadryEtAl':
att = attacks.MadryEtAl(wrap_ensemble_1)
elif FLAGS.attack_method == 'FastGradientMethod':
att = attacks.FastGradientMethod(wrap_ensemble_1)
elif FLAGS.attack_method == 'MomentumIterativeMethod':
att = attacks.MomentumIterativeMethod(wrap_ensemble_1)
elif FLAGS.attack_method == 'BasicIterativeMethod':
att = attacks.BasicIterativeMethod(wrap_ensemble_1)
# Consider the attack to be constant
eval_par = {'batch_size': FLAGS.batch_size}
for eps in range(2):
eps_ = (eps+1) * 8
print('eps is %d'%eps_)
eps_ = eps_ / 256.0
if FLAGS.target==False:
y_target = None
if FLAGS.attack_method == 'FastGradientMethod':
att_params = {'eps': eps_,
'clip_min': clip_min,
'clip_max': clip_max,
'y_target': y_target}
else:
att_params = {'eps': eps_,
#'eps_iter': eps_*1.0/FLAGS.num_iter,
#'eps_iter': 3.*eps_/FLAGS.num_iter,
'eps_iter': 2. / 256.,
'clip_min': clip_min,
'clip_max': clip_max,
'nb_iter': FLAGS.num_iter,
'y_target': y_target}
adv_x = tf.stop_gradient(att.generate(x_place, **att_params))
preds = model_2(adv_x)
if FLAGS.target==False:
acc = model_eval(sess, x_place, y_place, preds, x_test, y_test, args=eval_par)
print('adv_acc of model 1 transfer to model 2 is: %.3f' %acc)
else:
acc = model_eval_targetacc(sess, x_place, y_place, y_target, preds, x_test, y_test, y_test_target, args=eval_par)
print('adv_acc_target of model 1 transfer to model 2 is: %.3f' %acc)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten, Lambda
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import mnist, cifar10, cifar100
import tensorflow as tf
import numpy as np
import os
from scipy.io import loadmat
import math
from utils.model import resnet_v1, resnet_v2
import cleverhans.attacks as attacks
from cleverhans.utils_tf import model_eval
from utils.keras_wraper_ensemble import KerasModelWrapper
from utils.utils_model_eval import model_eval_targetacc
from sklearn.metrics import roc_auc_score
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 50, 'batch_size for attack')
tf.app.flags.DEFINE_string('optimizer', 'mom', '')
tf.app.flags.DEFINE_float('mean_var', 10, 'parameter in MMLDA')
tf.app.flags.DEFINE_string('attack_method', 'Gaussian', '')  # capitalized to match the 'Gaussian' branch below
tf.app.flags.DEFINE_string('attack_method_for_advtrain', 'FastGradientMethod', '')
tf.app.flags.DEFINE_integer('version', 2, '')
tf.app.flags.DEFINE_float('lr', 0.01, 'initial lr')
tf.app.flags.DEFINE_bool('target', True, 'is target attack or not')
tf.app.flags.DEFINE_bool('use_target', False, 'whether use target attack or untarget attack for adversarial training')
tf.app.flags.DEFINE_integer('num_iter', 10, '')
tf.app.flags.DEFINE_bool('use_ball', True, 'whether use ball loss or softmax')
tf.app.flags.DEFINE_bool('use_MMLDA', True, 'whether use MMLDA or softmax')
tf.app.flags.DEFINE_bool('use_advtrain', True, 'whether use advtraining or normal training')
tf.app.flags.DEFINE_float('adv_ratio', 1.0, 'the ratio of adversarial examples in each mini-batch')
tf.app.flags.DEFINE_integer('epoch', 1, 'the epoch of model to load')
tf.app.flags.DEFINE_bool('use_BN', True, 'whether use batch normalization in the network')
tf.app.flags.DEFINE_string('dataset', 'mnist', '')
tf.app.flags.DEFINE_bool('normalize_output_for_ball', True, 'whether apply softmax in the inference phase')
tf.app.flags.DEFINE_bool('use_random', False, 'whether use random center or MMLDA center in the network')
tf.app.flags.DEFINE_bool('use_dense', True, 'whether use extra dense layer in the network')
tf.app.flags.DEFINE_bool('use_leaky', False, 'whether use leaky relu in the network')
# Load the dataset
if FLAGS.dataset=='mnist':
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.expand_dims(x_train, axis=3)
x_test = np.expand_dims(x_test, axis=3)
epochs = 50
num_class = 10
epochs_inter = [30,40]
    x_place = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))  # single channel after expand_dims above
elif FLAGS.dataset=='cifar10':
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
epochs = 200
num_class = 10
epochs_inter = [100,150]
x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
elif FLAGS.dataset=='cifar100':
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
epochs = 200
num_class = 100
epochs_inter = [100,150]
x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
else:
print('Unknown dataset')
# These parameters are usually fixed
subtract_pixel_mean = True
version = FLAGS.version # Model version
n = 5 # n=5 for resnet-32 v1
# Computed depth from supplied model parameter n
if version == 1:
depth = n * 6 + 2
feature_dim = 64
elif version == 2:
depth = n * 9 + 2
feature_dim = 256
if FLAGS.use_random==True:
name_random = '_random'
else:
name_random = ''
if FLAGS.use_leaky==True:
name_leaky = '_withleaky'
else:
name_leaky = ''
if FLAGS.use_dense==True:
name_dense = ''
else:
name_dense = '_nodense'
#Load means in MMLDA
kernel_dict = loadmat('kernel_paras/meanvar1_featuredim'+str(feature_dim)+'_class'+str(num_class)+name_random+'.mat')
mean_logits = kernel_dict['mean_logits'] #num_class X num_dense
mean_logits = FLAGS.mean_var * tf.constant(mean_logits,dtype=tf.float32)
#MMLDA prediction function
def MMLDA_layer(x, means=mean_logits, num_class=num_class, use_ball=FLAGS.use_ball):
#x_shape = batch_size X num_dense
x_expand = tf.tile(tf.expand_dims(x,axis=1),[1,num_class,1]) #batch_size X num_class X num_dense
mean_expand = tf.expand_dims(means,axis=0) #1 X num_class X num_dense
logits = -tf.reduce_sum(tf.square(x_expand - mean_expand), axis=-1) #batch_size X num_class
if use_ball==True:
if FLAGS.normalize_output_for_ball==False:
return logits
else:
return tf.nn.softmax(logits, axis=-1)
else:
return tf.nn.softmax(logits, axis=-1)
# Load the data.
y_test_target = np.zeros_like(y_test)
for i in range(y_test.shape[0]):
l = np.random.randint(num_class)
while l == y_test[i][0]:
l = np.random.randint(num_class)
y_test_target[i][0] = l
print('Finished crafting y_test_target')
# Input image dimensions.
input_shape = x_train.shape[1:]
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
clip_min = 0.0
clip_max = 1.0
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
clip_min -= np.max(x_train_mean)
clip_max -= np.min(x_train_mean)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_class)
y_test = keras.utils.to_categorical(y_test, num_class)
y_test_target = keras.utils.to_categorical(y_test_target, num_class)
# Define input TF placeholder
y_place = tf.placeholder(tf.float32, shape=(None, num_class))
y_target = tf.placeholder(tf.float32, shape=(None, num_class))
sess = tf.Session()
keras.backend.set_session(sess)
model_input = Input(shape=input_shape)
#dim of logits is batchsize x dim_means
if version == 2:
original_model,_,_,_,final_features = resnet_v2(input=model_input, depth=depth, num_classes=num_class, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
else:
original_model,_,_,_,final_features = resnet_v1(input=model_input, depth=depth, num_classes=num_class, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
if FLAGS.use_BN==True:
BN_name = '_withBN'
print('Use BN in the model')
else:
BN_name = '_noBN'
print('Do not use BN in the model')
#Whether use target attack for adversarial training
if FLAGS.use_target==False:
is_target = ''
else:
is_target = 'target'
if FLAGS.use_advtrain==True:
dirr = 'advtrained_models/'+FLAGS.dataset+'/'
attack_method_for_advtrain = '_'+is_target+FLAGS.attack_method_for_advtrain
adv_ratio_name = '_advratio'+str(FLAGS.adv_ratio)
mean_var = int(FLAGS.mean_var)
else:
dirr = 'trained_models/'+FLAGS.dataset+'/'
attack_method_for_advtrain = ''
adv_ratio_name = ''
mean_var = FLAGS.mean_var
if FLAGS.use_MMLDA==True:
print('Using MMT Training Scheme')
new_layer = Lambda(MMLDA_layer)
predictions = new_layer(final_features)
model = Model(input=model_input, output=predictions)
use_ball_=''
if FLAGS.use_ball==False:
print('Using softmax function (MMLDA)')
use_ball_='_softmax'
filepath_dir = dirr+'resnet32v'+str(version)+'_meanvar'+str(mean_var) \
+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size) \
+attack_method_for_advtrain+adv_ratio_name+BN_name+name_leaky+name_dense+name_random+use_ball_+'/' \
+'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
else:
print('Using softmax loss')
model = original_model
filepath_dir = dirr+'resnet32v'+str(version)+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size)+attack_method_for_advtrain+adv_ratio_name+BN_name+name_leaky+'/' \
+'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
wrap_ensemble = KerasModelWrapper(model, num_class=num_class)
model.load_weights(filepath_dir)
if FLAGS.attack_method == 'Rotation':
datagen = ImageDataGenerator(
rotation_range=30)
data_generate=datagen.flow(x_test, y_test, batch_size=100)
accuracy = 0
with sess.as_default():
for i in range(10):
test_batch = data_generate.next()
test_batch_data = test_batch[0]
test_batch_label = test_batch[1]
correct_preds = tf.equal(tf.argmax(y_place, axis=-1),
tf.argmax(model(x_place), axis=-1))
cur_corr_preds = correct_preds.eval(feed_dict={x_place: test_batch_data, y_place: test_batch_label})
accuracy += cur_corr_preds.sum()
            print(accuracy)
        accuracy /= 10.
        print('Accuracy is: ', accuracy)
elif FLAGS.attack_method == 'Gaussian':
accuracy = 0
with sess.as_default():
for i in range(10):
correct_preds = tf.equal(tf.argmax(y_place, axis=-1),
tf.argmax(model(x_place+tf.random_normal([100,32,32,3],mean=0.0,stddev=0.05)), axis=-1))
cur_corr_preds = correct_preds.eval(feed_dict={x_place: x_test[i*100:(i+1)*100], y_place: y_test[i*100:(i+1)*100]})
accuracy += cur_corr_preds.sum()
            print(accuracy)
        accuracy /= 10.
        print('Accuracy is: ', accuracy)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
from functools import partial
import keras
import torch
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten, Lambda
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, \
ReduceLROnPlateau
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import mnist, cifar10, cifar100
import numpy as np
import os
from scipy.io import loadmat
import math
from torch.utils.data import DataLoader
from active_tests.decision_boundary_binarization import LogitRescalingType
from active_tests.decision_boundary_binarization import \
_train_logistic_regression_classifier
from active_tests.decision_boundary_binarization import format_result
from active_tests.decision_boundary_binarization import \
interior_boundary_discrimination_attack
from tensorflow_wrapper import TensorFlow1ToPyTorchWrapper
from utils import build_dataloader_from_arrays
from mmt_utils.model import resnet_v1, resnet_v2
import cleverhans.attacks as attacks
from cleverhans.utils_tf import model_eval
from mmt_utils.keras_wraper_ensemble import KerasModelWrapper
from mmt_utils.utils_model_eval import model_eval_targetacc
from sklearn.metrics import roc_auc_score
FLAGS = tf.app.flags.FLAGS
def main():
tf.app.flags.DEFINE_integer('epsilon', 8, 'attack radius')
tf.app.flags.DEFINE_integer('n_inner_points', 999, '')
tf.app.flags.DEFINE_integer('n_boundary_points', 1, '')
tf.app.flags.DEFINE_integer('n_samples', 512, '')
tf.app.flags.DEFINE_integer('batch_size', 512, 'batch_size for attack')
tf.app.flags.DEFINE_string('optimizer', 'mom', '')
tf.app.flags.DEFINE_float('mean_var', 10, 'parameter in MMLDA')
tf.app.flags.DEFINE_string('attack_method', 'FastGradientMethod', '')
tf.app.flags.DEFINE_string('attack_method_for_advtrain', 'FastGradientMethod',
'')
tf.app.flags.DEFINE_integer('version', 2, '')
tf.app.flags.DEFINE_bool('use_target', False,
'whether use target attack or untarget attack for adversarial training')
tf.app.flags.DEFINE_integer('num_iter', 10, '')
tf.app.flags.DEFINE_bool('use_ball', True, 'whether use ball loss or softmax')
tf.app.flags.DEFINE_bool('use_MMLDA', True, 'whether use MMLDA or softmax')
tf.app.flags.DEFINE_bool('use_advtrain', True,
'whether use advtraining or normal training')
tf.app.flags.DEFINE_float('adv_ratio', 1.0,
'the ratio of adversarial examples in each mini-batch')
tf.app.flags.DEFINE_integer('epoch', 1, 'the epoch of model to load')
tf.app.flags.DEFINE_bool('use_BN', True,
'whether use batch normalization in the network')
tf.app.flags.DEFINE_string('dataset', 'mnist', '')
tf.app.flags.DEFINE_bool('normalize_output_for_ball', True,
'whether apply softmax in the inference phase')
tf.app.flags.DEFINE_bool('use_random', False,
'whether use random center or MMLDA center in the network')
tf.app.flags.DEFINE_bool('use_dense', True,
'whether use extra dense layer in the network')
tf.app.flags.DEFINE_bool('use_leaky', False,
'whether use leaky relu in the network')
tf.app.flags.DEFINE_string('checkpoint', None, '')
# For calculate AUC-scores
tf.app.flags.DEFINE_bool('is_calculate_auc', False,
'whether to calculate auc scores')
tf.app.flags.DEFINE_bool('is_auc_metric_softmax_for_MMC', False,
'whether use softmax to calculate auc metrics for MMC')
tf.app.flags.DEFINE_bool('sample_from_corners', False, '')
run_test()
# MMLDA prediction function
def MMLDA_layer(x, means, num_class, use_ball,
normalize_output_for_ball=None):
if normalize_output_for_ball is None:
normalize_output_for_ball = FLAGS.normalize_output_for_ball
# x_shape = batch_size X num_dense
x_expand = tf.tile(tf.expand_dims(x, axis=1),
[1, num_class, 1]) # batch_size X num_class X num_dense
mean_expand = tf.expand_dims(means, axis=0) # 1 X num_class X num_dense
logits = -tf.reduce_sum(tf.square(x_expand - mean_expand),
axis=-1) # batch_size X num_class
if use_ball == True:
if normalize_output_for_ball == False:
return logits
else:
return tf.nn.softmax(logits, axis=-1)
else:
return tf.nn.softmax(logits, axis=-1)
def setup_model_and_load_data():
# Load the dataset
if FLAGS.dataset == 'mnist':
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.expand_dims(x_train, axis=3)
x_test = np.expand_dims(x_test, axis=3)
epochs = 50
num_class = 10
epochs_inter = [30, 40]
    x_place = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))  # single channel after expand_dims above
elif FLAGS.dataset == 'cifar10':
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
epochs = 200
num_class = 10
epochs_inter = [100, 150]
x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
elif FLAGS.dataset == 'cifar100':
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
epochs = 200
num_class = 100
epochs_inter = [100, 150]
x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
else:
print('Unknown dataset')
# These parameters are usually fixed
subtract_pixel_mean = True
version = FLAGS.version # Model version
n = 18 # n=5 for resnet-32 v1, n=18 for Resnet110 (according to README.md)
# Computed depth from supplied model parameter n
if version == 1:
depth = n * 6 + 2
feature_dim = 64
elif version == 2:
depth = n * 9 + 2
feature_dim = 256
if FLAGS.use_random == True:
name_random = '_random'
else:
name_random = ''
if FLAGS.use_leaky == True:
name_leaky = '_withleaky'
else:
name_leaky = ''
if FLAGS.use_dense == True:
name_dense = ''
else:
name_dense = '_nodense'
# Load means in MMLDA
kernel_dict = loadmat(
'case_studies/mmt/kernel_paras/meanvar1_featuredim' + str(
feature_dim) + '_class' + str(
num_class) + name_random + '.mat')
mean_logits = kernel_dict['mean_logits'] # num_class X num_dense
mean_logits = FLAGS.mean_var * tf.constant(mean_logits, dtype=tf.float32)
# Load the data.
# Input image dimensions.
input_shape = x_train.shape[1:]
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# clip_min = 0.0
# clip_max = 1.0
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0, keepdims=True)
# x_train -= x_train_mean
# x_test -= x_train_mean
# clip_min -= np.max(x_train_mean)
# clip_max -= np.min(x_train_mean)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_class)
y_test = keras.utils.to_categorical(y_test, num_class)
# Define input TF placeholder
y_place = tf.placeholder(tf.float32, shape=(None, num_class))
sess = tf.Session()
keras.backend.set_session(sess)
model_input = Input(shape=input_shape)
if subtract_pixel_mean:
normalized_model_input = Lambda(lambda x: x - x_train_mean)(model_input)
else:
normalized_model_input = model_input
  # dim of logits is batchsize x dim_means
if version == 2:
original_model, _, _, _, final_features = resnet_v2(
immediate_input=normalized_model_input, input=model_input, depth=depth,
num_classes=num_class, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense,
use_leaky=FLAGS.use_leaky)
else:
original_model, _, _, _, final_features = resnet_v1(
immediate_input=normalized_model_input, input=model_input, depth=depth,
num_classes=num_class, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense,
use_leaky=FLAGS.use_leaky)
if FLAGS.use_BN == True:
BN_name = '_withBN'
print('Use BN in the model')
else:
BN_name = '_noBN'
print('Do not use BN in the model')
# Whether use target attack for adversarial training
if FLAGS.use_target == False:
is_target = ''
else:
is_target = 'target'
if FLAGS.use_advtrain == True:
dirr = 'advtrained_models/' + FLAGS.dataset + '/'
attack_method_for_advtrain = '_' + is_target + FLAGS.attack_method_for_advtrain
adv_ratio_name = '_advratio' + str(FLAGS.adv_ratio)
mean_var = int(FLAGS.mean_var)
else:
dirr = 'trained_models/' + FLAGS.dataset + '/'
attack_method_for_advtrain = ''
adv_ratio_name = ''
mean_var = FLAGS.mean_var
if FLAGS.use_MMLDA == True:
print('Using MMLDA')
new_layer = Lambda(partial(MMLDA_layer, means=mean_logits,
num_class=num_class, use_ball=FLAGS.use_ball))
predictions = new_layer(final_features)
model = Model(input=model_input, output=predictions)
else:
print('Using softmax loss')
model = original_model
model.load_weights(FLAGS.checkpoint)
return (model, model_input, final_features, mean_logits), \
x_place, y_place, sess, (x_test, y_test)
def run_attack(m, l, kwargs, preds, x_adv, x_ph, y_ph, sess):
del kwargs
del m
for x, y in l:
x = x.numpy().transpose(0, 2, 3, 1)
y = y.numpy()
y_oh = keras.utils.to_categorical(y, 2)
x_adv_np, logits = sess.run((x_adv, preds), {x_ph: x, y_ph: y_oh})
y_pred = logits.argmax(-1)
is_adv = y_pred != y
x_adv_np = x_adv_np.transpose((0, 3, 1, 2))
return is_adv, (torch.tensor(x_adv_np, dtype=torch.float32),
torch.tensor(logits, dtype=torch.float32)
)
def train_classifier(
n_features: int,
train_loader: DataLoader,
raw_train_loader: DataLoader,
logits: torch.Tensor,
device: str,
rescale_logits: LogitRescalingType,
linear_layer,
clean_preds,
x_ph,
sess,
binarized_model_wrapper
):
assert rescale_logits is None
cls = _train_logistic_regression_classifier(
n_features,
train_loader,
logits if logits is not None else None,
"sklearn",
20000,
device,
n_classes=2,
rescale_logits=rescale_logits,
solution_goodness="perfect",
      class_weight={0: 1, 1: 5}
)
clw = cls.weight.data.detach().numpy()
clb = cls.bias.data.detach().numpy()
# since the first two MMT weights look roughly like this:
# 1: (10, 0, ..., 0)
# 2: (-1, 9.9, 0, ..., 0)
# we can easily construct a weight matrix that remaps the feature space to
# these two vectors
nw = np.zeros((256, 256))
nb = np.zeros(256)
nw[:2] = clw
nb[:2] = clb
linear_layer.set_weights((nw.T, nb))
# now test
n_correct_inner = 0
n_correct_outer = 0
n_total_inner = 0
n_total_outer = 0
for x, y in raw_train_loader:
x = x.numpy().transpose((0, 2, 3, 1))
y = y.numpy()
logits = sess.run(clean_preds, {x_ph: x})
y_pred = logits.argmax(-1)
is_correct = y_pred == y
n_correct_inner += is_correct[y == 0].sum()
n_correct_outer += is_correct[y == 1].sum()
n_total_inner += (y == 0).sum()
n_total_outer += (y == 1).sum()
accuracy_inner = n_correct_inner / n_total_inner
accuracy_outer = n_correct_outer / n_total_outer
  if accuracy_outer != 1.0:
    raise RuntimeError(
        "Solver failed to find a solution that perfectly detects boundary "
        f"samples (outer accuracy: {accuracy_outer})")
  if accuracy_inner == 0:
    raise RuntimeError(
        "Solver failed to find a solution that detects at least some inner "
        f"samples (inner accuracy: {accuracy_inner})")
return binarized_model_wrapper
def setup_binarized_model(sess, model_input, final_features, mean_logits):
assert FLAGS.use_ball
new_layer = Lambda(partial(MMLDA_layer, means=mean_logits[:2],
num_class=2, use_ball=FLAGS.use_ball,
normalize_output_for_ball=False))
linear_layer = Dense(256)
transformed_features = linear_layer(final_features)
predictions = new_layer(transformed_features)
model = Model(input=model_input, output=predictions)
# will be used by binarization test to eval the model
binarized_model_wrapper = BinarizedModelWrapper(model_input, predictions, sess)
return model, linear_layer, binarized_model_wrapper
class BinarizedModelWrapper:
def __init__(self, input, output, sess):
self.input = input
self.output = output
self.sess = sess
def __call__(self, x):
return_torch = False
if isinstance(x, torch.Tensor):
x = x.cpu().numpy()
return_torch = True
if isinstance(x, np.ndarray):
if x.shape[1] == 3:
x = x.transpose(0, 2, 3, 1)
out = self.sess.run(self.output, {self.input: x})
if return_torch:
out = torch.tensor(out, dtype=torch.float32)
return out
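# Illustrative note: the wrapper accepts either NCHW torch tensors or NHWC
# numpy arrays; torch inputs are converted to NHWC numpy for the TF1 session,
# and outputs are converted back to torch tensors, so the PyTorch-based
# binarization test harness can call this TF1 model transparently.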
def run_test():
(model, model_input, final_features, mean_logits), \
x_place, y_place, sess, (x_test, y_test) = \
setup_model_and_load_data()
del y_place
y_place = tf.placeholder(tf.float32, shape=(None, 2))
binarized_model, linear_layer, binarized_model_wrapper = \
setup_binarized_model(
sess,
model_input,
final_features,
mean_logits)
bin_clean_preds = binarized_model(x_place)
clean_preds = model(x_place)
wrap_ensemble = KerasModelWrapper(binarized_model, num_class=2, binarized_model=True)
# Initialize the attack method
if FLAGS.attack_method == 'MadryEtAl':
att = attacks.MadryEtAl(wrap_ensemble)
elif FLAGS.attack_method == 'FastGradientMethod':
att = attacks.FastGradientMethod(wrap_ensemble)
elif FLAGS.attack_method == 'MomentumIterativeMethod':
att = attacks.MomentumIterativeMethod(wrap_ensemble)
elif FLAGS.attack_method == 'BasicIterativeMethod':
att = attacks.BasicIterativeMethod(wrap_ensemble)
elif FLAGS.attack_method == "Adaptive":
from adaptive_attack import FeatureSpaceProjectedGradientDescent
att = FeatureSpaceProjectedGradientDescent(wrap_ensemble, logit_means=mean_logits,
projection="l2")
# Consider the attack to be constant
eval_par = {'batch_size': FLAGS.batch_size}
# TODO: shouldn't this be a 255?
eps_ = FLAGS.epsilon / 256.0
print("Epsilon:", eps_)
y_target = None
if FLAGS.attack_method == 'FastGradientMethod':
att_params = {
'eps': eps_,
'clip_min': 0,
'clip_max': 1,
'y_target': y_target
}
else:
att_params = {
'eps': eps_,
# 'eps_iter': eps_*1.0/FLAGS.num_iter,
# 'eps_iter': 3.*eps_/FLAGS.num_iter,
'eps_iter': 2. / 256.,
'clip_min': 0,
'clip_max': 1,
'nb_iter': FLAGS.num_iter,
'y_target': y_target
}
if FLAGS.attack_method == "Adaptive":
att_params["y"] = y_place
att_params['eps_iter'] = 0.03 / 256.
print("att_params", att_params)
if FLAGS.attack_method != "Adaptive":
import cleverhans.attacks
from fgm_patched import fgm_patched
cleverhans.attacks.fgm = fgm_patched
print("patched fgm function")
adv_x = tf.stop_gradient(att.generate(x_place, **att_params))
bin_adv_preds = binarized_model(adv_x)
def _model_forward_pass(x_np, features_only=False, features_and_logits=False):
x_np = np.transpose(x_np, (0, 2, 3, 1))
if features_only:
f = sess.run(final_features, {model_input: x_np})
return f
elif features_and_logits:
f, l = sess.run((final_features,
clean_preds), {model_input: x_np})
f = np.stack(f, 1)
return f, l
else:
l = sess.run(clean_preds, {model_input: x_np})
return l
feature_extractor = TensorFlow1ToPyTorchWrapper(
logit_forward_pass=_model_forward_pass,
logit_forward_and_backward_pass=None
)
test_loader = build_dataloader_from_arrays(x_test.transpose((0, 3, 1, 2)),
y_test,
batch_size=FLAGS.batch_size)
from argparse_utils import DecisionBoundaryBinarizationSettings
scores_logit_differences_and_validation_accuracies = \
interior_boundary_discrimination_attack(
feature_extractor,
test_loader,
attack_fn=partial(run_attack, preds=bin_adv_preds, sess=sess, x_ph=x_place,
y_ph=y_place, x_adv=adv_x),
linearization_settings=DecisionBoundaryBinarizationSettings(
epsilon=eps_,
norm="linf",
lr=10000,
n_boundary_points=FLAGS.n_boundary_points,
n_inner_points=FLAGS.n_inner_points,
adversarial_attack_settings=None,
optimizer="sklearn"
),
n_samples=FLAGS.n_samples,
device="cpu",
batch_size=FLAGS.batch_size,
n_samples_evaluation=200,
n_samples_asr_evaluation=200,
train_classifier_fn=partial(
train_classifier,
linear_layer=linear_layer,
clean_preds=bin_clean_preds,
x_ph=x_place,
sess=sess,
binarized_model_wrapper=binarized_model_wrapper
),
fail_on_exception=False,
rescale_logits=None,
sample_training_data_from_corners=FLAGS.sample_from_corners,
# decision_boundary_closeness=0.9999,
)
print(format_result(scores_logit_differences_and_validation_accuracies,
FLAGS.n_samples))
if __name__ == "__main__":
main()
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten, Lambda
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import mnist, cifar10, cifar100
import tensorflow as tf
import numpy as np
import os
from scipy.io import loadmat
import math
from utils.model import resnet_v1, resnet_v2
from utils.keras_wraper_ensemble import KerasModelWrapper
import cleverhans.attacks as attacks
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 50, '')
tf.app.flags.DEFINE_integer('mean_var', 10, 'parameter in MMLDA')
tf.app.flags.DEFINE_string('optimizer', 'mom', '')
tf.app.flags.DEFINE_integer('version', 2, '')
tf.app.flags.DEFINE_float('lr', 0.01, 'initial lr')
tf.app.flags.DEFINE_bool('use_MMLDA', True, 'whether use MMLDA or softmax')
tf.app.flags.DEFINE_bool('use_ball', True, 'whether use ball loss or softmax loss')
tf.app.flags.DEFINE_float('adv_ratio', 1.0, 'the ratio of adversarial examples in each mini-batch')
tf.app.flags.DEFINE_string('attack_method', 'MadryEtAl', 'the attack used to craft adversarial examples for adv training')
tf.app.flags.DEFINE_bool('use_target', False, 'whether use target attack or untarget attack for adversarial training')
tf.app.flags.DEFINE_bool('use_BN', True, 'whether use batch normalization in the network')
tf.app.flags.DEFINE_bool('use_random', False, 'whether use random center or MMLDA center in the network')
tf.app.flags.DEFINE_string('dataset', 'mnist', '')
# Load the dataset
if FLAGS.dataset=='mnist':
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.expand_dims(x_train, axis=3)
x_test = np.expand_dims(x_test, axis=3)
epochs = 50
num_class = 10
epochs_inter = [30,40]
elif FLAGS.dataset=='cifar10':
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
epochs = 200
num_class = 10
epochs_inter = [100,150]
elif FLAGS.dataset=='cifar100':
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
epochs = 200
num_class = 100
epochs_inter = [100,150]
else:
print('Unknown dataset')
# These parameters are usually fixed
subtract_pixel_mean = True
version = FLAGS.version # Model version
n = 5 # n=5 for resnet-32 v1
# Computed depth from supplied model parameter n
if version == 1:
depth = n * 6 + 2
feature_dim = 64
elif version == 2:
depth = n * 9 + 2
feature_dim = 256
if FLAGS.use_random==True:
name_random = '_random'
else:
name_random = ''
#Load means in MMLDA
kernel_dict = loadmat('kernel_paras/meanvar1_featuredim'+str(feature_dim)+'_class'+str(num_class)+name_random+'.mat')
mean_logits = kernel_dict['mean_logits'] #num_class X num_dense
mean_logits = FLAGS.mean_var * tf.constant(mean_logits,dtype=tf.float32)
# Input image dimensions.
input_shape = x_train.shape[1:]
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
clip_min = 0.0
clip_max = 1.0
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
clip_min -= x_train_mean
clip_max -= x_train_mean
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_class)
y_test = keras.utils.to_categorical(y_test, num_class)
def dot_loss(y_true, y_pred):
return - tf.reduce_sum(y_pred * y_true, axis=-1) #batch_size X 1
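# Note (assumption, spelled out for clarity): with one-hot y_true and MMC
# logits of the form -||f - mu_k||^2, dot_loss reduces to the squared
# distance between the features and the true-class center,
#   dot_loss = -logits[true] = ||f - mu_true||^2,
# so minimizing it pulls the features onto their class center.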
#MMLDA prediction function
def MMLDA_layer(x, means=mean_logits, num_class=num_class, use_ball=FLAGS.use_ball):
#x_shape = batch_size X num_dense
x_expand = tf.tile(tf.expand_dims(x,axis=1),[1,num_class,1]) #batch_size X num_class X num_dense
mean_expand = tf.expand_dims(means,axis=0) #1 X num_class X num_dense
logits = -tf.reduce_sum(tf.square(x_expand - mean_expand), axis=-1) #batch_size X num_class
if use_ball==True:
return logits
else:
        logits = logits - tf.reduce_max(logits, axis=-1, keepdims=True) #Subtract the max for numerical stability
        logits = logits - tf.log(tf.reduce_sum(tf.exp(logits), axis=-1, keepdims=True)) #Log-softmax normalization
return logits
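# Minimal NumPy sketch of the MMLDA logits above (illustrative only): for a
# single feature vector f and the class centers `means`, each logit is the
# negative squared L2 distance to the corresponding center,
#   logits = -np.sum((f[None, :] - means) ** 2, axis=-1)
# so argmax(logits) picks the nearest class center.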
def lr_schedule(epoch):
lr = FLAGS.lr
if epoch > epochs_inter[1]:
lr *= 1e-2
elif epoch > epochs_inter[0]:
lr *= 1e-1
print('Learning rate: ', lr)
return lr
model_input = Input(shape=input_shape)
#dim of logits is batchsize x dim_means
if version == 2:
original_model,_,_,_,final_features = resnet_v2(input=model_input, depth=depth, num_classes=num_class, use_BN=FLAGS.use_BN)
else:
original_model,_,_,_,final_features = resnet_v1(input=model_input, depth=depth, num_classes=num_class, use_BN=FLAGS.use_BN)
if FLAGS.use_BN==True:
BN_name = '_withBN'
print('Use BN in the model')
else:
BN_name = '_noBN'
print('Do not use BN in the model')
#Whether use target attack for adversarial training
if FLAGS.use_target==False:
is_target = ''
y_target = None
else:
is_target = 'target'
y_target = tf.multinomial(tf.ones((FLAGS.batch_size,num_class)),1) #batch_size x 1
y_target = tf.one_hot(tf.reshape(y_target,(FLAGS.batch_size,)),num_class) #batch_size x num_class
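# Note: tf.multinomial over uniform logits draws, for every example in the
# batch, a target class uniformly at random; unlike the rejection-sampling
# loop used in the evaluation scripts, this target can occasionally coincide
# with the true label.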
if FLAGS.use_MMLDA==True:
print('Using MMT Training Scheme')
new_layer = Lambda(MMLDA_layer)
predictions = new_layer(final_features)
model = Model(input=model_input, output=predictions)
use_ball_=''
train_loss = dot_loss
if FLAGS.use_ball==False:
print('Using softmax function (MMLDA)')
use_ball_='_softmax'
filepath_dir = 'advtrained_models/'+FLAGS.dataset+'/resnet32v'+str(version)+'_meanvar'+str(FLAGS.mean_var) \
+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size) \
+'_'+is_target+FLAGS.attack_method \
+'_advratio'+str(FLAGS.adv_ratio)+BN_name+name_random \
+use_ball_
else:
print('Using softmax loss')
model = original_model
train_loss = keras.losses.categorical_crossentropy
filepath_dir = 'advtrained_models/'+FLAGS.dataset+'/resnet32v'+str(version)+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size)+'_'+is_target+FLAGS.attack_method+'_advratio'+str(FLAGS.adv_ratio)+BN_name
wrap_ensemble = KerasModelWrapper(model, num_class=num_class)
eps = 8. / 256.
if FLAGS.attack_method == 'MadryEtAl':
print('apply '+is_target+'PGD'+' for advtrain')
att = attacks.MadryEtAl(wrap_ensemble)
att_params = {
'eps': eps,
#'eps_iter': 3.*eps/10.,
'eps_iter': 2. / 256.,
'clip_min': clip_min,
'clip_max': clip_max,
'nb_iter': 10,
'y_target': y_target
}
elif FLAGS.attack_method == 'MomentumIterativeMethod':
print('apply '+is_target+'MIM'+' for advtrain')
att = attacks.MomentumIterativeMethod(wrap_ensemble)
att_params = {
'eps': eps,
#'eps_iter': 3.*eps/10.,
'eps_iter': 2. / 256.,
'clip_min': clip_min,
'clip_max': clip_max,
'nb_iter': 10,
'y_target': y_target
}
elif FLAGS.attack_method == 'FastGradientMethod':
print('apply '+is_target+'FGSM'+' for advtrain')
att = attacks.FastGradientMethod(wrap_ensemble)
att_params = {'eps': eps,
'clip_min': clip_min,
'clip_max': clip_max,
'y_target': y_target}
adv_x = tf.stop_gradient(att.generate(model_input, **att_params))
adv_output = model(adv_x)
normal_output = model(model_input)
def adv_train_loss(_y_true, _y_pred):
return (1-FLAGS.adv_ratio) * train_loss(_y_true, normal_output) + FLAGS.adv_ratio * train_loss(_y_true, adv_output)
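# The adversarial training loss is a convex combination of the clean and
# adversarial losses; e.g. with the default adv_ratio=1.0 the model is
# trained purely on adversarial examples, while adv_ratio=0.5 would weight
# both terms equally.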
if FLAGS.optimizer=='Adam':
model.compile(
loss=adv_train_loss,
optimizer=Adam(lr=lr_schedule(0)),
metrics=['accuracy'])
print('Using Adam optimizer')
elif FLAGS.optimizer=='mom':
model.compile(
loss=adv_train_loss,
optimizer=SGD(lr=lr_schedule(0), momentum=0.9),
metrics=['accuracy'])
print('Using momentum optimizer')
model.summary()
# Prepare model saving directory.
save_dir = os.path.join(os.getcwd(), filepath_dir)
model_name = 'model.{epoch:03d}.h5'
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)
# Prepare callbacks for model saving and for learning rate adjustment.
checkpoint = ModelCheckpoint(
filepath=filepath, monitor='val_loss', mode='min', verbose=2, save_best_only=False, save_weights_only=True, period=5)
lr_scheduler = LearningRateScheduler(lr_schedule)
callbacks = [checkpoint, lr_scheduler]
# Run training, with data augmentation.
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
# epsilon for ZCA whitening
zca_epsilon=1e-06,
# randomly shift images horizontally
width_shift_range=0.1,
# randomly shift images vertically
height_shift_range=0.1,
# set mode for filling points outside the input boundaries
fill_mode='nearest',
# randomly flip images
horizontal_flip=True)
# Compute quantities required for featurewise normalization
datagen.fit(x_train)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(
datagen.flow(x_train, y_train, batch_size=FLAGS.batch_size),
validation_data=(x_test, y_test),
epochs=epochs,
verbose=2,
workers=4,
callbacks=callbacks)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten, Lambda
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import mnist, cifar10, cifar100
import tensorflow as tf
import numpy as np
import os
from scipy.io import loadmat
import math
from utils.model import resnet_v1, resnet_v2
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 64, '')
tf.app.flags.DEFINE_float('mean_var', 10, 'parameter in MMLDA')
tf.app.flags.DEFINE_string('optimizer', 'mom', '')
tf.app.flags.DEFINE_integer('version', 1, '')
tf.app.flags.DEFINE_float('lr', 0.01, 'initial lr')
tf.app.flags.DEFINE_integer('feature_dim', 256, '')
tf.app.flags.DEFINE_bool('is_2d_demo', False, 'whether is a 2d demo on MNIST')
tf.app.flags.DEFINE_bool('use_ball', True, 'whether use ball loss or softmax')
tf.app.flags.DEFINE_bool('use_MMLDA', True, 'whether use MMLDA or softmax')
tf.app.flags.DEFINE_bool('use_BN', True, 'whether use batch normalization in the network')
tf.app.flags.DEFINE_bool('use_random', False, 'whether use random center or MMLDA center in the network')
tf.app.flags.DEFINE_bool('use_dense', True, 'whether use extra dense layer in the network')
tf.app.flags.DEFINE_bool('use_leaky', False, 'whether use leaky relu in the network')
tf.app.flags.DEFINE_string('dataset', 'mnist', '')
random_seed = '' # '' or '2' or '3'
# Load the dataset
if FLAGS.dataset=='mnist':
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.repeat(np.expand_dims(x_train, axis=3), 3, axis=3)
x_test = np.repeat(np.expand_dims(x_test, axis=3), 3, axis=3)
epochs = 50
num_class = 10
epochs_inter = [30,40]
elif FLAGS.dataset=='cifar10':
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
epochs = 200
num_class = 10
epochs_inter = [100,150]
elif FLAGS.dataset=='cifar100':
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
epochs = 200
num_class = 100
epochs_inter = [100,150]
else:
print('Unknown dataset')
# Training parameters
subtract_pixel_mean = True
version = FLAGS.version # Model version
n = 5 # n=5 for resnet-32 v1
# Computed depth from supplied model parameter n
if version == 1:
depth = n * 6 + 2
feature_dim = 64
elif version == 2:
depth = n * 9 + 2
feature_dim = FLAGS.feature_dim
if FLAGS.use_random==True:
name_random = '_random'
random_seed = '2'
else:
name_random = ''
if FLAGS.use_leaky==True:
name_leaky = '_withleaky'
else:
name_leaky = ''
if FLAGS.use_dense==True:
name_dense = ''
else:
name_dense = '_nodense'
if FLAGS.is_2d_demo==True:
is_2d_demo = '_demoMNIST'
else:
is_2d_demo = ''
#Load centers in MMC
kernel_dict = loadmat('kernel_paras/meanvar1_featuredim'+str(feature_dim)+'_class'+str(num_class)+'.mat')
mean_logits_np = kernel_dict['mean_logits'] #num_class X num_dense
mean_logits = FLAGS.mean_var * tf.constant(mean_logits_np,dtype=tf.float32)
# Input image dimensions.
input_shape = x_train.shape[1:]
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_class)
y_test = keras.utils.to_categorical(y_test, num_class)
def dot_loss(y_true, y_pred):
return - tf.reduce_sum(y_pred * y_true, axis=-1) #batch_size X 1
#MMLDA prediction function
def MMLDA_layer(x, means=mean_logits, num_class=num_class, use_ball=FLAGS.use_ball):
#x_shape = batch_size X num_dense
x_expand = tf.tile(tf.expand_dims(x,axis=1),[1,num_class,1]) #batch_size X num_class X num_dense
mean_expand = tf.expand_dims(means,axis=0) #1 X num_class X num_dense
logits = -tf.reduce_sum(tf.square(x_expand - mean_expand), axis=-1) #batch_size X num_class
if use_ball==True:
return logits
else:
        logits = logits - tf.reduce_max(logits, axis=-1, keepdims=True) #Subtract the max for numerical stability
        logits = logits - tf.log(tf.reduce_sum(tf.exp(logits), axis=-1, keepdims=True)) #Log-softmax normalization
return logits
def lr_schedule(epoch):
lr = FLAGS.lr
if epoch > epochs_inter[1]:
lr *= 1e-2
elif epoch > epochs_inter[0]:
lr *= 1e-1
print('Learning rate: ', lr)
return lr
model_input = Input(shape=input_shape)
#dim of logits is batchsize x dim_means
if version == 2:
original_model,_,_,_,final_features = resnet_v2(input=model_input, depth=depth, num_classes=num_class, num_dims=feature_dim, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
else:
original_model,_,_,_,final_features = resnet_v1(input=model_input, depth=depth, num_classes=num_class, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
if FLAGS.use_BN==True:
BN_name = '_withBN'
print('Use BN in the model')
else:
BN_name = '_noBN'
print('Do not use BN in the model')
if FLAGS.use_MMLDA==True:
print('Using MM Training Scheme')
new_layer = Lambda(MMLDA_layer)
predictions = new_layer(final_features)
model = Model(input=model_input, output=predictions)
use_ball_=''
train_loss = dot_loss
if FLAGS.use_ball==False:
print('Using softmax function (MMLDA)')
use_ball_='_softmax'
filepath_dir = 'trained_models/'+FLAGS.dataset+'/resnet32v'+str(version)+'_meanvar'+str(FLAGS.mean_var) \
+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size) \
+BN_name+name_leaky+name_dense+name_random+random_seed+use_ball_+is_2d_demo
else:
print('Using softmax loss')
model = original_model
train_loss = keras.losses.categorical_crossentropy
filepath_dir = 'trained_models/'+FLAGS.dataset+'/resnet32v'+str(version)+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size)+BN_name+name_leaky
if FLAGS.optimizer=='Adam':
model.compile(
loss=train_loss,
optimizer=Adam(lr=lr_schedule(0)),
metrics=['accuracy'])
elif FLAGS.optimizer=='mom':
model.compile(
loss=train_loss,
optimizer=SGD(lr=lr_schedule(0), momentum=0.9),
metrics=['accuracy'])
model.summary()
# Prepare model saving directory.
save_dir = os.path.join(os.getcwd(), filepath_dir)
model_name = 'model.{epoch:03d}.h5'
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)
# Prepare callbacks for model saving and for learning rate adjustment.
checkpoint = ModelCheckpoint(
filepath=filepath, monitor='val_loss', mode='min', verbose=2, save_best_only=False, save_weights_only=True, period=5)
lr_scheduler = LearningRateScheduler(lr_schedule)
callbacks = [checkpoint, lr_scheduler]
# Run training, with data augmentation.
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
# epsilon for ZCA whitening
zca_epsilon=1e-06,
# randomly shift images horizontally
width_shift_range=0.1,
# randomly shift images vertically
height_shift_range=0.1,
# set mode for filling points outside the input boundaries
fill_mode='nearest',
# randomly flip images
horizontal_flip=True)
# Compute quantities required for featurewise normalization
datagen.fit(x_train)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(
datagen.flow(x_train, y_train, batch_size=FLAGS.batch_size),
validation_data=(x_test, y_test),
epochs=epochs,
verbose=2,
workers=4,
callbacks=callbacks)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cleverhans.attacks import optimize_linear
from cleverhans import utils_tf
import numpy as np
import tensorflow as tf
def fgm_patched(x,
logits,
y=None,
eps=0.3,
ord=np.inf,
clip_min=None,
clip_max=None,
targeted=False,
sanity_checks=True):
"""
TensorFlow implementation of the Fast Gradient Method.
:param x: the input placeholder
:param logits: output of model.get_logits
  :param y: (optional) A placeholder for the true labels. If targeted
            is true, then provide the target label. Otherwise, provide
            this parameter only if you'd like to use true labels when
            crafting adversarial samples; if it is left as None, model
            predictions are used as labels to avoid the "label leaking"
            effect (explained in this paper:
            https://arxiv.org/abs/1611.01236). Default is None.
            Labels should be one-hot-encoded.
:param eps: the epsilon (input variation parameter)
:param ord: (optional) Order of the norm (mimics NumPy).
Possible values: np.inf, 1 or 2.
:param clip_min: Minimum float value for adversarial example components
:param clip_max: Maximum float value for adversarial example components
:param targeted: Is the attack targeted or untargeted? Untargeted, the
default, will try to make the label incorrect. Targeted
will instead try to move in the direction of being more
like y.
:return: a tensor for the adversarial example
"""
asserts = []
# If a data range was specified, check that the input was in that range
if clip_min is not None:
asserts.append(utils_tf.assert_greater_equal(
x, tf.cast(clip_min, x.dtype)))
if clip_max is not None:
asserts.append(utils_tf.assert_less_equal(x, tf.cast(clip_max, x.dtype)))
# Make sure the caller has not passed probs by accident
assert logits.op.type != 'Softmax'
if y is None:
# Using model predictions as ground truth to avoid label leaking
preds_max = tf.reduce_max(logits, 1, keepdims=True)
y = tf.to_float(tf.equal(logits, preds_max))
y = tf.stop_gradient(y)
y = y / tf.reduce_sum(y, 1, keepdims=True)
# Compute loss
from cleverhans.compat import softmax_cross_entropy_with_logits
#loss = softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = -tf.reduce_sum(logits * y, axis=-1)
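  # Note (assumption about the patch's intent): for MMC models the "logits"
  # are negative squared distances to the class centers, so softmax
  # cross-entropy saturates quickly; the dot loss above keeps a
  # non-vanishing gradient that pushes features away from the true center.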
if targeted:
loss = -loss
# Define gradient of loss wrt input
grad, = tf.gradients(loss, x)
optimal_perturbation = optimize_linear(grad, eps, ord)
# Add perturbation to original example to obtain adversarial example
adv_x = x + optimal_perturbation
# If clipping is needed, reset all values outside of [clip_min, clip_max]
if (clip_min is not None) or (clip_max is not None):
# We don't currently support one-sided clipping
assert clip_min is not None and clip_max is not None
adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)
if sanity_checks:
with tf.control_dependencies(asserts):
adv_x = tf.identity(adv_x)
# adv_x = tf.Print(adv_x, [loss, logits, y, grad])
return adv_x
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten, Lambda
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import mnist, cifar10, cifar100
import tensorflow as tf
import numpy as np
import os
from scipy.io import loadmat
import math
from mmt_utils.model import resnet_v1, resnet_v2
import cleverhans.attacks as attacks
from cleverhans.utils_tf import model_eval
from mmt_utils.keras_wraper_ensemble import KerasModelWrapper
from mmt_utils.utils_model_eval import model_eval_targetacc
from sklearn.metrics import roc_auc_score
from fgm_patched import fgm_patched
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 50, 'batch_size for attack')
tf.app.flags.DEFINE_string('optimizer', 'mom', '')
tf.app.flags.DEFINE_float('mean_var', 10, 'parameter in MMLDA')
tf.app.flags.DEFINE_string('attack_method', 'FastGradientMethod', '')
tf.app.flags.DEFINE_string('attack_method_for_advtrain', 'FastGradientMethod', '')
tf.app.flags.DEFINE_integer('version', 2, '')
tf.app.flags.DEFINE_float('lr', 0.01, 'initial lr')
tf.app.flags.DEFINE_bool('target', True, 'is target attack or not')
tf.app.flags.DEFINE_bool('use_target', False, 'whether use target attack or untarget attack for adversarial training')
tf.app.flags.DEFINE_integer('num_iter', 10, '')
tf.app.flags.DEFINE_bool('use_ball', True, 'whether use ball loss or softmax')
tf.app.flags.DEFINE_bool('use_MMLDA', True, 'whether use MMLDA or softmax')
tf.app.flags.DEFINE_bool('use_advtrain', True, 'whether use advtraining or normal training')
tf.app.flags.DEFINE_float('adv_ratio', 1.0, 'the ratio of adversarial examples in each mini-batch')
tf.app.flags.DEFINE_integer('epoch', 1, 'the epoch of model to load')
tf.app.flags.DEFINE_bool('use_BN', True, 'whether use batch normalization in the network')
tf.app.flags.DEFINE_string('dataset', 'mnist', '')
tf.app.flags.DEFINE_bool('normalize_output_for_ball', True, 'whether apply softmax in the inference phase')
tf.app.flags.DEFINE_bool('use_random', False, 'whether use random center or MMLDA center in the network')
tf.app.flags.DEFINE_bool('use_dense', True, 'whether use extra dense layer in the network')
tf.app.flags.DEFINE_bool('use_leaky', False, 'whether use leaky relu in the network')
tf.app.flags.DEFINE_integer('n_samples', 512, '')
tf.app.flags.DEFINE_string('checkpoint', None, '')
# For calculate AUC-scores
tf.app.flags.DEFINE_bool('is_calculate_auc', False, 'whether to calculate auc scores')
tf.app.flags.DEFINE_bool('is_auc_metric_softmax_for_MMC', False, 'whether use softmax to calculate auc metrics for MMC')
# Load the dataset
if FLAGS.dataset=='mnist':
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.expand_dims(x_train, axis=3)
x_test = np.expand_dims(x_test, axis=3)
epochs = 50
num_class = 10
epochs_inter = [30,40]
x_place = tf.placeholder(tf.float32, shape=(None, 28, 28, 3))
elif FLAGS.dataset=='cifar10':
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
epochs = 200
num_class = 10
epochs_inter = [100,150]
x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
elif FLAGS.dataset=='cifar100':
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
epochs = 200
num_class = 100
epochs_inter = [100,150]
x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
else:
print('Unknown dataset')
# These parameters are usually fixed
subtract_pixel_mean = True
version = FLAGS.version # Model version
n = 18 # n=5 for resnet-32 v1, n=18 for Resnet110 (according to README.md)
# Computed depth from supplied model parameter n
if version == 1:
depth = n * 6 + 2
feature_dim = 64
elif version == 2:
depth = n * 9 + 2
feature_dim = 256
if FLAGS.use_random==True:
name_random = '_random'
else:
name_random = ''
if FLAGS.use_leaky==True:
name_leaky = '_withleaky'
else:
name_leaky = ''
if FLAGS.use_dense==True:
name_dense = ''
else:
name_dense = '_nodense'
#Load means in MMLDA
kernel_dict = loadmat('case_studies/mmt/kernel_paras/meanvar1_featuredim'+str(feature_dim)+'_class'+str(num_class)+name_random+'.mat')
mean_logits = kernel_dict['mean_logits'] #num_class X num_dense
mean_logits = FLAGS.mean_var * tf.constant(mean_logits,dtype=tf.float32)
#MMLDA prediction function
def MMLDA_layer(x, means=mean_logits, num_class=num_class, use_ball=FLAGS.use_ball):
#x_shape = batch_size X num_dense
x_expand = tf.tile(tf.expand_dims(x,axis=1),[1,num_class,1]) #batch_size X num_class X num_dense
mean_expand = tf.expand_dims(means,axis=0) #1 X num_class X num_dense
logits = -tf.reduce_sum(tf.square(x_expand - mean_expand), axis=-1) #batch_size X num_class
if use_ball==True:
if FLAGS.normalize_output_for_ball==False:
return logits
else:
return tf.nn.softmax(logits, axis=-1)
else:
return tf.nn.softmax(logits, axis=-1)
# Load the data.
y_test_target = np.zeros_like(y_test)
for i in range(y_test.shape[0]):
l = np.random.randint(num_class)
while l == y_test[i][0]:
l = np.random.randint(num_class)
y_test_target[i][0] = l
print('Finish crafting y_test_target!!!!!!!!!!!')
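# The loop above performs rejection sampling: it redraws until the target
# label differs from the true label, i.e. each y_test_target entry is drawn
# uniformly from the num_class - 1 wrong classes.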
# Input image dimensions.
input_shape = x_train.shape[1:]
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
clip_min = 0.0
clip_max = 1.0
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
clip_min -= np.max(x_train_mean)
clip_max -= np.min(x_train_mean)
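# Note: taking np.max/np.min of the per-pixel training mean yields scalar
# clip bounds that enclose the per-pixel valid range [0 - mean, 1 - mean],
# unlike the per-pixel array bounds used in the training script; presumably
# scalars are used here because some attack implementations expect float
# clip values.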
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_class)
y_test = keras.utils.to_categorical(y_test, num_class)
y_test_target = keras.utils.to_categorical(y_test_target, num_class)
# Define input TF placeholder
y_place = tf.placeholder(tf.float32, shape=(None, num_class))
y_target = tf.placeholder(tf.float32, shape=(None, num_class))
sess = tf.Session()
keras.backend.set_session(sess)
model_input = Input(shape=input_shape)
#dim of logits is batchsize x dim_means
if version == 2:
original_model,_,_,_,final_features = resnet_v2(immediate_input=model_input, input=model_input, depth=depth, num_classes=num_class, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
else:
original_model,_,_,_,final_features = resnet_v1(immediate_input=model_input, input=model_input, depth=depth, num_classes=num_class, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
print("depth", depth)
print("#layers", len(original_model.layers))
if FLAGS.use_BN==True:
BN_name = '_withBN'
print('Use BN in the model')
else:
BN_name = '_noBN'
print('Do not use BN in the model')
#Whether use target attack for adversarial training
if FLAGS.use_target==False:
is_target = ''
else:
is_target = 'target'
if FLAGS.use_advtrain==True:
dirr = 'advtrained_models/'+FLAGS.dataset+'/'
attack_method_for_advtrain = '_'+is_target+FLAGS.attack_method_for_advtrain
adv_ratio_name = '_advratio'+str(FLAGS.adv_ratio)
mean_var = int(FLAGS.mean_var)
else:
dirr = 'trained_models/'+FLAGS.dataset+'/'
attack_method_for_advtrain = ''
adv_ratio_name = ''
mean_var = FLAGS.mean_var
if FLAGS.use_MMLDA==True:
print('Using MMLDA')
new_layer = Lambda(MMLDA_layer)
predictions = new_layer(final_features)
model = Model(input=model_input, output=predictions)
use_ball_=''
if FLAGS.use_ball==False:
print('Using softmax function')
use_ball_='_softmax'
filepath_dir = dirr+'resnet32v'+str(version)+'_meanvar'+str(mean_var) \
+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size) \
+attack_method_for_advtrain+adv_ratio_name+BN_name+name_leaky+name_dense+name_random+use_ball_+'/' \
+'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
else:
print('Using softmax loss')
model = original_model
filepath_dir = dirr+'resnet32v'+str(version)+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size)+attack_method_for_advtrain+adv_ratio_name+BN_name+name_leaky+'/' \
+'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
print(filepath_dir)
model.load_weights(FLAGS.checkpoint)
# TODO: ch_compatibility_mode needs to be set to True since the authors of
# the model mixed up logits and predictions, which accidentally worked with
# cleverhans 2.1.0 but breaks with later versions; setting this flag modifies
# the model so that their results can be reproduced with later cleverhans
# versions.
wrap_ensemble = KerasModelWrapper(model, num_class=num_class, binarized_model=False,
ch_compatibility_mode=True)
#model.load_weights(filepath_dir)
# Initialize the attack method
if FLAGS.attack_method == 'MadryEtAl':
att = attacks.MadryEtAl(wrap_ensemble)
elif FLAGS.attack_method == 'FastGradientMethod':
att = attacks.FastGradientMethod(wrap_ensemble)
elif FLAGS.attack_method == 'MomentumIterativeMethod':
att = attacks.MomentumIterativeMethod(wrap_ensemble)
elif FLAGS.attack_method == 'BasicIterativeMethod':
att = attacks.BasicIterativeMethod(wrap_ensemble)
elif FLAGS.attack_method == "Adaptive":
from adaptive_attack import FeatureSpaceProjectedGradientDescent
att = FeatureSpaceProjectedGradientDescent(wrap_ensemble, logit_means=mean_logits,
max_steps=600, projection="linf")
# Consider the attack to be constant
eval_par = {'batch_size': FLAGS.batch_size}
if FLAGS.n_samples != -1:
print(f"Using {FLAGS.n_samples} random test samples")
inds = np.arange(len(x_test))
np.random.shuffle(inds)
inds = inds[:FLAGS.n_samples]
x_test, y_test = x_test[inds], y_test[inds]
if FLAGS.is_calculate_auc:
# Calculate model preds for clean inputs
avg_score_nor = np.array([])
nor_indicator = np.ones((1000,), dtype=int)
for i in range(10):
avg_score_nor_batch = sess.run(tf.reduce_max(model(x_place),axis=-1), feed_dict={x_place:x_test[i*100:(i+1)*100]})
avg_score_nor = np.concatenate((avg_score_nor, avg_score_nor_batch), axis=0)
print('Calculate score for nor images with batch', i)
# Calculate model preds for adv inputs
eps_ = 8 / 256.0
if FLAGS.target==False:
y_target = None
if FLAGS.attack_method == 'FastGradientMethod':
att_params = {'eps': eps_,
'clip_min': clip_min,
'clip_max': clip_max,
'y_target': y_target}
else:
att_params = {'eps': eps_,
#'eps_iter': eps_*1.0/FLAGS.num_iter,
#'eps_iter': 3.*eps_/FLAGS.num_iter,
'eps_iter': 2. / 256.,
'clip_min': clip_min,
'clip_max': clip_max,
'nb_iter': FLAGS.num_iter,
'y_target': y_target}
if FLAGS.attack_method == "Adaptive":
att_params["y"] = y_place
att_params['eps_iter'] = 1 / 256.
print("clip_min", clip_min)
print("clip_max", clip_max)
adv_x = tf.stop_gradient(att.generate(x_place, **att_params))
preds = tf.reduce_max(model(adv_x),axis=-1)
if FLAGS.is_auc_metric_softmax_for_MMC==True:
preds = tf.reduce_max(tf.nn.softmax(model(adv_x)),axis=-1)
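    # The AUC computed below uses the model's maximum class score as a
    # detection statistic: clean inputs are labeled 1 (nor_indicator) and
    # adversarial inputs 0 (adv_indicator), so a higher AUC means the score
    # separates clean from adversarial inputs better.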
avg_score_adv = np.array([])
adv_indicator = np.zeros((1000,), dtype=int)
if FLAGS.target==True:
for i in range(10):
avg_score_adv_batch = sess.run(preds, feed_dict={x_place:x_test[i*100:(i+1)*100], y_target:y_test_target[i*100:(i+1)*100]})
avg_score_adv = np.concatenate((avg_score_adv, avg_score_adv_batch), axis=0)
print('Calculate score for target attack images with batch', i)
else:
for i in range(10):
avg_score_adv_batch = sess.run(preds, feed_dict={x_place:x_test[i*100:(i+1)*100]})
avg_score_adv = np.concatenate((avg_score_adv, avg_score_adv_batch), axis=0)
print('Calculate score for untarget attack images with batch', i)
score_all = np.concatenate((avg_score_nor,avg_score_adv), axis=0)
indicator_all = np.concatenate((nor_indicator,adv_indicator), axis=0)
print('AUC score is', roc_auc_score(indicator_all, score_all))
else:
clip_min = clip_min.item()
clip_max = clip_max.item()
for eps in range(4):
eps_ = (eps+1) * 8
print('eps is %d'%eps_)
eps_ = eps_ / 256.0
if FLAGS.target==False:
y_target = None
if FLAGS.attack_method == 'FastGradientMethod':
att_params = {'eps': eps_,
'clip_min': clip_min,
'clip_max': clip_max,
'y_target': y_target}
else:
att_params = {'eps': eps_,
#'eps_iter': eps_*1.0/FLAGS.num_iter,
#'eps_iter': 3.*eps_/FLAGS.num_iter,
'eps_iter': 2. / 256.,
'clip_min': clip_min,
'clip_max': clip_max,
'nb_iter': FLAGS.num_iter,
'y_target': y_target,
'y': y_place,
}
if FLAGS.attack_method == "Adaptive":
att_params["y"] = y_place
att_params['eps_iter'] = 0.5 / 256.
# debugging statements
print("att_params", att_params)
if FLAGS.attack_method != "Adaptive":
import cleverhans.attacks
cleverhans.attacks.fgm = fgm_patched
print("patched fgm function")
#clean_preds = model(x_place)
#ll = sess.run(clean_preds, {x_place: x_test[:16]})
#import pdb; pdb.set_trace()
adv_x = tf.stop_gradient(att.generate(x_place, **att_params))
preds = model(adv_x)
if FLAGS.target==False:
acc = model_eval(sess, x_place, y_place, preds, x_test, y_test, args=eval_par)
print('adv_acc: %.3f' %acc)
else:
acc = model_eval_targetacc(sess, x_place, y_place, y_target, preds, x_test, y_test, y_test_target, args=eval_par)
print('adv_acc_target: %.3f' %acc)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten, Lambda
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import mnist, cifar10, cifar100
import tensorflow as tf
import numpy as np
import os
from scipy.io import loadmat
import math
from utils.model import resnet_v1, resnet_v2
import cleverhans.attacks as attacks
from cleverhans.utils_tf import model_eval
from utils.keras_wraper_ensemble import KerasModelWrapper
from utils.utils_model_eval import model_eval_targetacc, model_eval_for_SPSA, model_eval_for_SPSA_targetacc
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 50, 'batch_size for attack')
tf.app.flags.DEFINE_string('optimizer', 'mom', '')
tf.app.flags.DEFINE_float('mean_var', 10, 'parameter in MMLDA')
tf.app.flags.DEFINE_string('attack_method', 'FastGradientMethod', '')
tf.app.flags.DEFINE_string('attack_method_for_advtrain', 'FastGradientMethod', '')
tf.app.flags.DEFINE_integer('version', 2, '')
tf.app.flags.DEFINE_float('lr', 0.01, 'initial lr')
tf.app.flags.DEFINE_bool('target', True, 'is target attack or not')
tf.app.flags.DEFINE_bool('use_target', False, 'whether use target attack or untarget attack for adversarial training')
tf.app.flags.DEFINE_bool('use_ball', True, 'whether use ball loss or softmax')
tf.app.flags.DEFINE_bool('use_MMLDA', True, 'whether use MMLDA or softmax')
tf.app.flags.DEFINE_bool('use_advtrain', True, 'whether use advtraining or normal training')
tf.app.flags.DEFINE_float('adv_ratio', 1.0, 'the ratio of adversarial examples in each mini-batch')
tf.app.flags.DEFINE_integer('epoch', 1, 'the epoch of model to load')
tf.app.flags.DEFINE_bool('use_BN', True, 'whether use batch normalization in the network')
tf.app.flags.DEFINE_string('dataset', 'mnist', '')
tf.app.flags.DEFINE_bool('normalize_output_for_ball', True, 'whether apply softmax in the inference phase')
tf.app.flags.DEFINE_bool('use_random', False, 'whether use random center or MMLDA center in the network')
tf.app.flags.DEFINE_bool('use_dense', True, 'whether use extra dense layer in the network')
tf.app.flags.DEFINE_bool('use_leaky', False, 'whether use leaky relu in the network')
tf.app.flags.DEFINE_float('CW_confidence', 1.0, 'the confidence for CW-L2 attacks')
tf.app.flags.DEFINE_float('SPSA_epsilon', 8, 'the eps for SPSA attacks in 256 pixel values')
# Load the dataset
if FLAGS.dataset=='mnist':
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.expand_dims(x_train, axis=3)
x_test = np.expand_dims(x_test, axis=3)
epochs = 50
num_class = 10
epochs_inter = [30,40]
x_place = tf.placeholder(tf.float32, shape=(None, 28, 28, 3))
elif FLAGS.dataset=='cifar10':
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
epochs = 200
num_class = 10
epochs_inter = [100,150]
x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
elif FLAGS.dataset=='cifar100':
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
epochs = 200
num_class = 100
epochs_inter = [100,150]
x_place = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
else:
print('Unknown dataset')
# These parameters are usually fixed
subtract_pixel_mean = True
version = FLAGS.version # Model version
n = 5 # n=5 for resnet-32 v1
# Computed depth from supplied model parameter n
if version == 1:
depth = n * 6 + 2
feature_dim = 64
elif version == 2:
depth = n * 9 + 2
feature_dim = 256
if FLAGS.use_random==True:
name_random = '_random'
else:
name_random = ''
if FLAGS.use_leaky==True:
name_leaky = '_withleaky'
else:
name_leaky = ''
if FLAGS.use_dense==True:
name_dense = ''
else:
name_dense = '_nodense'
#Load means in MMLDA
kernel_dict = loadmat('kernel_paras/meanvar1_featuredim'+str(feature_dim)+'_class'+str(num_class)+name_random+'.mat')
mean_logits = kernel_dict['mean_logits'] #num_class X num_dense
mean_logits = FLAGS.mean_var * tf.constant(mean_logits,dtype=tf.float32)
#MMLDA prediction function
def MMLDA_layer(x, means=mean_logits, num_class=num_class, use_ball=FLAGS.use_ball):
#x_shape = batch_size X num_dense
x_expand = tf.tile(tf.expand_dims(x,axis=1),[1,num_class,1]) #batch_size X num_class X num_dense
mean_expand = tf.expand_dims(means,axis=0) #1 X num_class X num_dense
logits = -tf.reduce_sum(tf.square(x_expand - mean_expand), axis=-1) #batch_size X num_class
if use_ball==True:
if FLAGS.normalize_output_for_ball==False:
return logits
else:
return tf.nn.softmax(logits, axis=-1)
else:
return tf.nn.softmax(logits, axis=-1)
# Load the data.
y_test_target = np.zeros_like(y_test)
for i in range(y_test.shape[0]):
l = np.random.randint(num_class)
while l == y_test[i][0]:
l = np.random.randint(num_class)
y_test_target[i][0] = l
print('Finish crafting y_test_target!!!!!!!!!!!')
# Input image dimensions.
input_shape = x_train.shape[1:]
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
clip_min = 0.0
clip_max = 1.0
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
clip_min -= x_train_mean
clip_max -= x_train_mean
print (np.min(x_train_mean))
print (np.max(x_train_mean))
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_class)
y_test_index = np.squeeze(np.copy(y_test).astype('int32'))
y_test = keras.utils.to_categorical(y_test, num_class)
y_test_target_index = np.squeeze(np.copy(y_test_target).astype('int32'))
y_test_target = keras.utils.to_categorical(y_test_target, num_class)
# Define input TF placeholder
y_place = tf.placeholder(tf.float32, shape=(None, num_class))
y_target = tf.placeholder(tf.float32, shape=(None, num_class))
sess = tf.Session()
keras.backend.set_session(sess)
model_input = Input(shape=input_shape)
#dim of logits is batchsize x dim_means
if version == 2:
original_model,_,_,_,final_features = resnet_v2(input=model_input, depth=depth, num_classes=num_class, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
else:
original_model,_,_,_,final_features = resnet_v1(input=model_input, depth=depth, num_classes=num_class, \
use_BN=FLAGS.use_BN, use_dense=FLAGS.use_dense, use_leaky=FLAGS.use_leaky)
if FLAGS.use_BN==True:
BN_name = '_withBN'
print('Use BN in the model')
else:
BN_name = '_noBN'
print('Do not use BN in the model')
#Whether use target attack for adversarial training
if FLAGS.use_target==False:
is_target = ''
else:
is_target = 'target'
if FLAGS.use_advtrain==True:
dirr = 'advtrained_models/'+FLAGS.dataset+'/'
attack_method_for_advtrain = '_'+is_target+FLAGS.attack_method_for_advtrain
adv_ratio_name = '_advratio'+str(FLAGS.adv_ratio)
mean_var = int(FLAGS.mean_var)
else:
dirr = 'trained_models/'+FLAGS.dataset+'/'
attack_method_for_advtrain = ''
adv_ratio_name = ''
mean_var = FLAGS.mean_var
if FLAGS.use_MMLDA==True:
print('Using MMLDA')
new_layer = Lambda(MMLDA_layer)
predictions = new_layer(final_features)
model = Model(input=model_input, output=predictions)
use_ball_=''
if FLAGS.use_ball==False:
print('Using softmax function')
use_ball_='_softmax'
filepath_dir = dirr+'resnet32v'+str(version)+'_meanvar'+str(mean_var) \
+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size) \
+attack_method_for_advtrain+adv_ratio_name+BN_name+name_leaky+name_dense+name_random+use_ball_+'/' \
+'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
else:
print('Using softmax loss')
model = original_model
filepath_dir = dirr+'resnet32v'+str(version)+'_'+FLAGS.optimizer \
+'_lr'+str(FLAGS.lr) \
+'_batchsize'+str(FLAGS.batch_size)+attack_method_for_advtrain+adv_ratio_name+BN_name+name_leaky+'/' \
+'model.'+str(FLAGS.epoch).zfill(3)+'.h5'
wrap_ensemble = KerasModelWrapper(model, num_class=num_class)
model.load_weights(filepath_dir)
# Initialize the attack method
if FLAGS.attack_method == 'SaliencyMapMethod':
num_samples = 100
eval_par = {'batch_size': 1}
att = attacks.SaliencyMapMethod(wrap_ensemble, sess=sess)
att_params = {
'theta': 1.,
'gamma': 0.1,
'clip_min': clip_min,
'clip_max': clip_max,
}
adv_x = att.generate(x_place, **att_params)
elif FLAGS.attack_method == 'CarliniWagnerL2': #Update on 2019.3.29
num_samples = 500
eval_par = {'batch_size': 10}
att = attacks.CarliniWagnerL2(wrap_ensemble, sess=sess)
if FLAGS.target==False:
att_params = {
'batch_size': 10,
'confidence': FLAGS.CW_confidence,
'learning_rate': 5e-3,
'binary_search_steps': 9,
'max_iterations': 1000,
'initial_const': 0.01,
'abort_early': True,
'clip_min': clip_min,
'clip_max': clip_max
}
else:
att_params = {
'batch_size': 10,
'confidence': FLAGS.CW_confidence,
'y_target': y_target,
'learning_rate': 5e-3,
'binary_search_steps': 9,
'max_iterations': 1000,
'initial_const': 0.01,
'abort_early': True,
'clip_min': clip_min,
'clip_max': clip_max
}
if FLAGS.use_MMLDA == True and FLAGS.use_ball == True:
is_MMC = True
else:
is_MMC = False
adv_x = att.generate(x_place, is_MMC=is_MMC, **att_params)
elif FLAGS.attack_method == 'ElasticNetMethod':
num_samples = 1000
eval_par = {'batch_size': 100}
att = attacks.ElasticNetMethod(wrap_ensemble, sess=sess)
att_params = {
'batch_size': 100,
'confidence': 0.1,
'learning_rate': 0.01,
'binary_search_steps': 1,
'max_iterations': 1000,
'initial_const': 1.0,
'beta': 1e-2,
'fista': True,
'decision_rule': 'EN',
'clip_min': clip_min,
'clip_max': clip_max
}
adv_x = att.generate(x_place, **att_params)
elif FLAGS.attack_method == 'DeepFool':
num_samples = 1000
eval_par = {'batch_size': 1}
att = attacks.DeepFool(wrap_ensemble, sess=sess)
att_params = {
'max_iter': 10,
'clip_min': clip_min,
'clip_max': clip_max,
'nb_candidate': 1
}
adv_x = att.generate(x_place, **att_params)
elif FLAGS.attack_method == 'LBFGS':
num_samples = 1000
eval_par = {'batch_size': 1}
att = attacks.LBFGS(wrap_ensemble, sess=sess)
clip_min = np.mean(clip_min)
clip_max = np.mean(clip_max)
att_params = {
'y_target': y_target,
'batch_size': 1,
'binary_search_steps': 1,
'max_iterations': 100,
'initial_const': 1.0,
'clip_min': clip_min,
'clip_max': clip_max
}
adv_x = att.generate(x_place, **att_params)
elif FLAGS.attack_method == 'SPSA': #Update on 2019.3.29
num_samples = 1000
eval_par = {'batch_size': 1}
x = tf.placeholder(tf.float32, shape=(1, 32, 32, 3))
y_index = tf.placeholder(tf.uint8, shape=())
if FLAGS.target:
y_target_index = tf.placeholder(tf.uint8, shape=())
else:
y_target_index = None
att = attacks.SPSA(wrap_ensemble, sess=sess)
if FLAGS.use_MMLDA == True and FLAGS.use_ball == True:
is_MMC = True
else:
is_MMC = False
adv_x = att.generate(x, y_index, y_target=y_target_index, epsilon=FLAGS.SPSA_epsilon / 256., num_steps=10,
is_targeted=FLAGS.target, early_stop_loss_threshold=None,
learning_rate=0.01, delta=0.01, batch_size=128, spsa_iters=1,
is_debug=False, is_MMC=is_MMC)
preds = wrap_ensemble.get_probs(adv_x)
if FLAGS.attack_method == 'LBFGS':
print(model_eval_targetacc(
sess,
x_place,
y_place,
y_target,
preds,
x_test[:num_samples],
y_test[:num_samples],
y_test_target[:num_samples],
args=eval_par))
elif FLAGS.attack_method == 'SPSA':
if FLAGS.target==False:
acc = model_eval_for_SPSA(
sess,
x,
y_place,
y_index,
preds,
x_test[:num_samples],
y_test_index[:num_samples],
y_test[:num_samples],
args=eval_par)
print('adv_acc: %.3f' %acc)
else:
acc = model_eval_for_SPSA_targetacc(
sess,
x,
y_place,
y_index,
y_target_index,
preds,
x_test[:num_samples],
y_test_index[:num_samples],
y_test[:num_samples],
y_test_target_index[:num_samples],
args=eval_par)
print('adv_acc_target: %.3f' %acc)
elif FLAGS.attack_method == 'CarliniWagnerL2':
l2dis_test = np.zeros((num_samples,))
reshape_dis = tf.reshape(x_place - adv_x, shape = [-1, 3072])
if FLAGS.target==False:
for i in range(int(num_samples/10)):
l2dis_test[i*10:(i+1)*10]=sess.run(tf.norm(reshape_dis, ord=2, axis=-1), feed_dict={x_place: x_test[i*10:(i+1)*10], \
y_place: y_test[i*10:(i+1)*10]})
print('Predict batch for test ', i, ', l2dis_mean is ', np.mean(l2dis_test[i*10:(i+1)*10]))
print('Total l2dismean is ',np.mean(l2dis_test))
acc = model_eval(sess, x_place, y_place, preds, x_test[:num_samples], y_test[:num_samples], args=eval_par)
print('adv_acc: %.3f' %acc)
else:
for i in range(int(num_samples/10)):
l2dis_test[i*10:(i+1)*10]=sess.run(tf.norm(reshape_dis, ord=2, axis=-1), feed_dict={x_place: x_test[i*10:(i+1)*10], \
y_place: y_test[i*10:(i+1)*10], y_target: y_test_target[i*10:(i+1)*10]})
print('Predict batch for test ', i, ', l2dis_mean is ', np.mean(l2dis_test[i*10:(i+1)*10]))
print('Total l2dismean is ',np.mean(l2dis_test))
acc = model_eval_targetacc(sess, x_place, y_place, y_target, preds, x_test[:num_samples], y_test[:num_samples], y_test_target[:num_samples], args=eval_par)
print('adv_acc_target: %.3f' %acc)
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cleverhans.attacks import Attack
import tensorflow as tf
import warnings
import numpy as np
from cleverhans import utils_tf
from cleverhans.utils_tf import clip_eta
from cleverhans.attacks import optimize_linear
from six.moves import xrange
def fgm(x,
features,
logit_means,
y=None,
eps=0.3,
ord=np.inf,
clip_min=None,
clip_max=None,
targeted=False,
sanity_checks=True,
projection="linf"):
asserts = []
# If a data range was specified, check that the input was in that range
if clip_min is not None:
asserts.append(utils_tf.assert_greater_equal(
x, tf.cast(clip_min, x.dtype)))
if clip_max is not None:
asserts.append(utils_tf.assert_less_equal(x, tf.cast(clip_max, x.dtype)))
if y is None:
raise NotImplementedError("labels must be supplied")
# Compute loss
loss, loss_diff = loss_fn(logit_means=logit_means, labels=y,
features=features)
if targeted:
loss = -loss
# Define gradient of loss wrt input
grad, = tf.gradients(loss, x)
# optimal_perturbation = optimize_linear(grad, eps, ord)
if projection == "l2":
square = tf.maximum(1e-12,
tf.reduce_sum(tf.square(grad),
reduction_indices=list(
xrange(1, len(grad.get_shape()))),
keepdims=True))
optimal_perturbation = grad / tf.sqrt(square)
# Scale perturbation to be the solution for the norm=eps rather than
# norm=1 problem
scaled_perturbation = utils_tf.mul(eps, optimal_perturbation)
else:
optimal_perturbation = tf.sign(grad)
scaled_perturbation = utils_tf.mul(eps, optimal_perturbation)
# Add perturbation to original example to obtain adversarial example
adv_x = x + scaled_perturbation
adv_x = x + utils_tf.clip_eta(adv_x - x, ord, eps)
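  # The second assignment re-projects the scaled step onto the `ord`-ball of
  # radius eps via clip_eta; this matters when the step direction
  # (`projection`) and the constraint norm (`ord`) differ, e.g. an
  # l2-normalized step under an linf constraint.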
# If clipping is needed, reset all values outside of [clip_min, clip_max]
if (clip_min is not None) or (clip_max is not None):
# We don't currently support one-sided clipping
assert clip_min is not None and clip_max is not None
adv_x = utils_tf.clip_by_value(adv_x, clip_min, clip_max)
if sanity_checks:
with tf.control_dependencies(asserts):
adv_x = tf.identity(adv_x)
return adv_x, loss_diff
def loss_fn(logit_means,
sentinel=None,
labels=None,
features=None,
dim=-1, ):
"""
Wrapper around tf.nn.softmax_cross_entropy_with_logits_v2 to handle
deprecated warning
"""
# Make sure that all arguments were passed as named arguments.
if sentinel is not None:
name = "softmax_cross_entropy_with_logits"
raise ValueError("Only call `%s` with "
"named arguments (labels=..., logits=..., ...)"
% name)
if labels is None or features is None:
raise ValueError("Both labels and features must be provided.")
labels_oh = tf.stop_gradient(labels)
labels = tf.argmax(labels_oh, -1)
# find target labels
# ignore logit means for classes we are not considering (only relevant for
# binarization test)
logit_means = logit_means[:labels_oh.shape[-1]]
distances = tf.reduce_mean((tf.expand_dims(features, 1) - tf.expand_dims(logit_means, 0)) ** 2, -1)
distances = distances + 1e9 * labels_oh
target_labels = tf.argmin(distances, -1)
# target_labels = (labels + 1) % 2
target_logit_means = tf.gather(logit_means, target_labels)
source_logit_means = tf.gather(logit_means, labels)
dist = tf.reduce_mean((features - target_logit_means) ** 2, -1)
dist_other = tf.reduce_mean((features - source_logit_means) ** 2, -1)
dist_diff = dist - dist_other
# invert sign so that we perform gradient ascent instead of descent
return -tf.reduce_sum(dist), dist_diff
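# Sign convention (derived from the code above): dist_diff =
# ||f - mu_target||^2 - ||f - mu_source||^2 turns negative once the features
# are closer to the chosen target center than to the true-class center; the
# attack loop uses this as its success signal. The returned loss is
# -sum(dist), so ascending it (as fgm does) moves the features toward the
# target center.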
class FeatureSpaceProjectedGradientDescent(Attack):
"""
This class implements either the Basic Iterative Method
(Kurakin et al. 2016) when rand_init is set to 0. or the
Madry et al. (2017) method when rand_minmax is larger than 0.
Paper link (Kurakin et al. 2016): https://arxiv.org/pdf/1607.02533.pdf
Paper link (Madry et al. 2017): https://arxiv.org/pdf/1706.06083.pdf
:param model: cleverhans.model.Model
:param sess: optional tf.Session
:param dtypestr: dtype of the data
:param default_rand_init: whether to use random initialization by default
:param kwargs: passed through to super constructor
"""
def __init__(self, model, logit_means, sess=None, dtypestr='float32',
default_rand_init=True, max_steps=99999, projection='linf', **kwargs):
"""
Create a FeatureSpaceProjectedGradientDescent instance.
Note: the model parameter should be an instance of the
cleverhans.model.Model abstraction provided by CleverHans.
"""
super(FeatureSpaceProjectedGradientDescent, self).__init__(model, sess=sess,
dtypestr=dtypestr,
**kwargs)
self.feedable_kwargs = ('eps', 'eps_iter', 'y', 'y_target', 'clip_min',
'clip_max')
self.structural_kwargs = ['ord', 'nb_iter', 'rand_init', 'sanity_checks']
self.logit_means = logit_means
self.default_rand_init = default_rand_init
self.max_steps = max_steps
self.projection = projection
def generate(self, x, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: See `parse_params`
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
asserts = []
# If a data range was specified, check that the input was in that range
if self.clip_min is not None:
asserts.append(utils_tf.assert_greater_equal(x,
tf.cast(self.clip_min,
x.dtype)))
if self.clip_max is not None:
asserts.append(utils_tf.assert_less_equal(x,
tf.cast(self.clip_max,
x.dtype)))
# Initialize loop variables
if self.rand_init:
eta = tf.random_uniform(tf.shape(x),
tf.cast(-self.rand_minmax, x.dtype),
tf.cast(self.rand_minmax, x.dtype),
dtype=x.dtype)
else:
eta = tf.zeros(tf.shape(x))
# Clip eta
eta = clip_eta(eta, self.ord, self.eps)
adv_x = x + eta
if self.clip_min is not None or self.clip_max is not None:
adv_x = utils_tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
if self.y_target is not None:
raise NotImplementedError("Targeted mode not fully implemented yet")
elif self.y is not None:
y = self.y
targeted = False
else:
raise NotImplementedError("labels must be supplied")
y_kwarg = 'y_target' if targeted else 'y'
fgm_params = {
'eps': self.eps_iter,
y_kwarg: y,
'ord': self.ord,
'clip_min': self.clip_min,
'clip_max': self.clip_max,
"logit_means": self.logit_means
}
if self.ord == 1:
raise NotImplementedError("It's not clear that FGM is a good inner loop"
" step for PGD when ord=1, because ord=1 FGM "
" changes only one pixel at a time. We need "
" to rigorously test a strong ord=1 PGD "
"before enabling this feature.")
# Use getattr() to avoid errors in eager execution attacks
def cond(i, _, _2, loss_diff, first_idx_done):
return tf.reduce_any(
tf.logical_or(
tf.less(i, self.nb_iter),
tf.logical_and(
tf.greater(loss_diff, tf.zeros([])),
tf.less(i, self.max_steps)
)
# tf.logical_or(
# tf.less_equal(first_idx_done, tf.zeros([])),
# tf.logical_and(
# i < 2000,
# tf.logical_not(
# tf.logical_and(
# tf.less(loss_diff, tf.zeros([])),
# tf.less(first_idx_done + 10, i)
# ))
# )
# )
)
)
def body(i, adv_x, _, _2, first_idx_done):
adv_x_before = adv_x
adv_x, loss_diff = fgm(adv_x, features=self.model.get_mmd_features(adv_x),
**fgm_params, projection=self.projection)
# adv_x = tf.Print(adv_x, [i, first_idx_done, loss_diff])
# Clipping perturbation eta to self.ord norm ball
eta = adv_x - x
eta = clip_eta(eta, self.ord, self.eps)
adv_x = x + eta
# Redo the clipping.
# FGM already did it, but subtracting and re-adding eta can add some
# small numerical error.
if self.clip_min is not None or self.clip_max is not None:
adv_x = utils_tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
first_idx_done = tf.where(
tf.logical_and(first_idx_done > 0, loss_diff < 0),
first_idx_done,
i * tf.where(loss_diff < 0, tf.ones(tf.shape(adv_x)[0]), tf.zeros(tf.shape(adv_x)[0]))
)
return i + 1, adv_x, adv_x_before, loss_diff, first_idx_done
_, _, adv_x, _, _ = tf.while_loop(cond, body,
[tf.zeros([]), adv_x, adv_x,
tf.ones(tf.shape(adv_x)[0]),
-1 * tf.ones(tf.shape(adv_x)[0])],
back_prop=True)
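# NOTE: the third loop variable (adv_x_before, the iterate prior to the
# final step) is what gets returned as the adversarial example.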
# Asserts run only on CPU.
# When multi-GPU eval code tries to force all PGD ops onto GPU, this
# can cause an error.
common_dtype = tf.float64
asserts.append(utils_tf.assert_less_equal(tf.cast(self.eps_iter,
dtype=common_dtype),
tf.cast(self.eps,
dtype=common_dtype)))
if self.ord == np.inf and self.clip_min is not None:
# The 1e-6 is needed to compensate for numerical error.
# Without the 1e-6 this fails when e.g. eps=.2, clip_min=.5,
# clip_max=.7
asserts.append(utils_tf.assert_less_equal(tf.cast(self.eps, x.dtype),
1e-6 + tf.cast(self.clip_max,
x.dtype)
- tf.cast(self.clip_min,
x.dtype)))
if self.sanity_checks:
with tf.control_dependencies(asserts):
adv_x = tf.identity(adv_x)
return adv_x
def parse_params(self,
eps=0.3,
eps_iter=0.05,
nb_iter=10,
y=None,
ord=np.inf,
clip_min=None,
clip_max=None,
y_target=None,
rand_init=None,
rand_minmax=0.3,
sanity_checks=True,
**kwargs):
"""
Takes in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
Attack-specific parameters:
:param eps: (optional float) maximum distortion of adversarial example
compared to original input
:param eps_iter: (optional float) step size for each attack iteration
:param nb_iter: (optional int) Number of attack iterations.
:param y: (optional) A tensor with the true labels.
:param y_target: (optional) A tensor with the labels to target. Leave
y_target=None if y is also set. Labels should be
one-hot-encoded.
:param ord: (optional) Order of the norm (mimics Numpy).
Possible values: np.inf, 1 or 2.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
:param sanity_checks: bool Insert tf asserts checking values
(Some tests need to run with no sanity checks because the
tests intentionally configure the attack strangely)
"""
# Save attack-specific parameters
self.eps = eps
if rand_init is None:
rand_init = self.default_rand_init
self.rand_init = rand_init
if self.rand_init:
self.rand_minmax = eps
else:
self.rand_minmax = 0.
self.eps_iter = eps_iter
self.nb_iter = nb_iter
self.y = y
self.y_target = y_target
self.ord = ord
self.clip_min = clip_min
self.clip_max = clip_max
if isinstance(eps, float) and isinstance(eps_iter, float):
# If these are both known at compile time, we can check before anything
# is run. If they are tf, we can't check them yet.
assert eps_iter <= eps, (eps_iter, eps)
if self.y is not None and self.y_target is not None:
raise ValueError("Must not set both y and y_target")
# Check if order of the norm is acceptable given current implementation
if self.ord not in [np.inf, 1, 2]:
raise ValueError("Norm order must be either np.inf, 1, or 2.")
self.sanity_checks = sanity_checks
if len(kwargs.keys()) > 0:
warnings.warn("kwargs is unused and will be removed on or after "
"2019-04-26.")
return True
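# Usage sketch (illustrative only; `wrapped_model`, `logit_means`, `sess`,
# `x_ph` and `y_ph` are assumed to exist elsewhere, e.g. a KerasModelWrapper
# exposing get_mmd_features() plus precomputed Max-Mahalanobis centers):
#
#   attack = FeatureSpaceProjectedGradientDescent(
#       wrapped_model, logit_means, sess=sess,
#       max_steps=2000, projection='linf')
#   adv_x = attack.generate(x_ph, eps=8. / 255, eps_iter=2. / 255,
#                           nb_iter=100, y=y_ph, ord=np.inf,
#                           clip_min=0., clip_max=1.)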
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from distutils.version import LooseVersion
import logging
import math
import numpy as np
import tensorflow as tf
from cleverhans.utils import batch_indices, _ArgsWrapper, create_logger
_logger = create_logger("cleverhans.utils.tf")
_logger.setLevel(logging.INFO)
zero = tf.constant(0, dtype=tf.float32)
num_classes = 10
log_offset = 1e-20
det_offset = 1e-6
def ensemble_diversity(y_true, y_pred, num_model):
bool_R_y_true = tf.not_equal(tf.ones_like(y_true) - y_true, zero) # batch_size X (num_class X num_models), 2-D
mask_non_y_pred = tf.boolean_mask(y_pred, bool_R_y_true) # batch_size X (num_class-1) X num_models, 1-D
mask_non_y_pred = tf.reshape(mask_non_y_pred, [-1, num_model, num_classes-1]) # batch_size X num_model X (num_class-1), 3-D
mask_non_y_pred = mask_non_y_pred / tf.norm(mask_non_y_pred, axis=2, keepdims=True) # batch_size X num_model X (num_class-1), 3-D
matrix = tf.matmul(mask_non_y_pred, tf.transpose(mask_non_y_pred, perm=[0, 2, 1])) # batch_size X num_model X num_model, 3-D
all_log_det = tf.linalg.logdet(matrix+det_offset*tf.expand_dims(tf.eye(num_model),0)) # batch_size X 1, 1-D
return all_log_det
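# Shape walk-through (illustrative): y_true and y_pred are
# [batch, num_model * num_classes]; masking out the true class and reshaping
# leaves [batch, num_model, num_classes - 1] row-normalized prediction
# vectors, and the returned log-determinant of their Gram matrix (one value
# per example) is the ensemble-diversity score -- larger values mean the
# members' non-true-class predictions are closer to mutually orthogonal.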
def model_eval_targetacc(sess, x, y, y_target, predictions, X_test=None, Y_test=None, Y_test_target=None,
feed=None, args=None):
"""
Compute the accuracy of a TF model on some data, additionally feeding
target labels (useful for evaluating targeted attacks)
:param sess: TF session to use
:param x: input placeholder
:param y: output placeholder (for the true labels)
:param y_target: placeholder for the target labels
:param predictions: model output predictions
:param X_test: numpy array with test inputs
:param Y_test: numpy array with true test labels
:param Y_test_target: numpy array with target test labels
:param feed: An optional dictionary that is appended to the feeding
         dictionary before the session runs. Can be used to feed
         the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
             Should contain `batch_size`
:return: a float with the accuracy value
"""
args = _ArgsWrapper(args or {})
assert args.batch_size, "Batch size was not given in args dict"
if X_test is None or Y_test_target is None or Y_test is None:
raise ValueError("X_test argument and Y_test argument and Y_test_target argument"
"must be supplied.")
# Define accuracy symbolically
if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
correct_preds = tf.equal(tf.argmax(y, axis=-1),
tf.argmax(predictions, axis=-1))
else:
correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
tf.argmax(predictions,
axis=tf.rank(predictions) - 1))
# Init result var
accuracy = 0.0
with sess.as_default():
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_test)
X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
dtype=X_test.dtype)
Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
dtype=Y_test.dtype)
Y_cur_target = np.zeros((args.batch_size,) + Y_test_target.shape[1:],
dtype=Y_test_target.dtype)
for batch in range(nb_batches):
if batch % 100 == 0 and batch > 0:
_logger.debug("Batch " + str(batch))
# Must not use the `batch_indices` function here, because it
# repeats some examples.
# It's acceptable to repeat during training, but not eval.
start = batch * args.batch_size
end = min(len(X_test), start + args.batch_size)
# The last batch may be smaller than all others. This should not
# affect the accuracy disproportionately.
cur_batch_size = end - start
X_cur[:cur_batch_size] = X_test[start:end]
Y_cur[:cur_batch_size] = Y_test[start:end]
Y_cur_target[:cur_batch_size] = Y_test_target[start:end]
feed_dict = {x: X_cur, y: Y_cur, y_target: Y_cur_target}
if feed is not None:
feed_dict.update(feed)
cur_corr_preds = correct_preds.eval(feed_dict=feed_dict)
accuracy += cur_corr_preds[:cur_batch_size].sum()
assert end >= len(X_test)
# Divide by number of examples to get final value
accuracy /= len(X_test)
return accuracy
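# Usage sketch (illustrative; the placeholders and the adversarial
# `preds_adv` graph are assumed to be built elsewhere):
#
#   acc = model_eval_targetacc(sess, x_ph, y_ph, y_target_ph, preds_adv,
#                              X_test=X_test, Y_test=Y_test,
#                              Y_test_target=Y_target,
#                              args={'batch_size': 128})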
def model_eval_for_SPSA_targetacc(sess, x, y, y_index, y_target, predictions, X_test=None, Y_test_index=None, Y_test=None, Y_test_target=None,
feed=None, args=None):
"""
Compute the accuracy of a TF model on some data, feeding per-example
label indices and target labels (SPSA-style evaluation, typically run
with batch_size 1 since the index and target are fed per example)
:param sess: TF session to use
:param x: input placeholder
:param y: output placeholder (for the true labels)
:param y_index: placeholder for the scalar label index
:param y_target: placeholder for the target label
:param predictions: model output predictions
:param X_test: numpy array with test inputs
:param Y_test: numpy array with true test labels
:param Y_test_index: numpy array with label indices
:param Y_test_target: numpy array with target labels
:param feed: An optional dictionary that is appended to the feeding
         dictionary before the session runs. Can be used to feed
         the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
             Should contain `batch_size`
:return: a float with the accuracy value
"""
args = _ArgsWrapper(args or {})
assert args.batch_size, "Batch size was not given in args dict"
if X_test is None or Y_test is None or Y_test_index is None or Y_test_target is None:
raise ValueError("X_test, Y_test, Y_test_index and Y_test_target arguments "
"must be supplied.")
# Define accuracy symbolically
if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
correct_preds = tf.equal(tf.argmax(y, axis=-1),
tf.argmax(predictions, axis=-1))
else:
correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
tf.argmax(predictions,
axis=tf.rank(predictions) - 1))
# Init result var
accuracy = 0.0
with sess.as_default():
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_test)
X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
dtype=X_test.dtype)
Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
dtype=Y_test.dtype)
Y_cur_target = np.zeros((args.batch_size,) + Y_test_target.shape[1:],
dtype=Y_test_target.dtype)
for batch in range(nb_batches):
print('Sample %d finished'%batch)
# Must not use the `batch_indices` function here, because it
# repeats some examples.
# It's acceptable to repeat during training, but not eval.
start = batch * args.batch_size
end = min(len(X_test), start + args.batch_size)
# The last batch may be smaller than all others. This should not
# affect the accuracy disproportionately.
cur_batch_size = end - start
X_cur[:cur_batch_size] = X_test[start:end]
Y_cur[:cur_batch_size] = Y_test[start:end]
#Y_cur_target[:cur_batch_size] = Y_test_target[start:end]
feed_dict = {x: X_cur, y: Y_cur, y_index: Y_test_index[start], y_target: Y_test_target[start]}
if feed is not None:
feed_dict.update(feed)
cur_corr_preds = correct_preds.eval(feed_dict=feed_dict)
accuracy += cur_corr_preds[:cur_batch_size].sum()
assert end >= len(X_test)
# Divide by number of examples to get final value
accuracy /= len(X_test)
return accuracy
def model_eval_for_SPSA(sess, x, y, y_index, predictions, X_test=None, Y_test_index=None, Y_test=None,
feed=None, args=None):
"""
Compute the accuracy of a TF model on some data, feeding per-example
label indices (SPSA-style evaluation)
:param sess: TF session to use
:param x: input placeholder
:param y: output placeholder (for labels)
:param y_index: placeholder for the scalar label index
:param predictions: model output predictions
:param X_test: numpy array with test inputs
:param Y_test: numpy array with test labels
:param Y_test_index: numpy array with label indices
:param feed: An optional dictionary that is appended to the feeding
         dictionary before the session runs. Can be used to feed
         the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
             Should contain `batch_size`
:return: a float with the accuracy value
"""
args = _ArgsWrapper(args or {})
assert args.batch_size, "Batch size was not given in args dict"
if X_test is None or Y_test is None or Y_test_index is None:
raise ValueError("X_test argument and Y_test and Y_test_index argument "
"must be supplied.")
# Define accuracy symbolically
if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
correct_preds = tf.equal(tf.argmax(y, axis=-1),
tf.argmax(predictions, axis=-1))
else:
correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
tf.argmax(predictions,
axis=tf.rank(predictions) - 1))
# Init result var
accuracy = 0.0
with sess.as_default():
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_test)
X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
dtype=X_test.dtype)
Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
dtype=Y_test.dtype)
for batch in range(nb_batches):
print('Sample %d finished'%batch)
# Must not use the `batch_indices` function here, because it
# repeats some examples.
# It's acceptable to repeat during training, but not eval.
start = batch * args.batch_size
end = min(len(X_test), start + args.batch_size)
# The last batch may be smaller than all others. This should not
# affect the accuracy disproportionately.
cur_batch_size = end - start
X_cur[:cur_batch_size] = X_test[start:end]
Y_cur[:cur_batch_size] = Y_test[start:end]
#Y_cur_target[:cur_batch_size] = Y_test_target[start:end]
feed_dict = {x: X_cur, y: Y_cur, y_index: Y_test_index[start]}
if feed is not None:
feed_dict.update(feed)
cur_corr_preds = correct_preds.eval(feed_dict=feed_dict)
accuracy += cur_corr_preds[:cur_batch_size].sum()
assert end >= len(X_test)
# Divide by number of examples to get final value
accuracy /= len(X_test)
return accuracy
def get_ensemble_diversity_values(sess, x, y, predictions, number_model, X_test=None, Y_test=None,
feed=None, args=None):
"""
Compute the per-example ensemble diversity (log-determinant) values of a
TF model on some data
:param sess: TF session to use
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param number_model: number of ensemble members
:param X_test: numpy array with test inputs
:param Y_test: numpy array with test labels
:param feed: An optional dictionary that is appended to the feeding
         dictionary before the session runs. Can be used to feed
         the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
             Should contain `batch_size`
:return: a numpy array with one ensemble diversity value per example
"""
args = _ArgsWrapper(args or {})
assert args.batch_size, "Batch size was not given in args dict"
if X_test is None or Y_test is None:
raise ValueError("X_test argument and Y_test argument"
"must be supplied.")
ensemble_diversity_records = np.array([])
get_batch_ensemble_diversity = ensemble_diversity(y, predictions, number_model)
with sess.as_default():
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_test)
X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
dtype=X_test.dtype)
Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
dtype=Y_test.dtype)
for batch in range(nb_batches):
if batch % 100 == 0 and batch > 0:
_logger.debug("Batch " + str(batch))
# Must not use the `batch_indices` function here, because it
# repeats some examples.
# It's acceptable to repeat during training, but not eval.
start = batch * args.batch_size
end = min(len(X_test), start + args.batch_size)
# The last batch may be smaller than all others. This should not
# affect the accuracy disproportionately.
cur_batch_size = end - start
X_cur[:cur_batch_size] = X_test[start:end]
Y_cur[:cur_batch_size] = Y_test[start:end]
feed_dict = {x: X_cur, y: Y_cur}
if feed is not None:
feed_dict.update(feed)
ensemble_diversity_records_batch = get_batch_ensemble_diversity.eval(feed_dict=feed_dict)
ensemble_diversity_records = np.concatenate((ensemble_diversity_records, ensemble_diversity_records_batch), axis=0)
assert end >= len(X_test)
return ensemble_diversity_records #len(X_test) X 1 |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten, GlobalAveragePooling2D
from keras.regularizers import l2
from keras.models import Model
def resnet_layer(inputs,
num_filters=16,
kernel_size=3,
strides=1,
activation='relu',
batch_normalization=True,
conv_first=True):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = Conv2D(
num_filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
else:
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
x = conv(x)
return x
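# Sketch (illustrative; assumes a Keras tensor `t`, e.g. from
# Input(shape=(32, 32, 3))):
#
#   h = resnet_layer(inputs=t)                            # conv -> BN -> ReLU
#   h = resnet_layer(inputs=h, num_filters=32, strides=2,
#                    conv_first=False)                    # BN -> ReLU -> conv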
def resnet_v1(immediate_input, input, depth, num_classes=10, num_dims=64, use_BN=True, use_dense=True, use_leaky=False):
"""ResNet Version 1 Model builder [a]
Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
Last ReLU is after the shortcut connection.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filters is
doubled. Within each stage, the layers have the same number of filters and
the same feature map sizes.
Feature map sizes:
stage 0: 32x32, 16
stage 1: 16x16, 32
stage 2: 8x8, 64
The number of parameters is approx the same as Table 6 of [a]:
ResNet20 0.27M
ResNet32 0.46M
ResNet44 0.66M
ResNet56 0.85M
ResNet110 1.7M
# Arguments
immediate_input (tensor): tensor to build the network on
input (tensor): original input tensor of the full model
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance, together with the inputs, outputs,
logits and final_features tensors
"""
if (depth - 2) % 6 != 0:
raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
# Start model definition.
num_filters = 16
num_res_blocks = int((depth - 2) / 6)
inputs = immediate_input
x = resnet_layer(inputs=inputs, batch_normalization=use_BN)
# Instantiate the stack of residual units
for stack in range(3):
for res_block in range(num_res_blocks):
strides = 1
if stack > 0 and res_block == 0: # first layer but not first stack
strides = 2 # downsample
y = resnet_layer(inputs=x, num_filters=num_filters, strides=strides, batch_normalization=use_BN)
y = resnet_layer(inputs=y, num_filters=num_filters, activation=None, batch_normalization=use_BN)
if stack > 0 and res_block == 0: # first layer but not first stack
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(
inputs=x,
num_filters=num_filters,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = keras.layers.add([x, y])
if use_leaky:
x = keras.layers.LeakyReLU(alpha=0.1)(x)
else:
x = Activation('relu')(x)
num_filters *= 2
# Add classifier on top.
# v1 does not use BN after last shortcut connection-ReLU
x = GlobalAveragePooling2D()(x)
#final_features = Flatten()(x)
if use_dense:
final_features = Dense(
num_dims, kernel_initializer='he_normal')(x)
else:
final_features = x
logits = Dense(
num_classes, kernel_initializer='he_normal')(final_features)
outputs = Activation('softmax')(logits)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
return model, inputs, outputs, logits, final_features
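# Usage sketch (illustrative): build a ResNet20-v1 on CIFAR-10-sized inputs.
# Here the same tensor is passed as both `immediate_input` and `input`.
#
#   from keras.layers import Input
#   inp = Input(shape=(32, 32, 3))
#   model, inputs, outputs, logits, feats = resnet_v1(inp, inp, depth=20)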
def resnet_v2(immediate_input, input, depth, num_classes=10, num_dims=256, use_BN=True, use_dense=True, use_leaky=False):
"""ResNet Version 2 Model builder [b]
Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as
bottleneck layer
First shortcut connection per layer is 1 x 1 Conv2D.
Second and onwards shortcut connection is identity.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filter maps is
doubled. Within each stage, the layers have the same number of filters and
the same feature map sizes.
Feature map sizes:
conv1 : 32x32, 16
stage 0: 32x32, 64
stage 1: 16x16, 128
stage 2: 8x8, 256
# Arguments
immediate_input (tensor): tensor to build the network on
input (tensor): original input tensor of the full model
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance, together with the input, outputs,
logits and final_features tensors
"""
if (depth - 2) % 9 != 0:
raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
# Start model definition.
num_filters_in = 16
num_res_blocks = int((depth - 2) / 9)
inputs = immediate_input
# v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths
x = resnet_layer(inputs=inputs, num_filters=num_filters_in, conv_first=True, batch_normalization=use_BN)
# Instantiate the stack of residual units
for stage in range(3):
for res_block in range(num_res_blocks):
activation = 'relu'
batch_normalization = use_BN
strides = 1
if stage == 0:
num_filters_out = num_filters_in * 4
if res_block == 0: # first layer and first stage
activation = None
batch_normalization = False
else:
num_filters_out = num_filters_in * 2
if res_block == 0: # first layer but not first stage
strides = 2 # downsample
# bottleneck residual unit
y = resnet_layer(
inputs=x,
num_filters=num_filters_in,
kernel_size=1,
strides=strides,
activation=activation,
batch_normalization=batch_normalization,
conv_first=False)
y = resnet_layer(
inputs=y, num_filters=num_filters_in, conv_first=False, batch_normalization=use_BN)
y = resnet_layer(
inputs=y,
num_filters=num_filters_out,
kernel_size=1,
conv_first=False, batch_normalization=use_BN)
if res_block == 0:
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(
inputs=x,
num_filters=num_filters_out,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = keras.layers.add([x, y])
num_filters_in = num_filters_out
# Add classifier on top.
# v2 has BN-ReLU before Pooling
if use_BN:
x = BatchNormalization()(x)
if use_leaky:
x = keras.layers.LeakyReLU(alpha=0.1)(x)
else:
x = Activation('relu')(x)
x = GlobalAveragePooling2D()(x)
#final_features = Flatten()(x)
if use_dense:
final_features = Dense(
num_dims, kernel_initializer='he_normal')(x)
else:
final_features = x
logits = Dense(
num_classes, kernel_initializer='he_normal')(final_features)
outputs = Activation('softmax')(logits)
# Instantiate model.
model = Model(inputs=input, outputs=outputs)
return model, input, outputs, logits, final_features |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Model construction utilities based on keras
"""
import warnings
from distutils.version import LooseVersion
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from cleverhans.model import Model, NoSuchLayerError
import tensorflow as tf
class KerasModelWrapper(Model):
"""
An implementation of `Model` that wraps a Keras model. It
specifically exposes the hidden features of a model by creating new models.
The symbolic graph is reused and so there is little overhead. Splitting
in-place operations can incur an overhead.
"""
# set ch_compatibility_mode=True to use this class with ch3.0.1 (added by AUTHOR)
def __init__(self, model, num_class=10, binarized_model=False,
ch_compatibility_mode=False):
"""
Create a wrapper for a Keras model
:param model: A Keras model
"""
super(KerasModelWrapper, self).__init__()
if model is None:
raise ValueError('model argument must be supplied.')
self.model = model
self.keras_model = None
self.num_classes = num_class
self.binarized_model=binarized_model
self.ch_compatibility_mode = ch_compatibility_mode
def _get_softmax_name(self):
"""
Looks for the name of the layer producing the output probabilities (in
these ensemble models, the averaging layer named 'average_1').
:return: name of the probability-producing layer
"""
for layer in self.model.layers:
cfg = layer.get_config()
if cfg['name'] == 'average_1':
return layer.name
raise Exception("No softmax layers found")
def _get_logits_name(self):
"""
Looks for the name of the layer producing the logits.
:return: name of layer producing the logits
"""
softmax_name = self._get_softmax_name()
softmax_layer = self.model.get_layer(softmax_name)
if not isinstance(softmax_layer, Activation):
# In this case, the activation is part of another layer
return softmax_name
if hasattr(softmax_layer, 'inbound_nodes'):
warnings.warn(
"Please update your version to keras >= 2.1.3; "
"support for earlier keras versions will be dropped on "
"2018-07-22")
node = softmax_layer.inbound_nodes[0]
else:
node = softmax_layer._inbound_nodes[0]
logits_name = node.inbound_layers[0].name
return logits_name
def get_mmd_features(self, x):
outs = self.fprop(x)
if self.binarized_model:
return outs["dense_3"]
else:
return outs["dense_1"]
def get_logits(self, x):
"""
:param x: A symbolic representation of the network input.
:return: A symbolic representation of the logits
"""
# logits_name = self._get_logits_name()
# logits_layer = self.get_layer(x, logits_name)
# # Need to deal with the case where softmax is part of the
# # logits layer
# if logits_name == self._get_softmax_name():
# softmax_logit_layer = self.get_layer(x, logits_name)
# # The final op is the softmax. Return its input
# logits_layer = softmax_logit_layer._op.inputs[0]
prob = self.get_probs(x)
if self.ch_compatibility_mode:
return prob
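# NOTE: tf.log(prob) recovers the logits only up to an additive per-example
# constant; since softmax(log(prob)) == prob, softmax cross-entropy losses
# built on these values are unchanged.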
logits = tf.log(prob)
#logits = prob
return logits
def get_probs(self, x):
"""
:param x: A symbolic representation of the network input.
:return: A symbolic representation of the probs
"""
return self.model(x)
def get_layer_names(self):
"""
:return: Names of all the layers kept by Keras
"""
layer_names = [x.name for x in self.model.layers]
return layer_names
def fprop(self, x):
"""
Exposes all the layers of the model returned by get_layer_names.
:param x: A symbolic representation of the network input
:return: A dictionary mapping layer names to the symbolic
representation of their output.
"""
from keras.models import Model as KerasModel
if self.keras_model is None:
# Get the input layer
new_input = self.model.get_input_at(0)
# Make a new model that returns each of the layers as output
out_layers = [x_layer.output for x_layer in self.model.layers]
self.keras_model = KerasModel(new_input, out_layers)
# and get the outputs for that model on the input x
outputs = self.keras_model(x)
# Keras only returns a list for outputs of length >= 2; if the model
# has only one layer, wrap the single output in a list
if len(self.model.layers) == 1:
outputs = [outputs]
# compute the dict to return
fprop_dict = dict(zip(self.get_layer_names(), outputs))
return fprop_dict
def get_layer(self, x, layer):
"""
Expose the hidden features of a model given a layer name.
:param x: A symbolic representation of the network input
:param layer: The name of the hidden layer to return features at.
:return: A symbolic representation of the hidden features
:raise: NoSuchLayerError if `layer` is not in the model.
"""
# Return the symbolic representation for this layer.
output = self.fprop(x)
try:
requested = output[layer]
except KeyError:
raise NoSuchLayerError()
return requested |
# Copyright 2022 The Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
L = 10 #Number of classes
d = 2 #Dimension of features
lr = 0.0001 #Learning rate
mean_var = 1
steps = 10000 #optimization steps
z = tf.get_variable("auxiliary_variable", [d, L]) #dxL
x = z / tf.norm(z, axis=0, keepdims=True) #dxL, normalized in each column
XTX = tf.matmul(x, x, transpose_a=True) - 2 * tf.eye(L)#LxL, each element is the dot-product of two means, the diag elements are -1
cost = tf.reduce_max(XTX) #single element
opt = tf.train.AdamOptimizer(learning_rate=lr)
opt_op = opt.minimize(cost)
with tf.Session() as sess:
sess.run(tf.initializers.global_variables())
for i in range(steps):
_, loss = sess.run([opt_op, cost])
min_distance2 = loss
print('Step %d, min_distance2: %f'%(i, min_distance2))
mean_logits = sess.run(x)
mean_logits = mean_var * mean_logits.T
import scipy.io as sio
sio.savemat('/MMC/kernel_paras/meanvar1_featuredim'+str(d)+'_class'+str(L)+'.mat', {'mean_logits': mean_logits})
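# Sanity-check sketch (illustrative, assumes the .mat file was written above):
# the rows of mean_logits are L centers of norm mean_var; their pairwise dot
# products should all sit close to the achievable optimum (for d >= L - 1 that
# is -mean_var**2 / (L - 1); for d = 2, as here, the centers simply spread
# evenly on a circle, with max pairwise dot cos(2*pi/L)).
#
#   loaded = sio.loadmat('/MMC/kernel_paras/meanvar1_featuredim'
#                        + str(d) + '_class' + str(L) + '.mat')['mean_logits']
#   gram = loaded @ loaded.T  # L x L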
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import argparse
import logging
import yaml
import os
import time
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from bpda_eot.bpda_eot_attack import BPDA_EOT_Attack
import utils
from utils import str2bool, get_accuracy, get_image_classifier, load_data
from runners.diffpure_ddpm import Diffusion
from runners.diffpure_guided import GuidedDiffusion
from runners.diffpure_sde import RevGuidedDiffusion
class ResNet_Adv_Model(nn.Module):
def __init__(self, args, config):
super().__init__()
# image classifier
self.resnet = get_image_classifier(args.classifier_name).to(config.device)
def purify(self, x):
return x
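# BPDA surrogate: purification is treated as the identity here, so gradients
# flow straight through to the classifier during the backward pass, which is
# exactly the approximation the BPDA attack relies on.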
def forward(self, x, mode='purify_and_classify'):
if mode == 'purify':
out = self.purify(x)
elif mode == 'classify':
out = self.resnet(x) # x in [0, 1]
elif mode == 'purify_and_classify':
x = self.purify(x)
out = self.resnet(x) # x in [0, 1]
else:
raise NotImplementedError(f'unknown mode: {mode}')
return out
class SDE_Adv_Model(nn.Module):
def __init__(self, args, config):
super().__init__()
self.args = args
# image classifier
self.resnet = get_image_classifier(args.classifier_name).to(config.device)
# diffusion model
print(f'diffusion_type: {args.diffusion_type}')
if args.diffusion_type == 'ddpm':
self.runner = GuidedDiffusion(args, config, device=config.device)
elif args.diffusion_type == 'sde':
self.runner = RevGuidedDiffusion(args, config, device=config.device)
elif args.diffusion_type == 'celebahq-ddpm':
self.runner = Diffusion(args, config, device=config.device)
else:
raise NotImplementedError('unknown diffusion type')
self.register_buffer('counter', torch.zeros(1, device=config.device))
self.tag = None
def reset_counter(self):
# reuse the existing buffer's device; `config` is not in scope inside this method
self.counter = torch.zeros(1, dtype=torch.int, device=self.counter.device)
def set_tag(self, tag=None):
self.tag = tag
def purify(self, x):
counter = self.counter.item()
if counter % 5 == 0:
print(f'diffusion times: {counter}')
# imagenet [3, 224, 224] -> [3, 256, 256] -> [3, 224, 224]
if 'imagenet' in self.args.domain:
x = F.interpolate(x, size=(256, 256), mode='bilinear', align_corners=False)
start_time = time.time()
x_re = self.runner.image_editing_sample((x - 0.5) * 2, bs_id=counter, tag=self.tag)
minutes, seconds = divmod(time.time() - start_time, 60)
if 'imagenet' in self.args.domain:
x_re = F.interpolate(x_re, size=(224, 224), mode='bilinear', align_corners=False)
if counter % 5 == 0:
print(f'x shape (before diffusion models): {x.shape}')
print(f'x shape (before resnet): {x_re.shape}')
print("Sampling time per batch: {:0>2}:{:05.2f}".format(int(minutes), seconds))
self.counter += 1
return (x_re + 1) * 0.5
def forward(self, x, mode='purify_and_classify'):
if mode == 'purify':
out = self.purify(x)
elif mode == 'classify':
out = self.resnet(x) # x in [0, 1]
elif mode == 'purify_and_classify':
x = self.purify(x)
out = self.resnet(x) # x in [0, 1]
else:
raise NotImplementedError(f'unknown mode: {mode}')
return out
def eval_bpda(args, config, model, x_val, y_val, adv_batch_size, log_dir):
ngpus = torch.cuda.device_count()
model_ = model
if ngpus > 1:
model_ = model.module
x_val, y_val = x_val.to(config.device), y_val.to(config.device)
# ------------------ apply the attack to resnet ------------------
print(f'apply the bpda attack to resnet...')
resnet_bpda = ResNet_Adv_Model(args, config)
if ngpus > 1:
resnet_bpda = torch.nn.DataParallel(resnet_bpda)
start_time = time.time()
init_acc = get_accuracy(resnet_bpda, x_val, y_val, bs=adv_batch_size)
print('initial accuracy: {:.2%}, time elapsed: {:.2f}s'.format(init_acc, time.time() - start_time))
adversary_resnet = BPDA_EOT_Attack(resnet_bpda, adv_eps=args.adv_eps, eot_defense_reps=args.eot_defense_reps,
eot_attack_reps=args.eot_attack_reps)
start_time = time.time()
class_batch, ims_adv_batch = adversary_resnet.attack_all(x_val, y_val, batch_size=adv_batch_size)
init_acc = float(class_batch[0, :].sum()) / class_batch.shape[1]
robust_acc = float(class_batch[-1, :].sum()) / class_batch.shape[1]
print('init acc: {:.2%}, robust acc: {:.2%}, time elapsed: {:.2f}s'.format(init_acc, robust_acc, time.time() - start_time))
print(f'x_adv_resnet shape: {ims_adv_batch.shape}')
torch.save([ims_adv_batch, y_val], f'{log_dir}/x_adv_resnet_sd{args.seed}.pt')
# ------------------ apply the attack to sde_adv ------------------
print(f'apply the bpda attack to sde_adv...')
start_time = time.time()
model_.reset_counter()
model_.set_tag('no_adv')
init_acc = get_accuracy(model, x_val, y_val, bs=adv_batch_size)
print('initial accuracy: {:.2%}, time elapsed: {:.2f}s'.format(init_acc, time.time() - start_time))
adversary_sde = BPDA_EOT_Attack(model, adv_eps=args.adv_eps, eot_defense_reps=args.eot_defense_reps,
eot_attack_reps=args.eot_attack_reps)
start_time = time.time()
model_.reset_counter()
model_.set_tag()
class_batch, ims_adv_batch = adversary_sde.attack_all(x_val, y_val, batch_size=adv_batch_size)
init_acc = float(class_batch[0, :].sum()) / class_batch.shape[1]
robust_acc = float(class_batch[-1, :].sum()) / class_batch.shape[1]
print('init acc: {:.2%}, robust acc: {:.2%}, time elapsed: {:.2f}s'.format(init_acc, robust_acc, time.time() - start_time))
print(f'x_adv_sde shape: {ims_adv_batch.shape}')
torch.save([ims_adv_batch, y_val], f'{log_dir}/x_adv_sde_sd{args.seed}.pt')
def robustness_eval(args, config):
middle_name = '_'.join([args.diffusion_type, 'bpda'])
log_dir = os.path.join(args.image_folder, args.classifier_name, middle_name,
'seed' + str(args.seed), 'data' + str(args.data_seed))
os.makedirs(log_dir, exist_ok=True)
args.log_dir = log_dir
logger = utils.Logger(file_name=f'{log_dir}/log.txt', file_mode="w+", should_flush=True)
ngpus = torch.cuda.device_count()
adv_batch_size = args.adv_batch_size * ngpus
print(f'ngpus: {ngpus}, adv_batch_size: {adv_batch_size}')
# load model
print('starting the model and loader...')
model = SDE_Adv_Model(args, config)
if ngpus > 1:
model = torch.nn.DataParallel(model)
model = model.eval().to(config.device)
# load data
x_val, y_val = load_data(args, adv_batch_size)
# eval classifier and sde_adv against bpda attack
eval_bpda(args, config, model, x_val, y_val, adv_batch_size, log_dir)
logger.close()
def parse_args_and_config():
parser = argparse.ArgumentParser(description=globals()['__doc__'])
# diffusion models
parser.add_argument('--config', type=str, required=True, help='Path to the config file')
parser.add_argument('--data_seed', type=int, default=0, help='Random seed for data subsampling')
parser.add_argument('--seed', type=int, default=1234, help='Random seed')
parser.add_argument('--exp', type=str, default='exp', help='Path for saving running related data.')
parser.add_argument('--verbose', type=str, default='info', help='Verbose level: info | debug | warning | critical')
parser.add_argument('-i', '--image_folder', type=str, default='images', help="The folder name of samples")
parser.add_argument('--ni', action='store_true', help="No interaction. Suitable for Slurm Job launcher")
parser.add_argument('--sample_step', type=int, default=1, help='Total sampling steps')
parser.add_argument('--t', type=int, default=400, help='Sampling noise scale')
parser.add_argument('--t_delta', type=int, default=15, help='Perturbation range of sampling noise scale')
parser.add_argument('--rand_t', type=str2bool, default=False, help='Decide if randomize sampling noise scale')
parser.add_argument('--diffusion_type', type=str, default='ddpm', help='[ddpm, sde, celebahq-ddpm]')
parser.add_argument('--score_type', type=str, default='guided_diffusion', help='[guided_diffusion, score_sde]')
parser.add_argument('--eot_iter', type=int, default=20, help='only for rand version of autoattack')
parser.add_argument('--use_bm', action='store_true', help='whether to use brownian motion')
parser.add_argument('--eot_defense_reps', type=int, default=150)
parser.add_argument('--eot_attack_reps', type=int, default=15)
# adv
parser.add_argument('--domain', type=str, default='celebahq', help='which domain: celebahq, cat, car, imagenet')
parser.add_argument('--classifier_name', type=str, default='Eyeglasses', help='which classifier to use')
parser.add_argument('--partition', type=str, default='val')
parser.add_argument('--adv_batch_size', type=int, default=64)
parser.add_argument('--num_sub', type=int, default=1000, help='imagenet subset')
parser.add_argument('--adv_eps', type=float, default=0.07)
parser.add_argument('--gpu_ids', type=str, default='0')
args = parser.parse_args()
# parse config file
with open(os.path.join('configs', args.config), 'r') as f:
config = yaml.safe_load(f)
new_config = utils.dict2namespace(config)
level = getattr(logging, args.verbose.upper(), None)
if not isinstance(level, int):
raise ValueError('level {} not supported'.format(args.verbose))
handler1 = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)s - %(filename)s - %(asctime)s - %(message)s')
handler1.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler1)
logger.setLevel(level)
args.image_folder = os.path.join(args.exp, args.image_folder)
os.makedirs(args.image_folder, exist_ok=True)
# add device
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
logging.info("Using device: {}".format(device))
new_config.device = device
# set random seed
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.benchmark = True
return args, new_config
if __name__ == '__main__':
args, config = parse_args_and_config()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
robustness_eval(args, config)
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import sys
import argparse
from typing import Any
import torch
import torch.nn as nn
import torchvision.models as models
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from robustbench import load_model
import data
def compute_n_params(model, return_str=True):
tot = 0
for p in model.parameters():
w = 1
for x in p.shape:
w *= x
tot += w
if return_str:
if tot >= 1e6:
return '{:.1f}M'.format(tot / 1e6)
else:
return '{:.1f}K'.format(tot / 1e3)
else:
return tot
class Logger(object):
"""
Redirect stderr to stdout, optionally print stdout to a file,
and optionally force flushing on both stdout and the file.
"""
def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True):
self.file = None
if file_name is not None:
self.file = open(file_name, file_mode)
self.should_flush = should_flush
self.stdout = sys.stdout
self.stderr = sys.stderr
sys.stdout = self
sys.stderr = self
def __enter__(self) -> "Logger":
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.close()
def write(self, text: str) -> None:
"""Write text to stdout (and a file) and optionally flush."""
if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
return
if self.file is not None:
self.file.write(text)
self.stdout.write(text)
if self.should_flush:
self.flush()
def flush(self) -> None:
"""Flush written text to both stdout and a file, if open."""
if self.file is not None:
self.file.flush()
self.stdout.flush()
def close(self) -> None:
"""Flush, close possible files, and remove stdout/stderr mirroring."""
self.flush()
# if using multiple loggers, prevent closing in wrong order
if sys.stdout is self:
sys.stdout = self.stdout
if sys.stderr is self:
sys.stderr = self.stderr
if self.file is not None:
self.file.close()
def dict2namespace(config):
namespace = argparse.Namespace()
for key, value in config.items():
if isinstance(value, dict):
new_value = dict2namespace(value)
else:
new_value = value
setattr(namespace, key, new_value)
return namespace
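# Example (illustrative): nested dicts become nested namespaces, so YAML
# configs can be read with attribute access, e.g.
#   ns = dict2namespace({'model': {'depth': 28}, 'lr': 0.1})
#   ns.model.depth == 28 and ns.lr == 0.1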
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def update_state_dict(state_dict, idx_start=9):
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[idx_start:] # remove 'module.0.' of dataparallel
new_state_dict[name]=v
return new_state_dict
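# Example (illustrative): with the default idx_start=9 the DataParallel-style
# prefix 'module.0.' (9 characters) is stripped from every key, e.g.
#   update_state_dict({'module.0.conv.weight': w})  ->  {'conv.weight': w}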
# ------------------------------------------------------------------------
def get_accuracy(model, x_orig, y_orig, bs=64, device=torch.device('cuda:0')):
# round up so that a final partial batch is still evaluated
n_batches = (x_orig.shape[0] + bs - 1) // bs
acc = 0.
for counter in range(n_batches):
x = x_orig[counter * bs:min((counter + 1) * bs, x_orig.shape[0])].clone().to(device)
y = y_orig[counter * bs:min((counter + 1) * bs, x_orig.shape[0])].clone().to(device)
output = model(x)
acc += (output.max(1)[1] == y).float().sum()
if isinstance(acc, torch.Tensor):
acc = acc.item()
return acc / x_orig.shape[0]
def get_image_classifier(classifier_name):
class _Wrapper_ResNet(nn.Module):
def __init__(self, resnet):
super().__init__()
self.resnet = resnet
self.mu = torch.Tensor([0.485, 0.456, 0.406]).float().view(3, 1, 1)
self.sigma = torch.Tensor([0.229, 0.224, 0.225]).float().view(3, 1, 1)
def forward(self, x):
x = (x - self.mu.to(x.device)) / self.sigma.to(x.device)
return self.resnet(x)
if 'imagenet' in classifier_name:
if 'resnet18' in classifier_name:
print('using imagenet resnet18...')
model = models.resnet18(pretrained=True).eval()
elif 'resnet50' in classifier_name:
print('using imagenet resnet50...')
model = models.resnet50(pretrained=True).eval()
elif 'resnet101' in classifier_name:
print('using imagenet resnet101...')
model = models.resnet101(pretrained=True).eval()
elif 'wideresnet-50-2' in classifier_name:
print('using imagenet wideresnet-50-2...')
model = models.wide_resnet50_2(pretrained=True).eval()
elif 'deit-s' in classifier_name:
print('using imagenet deit-s...')
model = torch.hub.load('facebookresearch/deit:main', 'deit_small_patch16_224', pretrained=True).eval()
else:
raise NotImplementedError(f'unknown {classifier_name}')
wrapper_resnet = _Wrapper_ResNet(model)
elif 'cifar10' in classifier_name:
if 'wideresnet-28-10' in classifier_name:
print('using cifar10 wideresnet-28-10...')
model = load_model(model_name='Standard', dataset='cifar10', threat_model='Linf') # pixel in [0, 1]
elif 'wrn-28-10-at0' in classifier_name:
print('using cifar10 wrn-28-10-at0...')
model = load_model(model_name='Gowal2021Improving_28_10_ddpm_100m', dataset='cifar10',
threat_model='Linf') # pixel in [0, 1]
elif 'wrn-28-10-at1' in classifier_name:
print('using cifar10 wrn-28-10-at1...')
model = load_model(model_name='Gowal2020Uncovering_28_10_extra', dataset='cifar10',
threat_model='Linf') # pixel in [0, 1]
elif 'wrn-70-16-at0' in classifier_name:
print('using cifar10 wrn-70-16-at0...')
model = load_model(model_name='Gowal2021Improving_70_16_ddpm_100m', dataset='cifar10',
threat_model='Linf') # pixel in [0, 1]
elif 'wrn-70-16-at1' in classifier_name:
print('using cifar10 wrn-70-16-at1...')
model = load_model(model_name='Rebuffi2021Fixing_70_16_cutmix_extra', dataset='cifar10',
threat_model='Linf') # pixel in [0, 1]
elif 'wrn-70-16-L2-at1' in classifier_name:
print('using cifar10 wrn-70-16-L2-at1...')
model = load_model(model_name='Rebuffi2021Fixing_70_16_cutmix_extra', dataset='cifar10',
threat_model='L2') # pixel in [0, 1]
elif 'wideresnet-70-16' in classifier_name:
print('using cifar10 wideresnet-70-16 (dm_wrn-70-16)...')
from robustbench.model_zoo.architectures.dm_wide_resnet import DMWideResNet, Swish
model = DMWideResNet(num_classes=10, depth=70, width=16, activation_fn=Swish) # pixel in [0, 1]
model_path = 'checkpoints/diffpure/cifar10/wresnet-76-10/weights-best.pt'
print(f"=> loading wideresnet-70-16 checkpoint '{model_path}'")
model.load_state_dict(update_state_dict(torch.load(model_path)['model_state_dict']))
model.eval()
print(f"=> loaded wideresnet-70-16 checkpoint")
elif 'resnet-50' in classifier_name:
print('using cifar10 resnet-50...')
from classifiers.cifar10_resnet import ResNet50
model = ResNet50() # pixel in [0, 1]
model_path = 'checkpoints/diffpure/cifar10/resnet-50/weights.pt'
print(f"=> loading resnet-50 checkpoint '{model_path}'")
model.load_state_dict(update_state_dict(torch.load(model_path), idx_start=7))
model.eval()
print(f"=> loaded resnet-50 checkpoint")
elif 'wrn-70-16-dropout' in classifier_name:
print('using cifar10 wrn-70-16-dropout (standard wrn-70-16-dropout)...')
from classifiers.cifar10_resnet import WideResNet_70_16_dropout
model = WideResNet_70_16_dropout() # pixel in [0, 1]
model_path = 'checkpoints/diffpure/cifar10/wrn-70-16-dropout/weights.pt'
print(f"=> loading wrn-70-16-dropout checkpoint '{model_path}'")
model.load_state_dict(update_state_dict(torch.load(model_path), idx_start=7))
model.eval()
print(f"=> loaded wrn-70-16-dropout checkpoint")
else:
raise NotImplementedError(f'unknown {classifier_name}')
wrapper_resnet = model
elif 'celebahq' in classifier_name:
attribute = classifier_name.split('__')[-1] # `celebahq__Smiling`
ckpt_path = f'checkpoints/diffpure/celebahq/{attribute}/net_best.pth'
from classifiers.attribute_classifier import ClassifierWrapper
model = ClassifierWrapper(attribute, ckpt_path=ckpt_path)
wrapper_resnet = model
else:
raise NotImplementedError(f'unknown {classifier_name}')
return wrapper_resnet
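# Usage sketch (illustrative; assumes the corresponding weights are available
# locally or downloadable via robustbench):
#
#   clf = get_image_classifier('cifar10-wideresnet-28-10').eval()
#   logits = clf(x)  # x in [0, 1], shape [B, 3, 32, 32]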
def load_data(args, adv_batch_size, binarization_test=False):
if 'imagenet' in args.domain:
val_dir = './dataset/imagenet_lmdb/val' # using imagenet lmdb data
val_transform = data.get_transform(args.domain, 'imval', base_size=224)
val_data = data.imagenet_lmdb_dataset_sub(val_dir, transform=val_transform,
num_sub=args.num_sub, data_seed=args.data_seed)
n_samples = len(val_data)
val_loader = DataLoader(val_data, batch_size=n_samples, shuffle=False, pin_memory=True, num_workers=4)
x_val, y_val = next(iter(val_loader))
elif 'cifar10' in args.domain:
data_dir = './dataset'
transform = transforms.Compose([transforms.ToTensor()])
if binarization_test:
val_data = data.cifar10_dataset_sub(
data_dir, transform=transform, num_sub=10000, data_seed=args.data_seed)
val_loader = DataLoader(val_data, batch_size=args.test_samples_idx_end, shuffle=False)
x_val, y_val = next(iter(val_loader))
x_val = x_val[args.test_samples_idx_start:args.test_samples_idx_end]
y_val = y_val[args.test_samples_idx_start:args.test_samples_idx_end]
else:
val_data = data.cifar10_dataset_sub(data_dir, transform=transform,
num_sub=args.num_sub, data_seed=args.data_seed)
n_samples = len(val_data)
val_loader = DataLoader(val_data, batch_size=n_samples, shuffle=False, pin_memory=True, num_workers=4)
x_val, y_val = next(iter(val_loader))
elif 'celebahq' in args.domain:
data_dir = './dataset/celebahq'
attribute = args.classifier_name.split('__')[-1] # `celebahq__Smiling`
val_transform = data.get_transform('celebahq', 'imval')
clean_dset = data.get_dataset('celebahq', 'val', attribute, root=data_dir, transform=val_transform,
fraction=2, data_seed=args.data_seed) # data_seed randomizes here
loader = DataLoader(clean_dset, batch_size=adv_batch_size, shuffle=False,
pin_memory=True, num_workers=4)
x_val, y_val = next(iter(loader)) # [0, 1], 256x256
else:
raise NotImplementedError(f'Unknown domain: {args.domain}!')
print(f'x_val shape: {x_val.shape}')
x_val, y_val = x_val.contiguous().requires_grad_(True), y_val.contiguous()
print(f'x (min, max): ({x_val.min()}, {x_val.max()})')
return x_val, y_val
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import argparse
import logging
import yaml
import os
import time
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from autoattack import AutoAttack
from attacks.autopgd import fix_autoattack as fix_autoattack_autopgd
from active_tests import decision_boundary_binarization as dbb
from argparse_utils import DecisionBoundaryBinarizationSettings
from stadv_eot.attacks import StAdvAttack
import dp_utils
import utils
from dp_utils import str2bool, get_accuracy, get_image_classifier, load_data
from runners.diffpure_ddpm import Diffusion
from runners.diffpure_guided import GuidedDiffusion
from runners.diffpure_sde import RevGuidedDiffusion
from runners.diffpure_ode import OdeGuidedDiffusion
from runners.diffpure_ldsde import LDGuidedDiffusion
def patch_robustbench_models():
import robustbench.model_zoo.architectures as arch
def wide_resnet_forward(self, x, features_only=False, features_and_logits=False):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
if features_only:
return out
l = self.fc(out)
if features_and_logits:
return out, l
return l
def robust_wide_resnet_forward(self, x, features_only=False, features_and_logits=False):
out = self.stem_conv(x)
for i, block in enumerate(self.blocks):
out = block(out)
out = self.relu(self.bn1(out))
out = self.global_pooling(out)
out = out.view(-1, self.fc_size)
if features_only:
return out
l = self.fc(out)
if features_and_logits:
return out, l
return l
def cifar_resnext_forward(self, x, features_only=False, features_and_logits=False):
x = self.conv_1_3x3(x)
x = F.relu(self.bn_1(x), inplace=True)
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
if features_only:
return x
l = self.classifier(x)
if features_and_logits:
return x, l
return l
def preact_resenet_forward(self, x, features_only=False, features_and_logits=False):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
if self.bn_before_fc:
out = F.relu(self.bn(out))
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
if features_only:
return out
l = self.linear(out)
if features_and_logits:
return out, l
return l
def resnet_forward(self, x, features_only=False, features_and_logits=False):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
if features_only:
return out
l = self.linear(out)
if features_and_logits:
return out, l
return l
def dm_preact_resnet_forward(self, x, features_only=False, features_and_logits=False):
if self.padding > 0:
x = F.pad(x, (self.padding,) * 4)
out = (x - self.mean) / self.std
out = self.conv_2d(out)
out = self.layer_0(out)
out = self.layer_1(out)
out = self.layer_2(out)
out = self.layer_3(out)
out = self.relu(self.batchnorm(out))
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
if features_only:
return out
l = self.logits(out)
if features_and_logits:
return out, l
return l
def dm_resnet_forward(self, x, features_only=False, features_and_logits=False):
if self.padding > 0:
x = F.pad(x, (self.padding,) * 4)
out = (x - self.mean) / self.std
out = self.init_conv(out)
out = self.layer(out)
out = self.relu(self.batchnorm(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.num_channels)
if features_only:
return out
l = self.logits(out)
if features_and_logits:
return out, l
return l
arch.wide_resnet.WideResNet.forward = wide_resnet_forward
arch.robust_wide_resnet.RobustWideResNet.forward = robust_wide_resnet_forward
arch.resnext.CifarResNeXt.forward = cifar_resnext_forward
arch.resnet.PreActResNet.forward = preact_resenet_forward
arch.resnet.ResNet.forward = resnet_forward
arch.dm_wide_resnet.DMPreActResNet.forward = dm_preact_resnet_forward
arch.dm_wide_resnet.DMWideResNet.forward = dm_resnet_forward
print("Patched RobustBench classifiers.")
class SDE_Adv_Model(nn.Module):
def __init__(self, args, config):
super().__init__()
self.args = args
# image classifier
self.classifier = get_image_classifier(args.classifier_name).to(config.device)
# diffusion model
print(f'diffusion_type: {args.diffusion_type}')
if args.diffusion_type == 'ddpm':
self.runner = GuidedDiffusion(args, config, device=config.device)
elif args.diffusion_type == 'sde':
self.runner = RevGuidedDiffusion(args, config, device=config.device)
elif args.diffusion_type == 'ode':
self.runner = OdeGuidedDiffusion(args, config, device=config.device)
elif args.diffusion_type == 'ldsde':
self.runner = LDGuidedDiffusion(args, config, device=config.device)
elif args.diffusion_type == 'celebahq-ddpm':
self.runner = Diffusion(args, config, device=config.device)
else:
raise NotImplementedError('unknown diffusion type')
self.register_buffer('counter', torch.zeros(1, device=config.device))
self.tag = None
def reset_counter(self):
# reuse the existing buffer's device; `config` is not in scope inside this method
self.counter = torch.zeros(1, dtype=torch.int, device=self.counter.device)
def set_tag(self, tag=None):
self.tag = tag
def forward(self, x, features_only=False, features_and_logits=False):
counter = self.counter.item()
if counter % 10 == 0:
print(f'diffusion times: {counter}')
# imagenet [3, 224, 224] -> [3, 256, 256] -> [3, 224, 224]
if 'imagenet' in self.args.domain:
x = F.interpolate(x, size=(256, 256), mode='bilinear', align_corners=False)
start_time = time.time()
x_re = self.runner.image_editing_sample((x - 0.5) * 2, bs_id=counter, tag=self.tag)
minutes, seconds = divmod(time.time() - start_time, 60)
if 'imagenet' in self.args.domain:
x_re = F.interpolate(x_re, size=(224, 224), mode='bilinear', align_corners=False)
if counter % 10 == 0:
print(f'x shape (before diffusion models): {x.shape}')
print(f'x shape (before classifier): {x_re.shape}')
print("Sampling time per batch: {:0>2}:{:05.2f}".format(int(minutes), seconds))
out = self.classifier((x_re + 1) * 0.5, features_only=features_only, features_and_logits=features_and_logits)
self.counter += 1
return out
def eval_autoattack(args, config, model, x_val, y_val, adv_batch_size, log_dir):
ngpus = torch.cuda.device_count()
model_ = model
if ngpus > 1:
model_ = model.module
attack_version = args.attack_version # ['standard', 'rand', 'custom']
if attack_version == 'standard':
attack_list = ['apgd-ce', 'apgd-t', 'fab-t', 'square']
elif attack_version == 'rand':
attack_list = ['apgd-ce', 'apgd-dlr']
elif attack_version == 'custom':
attack_list = args.attack_type.split(',')
else:
raise NotImplementedError(f'Unknown attack version: {attack_version}!')
print(f'attack_version: {attack_version}, attack_list: {attack_list}') # ['apgd-ce', 'apgd-t', 'fab-t', 'square']
# ---------------- apply the attack to classifier ----------------
print(f'apply the attack to classifier [{args.lp_norm}]...')
classifier = get_image_classifier(args.classifier_name).to(config.device)
adversary_resnet = AutoAttack(classifier, norm=args.lp_norm, eps=args.adv_eps,
version=attack_version, attacks_to_run=attack_list,
log_path=f'{log_dir}/log_resnet.txt', device=config.device)
if attack_version == 'custom':
adversary_resnet.apgd.n_restarts = 1
adversary_resnet.fab.n_restarts = 1
adversary_resnet.apgd_targeted.n_restarts = 1
adversary_resnet.fab.n_target_classes = 9
adversary_resnet.apgd_targeted.n_target_classes = 9
adversary_resnet.square.n_queries = 5000
if attack_version == 'rand':
adversary_resnet.apgd.eot_iter = args.eot_iter
print(f'[classifier] rand version with eot_iter: {adversary_resnet.apgd.eot_iter}')
print(f'{args.lp_norm}, epsilon: {args.adv_eps}')
x_adv_resnet = adversary_resnet.run_standard_evaluation(x_val, y_val, bs=adv_batch_size)
print(f'x_adv_resnet shape: {x_adv_resnet.shape}')
torch.save([x_adv_resnet, y_val], f'{log_dir}/x_adv_resnet_sd{args.seed}.pt')
# ---------------- apply the attack to sde_adv ----------------
print(f'apply the attack to sde_adv [{args.lp_norm}]...')
model_.reset_counter()
adversary_sde = AutoAttack(model, norm=args.lp_norm, eps=args.adv_eps,
version=attack_version, attacks_to_run=attack_list,
log_path=f'{log_dir}/log_sde_adv.txt', device=config.device)
if attack_version == 'custom':
adversary_sde.apgd.n_restarts = 1
adversary_sde.fab.n_restarts = 1
adversary_sde.apgd_targeted.n_restarts = 1
adversary_sde.fab.n_target_classes = 9
adversary_sde.apgd_targeted.n_target_classes = 9
adversary_sde.square.n_queries = 5000
if attack_version == 'rand':
adversary_sde.apgd.eot_iter = args.eot_iter
print(f'[adv_sde] rand version with eot_iter: {adversary_sde.apgd.eot_iter}')
print(f'{args.lp_norm}, epsilon: {args.adv_eps}')
x_adv_sde = adversary_sde.run_standard_evaluation(x_val, y_val, bs=adv_batch_size)
print(f'x_adv_sde shape: {x_adv_sde.shape}')
torch.save([x_adv_sde, y_val], f'{log_dir}/x_adv_sde_sd{args.seed}.pt')
def eval_stadv(args, config, model, x_val, y_val, adv_batch_size, log_dir):
ngpus = torch.cuda.device_count()
model_ = model
if ngpus > 1:
model_ = model.module
x_val, y_val = x_val.to(config.device), y_val.to(config.device)
print(f'bound: {args.adv_eps}')
# apply the attack to resnet
print(f'apply the stadv attack to resnet...')
resnet = get_image_classifier(args.classifier_name).to(config.device)
start_time = time.time()
init_acc = get_accuracy(resnet, x_val, y_val, bs=adv_batch_size)
print('initial accuracy: {:.2%}, time elapsed: {:.2f}s'.format(init_acc, time.time() - start_time))
adversary_resnet = StAdvAttack(resnet, bound=args.adv_eps, num_iterations=100, eot_iter=args.eot_iter)
start_time = time.time()
x_adv_resnet = adversary_resnet(x_val, y_val)
robust_acc = get_accuracy(resnet, x_adv_resnet, y_val, bs=adv_batch_size)
print('robust accuracy: {:.2%}, time elapsed: {:.2f}s'.format(robust_acc, time.time() - start_time))
print(f'x_adv_resnet shape: {x_adv_resnet.shape}')
torch.save([x_adv_resnet, y_val], f'{log_dir}/x_adv_resnet_sd{args.seed}.pt')
# apply the attack to sde_adv
print(f'apply the stadv attack to sde_adv...')
start_time = time.time()
model_.reset_counter()
model_.set_tag('no_adv')
init_acc = get_accuracy(model, x_val, y_val, bs=adv_batch_size)
print('initial accuracy: {:.2%}, time elapsed: {:.2f}s'.format(init_acc, time.time() - start_time))
adversary_sde = StAdvAttack(model, bound=args.adv_eps, num_iterations=100, eot_iter=args.eot_iter)
start_time = time.time()
model_.reset_counter()
model_.set_tag()
x_adv_sde = adversary_sde(x_val, y_val)
model_.reset_counter()
model_.set_tag('sde_adv')
robust_acc = get_accuracy(model, x_adv_sde, y_val, bs=adv_batch_size)
print('robust accuracy: {:.2%}, time elapsed: {:.2f}s'.format(robust_acc, time.time() - start_time))
print(f'x_adv_sde shape: {x_adv_sde.shape}')
torch.save([x_adv_sde, y_val], f'{log_dir}/x_adv_sde_sd{args.seed}.pt')
def binarization_eval(args, config):
print("Running binarization test.")
middle_name = '_'.join([args.diffusion_type, args.attack_version]) if args.attack_version in ['stadv', 'standard',
'rand'] \
else '_'.join([args.diffusion_type, args.attack_version, args.attack_type])
log_dir = os.path.join(args.image_folder, args.classifier_name, middle_name,
'seed' + str(args.seed), 'data' + str(args.data_seed) + "_" +
str(args.test_samples_idx_start) + "_" + str(args.test_samples_idx_end))
os.makedirs(log_dir, exist_ok=True)
args.log_dir = log_dir
ngpus = torch.cuda.device_count()
adv_batch_size = args.adv_batch_size * ngpus
# load model
print('starting the model and loader...')
    model = SDE_Adv_Model(args, config)
    model_ = model
    if ngpus > 1:
        model = torch.nn.DataParallel(model)
    model = model.eval().to(config.device)
# load data
x_val, y_val = load_data(args, adv_batch_size, binarization_test=True)
testloader = utils.build_dataloader_from_arrays(x_val.detach().cpu().numpy(), y_val.detach().cpu().numpy())
print('adv_batch_size', adv_batch_size)
print('x_val', x_val.shape)
if args.attack_version in ['standard', 'rand', 'custom']:
attack_version = args.attack_version # ['standard', 'rand', 'custom']
if attack_version == 'standard':
attack_list = ['apgd-ce', 'apgd-t', 'fab-t', 'square']
elif attack_version == 'rand':
attack_list = ['apgd-ce', 'apgd-dlr']
elif attack_version == 'custom':
attack_list = args.attack_type.split(',')
else:
raise NotImplementedError(f'Unknown attack version: {attack_version}!')
print(
f'attack_version: {attack_version}, attack_list: {attack_list}') # ['apgd-ce', 'apgd-t', 'fab-t', 'square']
print(f'{args.lp_norm}, epsilon: {args.adv_eps}')
elif args.attack_version == 'stadv':
print("Using StAdv attack.")
else:
raise NotImplementedError(f'unknown attack_version: {args.attack_version}')
    def attack_eval(m, x, y):
        model_.reset_counter()
if args.attack_version in ['standard', 'rand', 'custom']:
adversary_sde = AutoAttack(m, norm=args.lp_norm, eps=args.adv_eps,
version=attack_version, attacks_to_run=attack_list,
device=config.device)
# Fix loss functions of APGD such that they are properly defined for binary classification problems.
fix_autoattack_autopgd(adversary_sde)
if attack_version == 'custom':
adversary_sde.apgd.n_restarts = 1
adversary_sde.fab.n_restarts = 1
adversary_sde.apgd_targeted.n_restarts = 1
adversary_sde.fab.n_target_classes = 1
adversary_sde.apgd_targeted.n_target_classes = 1
adversary_sde.square.n_queries = 5000
if attack_version == 'rand':
adversary_sde.apgd.eot_iter = args.eot_iter
x_adv_sde = adversary_sde.run_standard_evaluation(x, y, bs=len(x))
elif args.attack_version == 'stadv':
adversary_sde = StAdvAttack(m, bound=args.adv_eps, num_iterations=100, eot_iter=args.eot_iter)
x_adv_sde = adversary_sde(x, y)
else:
raise NotImplementedError(f'unknown attack_version: {args.attack_version}')
x_adv_logits = m(x_adv_sde)
robust_acc = get_accuracy(m, x_adv_sde, y, bs=adv_batch_size)
return robust_acc, (x_adv_sde, x_adv_logits)
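    # Binarization test: for each sample, fit a binary readout on sampled
    # inner/boundary points around it and check whether the attack above can
    # still cross the induced decision boundary.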
scores_logit_differences_and_validation_accuracies = dbb.interior_boundary_discrimination_attack(
model,
testloader,
        attack_fn=lambda m, l, kwargs: attack_eval(m, l.dataset.tensors[0].to(config.device),
                                                   l.dataset.tensors[1].to(config.device)),
linearization_settings=DecisionBoundaryBinarizationSettings(
epsilon=args.adv_eps,
norm="linf",
lr=10000,
n_boundary_points=args.n_boundary_points,
n_inner_points=args.n_inner_points,
adversarial_attack_settings=None,
optimizer="sklearn"
),
n_samples=len(x_val),
device=config.device,
n_samples_evaluation=200, # args.num_samples_test * 10,
n_samples_asr_evaluation=200,
batch_size=args.batch_size * ngpus,
rescale_logits="adaptive",
decision_boundary_closeness=0.999,
sample_training_data_from_corners=args.sample_from_corners
)
print(dbb.format_result(scores_logit_differences_and_validation_accuracies, len(x_val)))
def robustness_eval(args, config):
middle_name = '_'.join([args.diffusion_type, args.attack_version]) if args.attack_version in ['stadv', 'standard', 'rand'] \
else '_'.join([args.diffusion_type, args.attack_version, args.attack_type])
log_dir = os.path.join(args.image_folder, args.classifier_name, middle_name,
'seed' + str(args.seed), 'data' + str(args.data_seed))
os.makedirs(log_dir, exist_ok=True)
args.log_dir = log_dir
logger = dp_utils.Logger(file_name=f'{log_dir}/log.txt', file_mode="w+", should_flush=True)
ngpus = torch.cuda.device_count()
adv_batch_size = args.adv_batch_size * ngpus
print(f'ngpus: {ngpus}, adv_batch_size: {adv_batch_size}')
# load model
print('starting the model and loader...')
model = SDE_Adv_Model(args, config)
if ngpus > 1:
model = torch.nn.DataParallel(model)
model = model.eval().to(config.device)
# load data
x_val, y_val = load_data(args, adv_batch_size)
# eval classifier and sde_adv against attacks
if args.attack_version in ['standard', 'rand', 'custom']:
eval_autoattack(args, config, model, x_val, y_val, adv_batch_size, log_dir)
elif args.attack_version == 'stadv':
eval_stadv(args, config, model, x_val, y_val, adv_batch_size, log_dir)
else:
raise NotImplementedError(f'unknown attack_version: {args.attack_version}')
logger.close()
def parse_args_and_config():
parser = argparse.ArgumentParser(description=globals()['__doc__'])
# diffusion models
parser.add_argument('--config', type=str, required=True, help='Path to the config file')
parser.add_argument('--data_seed', type=int, default=0, help='Random seed')
parser.add_argument('--seed', type=int, default=1234, help='Random seed')
    parser.add_argument('--exp', type=str, default='exp', help='Path for saving run-related data.')
parser.add_argument('--verbose', type=str, default='info', help='Verbose level: info | debug | warning | critical')
parser.add_argument('-i', '--image_folder', type=str, default='images', help="The folder name of samples")
parser.add_argument('--ni', action='store_true', help="No interaction. Suitable for Slurm Job launcher")
parser.add_argument('--sample_step', type=int, default=1, help='Total sampling steps')
parser.add_argument('--t', type=int, default=400, help='Sampling noise scale')
parser.add_argument('--t_delta', type=int, default=15, help='Perturbation range of sampling noise scale')
    parser.add_argument('--rand_t', type=str2bool, default=False, help='Whether to randomize the sampling noise scale')
parser.add_argument('--diffusion_type', type=str, default='ddpm', help='[ddpm, sde]')
parser.add_argument('--score_type', type=str, default='guided_diffusion', help='[guided_diffusion, score_sde]')
parser.add_argument('--eot_iter', type=int, default=20, help='only for rand version of autoattack')
parser.add_argument('--use_bm', action='store_true', help='whether to use brownian motion')
# LDSDE
parser.add_argument('--sigma2', type=float, default=1e-3, help='LDSDE sigma2')
parser.add_argument('--lambda_ld', type=float, default=1e-2, help='lambda_ld')
parser.add_argument('--eta', type=float, default=5., help='LDSDE eta')
parser.add_argument('--step_size', type=float, default=1e-2, help='step size for ODE Euler method')
# adv
parser.add_argument('--domain', type=str, default='celebahq', help='which domain: celebahq, cat, car, imagenet')
parser.add_argument('--classifier_name', type=str, default='Eyeglasses', help='which classifier to use')
parser.add_argument('--partition', type=str, default='val')
parser.add_argument('--adv_batch_size', type=int, default=64)
parser.add_argument('--attack_type', type=str, default='square')
parser.add_argument('--lp_norm', type=str, default='Linf', choices=['Linf', 'L2'])
parser.add_argument('--attack_version', type=str, default='custom')
parser.add_argument('--num_sub', type=int, default=1000, help='imagenet subset')
parser.add_argument('--adv_eps', type=float, default=0.07)
# Binarization Test
parser.add_argument("--binarization-test", action="store_true")
parser.add_argument("--batch-size", default=64, type=int)
parser.add_argument("--n_inner_points", default=999, type=int)
parser.add_argument("--n_boundary_points", default=1, type=int)
parser.add_argument("--sample-from-corners", action="store_true")
parser.add_argument("--test-samples-idx-start", default=0, type=int)
parser.add_argument("--test-samples-idx-end", default=64, type=int)
# parser.add_argument('--gpu_ids', type=str, default='0')
args = parser.parse_args()
# parse config file
with open(args.config, 'r') as f:
config = yaml.safe_load(f)
new_config = dp_utils.dict2namespace(config)
level = getattr(logging, args.verbose.upper(), None)
if not isinstance(level, int):
raise ValueError('level {} not supported'.format(args.verbose))
handler1 = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)s - %(filename)s - %(asctime)s - %(message)s')
handler1.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler1)
logger.setLevel(level)
args.image_folder = os.path.join(args.exp, args.image_folder)
os.makedirs(args.image_folder, exist_ok=True)
# add device
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
logging.info("Using device: {}".format(device))
new_config.device = device
# set random seed
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.benchmark = True
return args, new_config
if __name__ == '__main__':
patch_robustbench_models()
args, config = parse_args_and_config()
# os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
if args.binarization_test:
binarization_eval(args, config)
else:
robustness_eval(args, config)
|
import numpy as np
import glob
import argparse
import os
def parse_log(path):
with open(path, "r") as f:
lines = f.readlines()
lines = [l.strip() for l in lines]
if len(lines) < 4:
return None
if lines[-4].startswith("interior-vs-boundary discimination"):
asr = float(lines[-4].split(":")[1].strip())
logit_diff = float(lines[-3].split(":")[1].split("+-")[0].strip())
validation_acc = eval(lines[-2].split(":")[-1].replace("nan", "np.nan"))
if type(validation_acc) is float:
validation_acc = (np.nan, np.nan, np.nan, np.nan, np.nan, np.nan)
validation_acc = np.array(validation_acc)
n_failed = int(lines[-1].split("for ")[1].split("/")[0].strip())
return asr, logit_diff, validation_acc, n_failed
else:
return None
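# Expected log tail, reconstructed from the parsing logic above (the exact
# wording of the first line must match the upstream library's output):
#   interior-vs-boundary discimination ...: <asr float>
#   ...: <logit diff float> +- <std float>
#   ...: (<I>, <B>, <BS>, <BC>, <R. ASR S>, <R. ASR C>)
#   ... failed for <int>/<total> samples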
def main(input_folder):
logs = glob.glob(os.path.join(input_folder, "*.log"))
results = [(p, parse_log(p)) for p in logs]
incomplete_logs = [it[0] for it in results if it[1] is None]
if len(incomplete_logs) > 0:
print("Found incomplete logs for experiments:")
for it in incomplete_logs:
print(f"\t{it}")
results = [it[1] for it in results if it[1] is not None]
if len(results) == 0:
print("No results found.")
return
results = results[:512]
properties = [np.array([it[i] for it in results]) for i in range(len(results[0]))]
n_samples = len(results)
n_failed_samples = np.sum(properties[3])
# filter failed samples
failed_samples = [idx for idx in range(len(properties[3])) if properties[3][idx] == 1]
properties = [[prop[idx] for idx in range(len(prop)) if idx not in failed_samples] for prop in properties]
means = [np.mean(prop, 0) for prop in properties]
stds = [np.std(prop, 0) for prop in properties]
print(f"ASR: {means[0]}")
print(f"Normalized Logit-Difference-Improvement: {means[1]} +- {stds[1]}")
print(f"Validation Accuracy (I, B, BS, BC, R. ASR S, R. ASR C): {tuple(means[2])}")
print(f"Setup failed for {n_failed_samples}/{n_samples} samples")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input", "-i", required=True)
args = parser.parse_args()
main(args.input)
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import functools
import torch
from torch import nn
from torch import optim
# mister_ed
from .recoloradv.mister_ed import loss_functions as lf
from .recoloradv.mister_ed import adversarial_training as advtrain
from .recoloradv.mister_ed import adversarial_perturbations as ap
from .recoloradv.mister_ed import adversarial_attacks as aa
from .recoloradv.mister_ed import spatial_transformers as st
PGD_ITERS = 20
def run_attack_with_random_targets(attack, model, inputs, labels, num_classes):
"""
Runs an attack with targets randomly selected from all classes besides the
correct one. The attack should be a function from (inputs, labels) to
adversarial examples.
"""
rand_targets = torch.randint(
0, num_classes - 1, labels.size(),
dtype=labels.dtype, device=labels.device,
)
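    # Shifting by labels + 1 and reducing mod num_classes maps the random draw
    # from [0, num_classes - 2] uniformly onto the num_classes - 1 incorrect
    # labels, so targets != labels is guaranteed.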
targets = torch.remainder(labels + rand_targets + 1, num_classes)
adv_inputs = attack(inputs, targets)
adv_labels = model(adv_inputs).argmax(1)
unsuccessful = adv_labels != targets
adv_inputs[unsuccessful] = inputs[unsuccessful]
return adv_inputs
class MisterEdAttack(nn.Module):
"""
Base class for attacks using the mister_ed library.
"""
def __init__(self, model, threat_model, randomize=False,
perturbation_norm_loss=False, lr=0.001, random_targets=False,
num_classes=None, **kwargs):
super().__init__()
self.model = model
self.normalizer = nn.Identity()
self.threat_model = threat_model
self.randomize = randomize
self.perturbation_norm_loss = perturbation_norm_loss
self.attack_kwargs = kwargs
self.lr = lr
self.random_targets = random_targets
self.num_classes = num_classes
self.attack = None
def _setup_attack(self):
cw_loss = lf.CWLossF6(self.model, self.normalizer, kappa=float('inf'))
if self.random_targets:
cw_loss.forward = functools.partial(cw_loss.forward, targeted=True)
perturbation_loss = lf.PerturbationNormLoss(lp=2)
pert_factor = 0.0
if self.perturbation_norm_loss is True:
pert_factor = 0.05
elif type(self.perturbation_norm_loss) is float:
pert_factor = self.perturbation_norm_loss
adv_loss = lf.RegularizedLoss({
'cw': cw_loss,
'pert': perturbation_loss,
}, {
'cw': 1.0,
'pert': pert_factor,
}, negate=True)
self.pgd_attack = aa.PGD(self.model, self.normalizer,
self.threat_model(), adv_loss)
attack_params = {
'optimizer': optim.Adam,
'optimizer_kwargs': {'lr': self.lr},
'signed': False,
'verbose': False,
'num_iterations': 0 if self.randomize else PGD_ITERS,
'random_init': self.randomize,
}
attack_params.update(self.attack_kwargs)
self.attack = advtrain.AdversarialAttackParameters(
self.pgd_attack,
1.0,
attack_specific_params={'attack_kwargs': attack_params},
)
self.attack.set_gpu(False)
def forward(self, inputs, labels):
if self.attack is None:
self._setup_attack()
assert self.attack is not None
if self.random_targets:
return run_attack_with_random_targets(
lambda inputs, labels: self.attack.attack(inputs, labels)[0],
self.model,
inputs,
labels,
num_classes=self.num_classes,
)
else:
return self.attack.attack(inputs, labels)[0]
class StAdvAttack(MisterEdAttack):
def __init__(self, model, bound=0.05, **kwargs):
kwargs.setdefault('lr', 0.01)
super().__init__(
model,
threat_model=lambda: ap.ThreatModel(ap.ParameterizedXformAdv, {
'lp_style': 'inf',
'lp_bound': bound,
'xform_class': st.FullSpatial,
'use_stadv': True,
}),
perturbation_norm_loss=0.0025 / bound,
**kwargs,
)
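
# Usage sketch mirroring the evaluation code above: `model`, `x`, `y` are
# placeholders for a classifier and an input batch. The bound constrains the
# spatial flow field, not the pixel values.
#   attack = StAdvAttack(model, bound=0.05, num_iterations=100)
#   x_adv = attack(x, y)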
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/perturbations.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
from .mister_ed import adversarial_perturbations as ap
from .mister_ed.adversarial_perturbations import initialized
from .mister_ed.utils import pytorch_utils as utils
from . import color_transformers as ct
from . import color_spaces as cs
class ReColorAdv(ap.AdversarialPerturbation):
"""
Puts the color at each pixel in the image through the same transformation.
Parameters:
- lp_style: number or 'inf'
- lp_bound: maximum norm of color transformation. Can be a tensor of size
(num_channels,), in which case each channel will be bounded by the
cooresponding bound in the tensor. For instance, passing
[0.1, 0.15, 0.05] would allow a norm of 0.1 for R, 0.15 for G, and 0.05
for B. Not supported by all transformations.
- use_smooth_loss: whether to optimize using the loss function
for FullSpatial that rewards smooth vector fields
- xform_class: a subclass of
color_transformers.ParameterizedTransformation
- xform_params: dict of parameters to pass to the xform_class.
- cspace_class: a subclass of color_spaces.ColorSpace that indicates
in which color space the transformation should be performed
(RGB by default)
"""
def __init__(self, threat_model, perturbation_params, *other_args):
super().__init__(threat_model, perturbation_params)
assert issubclass(perturbation_params.xform_class,
ct.ParameterizedTransformation)
self.lp_style = perturbation_params.lp_style
self.lp_bound = perturbation_params.lp_bound
self.use_smooth_loss = perturbation_params.use_smooth_loss
self.scalar_step = perturbation_params.scalar_step or 1.0
self.cspace = perturbation_params.cspace or cs.RGBColorSpace()
def _merge_setup(self, num_examples, new_xform):
""" DANGEROUS TO BE CALLED OUTSIDE OF THIS FILE!!!"""
self.num_examples = num_examples
self.xform = new_xform
self.initialized = True
def setup(self, originals):
super().setup(originals)
self.xform = self.perturbation_params.xform_class(
shape=originals.shape, manual_gpu=self.use_gpu,
cspace=self.cspace,
**(self.perturbation_params.xform_params or {}),
)
self.initialized = True
@initialized
def perturbation_norm(self, x=None, lp_style=None):
lp_style = lp_style or self.lp_style
if self.use_smooth_loss:
assert isinstance(self.xform, ct.FullSpatial)
return self.xform.smoothness_norm()
else:
return self.xform.norm(lp=lp_style)
@initialized
def constrain_params(self, x=None):
# Do lp projections
if isinstance(self.lp_style, int) or self.lp_style == 'inf':
self.xform.project_params(self.lp_style, self.lp_bound)
@initialized
def update_params(self, step_fxn):
param_list = list(self.xform.parameters())
assert len(param_list) == 1
params = param_list[0]
assert params.grad.data is not None
self.add_to_params(step_fxn(params.grad.data) * self.scalar_step)
@initialized
def add_to_params(self, grad_data):
""" Assumes only one parameters object in the Spatial Transform """
param_list = list(self.xform.parameters())
assert len(param_list) == 1
params = param_list[0]
params.data.add_(grad_data)
@initialized
def random_init(self):
param_list = list(self.xform.parameters())
assert len(param_list) == 1
param = param_list[0]
random_perturb = utils.random_from_lp_ball(param.data,
self.lp_style,
self.lp_bound)
param.data.add_(self.xform.identity_params +
random_perturb - self.xform.xform_params.data)
@initialized
def merge_perturbation(self, other, self_mask):
super().merge_perturbation(other, self_mask)
new_perturbation = ReColorAdv(self.threat_model,
self.perturbation_params)
new_xform = self.xform.merge_xform(other.xform, self_mask)
new_perturbation._merge_setup(self.num_examples, new_xform)
return new_perturbation
def forward(self, x):
if not self.initialized:
self.setup(x)
self.constrain_params()
return self.cspace.to_rgb(
self.xform.forward(self.cspace.from_rgb(x)))
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/__init__.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/color_transformers.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
"""
Contains various parameterizations for spatial transformation in 3D color space.
"""
import torch
import torch.nn as nn
from .mister_ed.utils import pytorch_utils as utils
from torch.autograd import Variable
from . import norms
from functools import lru_cache
##############################################################################
# #
# SKELETON CLASS #
# #
##############################################################################
class ParameterizedTransformation(nn.Module):
""" General class of transformations.
All subclasses need the following methods:
- norm: no args -> scalar variable
- identity_params: shape -> TENSOR : takes an input shape and outputs
the subclass-specific parameter for the identity
transformation
- forward : Variable -> Variable - is the transformation
"""
def __init__(self, **kwargs):
super(ParameterizedTransformation, self).__init__()
if kwargs.get('manual_gpu', None) is not None:
self.use_gpu = kwargs['manual_gpu']
else:
self.use_gpu = utils.use_gpu()
def clone(self, shape=None, example_index=None):
raise NotImplementedError()
def norm(self, lp='inf'):
raise NotImplementedError("Need to call subclass's norm!")
    @classmethod
    def identity_params(cls, shape):
raise NotImplementedError("Need to call subclass's identity_params!")
def merge_xform(self, other, self_mask):
""" Takes in an other instance of this same class with the same
shape of parameters (NxSHAPE) and a self_mask bytetensor of length
N and outputs the merge between self's parameters for the indices
of 1s in the self_mask and other's parameters for the indices of 0's
ARGS:
other: instance of same class as self with params of shape NxSHAPE -
the thing we merge with this one
self_mask : ByteTensor (length N) - which indices of parameters we
keep from self, and which we keep from other
RETURNS:
New instance of this class that's merged between the self and other
(same shaped params)
"""
# JUST DO ASSERTS IN THE SKELETON CLASS
assert self.__class__ == other.__class__
self_params = self.xform_params.data
other_params = other.xform_params.data
assert self_params.shape == other_params.shape
assert self_params.shape[0] == self_mask.shape[0]
assert other_params.shape[0] == self_mask.shape[0]
new_xform = self.__class__(shape=self.img_shape)
new_params = utils.fold_mask(self.xform_params.data,
other.xform_params.data, self_mask)
new_xform.xform_params = nn.Parameter(new_params)
new_xform.use_gpu = self.use_gpu
return new_xform
def forward(self, examples):
raise NotImplementedError("Need to call subclass's forward!")
class AffineTransform(ParameterizedTransformation):
def __init__(self, *args, **kwargs):
super(AffineTransform, self).__init__(**kwargs)
img_shape = kwargs['shape']
self.img_shape = img_shape
self.xform_params = nn.Parameter(self.identity_params(img_shape))
def clone(self, shape=None, example_index=None):
xform = AffineTransform(shape=shape or self.img_shape)
if example_index is None:
my_params = self.xform_params
else:
my_params = self.xform_params[example_index][None]
        xform.xform_params = nn.Parameter(
            my_params.clone()
            .expand((shape or self.img_shape)[0], -1, -1)
        )
return xform
def norm(self, lp='inf'):
identity_params = Variable(self.identity_params(self.img_shape))
return utils.batchwise_norm(self.xform_params - identity_params, lp,
dim=0)
def identity_params(self, shape):
num_examples = shape[0]
identity_affine_transform = torch.zeros(num_examples, 3, 4)
if self.use_gpu:
identity_affine_transform = identity_affine_transform.cuda()
identity_affine_transform[:, 0, 0] = 1
identity_affine_transform[:, 1, 1] = 1
identity_affine_transform[:, 2, 2] = 1
return identity_affine_transform
def project_params(self, lp, lp_bound):
assert isinstance(lp, int) or lp == 'inf'
diff = self.xform_params.data - self.identity_params(self.img_shape)
new_diff = utils.batchwise_lp_project(diff, lp, lp_bound)
self.xform_params.data.add_(new_diff - diff)
    def forward(self, x):
        N, _, W, H = self.img_shape
        x_padded = torch.cat(
            [x, torch.ones(N, 1, W, H, device=x.device)], 1).permute(0, 2, 3, 1)
transform_padded = self.xform_params[:, None, None, :, :] \
.expand(-1, W, H, -1, -1)
x_transformed = transform_padded.matmul(x_padded[..., None]) \
.squeeze(4) \
.permute(0, 3, 1, 2)
return x_transformed
class FullSpatial(ParameterizedTransformation):
def __init__(self, *args, resolution_x=8,
resolution_y=8, resolution_z=8, **kwargs):
super(FullSpatial, self).__init__(**kwargs)
self.resolution_x = resolution_x
self.resolution_y = resolution_y
self.resolution_z = resolution_z
img_shape = kwargs['shape']
self.img_shape = img_shape
self.cspace = kwargs.get('cspace')
batch_size = self.img_shape[0]
self.identity_params = FullSpatial.construct_identity_params(
batch_size,
self.resolution_x,
self.resolution_y,
self.resolution_z,
torch.cuda.current_device() if self.use_gpu else None,
)
self.xform_params = nn.Parameter(
torch.empty_like(self.identity_params)
.copy_(self.identity_params)
)
def clone(self, shape=None, example_index=None):
xform = FullSpatial(
shape=shape or self.img_shape,
resolution_x=self.resolution_x,
resolution_y=self.resolution_y,
resolution_z=self.resolution_z,
cspace=self.cspace,
)
if example_index is None:
my_params = self.xform_params
else:
my_params = self.xform_params[example_index][None]
        xform.xform_params = nn.Parameter(
            my_params.clone()
            .expand((shape or self.img_shape)[0], -1, -1, -1, -1)
        )
return xform
def smoothness_norm(self):
return norms.smoothness(self.xform_params -
self.identity_params)
def norm(self, lp='inf'):
if isinstance(lp, int) or lp == 'inf':
return utils.batchwise_norm(
self.xform_params - self.identity_params,
lp, dim=0,
)
else:
assert lp == 'smooth'
return self.smoothness_norm()
def clip_params(self):
"""
Clips the parameters to be between 0 and 1 and also within the color
space's gamut.
"""
clamp_params = torch.clamp(self.xform_params, 0, 1).data
params_shape = self.xform_params.size()
flattened_params = (
clamp_params
.permute(0, 4, 1, 2, 3)
.reshape(params_shape[0], 3, -1, 1)
)
gamut_params = self.cspace.from_rgb(self.cspace.to_rgb(
flattened_params))
clamp_params = (
gamut_params
.permute(0, 2, 3, 1)
.reshape(*params_shape)
)
change_in_params = clamp_params - self.xform_params.data
self.xform_params.data.add_(change_in_params)
def merge_xform(self, other, self_mask):
"""
Takes in an other instance of this same class with the same
shape of parameters (NxSHAPE) and a self_mask bytetensor of length
N and outputs the merge between self's parameters for the indices
of 1s in the self_mask and other's parameters for the indices of 0's
"""
super().merge_xform(other, self_mask)
new_xform = FullSpatial(shape=self.img_shape,
manual_gpu=self.use_gpu,
resolution_x=self.resolution_x,
resolution_y=self.resolution_y,
resolution_z=self.resolution_z,
cspace=self.cspace)
new_params = utils.fold_mask(self.xform_params.data,
other.xform_params.data, self_mask)
new_xform.xform_params = nn.Parameter(new_params)
return new_xform
def project_params(self, lp, lp_bound):
"""
Projects the params to be within lp_bound (according to an lp)
of the identity map. First thing we do is clip the params to be
valid, too.
ARGS:
lp : int or 'inf' - which LP norm we use. Must be an int or the
string 'inf'.
lp_bound : float - how far we're allowed to go in LP land. Can be
a list to indicate that we can go more in some channels
than others.
RETURNS:
None, but modifies self.xform_params
"""
assert isinstance(lp, int) or lp == 'inf'
# clip first
self.clip_params()
# then project back
if lp == 'inf':
try:
# first, assume lp_bound is a vector, and then revert to scalar
# if it's not
clamped_channels = []
for channel_index, bound in enumerate(lp_bound):
clamped_channels.append(utils.clamp_ref(
self.xform_params[..., channel_index],
self.identity_params[..., channel_index],
bound,
))
clamp_params = torch.stack(clamped_channels, 4)
except TypeError:
clamp_params = utils.clamp_ref(self.xform_params.data,
self.identity_params, lp_bound)
change_in_params = clamp_params - self.xform_params.data
else:
flattened_params = (
self.xform_params.data -
self.identity_params
).reshape((-1, 3))
projected_params = flattened_params.renorm(lp, 0, lp_bound)
flattened_change = projected_params - flattened_params
change_in_params = flattened_change.reshape(
self.xform_params.size())
self.xform_params.data.add_(change_in_params)
def forward(self, imgs):
device = torch.device('cuda') if self.use_gpu else None
N, C, W, H = self.img_shape
imgs = imgs.permute(0, 2, 3, 1) # N x W x H x C
imgs = imgs * torch.tensor(
[
self.resolution_x - 1,
self.resolution_y - 1,
self.resolution_z - 1,
],
dtype=torch.float,
device=device,
)[None, None, None, :].expand(N, W, H, C)
integer_part, float_part = torch.floor(imgs).long(), imgs % 1
params_list = self.xform_params.view(N, -1, 3)
# do trilinear interpolation from the params grid
endpoint_values = []
for delta_x in [0, 1]:
corner_values = []
for delta_y in [0, 1]:
vertex_values = []
for delta_z in [0, 1]:
params_index = Variable(torch.zeros(
N, W, H,
dtype=torch.long,
device=device,
))
for color_index, resolution in [
(integer_part[..., 0] + delta_x, self.resolution_x),
(integer_part[..., 1] + delta_y, self.resolution_y),
(integer_part[..., 2] + delta_z, self.resolution_z),
]:
color_index = color_index.clamp(
0, resolution - 1)
params_index = (params_index * resolution +
color_index)
params_index = params_index.view(N, -1)[:, :, None] \
.expand(-1, -1, 3)
vertex_values.append(
params_list.gather(1, params_index)
.view(N, W, H, C)
)
corner_values.append(
vertex_values[0] * (1 - float_part[..., 2, None]) +
vertex_values[1] * float_part[..., 2, None]
)
endpoint_values.append(
corner_values[0] * (1 - float_part[..., 1, None]) +
corner_values[1] * float_part[..., 1, None]
)
result = (
endpoint_values[0] * (1 - float_part[..., 0, None]) +
endpoint_values[1] * float_part[..., 0, None]
)
return result.permute(0, 3, 1, 2)
@staticmethod
@lru_cache(maxsize=10)
def construct_identity_params(batch_size, resolution_x, resolution_y,
resolution_z, device):
identity_params = torch.empty(
batch_size, resolution_x, resolution_y,
resolution_z, 3,
dtype=torch.float,
device=device,
)
for x in range(resolution_x):
for y in range(resolution_y):
for z in range(resolution_z):
identity_params[:, x, y, z, 0] = \
x / (resolution_x - 1)
identity_params[:, x, y, z, 1] = \
y / (resolution_y - 1)
identity_params[:, x, y, z, 2] = \
z / (resolution_z - 1)
return identity_params
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/utils.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
from torch import nn
from torch import optim
from .mister_ed.utils.pytorch_utils import DifferentiableNormalize
from .mister_ed import adversarial_perturbations as ap
from .mister_ed import adversarial_attacks as aa
from .mister_ed import spatial_transformers as st
from .mister_ed import loss_functions as lf
from .mister_ed import adversarial_training as advtrain
from . import perturbations as pt
from . import color_transformers as ct
from . import color_spaces as cs
def get_attack_from_name(
name: str,
classifier: nn.Module,
normalizer: DifferentiableNormalize,
verbose: bool = False,
) -> advtrain.AdversarialAttackParameters:
"""
Builds an attack from a name like "recoloradv" or "stadv+delta" or
"recoloradv+stadv+delta".
"""
threats = []
norm_weights = []
for attack_part in name.split('+'):
if attack_part == 'delta':
threats.append(ap.ThreatModel(
ap.DeltaAddition,
ap.PerturbationParameters(
lp_style='inf',
lp_bound=8.0 / 255,
),
))
norm_weights.append(0.0)
elif attack_part == 'stadv':
threats.append(ap.ThreatModel(
ap.ParameterizedXformAdv,
ap.PerturbationParameters(
lp_style='inf',
lp_bound=0.05,
xform_class=st.FullSpatial,
use_stadv=True,
),
))
norm_weights.append(1.0)
elif attack_part == 'recoloradv':
threats.append(ap.ThreatModel(
pt.ReColorAdv,
ap.PerturbationParameters(
lp_style='inf',
lp_bound=[0.06, 0.06, 0.06],
xform_params={
'resolution_x': 16,
'resolution_y': 32,
'resolution_z': 32,
},
xform_class=ct.FullSpatial,
use_smooth_loss=True,
cspace=cs.CIELUVColorSpace(),
),
))
norm_weights.append(1.0)
else:
raise ValueError(f'Invalid attack "{attack_part}"')
sequence_threat = ap.ThreatModel(
ap.SequentialPerturbation,
threats,
ap.PerturbationParameters(norm_weights=norm_weights),
)
# use PGD attack
adv_loss = lf.CWLossF6(classifier, normalizer, kappa=float('inf'))
st_loss = lf.PerturbationNormLoss(lp=2)
loss_fxn = lf.RegularizedLoss({'adv': adv_loss, 'pert': st_loss},
{'adv': 1.0, 'pert': 0.05},
negate=True)
pgd_attack = aa.PGD(classifier, normalizer, sequence_threat, loss_fxn)
return advtrain.AdversarialAttackParameters(
pgd_attack,
1.0,
attack_specific_params={'attack_kwargs': {
'num_iterations': 100,
'optimizer': optim.Adam,
'optimizer_kwargs': {'lr': 0.001},
'signed': False,
'verbose': verbose,
}},
)
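
# Usage sketch (hypothetical setup): `classifier` is any nn.Module and the
# normalizer statistics are placeholders. The [0] indexing follows the
# attack(...) return convention used elsewhere in this codebase.
#   attack_params = get_attack_from_name('recoloradv+stadv+delta', classifier,
#                                        DifferentiableNormalize([0.5] * 3,
#                                                                [0.5] * 3))
#   x_adv = attack_params.attack(inputs, labels)[0]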
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/norms.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
import torch
from torch.autograd import Variable
def smoothness(grid):
"""
Given a variable of dimensions (N, X, Y, [Z], C), computes the sum of
the differences between adjacent points in the grid formed by the
dimensions X, Y, and (optionally) Z. Returns a tensor of dimension N.
"""
num_dims = len(grid.size()) - 2
batch_size = grid.size()[0]
norm = Variable(torch.zeros(batch_size, dtype=grid.data.dtype,
device=grid.data.device))
for dim in range(num_dims):
slice_before = (slice(None),) * (dim + 1)
slice_after = (slice(None),) * (num_dims - dim)
shifted_grids = [
# left
torch.cat([
grid[slice_before + (slice(1, None),) + slice_after],
grid[slice_before + (slice(-1, None),) + slice_after],
], dim + 1),
# right
torch.cat([
grid[slice_before + (slice(None, 1),) + slice_after],
grid[slice_before + (slice(None, -1),) + slice_after],
], dim + 1)
]
for shifted_grid in shifted_grids:
delta = shifted_grid - grid
norm_components = (delta.pow(2).sum(-1) + 1e-10).pow(0.5)
norm.add_(norm_components.sum(
tuple(range(1, len(norm_components.size())))))
return norm
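
# Tiny sketch: a constant grid has (numerically) zero smoothness norm, since
# every neighbour difference vanishes up to the 1e-10 stabiliser.
def _smoothness_demo():
    constant_grid = torch.ones(2, 4, 4, 3)  # N=2, X=Y=4, C=3
    return smoothness(constant_grid)        # ~zero tensor of shape (2,)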
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/color_spaces.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
"""
Contains classes that convert from RGB to various other color spaces and back.
"""
import torch
import numpy as np
import math
class ColorSpace(object):
"""
Base class for color spaces.
"""
def from_rgb(self, imgs):
"""
Converts an Nx3xWxH tensor in RGB color space to a Nx3xWxH tensor in
this color space. All outputs should be in the 0-1 range.
"""
raise NotImplementedError()
def to_rgb(self, imgs):
"""
Converts an Nx3xWxH tensor in this color space to a Nx3xWxH tensor in
RGB color space.
"""
raise NotImplementedError()
class RGBColorSpace(ColorSpace):
"""
RGB color space. Just applies identity transformation.
"""
def from_rgb(self, imgs):
return imgs
def to_rgb(self, imgs):
return imgs
class YPbPrColorSpace(ColorSpace):
"""
YPbPr color space. Uses ITU-R BT.601 standard by default.
"""
def __init__(self, kr=0.299, kg=0.587, kb=0.114, luma_factor=1,
chroma_factor=1):
self.kr, self.kg, self.kb = kr, kg, kb
self.luma_factor = luma_factor
self.chroma_factor = chroma_factor
def from_rgb(self, imgs):
r, g, b = imgs.permute(1, 0, 2, 3)
y = r * self.kr + g * self.kg + b * self.kb
pb = (b - y) / (2 * (1 - self.kb))
pr = (r - y) / (2 * (1 - self.kr))
return torch.stack([y * self.luma_factor,
pb * self.chroma_factor + 0.5,
pr * self.chroma_factor + 0.5], 1)
def to_rgb(self, imgs):
y_prime, pb_prime, pr_prime = imgs.permute(1, 0, 2, 3)
y = y_prime / self.luma_factor
pb = (pb_prime - 0.5) / self.chroma_factor
pr = (pr_prime - 0.5) / self.chroma_factor
b = pb * 2 * (1 - self.kb) + y
r = pr * 2 * (1 - self.kr) + y
g = (y - r * self.kr - b * self.kb) / self.kg
return torch.stack([r, g, b], 1).clamp(0, 1)
class ApproxHSVColorSpace(ColorSpace):
"""
Converts from RGB to approximately the HSV cone using a much smoother
transformation.
"""
def from_rgb(self, imgs):
r, g, b = imgs.permute(1, 0, 2, 3)
x = r * np.sqrt(2) / 3 - g / (np.sqrt(2) * 3) - b / (np.sqrt(2) * 3)
y = g / np.sqrt(6) - b / np.sqrt(6)
z, _ = imgs.max(1)
return torch.stack([z, x + 0.5, y + 0.5], 1)
def to_rgb(self, imgs):
z, xp, yp = imgs.permute(1, 0, 2, 3)
x, y = xp - 0.5, yp - 0.5
rp = float(np.sqrt(2)) * x
gp = -x / np.sqrt(2) + y * np.sqrt(3 / 2)
bp = -x / np.sqrt(2) - y * np.sqrt(3 / 2)
delta = z - torch.max(torch.stack([rp, gp, bp], 1), 1)[0]
r, g, b = rp + delta, gp + delta, bp + delta
return torch.stack([r, g, b], 1).clamp(0, 1)
class HSVConeColorSpace(ColorSpace):
"""
Converts from RGB to the HSV "cone", where (x, y, z) =
(s * v cos h, s * v sin h, v). Note that this cone is then squashed to fit
in [0, 1]^3 by letting (x', y', z') = ((x + 1) / 2, (y + 1) / 2, z).
WARNING: has a very complex derivative, not very useful in practice
"""
def from_rgb(self, imgs):
r, g, b = imgs.permute(1, 0, 2, 3)
mx, argmx = imgs.max(1)
mn, _ = imgs.min(1)
chroma = mx - mn
eps = 1e-10
h_max_r = math.pi / 3 * (g - b) / (chroma + eps)
h_max_g = math.pi / 3 * (b - r) / (chroma + eps) + math.pi * 2 / 3
h_max_b = math.pi / 3 * (r - g) / (chroma + eps) + math.pi * 4 / 3
h = (((argmx == 0) & (chroma != 0)).float() * h_max_r
+ ((argmx == 1) & (chroma != 0)).float() * h_max_g
+ ((argmx == 2) & (chroma != 0)).float() * h_max_b)
x = torch.cos(h) * chroma
y = torch.sin(h) * chroma
z = mx
return torch.stack([(x + 1) / 2, (y + 1) / 2, z], 1)
def _to_rgb_part(self, h, chroma, v, n):
"""
Implements the function f(n) defined here:
https://en.wikipedia.org/wiki/HSL_and_HSV#Alternative_HSV_to_RGB
"""
k = (n + h * math.pi / 3) % 6
return v - chroma * torch.min(k, 4 - k).clamp(0, 1)
def to_rgb(self, imgs):
xp, yp, z = imgs.permute(1, 0, 2, 3)
x, y = xp * 2 - 1, yp * 2 - 1
# prevent NaN gradients when calculating atan2
x_nonzero = (1 - 2 * (torch.sign(x) == -1).float()) * (torch.abs(x) + 1e-10)
h = torch.atan2(y, x_nonzero)
v = z.clamp(0, 1)
chroma = torch.min(torch.sqrt(x ** 2 + y ** 2 + 1e-10), v)
r = self._to_rgb_part(h, chroma, v, 5)
g = self._to_rgb_part(h, chroma, v, 3)
b = self._to_rgb_part(h, chroma, v, 1)
return torch.stack([r, g, b], 1).clamp(0, 1)
class CIEXYZColorSpace(ColorSpace):
"""
The 1931 CIE XYZ color space (assuming input is in sRGB).
Warning: may have values outside [0, 1] range. Should only be used in
the process of converting to/from other color spaces.
"""
def from_rgb(self, imgs):
# apply gamma correction
small_values_mask = (imgs < 0.04045).float()
imgs_corrected = (
(imgs / 12.92) * small_values_mask +
((imgs + 0.055) / 1.055) ** 2.4 * (1 - small_values_mask)
)
# linear transformation to XYZ
r, g, b = imgs_corrected.permute(1, 0, 2, 3)
x = 0.4124 * r + 0.3576 * g + 0.1805 * b
y = 0.2126 * r + 0.7152 * g + 0.0722 * b
z = 0.0193 * r + 0.1192 * g + 0.9504 * b
return torch.stack([x, y, z], 1)
def to_rgb(self, imgs):
# linear transformation
x, y, z = imgs.permute(1, 0, 2, 3)
r = 3.2406 * x - 1.5372 * y - 0.4986 * z
g = -0.9689 * x + 1.8758 * y + 0.0415 * z
b = 0.0557 * x - 0.2040 * y + 1.0570 * z
imgs = torch.stack([r, g, b], 1)
# apply gamma correction
small_values_mask = (imgs < 0.0031308).float()
imgs_clamped = imgs.clamp(min=1e-10) # prevent NaN gradients
imgs_corrected = (
(12.92 * imgs) * small_values_mask +
(1.055 * imgs_clamped ** (1 / 2.4) - 0.055) *
(1 - small_values_mask)
)
return imgs_corrected
class CIELUVColorSpace(ColorSpace):
"""
Converts to the 1976 CIE L*u*v* color space.
"""
def __init__(self, up_white=0.1978, vp_white=0.4683, y_white=1,
eps=1e-10):
self.xyz_cspace = CIEXYZColorSpace()
self.up_white = up_white
self.vp_white = vp_white
self.y_white = y_white
self.eps = eps
def from_rgb(self, imgs):
x, y, z = self.xyz_cspace.from_rgb(imgs).permute(1, 0, 2, 3)
# calculate u' and v'
denom = x + 15 * y + 3 * z + self.eps
up = 4 * x / denom
vp = 9 * y / denom
# calculate L*, u*, and v*
small_values_mask = (y / self.y_white < (6 / 29) ** 3).float()
y_clamped = y.clamp(min=self.eps) # prevent NaN gradients
L = (
((29 / 3) ** 3 * y / self.y_white) * small_values_mask +
(116 * (y_clamped / self.y_white) ** (1 / 3) - 16) *
(1 - small_values_mask)
)
u = 13 * L * (up - self.up_white)
v = 13 * L * (vp - self.vp_white)
return torch.stack([L / 100, (u + 100) / 200, (v + 100) / 200], 1)
def to_rgb(self, imgs):
L = imgs[:, 0, :, :] * 100
u = imgs[:, 1, :, :] * 200 - 100
v = imgs[:, 2, :, :] * 200 - 100
up = u / (13 * L + self.eps) + self.up_white
vp = v / (13 * L + self.eps) + self.vp_white
small_values_mask = (L <= 8).float()
y = (
(self.y_white * L * (3 / 29) ** 3) * small_values_mask +
(self.y_white * ((L + 16) / 116) ** 3) * (1 - small_values_mask)
)
denom = 4 * vp + self.eps
x = y * 9 * up / denom
z = y * (12 - 3 * up - 20 * vp) / denom
return self.xyz_cspace.to_rgb(
torch.stack([x, y, z], 1).clamp(0, 1.1)).clamp(0, 1)
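
# Sanity-check sketch: for in-gamut inputs, to_rgb should approximately invert
# from_rgb. The shapes and the choice of color space here are illustrative
# assumptions, not part of the original module.
def _roundtrip_demo(cspace=None):
    cspace = cspace or CIELUVColorSpace()
    imgs = torch.rand(2, 3, 8, 8)
    recovered = cspace.to_rgb(cspace.from_rgb(imgs))
    return (recovered - imgs).abs().max()  # expected to be small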
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/adversarial_perturbations.py
#
# The license for the original version of this file can be
# found in the `recoloradv` directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
""" File that holds adversarial perturbations as torch.nn.Modules.
An adversarial perturbation is an example-specific
"""
import torch
import torch.nn as nn
from . import spatial_transformers as st
from .utils import image_utils as img_utils
from .utils import pytorch_utils as utils
from torch.autograd import Variable
import functools
# assert initialized decorator
def initialized(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
assert self.initialized, ("Parameters not initialized yet. "
"Call .forward(...) first")
return func(self, *args, **kwargs)
return wrapper
##############################################################################
# #
# SKELETON CLASS #
# #
##############################################################################
class AdversarialPerturbation(nn.Module):
""" Skeleton class to hold adversarial perturbations FOR A SINGLE MINIBATCH.
For general input-agnostic adversarial perturbations, see the
ThreatModel class
All subclasses need the following:
- perturbation_norm() : no args -> scalar Variable
- self.parameters() needs to iterate over params we want to optimize
- constrain_params() : no args -> no return,
modifies the parameters such that this is still a valid image
        - forward : x -> Variable - applies the adversarial perturbation to
          the originals and outputs a Variable of how we got there
- adversarial_tensors() : applies the adversarial transform to the
originals and outputs TENSORS that are the
adversarial images
"""
def __init__(self, threat_model, perturbation_params):
super(AdversarialPerturbation, self).__init__()
self.threat_model = threat_model
self.initialized = False
self.perturbation_params = perturbation_params
if isinstance(perturbation_params, tuple):
self.use_gpu = perturbation_params[1].use_gpu or utils.use_gpu()
else:
self.use_gpu = perturbation_params.use_gpu or utils.use_gpu()
# Stores parameters of the adversarial perturbation and hyperparams
# to compute total perturbation norm here
def __call__(self, x):
return self.forward(x)
def __repr__(self):
if isinstance(self.perturbation_params, tuple):
output_str = "[Perturbation] %s: %s" % (self.__class__.__name__,
self.perturbation_params[1])
output_str += '\n['
for el in self.perturbation_params[0]:
output_str += '\n\t%s,' % el
output_str += '\n]'
return output_str
else:
return "[Perturbation] %s: %s" % (self.__class__.__name__,
self.perturbation_params)
def _merge_setup(self, *args):
""" Internal method to be used when initializing a new perturbation
from merging only. Should not be called outside this file!!
"""
pass
def setup(self, x):
""" This is the standard setup technique and should be used to
initialize a perturbation (i.e. sets up parameters and unlocks
all other methods)
ARGS:
x : Variable or Tensor (NxCxHxW) - the images this perturbation is
intended for
"""
self.num_examples = x.shape[0]
@initialized
def perturbation_norm(self, x=None):
""" This returns the 'norm' of this perturbation. Optionally, for
certain norms, having access to the images for which the
perturbation is intended can have an effect on the output.
ARGS:
x : Variable or Tensor (NxCxHxW) - optionally can be the images
that the perturbation was intended for
RETURNS:
Scalar Variable
"""
raise NotImplementedError("Need to call subclass method here")
@initialized
def constrain_params(self):
""" This modifies the parameters such that the perturbation falls within
the threat model it belongs to. E.g. for l-infinity threat models,
this clips the params to match the right l-infinity bound.
TODO: for non-lp norms, projecting to the nearest point in the level
set
"""
raise NotImplementedError("Need to call subclass method here")
@initialized
def make_valid_image(self, x):
""" This takes in the minibatch self's parameters were tuned for and
clips the parameters such that this is still a valid image.
ARGS:
            x : Variable or Tensor (NxCxHxW) - the images this perturbation
was intended for
RETURNS:
None
"""
pass # Only implement in classes that can create invalid images
@initialized
def forward(self, x):
""" This takes in the minibatch self's parameters were tuned for and
outputs a variable of the perturbation applied to the images
ARGS:
            x : Variable (NxCxHxW) - the images this perturbation
was intended for
RETURNS:
Variable (NxCxHxW) - the perturbation applied to the input images
"""
raise NotImplementedError("Need to call subclass method here")
@initialized
def add_to_params(self, grad_data):
""" This takes in a Tensor the same shape as self's parameters and
adds to them. Note that this usually won't preserve gradient
information
(also this might have different signatures in subclasses)
ARGS:
            grad_data : Tensor (params-shape) - Tensor to be added to the
                        parameters of self
RETURNS:
None, but modifies self's parameters
"""
raise NotImplementedError("Need to call subclass method here")
@initialized
def update_params(self, step_fxn):
""" This takes in a function step_fxn: Tensor -> Tensor that generates
the change to the parameters that we step along. This loops through
all parameters and updates signs accordingly.
For sequential perturbations, this also multiplies by a scalar if
provided
ARGS:
step_fxn : Tensor -> Tensor - function that maps tensors to tensors.
e.g. for FGSM, we want a function that multiplies signs
by step_size
RETURNS:
None, but updates the parameters
"""
raise NotImplementedError("Need to call subclass method here")
@initialized
def adversarial_tensors(self, x=None):
""" Little helper method to get the tensors of the adversarial images
directly
"""
assert x is not None or self.originals is not None
if x is None:
x = self.originals
return self.forward(x).data
@initialized
def attach_attr(self, attr_name, attr):
""" Special method to set an attribute if it doesn't exist in this
object yet. throws error if this attr already exists
ARGS:
attr_name : string - name of attribute we're attaching
attr: object - attribute we're attaching
RETURNS:
None
"""
if hasattr(self, attr_name):
raise Exception("%s already has attribute %s" % (self, attr_name))
else:
setattr(self, attr_name, attr)
@initialized
def attach_originals(self, originals):
""" Little helper method to tack on the original images to self to
pass around the (images, perturbation) in a single object
"""
self.attach_attr('originals', originals)
@initialized
def random_init(self):
""" Modifies the parameters such that they're randomly initialized
uniformly across the threat model (this is harder for nonLp threat
models...). Takes no args and returns nothing, but modifies the
parameters
"""
raise NotImplementedError("Need to call subclass method here")
@initialized
def merge_perturbation(self, other, self_mask):
""" Special technique to merge this perturbation with another
perturbation of the same threat model.
This will return a new perturbation object that, for each parameter
will return the parameters of self for self_mask, and the
perturbation of other for NOT(self_mask)
ARGS:
other: AdversarialPerturbation Object - instance of other
adversarial perturbation that is instantiated with the
same threat model as self
            self_mask: ByteTensor [N] - indicates which parameters to include
                       from self and which to include from other
"""
# this parent class just does the shared asserts such that this is a
# valid thing
assert self.__class__ == other.__class__
assert self.threat_model == other.threat_model
assert self.num_examples == other.num_examples
assert self.perturbation_params == other.perturbation_params
assert other.initialized
@initialized
def collect_successful(self, classifier_net, normalizer):
""" Returns a list of [adversarials, originals] of the SUCCESSFUL
attacks only, according to the given classifier_net, normalizer
SUCCESSFUL here means that the adversarial is different
        ARGS:
            classifier_net : nn.Module subclass - classifier used to judge
                             which attacks changed the predicted label
            normalizer : DifferentiableNormalize object - normalizes inputs
                         before classification
"""
assert self.originals is not None
adversarials = Variable(self.adversarial_tensors())
originals = Variable(self.originals)
adv_out = torch.max(classifier_net(normalizer(adversarials)), 1)[1]
out = torch.max(classifier_net(normalizer(originals)), 1)[1]
adv_idx_bytes = adv_out != out
idxs = []
for idx, el in enumerate(adv_idx_bytes):
if float(el) > 0:
idxs.append(idx)
idxs = torch.LongTensor(idxs)
if self.originals.is_cuda:
idxs = idxs.cuda()
return [torch.index_select(self.adversarial_tensors(), 0, idxs),
torch.index_select(self.originals, 0, idxs)]
@initialized
def collect_adversarially_successful(self, classifier_net, normalizer,
labels):
""" Returns an object containing the SUCCESSFUL attacked examples,
their corresponding originals, and the number of misclassified
examples
ARGS:
classifier_net : nn.Module subclass - neural net that is the
relevant classifier
normalizer : DifferentiableNormalize object - object to convert
input data to mean-zero, unit-var examples
labels : Variable (longTensor N) - correct labels for classification
of self.originals
RETURNS:
dict with structure:
{'adversarials': Variable(N'xCxHxW) - adversarial perturbation
applied
'originals': Variable(N'xCxHxW) - unperturbed examples that
were correctly classified AND
successfully attacked
'num_correctly_classified': int - number of correctly classified
unperturbed examples
}
"""
assert self.originals is not None
adversarials = Variable(self.adversarial_tensors())
originals = Variable(self.originals)
adv_out = torch.max(classifier_net(normalizer(adversarials)), 1)[1]
out = torch.max(classifier_net(normalizer(originals)), 1)[1]
# First take a subset of correctly classified originals
correct_idxs = (out == labels) # correctly classified idxs
adv_idx_bytes = (adv_out != out) # attacked examples
num_correctly_classified = int(sum(correct_idxs))
adv_idxs = adv_idx_bytes * correct_idxs
idxs = []
for idx, el in enumerate(adv_idxs):
if float(el) > 0:
idxs.append(idx)
idxs = torch.LongTensor(idxs)
if self.originals.is_cuda:
idxs = idxs.cuda()
        return {'adversarials': torch.index_select(self.adversarial_tensors(),
0, idxs),
'originals': torch.index_select(self.originals, 0, idxs),
'num_correctly_classified': num_correctly_classified}
@initialized
def display(self, scale=5, successful_only=False, classifier_net=None,
normalizer=None):
""" Displays this adversarial perturbation in a 3-row format:
top row is adversarial images, second row is original images,
bottom row is difference magnified by scale (default 5)
ARGS:
scale: int - how much to magnify differences by
            successful_only: bool - if True we only display successful attacks
                             (those whose adversarials get different classifier
                             outputs than their originals). If True,
                             classifier_net and normalizer cannot be None
RETURNS:
None, but displays images
"""
if successful_only:
assert classifier_net is not None
assert normalizer is not None
advs, origs = self.collect_successful(classifier_net, normalizer)
else:
advs = self.adversarial_tensors()
origs = self.originals
diffs = torch.clamp((advs - origs) * scale + 0.5, 0, 1)
img_utils.show_images([advs, origs, diffs])
class PerturbationParameters(dict):
""" Object that stores parameters like a dictionary.
This allows perturbation classes to be only partially instantiated and
then fed various 'originals' later.
Implementation taken from : https://stackoverflow.com/a/14620633/3837607
(and then modified with the getattribute trick to return none instead of
error for missing attributes)
"""
def __init__(self, *args, **kwargs):
super(PerturbationParameters, self).__init__(*args, **kwargs)
if kwargs.get('manual_gpu') is not None:
self.use_gpu = kwargs['manual_gpu']
else:
self.use_gpu = utils.use_gpu()
self.__dict__ = self
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
return None
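# Illustrative usage sketch (not part of the library API): because
# PerturbationParameters sets self.__dict__ = self and swallows
# AttributeError, parameters read like attributes, and missing ones quietly
# resolve to None -- which is how the classes below probe optional settings
# such as `custom_norm` or `scalar_step`.
#
#   params = PerturbationParameters(lp_style='inf', lp_bound=8.0 / 255.0)
#   params.lp_bound       # 0.0313...
#   params['lp_style']    # 'inf'
#   params.custom_norm    # None -- missing key, but no AttributeError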
class ThreatModel(object):
def __init__(self, perturbation_class, param_kwargs, *other_args):
""" Factory class to generate per_minibatch instances of Adversarial
perturbations.
ARGS:
perturbation_class : class - subclass of Adversarial Perturbations
param_kwargs : dict - dict containing named kwargs to instantiate
the class in perturbation class
"""
assert issubclass(perturbation_class, AdversarialPerturbation)
self.perturbation_class = perturbation_class
if isinstance(param_kwargs, dict):
param_kwargs = PerturbationParameters(**param_kwargs)
self.param_kwargs = param_kwargs
self.other_args = other_args
def __repr__(self):
return "[Threat] %s: %s" % (str(self.perturbation_class.__name__),
self.param_kwargs)
def __call__(self, *args):
if args == ():
return self.perturbation_obj()
else:
perturbation_obj = self.perturbation_obj()
perturbation_obj.setup(*args)
return perturbation_obj
def perturbation_obj(self):
return self.perturbation_class(self, self.param_kwargs, *self.other_args)
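# Usage sketch (illustrative; the batch shape is an assumption): a ThreatModel
# is a factory that stamps out a fresh perturbation object per minibatch.
#
#   threat = ThreatModel(DeltaAddition, {'lp_style': 'inf',
#                                        'lp_bound': 8.0 / 255.0})
#   examples = torch.rand(4, 3, 32, 32)
#   perturbation = threat(examples)        # instantiates + setup(examples)
#   adversarials = perturbation(examples)  # forward pass: x + delta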
##############################################################################
# #
# ADDITION PARAMETERS #
# #
##############################################################################
class DeltaAddition(AdversarialPerturbation):
def __init__(self, threat_model, perturbation_params, *other_args):
""" Maintains a delta that gets addded to the originals to generate
adversarial images. This is the type of adversarial perturbation
that the literature extensivey studies
ARGS:
threat_model : ThreatModel object that is used to initialize self
perturbation_params: PerturbationParameters object.
{ lp_style : None, int or 'inf' - if not None is the type of
Lp_bound that we apply to this adversarial example
lp_bound : None or float - cannot be None if lp_style is
not None, but if not None should be the lp bound
we allow for adversarial perturbations
custom_norm : None or fxn:(NxCxHxW) -> Scalar Variable. This is
not implemented for now
}
"""
super(DeltaAddition, self).__init__(threat_model, perturbation_params)
self.lp_style = perturbation_params.lp_style
self.lp_bound = perturbation_params.lp_bound
if perturbation_params.custom_norm is not None:
raise NotImplementedError("Only LP norms allowed for now")
self.scalar_step = perturbation_params.scalar_step or 1.0
def _merge_setup(self, num_examples, delta_data):
""" DANGEROUS TO BE CALLED OUTSIDE OF THIS FILE!!!"""
self.num_examples = num_examples
self.delta = nn.Parameter(delta_data)
self.initialized = True
def setup(self, x):
super(DeltaAddition, self).setup(x)
self.delta = nn.Parameter(torch.zeros_like(x))
self.initialized = True
@initialized
def perturbation_norm(self, x=None, lp_style=None):
lp_style = lp_style or self.lp_style
assert isinstance(lp_style, int) or lp_style == 'inf'
return utils.batchwise_norm(self.delta, lp=lp_style)
@initialized
def constrain_params(self):
new_delta = utils.batchwise_lp_project(self.delta.data, self.lp_style,
self.lp_bound)
delta_diff = new_delta - self.delta.data
self.delta.data.add_(delta_diff)
@initialized
def make_valid_image(self, x):
new_delta = self.delta.data
change_in_delta = utils.clamp_0_1_delta(new_delta, x)
self.delta.data.add_(change_in_delta)
@initialized
def update_params(self, step_fxn):
assert self.delta.grad.data is not None
self.add_to_params(step_fxn(self.delta.grad.data) * self.scalar_step)
@initialized
def add_to_params(self, grad_data):
""" sets params to be self.params + grad_data """
self.delta.data.add_(grad_data)
@initialized
def random_init(self):
self.delta = nn.Parameter(utils.random_from_lp_ball(self.delta.data,
self.lp_style,
self.lp_bound))
@initialized
def merge_perturbation(self, other, self_mask):
super(DeltaAddition, self).merge_perturbation(other, self_mask)
# initialize a new perturbation
new_perturbation = DeltaAddition(self.threat_model,
self.perturbation_params)
# make the new parameters
new_delta = utils.fold_mask(self.delta.data, other.delta.data,
self_mask)
# do the merge setup and return the object
new_perturbation._merge_setup(self.num_examples,
new_delta)
return new_perturbation
def forward(self, x):
if not self.initialized:
self.setup(x)
        # make_valid_image keeps x + delta inside the [0, 1] image box;
        # constrain_params projects delta back onto the lp ball
        self.make_valid_image(x)
        self.constrain_params()
return x + self.delta
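# Illustrative sketch of one manual signed-gradient step on a DeltaAddition
# (the attack classes in adversarial_attacks.py automate this pattern);
# `attack_loss` here is a hypothetical scalar loss on the perturbed images.
#
#   loss = attack_loss(perturbation(examples), labels)
#   loss.backward()
#   perturbation.update_params(lambda g: (1.0 / 255.0) * torch.sign(g))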
##############################################################################
# #
# SPATIAL PARAMETERS #
# #
##############################################################################
class ParameterizedXformAdv(AdversarialPerturbation):
def __init__(self, threat_model, perturbation_params, *other_args):
super(ParameterizedXformAdv, self).__init__(threat_model,
perturbation_params)
assert issubclass(perturbation_params.xform_class,
st.ParameterizedTransformation)
self.lp_style = perturbation_params.lp_style
self.lp_bound = perturbation_params.lp_bound
self.use_stadv = perturbation_params.use_stadv
self.scalar_step = perturbation_params.scalar_step or 1.0
def _merge_setup(self, num_examples, new_xform):
""" DANGEROUS TO BE CALLED OUTSIDE OF THIS FILE!!!"""
self.num_examples = num_examples
self.xform = new_xform
self.initialized = True
def setup(self, originals):
super(ParameterizedXformAdv, self).setup(originals)
self.xform = self.perturbation_params.xform_class(shape=originals.shape,
manual_gpu=self.use_gpu)
self.initialized = True
@initialized
def perturbation_norm(self, x=None, lp_style=None):
lp_style = lp_style or self.lp_style
if self.use_stadv is not None:
assert isinstance(self.xform, st.FullSpatial)
return self.xform.stAdv_norm()
else:
return self.xform.norm(lp=lp_style)
@initialized
def constrain_params(self, x=None):
# Do lp projections
if isinstance(self.lp_style, int) or self.lp_style == 'inf':
self.xform.project_params(self.lp_style, self.lp_bound)
@initialized
def update_params(self, step_fxn):
param_list = list(self.xform.parameters())
assert len(param_list) == 1
params = param_list[0]
assert params.grad.data is not None
self.add_to_params(step_fxn(params.grad.data) * self.scalar_step)
@initialized
def add_to_params(self, grad_data):
""" Assumes only one parameters object in the Spatial Transform """
param_list = list(self.xform.parameters())
assert len(param_list) == 1
params = param_list[0]
params.data.add_(grad_data)
@initialized
def random_init(self):
param_list = list(self.xform.parameters())
assert len(param_list) == 1
param = param_list[0]
random_perturb = utils.random_from_lp_ball(param.data,
self.lp_style,
self.lp_bound)
param.data.add_(self.xform.identity_params(self.xform.img_shape) +
random_perturb - self.xform.xform_params.data)
@initialized
def merge_perturbation(self, other, self_mask):
super(ParameterizedXformAdv, self).merge_perturbation(other, self_mask)
new_perturbation = ParameterizedXformAdv(self.threat_model,
self.perturbation_params)
new_xform = self.xform.merge_xform(other.xform, self_mask)
new_perturbation._merge_setup(self.num_examples, new_xform)
return new_perturbation
def forward(self, x):
if not self.initialized:
self.setup(x)
self.constrain_params()
return self.xform.forward(x)
##############################################################################
# #
# SPATIAL + ADDITION PARAMETERS #
# #
##############################################################################
class SequentialPerturbation(AdversarialPerturbation):
""" Takes a list of perturbations and composes them. A norm needs to
be specified here to describe the perturbations.
"""
def __init__(self, threat_model, perturbation_sequence,
global_parameters=PerturbationParameters(pad=10),
preinit_pipeline=None):
""" Initializes a sequence of adversarial perturbation layers
ARGS:
            perturbation_sequence : ThreatModel[] -
                list of ThreatModel objects
            global_parameters : PerturbationParameters - global parameters to
                                use. These contain things like how to norm this
                                sequence, how to constrain this sequence, etc
            preinit_pipeline : list or None - if not None, a list of already-
                               instantiated perturbation layers to use directly
                               instead of building fresh ones from
                               perturbation_sequence
        """
super(SequentialPerturbation, self).__init__(threat_model,
(perturbation_sequence,
global_parameters))
if preinit_pipeline is not None:
layers = preinit_pipeline
else:
layers = []
for threat_model in perturbation_sequence:
assert isinstance(threat_model, ThreatModel)
layers.append(threat_model())
self.pipeline = []
for layer_no, layer in enumerate(layers):
self.pipeline.append(layer)
self.add_module('layer_%02d' % layer_no, layer)
# norm: pipeline -> Scalar Variable
self.norm = global_parameters.norm
self.norm_weights = global_parameters.norm_weights
# padding with black is useful to not throw information away during
# sequential steps
self.pad = nn.ConstantPad2d(global_parameters.pad or 0, 0)
self.unpad = nn.ConstantPad2d(-1 * (global_parameters.pad or 0), 0)
def _merge_setup(self, num_examples):
self.num_examples = num_examples
self.initialized = True
def setup(self, x):
super(SequentialPerturbation, self).setup(x)
x = self.pad(x)
for layer in self.pipeline:
layer.setup(x)
self.initialized = True
@initialized
def perturbation_norm(self, x=None, lp_style=None):
# Need to define a nice way to describe the norm here. This can be
# an empirical norm between input/output
# For now, let's just say it's the sum of the norms of each constituent
if self.norm is not None:
return self.norm(self.pipeline, x=x, lp_style=lp_style)
else:
norm_weights = self.norm_weights or\
[1.0 for _ in range(len(self.pipeline))]
out = None
for i, layer in enumerate(self.pipeline):
weight = norm_weights[i]
layer_norm = layer.perturbation_norm(x=x, lp_style=lp_style)
if out is None:
out = layer_norm * weight
else:
out = out + layer_norm * weight
return out
@initialized
def make_valid_image(self, x):
x = self.pad(x)
for layer in self.pipeline:
layer.make_valid_image(x)
x = layer(x)
@initialized
def constrain_params(self):
# Need to do some sort of crazy projection operator for general things
# For now, let's just constrain each thing in sequence
for layer in self.pipeline:
layer.constrain_params()
@initialized
def update_params(self, step_fxn):
for layer in self.pipeline:
layer.update_params(step_fxn)
@initialized
def merge_perturbation(self, other, self_mask):
super(SequentialPerturbation, self).merge_perturbation(other, self_mask)
new_pipeline = []
for self_layer, other_layer in zip(self.pipeline, other.pipeline):
new_pipeline.append(self_layer.merge_perturbation(other_layer,
self_mask))
layer_params, global_params = self.perturbation_params
new_perturbation = SequentialPerturbation(self.threat_model,
layer_params,
global_parameters=global_params,
preinit_pipeline=new_pipeline)
new_perturbation._merge_setup(self.num_examples)
return new_perturbation
def forward(self, x, layer_slice=None):
""" Layer slice here is either an int or a tuple
If int, we run forward only the first layer_slice layers
If tuple, we start at the
"""
# Blocks to handle only particular layer slices (debugging)
if layer_slice is None:
pipeline_iter = iter(self.pipeline)
elif isinstance(layer_slice, int):
pipeline_iter = iter(self.pipeline[:layer_slice])
elif isinstance(layer_slice, tuple):
pipeline_iter = iter(self.pipeline[layer_slice[0]: layer_slice[1]])
# End block to handle particular layer slices
# Handle padding
if not self.initialized:
self.setup(x)
self.constrain_params()
self.make_valid_image(x)
x = self.pad(x)
for layer in pipeline_iter:
x = layer(x)
return self.unpad(x)
@initialized
def random_init(self):
for layer in self.pipeline:
layer.random_init()
@initialized
def attach_originals(self, originals):
self.originals = originals
for layer in self.pipeline:
layer.attach_originals(originals)
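# Usage sketch (illustrative; bounds and shapes are assumptions): composing a
# spatial flow with an additive delta. Note that ThreatModel passes a list
# straight through as the perturbation_sequence argument.
#
#   delta_threat = ThreatModel(DeltaAddition,
#                              {'lp_style': 'inf', 'lp_bound': 8.0 / 255.0})
#   flow_threat = ThreatModel(ParameterizedXformAdv,
#                             {'xform_class': st.FullSpatial,
#                              'lp_style': 'inf', 'lp_bound': 0.05})
#   seq_threat = ThreatModel(SequentialPerturbation,
#                            [flow_threat, delta_threat])
#   perturbation = seq_threat(torch.rand(4, 3, 32, 32))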
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/loss_functions.py
#
# The license for the original version of this file can be
# found in the `recoloradv` directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
import torch.nn as nn
import torch
from numbers import Number
from .utils import pytorch_utils as utils
from .utils import image_utils as img_utils
from . import spatial_transformers as st
from torch.autograd import Variable
from functools import partial
from . import adversarial_perturbations as ap
""" Loss function building blocks """
##############################################################################
# #
# LOSS FUNCTION WRAPPER #
# #
##############################################################################
class RegularizedLoss(object):
""" Wrapper for multiple PartialLoss objects where we combine with
regularization constants """
def __init__(self, losses, scalars, negate=False):
"""
ARGS:
losses : dict - dictionary of partialLoss objects, each is keyed
with a nice identifying name
scalars : dict - dictionary of scalars, each is keyed with the
same identifying name as is in self.losses
negate : bool - if True, we negate the whole thing at the end
"""
assert sorted(losses.keys()) == sorted(scalars.keys())
self.losses = losses
self.scalars = scalars
self.negate = negate
def forward(self, examples, labels, *args, **kwargs):
output = None
output_per_example = kwargs.get('output_per_example', False)
for k in self.losses:
loss = self.losses[k]
scalar = self.scalars[k]
loss_val = loss.forward(examples, labels, *args, **kwargs)
            # assert scalar is either a plain number, a one-element tensor,
            # or a tensor shaped like the per-example loss
            assert (isinstance(scalar, float) or
                    scalar.numel() == 1 or
                    scalar.shape == loss_val.shape)
addendum = loss_val * scalar
if addendum.numel() > 1:
if not output_per_example:
addendum = torch.sum(addendum)
if output is None:
output = addendum
else:
output = output + addendum
if self.negate:
return output * -1
else:
return output
def setup_attack_batch(self, fix_im):
""" Setup before calling loss on a new minibatch. Ensures the correct
fix_im for reference regularizers and that all grads are zeroed
ARGS:
fix_im: Variable (NxCxHxW) - Ground images for this minibatch
SHOULD BE IN [0.0, 1.0] RANGE
"""
for loss in self.losses.values():
if isinstance(loss, ReferenceRegularizer):
loss.setup_attack_batch(fix_im)
else:
loss.zero_grad()
def cleanup_attack_batch(self):
""" Does some cleanup stuff after we finish on a minibatch:
- clears the fixed images for ReferenceRegularizers
- zeros grads
- clears example-based scalars (i.e. scalars that depend on which
example we're using)
"""
for loss in self.losses.values():
if isinstance(loss, ReferenceRegularizer):
loss.cleanup_attack_batch()
else:
loss.zero_grad()
for key, scalar in self.scalars.items():
if not isinstance(scalar, Number):
self.scalars[key] = None
def zero_grad(self):
for loss in self.losses.values():
loss.zero_grad() # probably zeros the same net more than once...
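# Usage sketch (illustrative; `classifier`, `normalizer`, `perturbation`, and
# the example tensors are assumed to be supplied by the caller): combining a
# cross-entropy term with a small perturbation-norm penalty into one
# attack objective.
#
#   losses = {'xent': PartialXentropy(classifier, normalizer),
#             'pert_norm': PerturbationNormLoss(lp=2)}
#   scalars = {'xent': 1.0, 'pert_norm': -0.05}
#   attack_objective = RegularizedLoss(losses, scalars)
#   value = attack_objective.forward(perturbed_examples, labels,
#                                    perturbation=perturbation)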
class PartialLoss(object):
""" Partially applied loss object. Has forward and zero_grad methods """
def __init__(self):
self.nets = []
def zero_grad(self):
for net in self.nets:
net.zero_grad()
##############################################################################
# #
# LOSS FUNCTIONS #
# #
##############################################################################
############################################################################
# NAIVE CORRECT INDICATOR LOSS #
############################################################################
class IncorrectIndicator(PartialLoss):
def __init__(self, classifier, normalizer=None):
super(IncorrectIndicator, self).__init__()
self.classifier = classifier
self.normalizer = normalizer
def forward(self, examples, labels, *args, **kwargs):
""" Returns either (the number | a boolean vector) of examples that
don't match the labels when run through the
classifier(normalizer(.)) composition.
ARGS:
examples: Variable (NxCxHxW) - should be same shape as
ctx.fix_im, is the examples we define loss for.
SHOULD BE IN [0.0, 1.0] RANGE
labels: Variable (longTensor of length N) - true classification
output for fix_im/examples
KWARGS:
            return_type: String - either 'int' or 'vector'. If 'int', we return
                         the number of incorrectly classified examples;
                         if 'vector' we return a boolean length-N tensor
                         with 1's at the indices of misclassified examples
RETURNS:
scalar loss variable or boolean vector, depending on kwargs
"""
return_type = kwargs.get('return_type', 'int')
assert return_type in ['int', 'vector']
        if self.normalizer is not None:
            examples = self.normalizer.forward(examples)
        class_out = self.classifier.forward(examples)
_, outputs = torch.max(class_out, 1)
incorrect_indicator = outputs != labels
if return_type == 'int':
return torch.sum(incorrect_indicator)
else:
return incorrect_indicator
##############################################################################
# Standard XEntropy Loss #
##############################################################################
class PartialXentropy(PartialLoss):
def __init__(self, classifier, normalizer=None):
super(PartialXentropy, self).__init__()
self.classifier = classifier
self.normalizer = normalizer
self.nets.append(self.classifier)
def forward(self, examples, labels, *args, **kwargs):
""" Returns XEntropy loss
ARGS:
examples: Variable (NxCxHxW) - should be same shape as
ctx.fix_im, is the examples we define loss for.
SHOULD BE IN [0.0, 1.0] RANGE
labels: Variable (longTensor of length N) - true classification
output for fix_im/examples
RETURNS:
scalar loss variable
"""
if self.normalizer is not None:
normed_examples = self.normalizer.forward(examples)
else:
normed_examples = examples
xentropy_init_kwargs = {}
        if kwargs.get('output_per_example'):
xentropy_init_kwargs['reduction'] = 'none'
criterion = nn.CrossEntropyLoss(**xentropy_init_kwargs)
return criterion(self.classifier.forward(normed_examples), labels)
##############################################################################
# Carlini Wagner loss functions #
##############################################################################
class CWLossF6(PartialLoss):
def __init__(self, classifier, normalizer=None, kappa=0.0):
super(CWLossF6, self).__init__()
self.classifier = classifier
self.normalizer = normalizer
self.nets.append(self.classifier)
self.kappa = kappa
def forward(self, examples, labels, *args, **kwargs):
classifier_in = self.normalizer.forward(examples)
classifier_out = self.classifier.forward(classifier_in)
# get target logits
target_logits = torch.gather(classifier_out, 1, labels.view(-1, 1))
# get largest non-target logits
max_2_logits, argmax_2_logits = torch.topk(classifier_out, 2, dim=1)
top_max, second_max = max_2_logits.chunk(2, dim=1)
top_argmax, _ = argmax_2_logits.chunk(2, dim=1)
targets_eq_max = top_argmax.squeeze().eq(labels).float().view(-1, 1)
targets_ne_max = top_argmax.squeeze().ne(labels).float().view(-1, 1)
max_other = targets_eq_max * second_max + targets_ne_max * top_max
if kwargs.get('targeted', False):
# in targeted case, want to make target most likely
f6 = torch.clamp(max_other - target_logits, min=-1 * self.kappa)
else:
# in NONtargeted case, want to make NONtarget most likely
f6 = torch.clamp(target_logits - max_other, min=-1 * self.kappa)
return f6.squeeze()
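# Worked example (illustrative) of the untargeted f6 margin above: for logits
# [3.0, 1.0] and true label 0, target_logits = 3.0 and max_other = 1.0, so
# f6 = clamp(3.0 - 1.0, min=-kappa) = 2.0. Driving f6 down pushes the true
# logit below the runner-up, and f6 saturates at -kappa once the example is
# misclassified with margin kappa.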
##############################################################################
# #
# REFERENCE REGULARIZERS #
# #
##############################################################################
""" Regularization terms that refer back to a set of 'fixed images', or the
original images.
example: L2 regularization which computes L2dist between a perturbed image
and the FIXED ORIGINAL IMAGE
NOTE: it's important that these return Variables that are scalars
(output.numel() == 1), otherwise there's a memory leak w/ CUDA.
See my discussion on this here:
https://discuss.pytorch.org/t/cuda-memory-not-being-freed/15965
"""
class ReferenceRegularizer(PartialLoss):
def __init__(self, fix_im):
super(ReferenceRegularizer, self).__init__()
self.fix_im = fix_im
def setup_attack_batch(self, fix_im):
""" Setup function to ensure fixed images are set
has been made; also zeros grads
ARGS:
fix_im: Variable (NxCxHxW) - Ground images for this minibatch
SHOULD BE IN [0.0, 1.0] RANGE
"""
self.fix_im = fix_im
self.zero_grad()
def cleanup_attack_batch(self):
""" Cleanup function to clear the fixed images after an attack batch
has been made; also zeros grads
"""
old_fix_im = self.fix_im
self.fix_im = None
del old_fix_im
self.zero_grad()
#############################################################################
# SOFT L_INF REGULARIZATION #
#############################################################################
class SoftLInfRegularization(ReferenceRegularizer):
'''
see page 10 of this paper (https://arxiv.org/pdf/1608.04644.pdf)
for discussion on why we want SOFT l inf
'''
def __init__(self, fix_im, **kwargs):
super(SoftLInfRegularization, self).__init__(fix_im)
def forward(self, examples, *args, **kwargs):
        # tau is the soft l_inf threshold: only per-pixel deviations larger
        # than tau are penalized. It is fixed here; the commented-out block
        # below shows the original decaying-tau schedule
        tau = 8.0 / 255.0
scale_factor = 0.9
l_inf_dist = float(torch.max(torch.abs(examples - self.fix_im)))
'''
while scale_factor * tau > l_inf_dist:
tau *= scale_factor
assert tau > l_inf_dist
'''
delta_minus_taus = torch.clamp(torch.abs(examples - self.fix_im) - tau,
min=0.0)
batchwise = utils.batchwise_norm(delta_minus_taus, 'inf', dim=0)
return batchwise.squeeze()
#############################################################################
# L2 REGULARIZATION #
#############################################################################
class L2Regularization(ReferenceRegularizer):
def __init__(self, fix_im, **kwargs):
super(L2Regularization, self).__init__(fix_im)
def forward(self, examples, *args, **kwargs):
l2_dist = img_utils.nchw_l2(examples, self.fix_im,
squared=True).view(-1, 1)
return l2_dist.squeeze()
#############################################################################
# LPIPS PERCEPTUAL REGULARIZATION #
#############################################################################
class LpipsRegularization(ReferenceRegularizer):
def __init__(self, fix_im, **kwargs):
super(LpipsRegularization, self).__init__(fix_im)
manual_gpu = kwargs.get('manual_gpu', None)
if manual_gpu is not None:
self.use_gpu = manual_gpu
else:
self.use_gpu = utils.use_gpu()
        # NOTE: `dm` (the LPIPS distance-model module) is referenced here but
        # not imported in this file; it must be available in the caller's
        # environment
        self.dist_model = dm.DistModel(net='alex', manual_gpu=self.use_gpu)
    def forward(self, examples, *args, **kwargs):
        # LPIPS models conventionally expect inputs scaled to [-1, 1]; the
        # original code defined this rescaling but never applied it
        xform = lambda im: im * 2.0 - 1.0
        perceptual_loss = self.dist_model.forward_var(xform(examples),
                                                      xform(self.fix_im))
        return perceptual_loss.squeeze()
#############################################################################
# SSIM PERCEPTUAL REGULARIZATION #
#############################################################################
class SSIMRegularization(ReferenceRegularizer):
def __init__(self, fix_im, **kwargs):
super(SSIMRegularization, self).__init__(fix_im)
        # NOTE: `ssim` is referenced here but not imported in this file; it
        # must be available in the caller's environment
        if 'window_size' in kwargs:
            self.ssim_instance = ssim.SSIM(window_size=kwargs['window_size'])
        else:
            self.ssim_instance = ssim.SSIM()
manual_gpu = kwargs.get('manual_gpu', None)
if manual_gpu is not None:
self.use_gpu = manual_gpu
else:
self.use_gpu = utils.use_gpu()
def forward(self, examples, *args, **kwargs):
output = []
for ex, fix_ex in zip(examples, self.fix_im):
output.append(1.0 - self.ssim_instance(ex.unsqueeze(0),
fix_ex.unsqueeze(0)))
return torch.stack(output)
##############################################################################
# #
# SPATIAL LOSS FUNCTIONS #
# #
##############################################################################
class FullSpatialLpLoss(PartialLoss):
""" Spatial loss using lp norms on the spatial transformation parameters
This is defined as the Lp difference between the identity map and the
provided spatial transformation parameters
"""
def __init__(self, **kwargs):
super(FullSpatialLpLoss, self).__init__()
lp = kwargs.get('lp', 2)
assert lp in [1, 2, 'inf']
self.lp = lp
def forward(self, examples, *args, **kwargs):
""" Computes lp loss between identity map and spatial transformation.
There better be a kwarg with key 'spatial' which is as FullSpatial
object describing how the examples were generated from the originals
"""
st_obj = kwargs['spatial']
assert isinstance(st_obj, st.FullSpatial)
# First create the identity map and make same type as examples
identity_map = Variable(st_obj.identity_params(examples.shape))
        if examples.is_cuda:
            identity_map = identity_map.cuda()  # .cuda() is not in-place
# Then take diffs and take lp norms
diffs = st_obj.grid_params - identity_map
lp_norm = utils.batchwise_norm(diffs, self.lp, dim=0)
return lp_norm # return Nx1 variable, will sum in parent class
class PerturbationNormLoss(PartialLoss):
def __init__(self, **kwargs):
super(PerturbationNormLoss, self).__init__()
lp = kwargs.get('lp', 2)
assert lp in [1, 2, 'inf']
self.lp = lp
def forward(self, examples, *args, **kwargs):
""" Computes perturbation norm and multiplies by scale
There better be a kwarg with key 'perturbation' which is a perturbation
object with a 'perturbation_norm' method that takes 'lp_style' as a
kwarg
"""
perturbation = kwargs['perturbation']
assert isinstance(perturbation, ap.AdversarialPerturbation)
return perturbation.perturbation_norm(lp_style=self.lp)
##############################################################################
# #
# Combined Transformer Loss #
# #
##############################################################################
class CombinedTransformerLoss(ReferenceRegularizer):
""" General class for distance functions and loss functions of the form
min_T ||X - T(Y)|| + c * || T ||
where X is the original image, and Y is the 'adversarial' input image.
"""
def __init__(self, fix_im, transform_class=None,
regularization_constant=1.0,
transformation_loss=partial(utils.summed_lp_norm, lp=2),
transform_norm_kwargs=None):
""" Takes in a reference fix im and a class of transformations we need
to search over to compute forward.
"""
super(CombinedTransformerLoss, self).__init__(fix_im)
self.transform_class = transform_class
self.regularization_constant = regularization_constant
self.transformation_loss = transformation_loss
self.transform_norm_kwargs = transform_norm_kwargs or {}
self.transformer = None
def cleanup_attack_batch(self):
super(CombinedTransformerLoss, self).cleanup_attack_batch()
self.transformer = None
def _inner_loss(self, examples):
""" Computes the combined loss for a particular transformation """
trans_examples = self.transformer.forward(examples)
trans_loss = self.transformation_loss(self.fix_im - trans_examples)
trans_norm = self.transformer.norm(**self.transform_norm_kwargs)
return trans_loss + trans_norm * self.regularization_constant
def forward(self, examples, *args, **kwargs):
""" Computes the distance between examples and args
ARGS:
examples : NxCxHxW Variable - 'adversarially' perturbed image from
the self.fix_im
KWARGS:
optimization stuff here
"""
######################################################################
# Setup transformer + optimizer #
######################################################################
self.transformer = self.transform_class(shape=examples.shape)
optim_kwargs = kwargs.get('xform_loss_optim_kwargs', {})
optim_type = kwargs.get('xform_loss_optim_type', torch.optim.Adam)
num_iter = kwargs.get('xform_loss_num_iter', 20)
optimizer = optim_type(self.transformer.parameters(), **optim_kwargs)
#####################################################################
# Iterate and optimize the transformer #
#####################################################################
for iter_no in range(num_iter):
optimizer.zero_grad()
loss = self._inner_loss(examples)
loss.backward()
optimizer.step()
return self._inner_loss(examples)
class RelaxedTransformerLoss(ReferenceRegularizer):
""" Relaxed version of transformer loss: assumes that the adversarial
examples are of the form Y=S(X) + delta for some S in the
transformation class and some small delta perturbation outside the
perturbation.
In this case, we just compute ||delta|| + c||S||
This saves us from having to do the inner minmization step
"""
def __init__(self, fix_im,
regularization_constant=1.0,
transformation_loss=partial(utils.summed_lp_norm, lp=2),
transform_norm_kwargs=None):
""" Takes in a reference fix im and a class of transformations we need
to search over to compute forward.
"""
super(RelaxedTransformerLoss, self).__init__(fix_im)
self.regularization_constant = regularization_constant
self.transformation_loss = transformation_loss
self.transform_norm_kwargs = transform_norm_kwargs or {}
def forward(self, examples, *args, **kwargs):
""" Computes the distance between examples and args
ARGS:
examples : NxCxHxW Variable - 'adversarially' perturbed image from
the self.fix_im
KWARGS:
optimization stuff here
"""
# Collect transformer norm
transformer = kwargs['transformer']
assert isinstance(transformer, st.ParameterizedTransformation)
transformer_norm = self.regularization_constant * \
transformer.norm(**self.transform_norm_kwargs)
# Collect transformation loss
        delta = transformer.forward(self.fix_im) - examples
transformation_loss = self.transformation_loss(delta)
return transformation_loss + transformer_norm
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/config.py
#
# The license for the original version of this file can be
# found in the `recoloradv` directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
import os
config_dir = os.path.abspath(os.path.dirname(__file__))
def path_resolver(path):
    if path.startswith('~/'):
        return os.path.expanduser(path)
    if path.startswith('./'):
        return os.path.join(*[config_dir] + path.split('/')[1:])
    # fall through unchanged for absolute or other paths (previously this
    # implicitly returned None)
    return path
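# Behavior sketch (illustrative): '~/...' expands to the user's home
# directory, './...' resolves relative to this config directory, and any
# other path now falls through unchanged.
#
#   path_resolver('~/datasets')         # -> e.g. /home/<user>/datasets
#   path_resolver('./output_images/')   # -> <config_dir>/output_images/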
DEFAULT_DATASETS_DIR = path_resolver('~/datasets')
MODEL_PATH = path_resolver('./pretrained_models/')
OUTPUT_IMAGE_PATH = path_resolver('./output_images/')
DEFAULT_BATCH_SIZE = 128
DEFAULT_WORKERS = 4
CIFAR10_MEANS = [0.485, 0.456, 0.406]
CIFAR10_STDS = [0.229, 0.224, 0.225]
WIDE_CIFAR10_MEANS = [0.4914, 0.4822, 0.4465]
WIDE_CIFAR10_STDS = [0.2023, 0.1994, 0.2010]
IMAGENET_MEANS = [0.485, 0.456, 0.406]
IMAGENET_STDS = [0.229, 0.224, 0.225]
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/adversarial_attacks.py
#
# The license for the original version of this file can be
# found in the `recoloradv` directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
""" Holds the various attacks we can do """
from __future__ import print_function
from six import string_types
import torch
from torch.autograd import Variable
from torch import optim
from .utils import pytorch_utils as utils
from . import loss_functions as lf
MAXFLOAT = 1e20
###############################################################################
# #
# PARENT CLASS FOR ADVERSARIAL ATTACKS #
# #
###############################################################################
class AdversarialAttack(object):
""" Wrapper for adversarial attacks. Is helpful for when subsidiary methods
are needed.
"""
def __init__(self, classifier_net, normalizer, threat_model,
manual_gpu=None):
""" Initializes things to hold to perform a single batch of
adversarial attacks
ARGS:
classifier_net : nn.Module subclass - neural net that is the
classifier we're attacking
normalizer : DifferentiableNormalize object - object to convert
input data to mean-zero, unit-var examples
threat_model : ThreatModel object - object that allows us to create
per-minibatch adversarial examples
manual_gpu : None or boolean - if not None, we override the
environment variable 'MISTER_ED_GPU' for how we use
the GPU in this object
"""
self.classifier_net = classifier_net
self.normalizer = normalizer or utils.IdentityNormalize()
if manual_gpu is not None:
self.use_gpu = manual_gpu
else:
self.use_gpu = utils.use_gpu()
self.validator = lambda *args: None
self.threat_model = threat_model
@property
def _dtype(self):
return torch.cuda.FloatTensor if self.use_gpu else torch.FloatTensor
def setup(self):
self.classifier_net.eval()
self.normalizer.differentiable_call()
def eval(self, ground_examples, adversarials, labels, topk=1):
""" Evaluates how good the adversarial examples are
ARGS:
            ground_examples: Variable (NxCxHxW) - examples before we did
adversarial perturbation. Vals in [0, 1] range
adversarials: Variable (NxCxHxW) - examples after we did
adversarial perturbation. Should be same shape and
in same order as ground_truth
labels: Variable (longTensor N) - correct labels of classification
output
RETURNS:
tuple of (% of correctly classified original examples,
% of correctly classified adversarial examples)
"""
normed_ground = self.normalizer.forward(ground_examples)
ground_output = self.classifier_net.forward(normed_ground)
normed_advs = self.normalizer.forward(adversarials)
adv_output = self.classifier_net.forward(normed_advs)
start_prec = utils.accuracy(ground_output.data, labels.data,
topk=(topk,))
adv_prec = utils.accuracy(adv_output.data, labels.data,
topk=(topk,))
return float(start_prec[0]), float(adv_prec[0])
def eval_attack_only(self, adversarials, labels, topk=1):
""" Outputs the accuracy of the adv_inputs only
ARGS:
            adversarials: Variable NxCxHxW - examples after we did adversarial
perturbation
labels: Variable (longtensor N) - correct labels of classification
output
topk: int - criterion for 'correct' classification
RETURNS:
(int) number of correctly classified examples
"""
normed_advs = self.normalizer.forward(adversarials)
adv_output = self.classifier_net.forward(normed_advs)
return utils.accuracy_int(adv_output, labels, topk=topk)
def print_eval_str(self, ground_examples, adversarials, labels, topk=1):
""" Prints how good this adversarial attack is
(explicitly prints out %CorrectlyClassified(ground_examples)
vs %CorrectlyClassified(adversarials)
ARGS:
            ground_examples: Variable (NxCxHxW) - examples before we did
adversarial perturbation. Vals in [0, 1] range
adversarials: Variable (NxCxHxW) - examples after we did
adversarial perturbation. Should be same shape and
in same order as ground_truth
labels: Variable (longTensor N) - correct labels of classification
output
RETURNS:
None, prints some stuff though
"""
og, adv = self.eval(ground_examples, adversarials, labels, topk=topk)
print("Went from %s correct to %s correct" % (og, adv))
def validation_loop(self, examples, labels, iter_no=None):
""" Prints out validation values interim for use in iterative techniques
ARGS:
            examples: Variable (NxCxHxW) - [0.0, 1.0] images to be
                      classified and compared against labels
            labels: Variable (longTensor N) - correct labels for indices of
                    examples
iter_no: String - an extra thing for prettier prints
RETURNS:
None
"""
normed_input = self.normalizer.forward(examples)
new_output = self.classifier_net.forward(normed_input)
new_prec = utils.accuracy(new_output.data, labels.data, topk=(1,))
print_str = ""
if isinstance(iter_no, int):
print_str += "(iteration %02d): " % iter_no
elif isinstance(iter_no, string_types):
print_str += "(%s): " % iter_no
else:
pass
print_str += " %s correct" % float(new_prec[0])
print(print_str)
##############################################################################
# #
# Fast Gradient Sign Method (FGSM) #
# #
##############################################################################
class FGSM(AdversarialAttack):
def __init__(self, classifier_net, normalizer, threat_model, loss_fxn,
manual_gpu=None):
super(FGSM, self).__init__(classifier_net, normalizer, threat_model,
manual_gpu=manual_gpu)
self.loss_fxn = loss_fxn
def attack(self, examples, labels, step_size=0.05, verbose=True):
""" Builds FGSM examples for the given examples with l_inf bound
ARGS:
            examples: NxCxHxW tensor for N examples. NOT NORMALIZED (i.e. all
                      vals are between 0.0 and 1.0)
            labels: single-dimension tensor with labels of examples (in same
                    order)
            step_size: float - how much we nudge each parameter along the
                       signs of its gradient
            verbose: boolean - if True, prints a validation result after the
                     FGSM step
RETURNS:
AdversarialPerturbation object with correct parameters.
Calling perturbation() gets Variable of output and
calling perturbation().data gets tensor of output
"""
self.classifier_net.eval() # ALWAYS EVAL FOR BUILDING ADV EXAMPLES
perturbation = self.threat_model(examples)
var_examples = Variable(examples, requires_grad=True)
var_labels = Variable(labels, requires_grad=False)
######################################################################
# Build adversarial examples #
######################################################################
# Fix the 'reference' images for the loss function
self.loss_fxn.setup_attack_batch(var_examples)
# take gradients
loss = self.loss_fxn.forward(perturbation(var_examples), var_labels,
perturbation=perturbation)
torch.autograd.backward(loss)
# add adversarial noise to each parameter
update_fxn = lambda grad_data: step_size * torch.sign(grad_data)
perturbation.update_params(update_fxn)
if verbose:
self.validation_loop(perturbation(var_examples), var_labels,
iter_no='Post FGSM')
# output tensor with the data
self.loss_fxn.cleanup_attack_batch()
perturbation.attach_originals(examples)
return perturbation
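# Usage sketch (illustrative; assumes `classifier`/`normalizer`/`examples`/
# `labels` exist and that adversarial_perturbations is importable as `ap`):
#
#   threat = ap.ThreatModel(ap.DeltaAddition,
#                           {'lp_style': 'inf', 'lp_bound': 8.0 / 255.0})
#   loss_fxn = lf.RegularizedLoss(
#       {'xent': lf.PartialXentropy(classifier, normalizer)}, {'xent': 1.0})
#   attack = FGSM(classifier, normalizer, threat, loss_fxn)
#   perturbation = attack.attack(examples, labels, step_size=8.0 / 255.0)
#   adversarials = perturbation.adversarial_tensors()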
##############################################################################
# #
# PGD/FGSM^k/BIM #
# #
##############################################################################
# This goes by a lot of different names in the literature
# The key idea here is that we take many small steps of FGSM
# I'll call it PGD though
class PGD(AdversarialAttack):
def __init__(self, classifier_net, normalizer, threat_model, loss_fxn,
manual_gpu=None):
super(PGD, self).__init__(classifier_net, normalizer, threat_model,
manual_gpu=manual_gpu)
self.loss_fxn = loss_fxn # WE MAXIMIZE THIS!!!
def attack(self, examples, labels, step_size=1.0 / 255.0,
num_iterations=20, random_init=False, signed=True,
optimizer=None, optimizer_kwargs=None,
loss_convergence=0.999, verbose=True,
keep_best=True, eot_iter=1):
""" Builds PGD examples for the given examples with l_inf bound and
given step size. Is almost identical to the BIM attack, except
we take steps that are proportional to gradient value instead of
just their sign.
ARGS:
examples: NxCxHxW tensor - for N examples, is NOT NORMALIZED
(i.e., all values are in between 0.0 and 1.0)
labels: N longTensor - single dimension tensor with labels of
examples (in same order as examples)
            (the perturbation bound itself is specified by the threat model
             rather than passed here)
step_size : float - how much of a step we take each iteration
num_iterations: int or pair of ints - how many iterations we take.
If pair of ints, is of form (lo, hi), where we run
at least 'lo' iterations, at most 'hi' iterations
and we quit early if loss has stabilized.
random_init : bool - if True, we randomly pick a point in the
l-inf epsilon ball around each example
signed : bool - if True, each step is
adversarial = adversarial + sign(grad)
[this is the form that madry et al use]
if False, each step is
adversarial = adversarial + grad
            keep_best : bool - if True, we keep track of the best adversarial
                        perturbations per example (in terms of maximal
                        loss) in the minibatch. The output is the best of
                        each of these then
            eot_iter : int - number of gradient evaluations averaged per
                       iteration (Expectation Over Transformation); values > 1
                       help against randomized defenses
RETURNS:
AdversarialPerturbation object with correct parameters.
Calling perturbation() gets Variable of output and
calling perturbation().data gets tensor of output
"""
######################################################################
# Setups and assertions #
######################################################################
self.classifier_net.eval()
if not verbose:
self.validator = lambda ex, label, iter_no: None
else:
self.validator = self.validation_loop
perturbation = self.threat_model(examples)
num_examples = examples.shape[0]
var_examples = Variable(examples, requires_grad=True)
var_labels = Variable(labels, requires_grad=False)
if isinstance(num_iterations, int):
min_iterations = num_iterations
max_iterations = num_iterations
elif isinstance(num_iterations, tuple):
min_iterations, max_iterations = num_iterations
best_perturbation = None
if keep_best:
best_loss_per_example = {i: None for i in range(num_examples)}
prev_loss = None
######################################################################
# Loop through iterations #
######################################################################
self.loss_fxn.setup_attack_batch(var_examples)
self.validator(var_examples, var_labels, iter_no="START")
# random initialization if necessary
if random_init:
perturbation.random_init()
self.validator(perturbation(var_examples), var_labels,
iter_no="RANDOM")
# Build optimizer techniques for both signed and unsigned methods
optimizer = optimizer or optim.Adam
if optimizer_kwargs is None:
optimizer_kwargs = {'lr': 0.0001}
optimizer = optimizer(perturbation.parameters(), **optimizer_kwargs)
update_fxn = lambda grad_data: -1 * step_size * torch.sign(grad_data)
param_list = list(perturbation.parameters())
assert len(param_list) == 1, len(param_list)
param = param_list[0]
        if verbose:
            print(f'inside PGD attack, eot_iter: {eot_iter}, '
                  f'max_iterations: {max_iterations}')
for iter_no in range(max_iterations):
print("pgd iter", iter_no)
perturbation.zero_grad()
grad = torch.zeros_like(param)
loss_per_example_ave = 0
for i in range(eot_iter):
loss_per_example = self.loss_fxn.forward(perturbation(var_examples), var_labels,
perturbation=perturbation,
output_per_example=keep_best)
loss_per_example_ave += loss_per_example.detach().clone()
loss = -1 * loss_per_example.sum()
loss.backward()
grad += param.grad.data.detach()
param.grad.data.zero_()
grad /= float(eot_iter)
loss_per_example_ave /= float(eot_iter)
assert len(param_list) == 1, len(param_list)
param.grad.data = grad.clone()
if signed:
perturbation.update_params(update_fxn)
else:
optimizer.step()
if keep_best:
mask_val = torch.zeros(num_examples, dtype=torch.uint8)
for i, el in enumerate(loss_per_example_ave):
this_best_loss = best_loss_per_example[i]
if this_best_loss is None or this_best_loss[1] < float(el):
mask_val[i] = 1
best_loss_per_example[i] = (iter_no, float(el))
if best_perturbation is None:
best_perturbation = self.threat_model(examples)
best_perturbation = perturbation.merge_perturbation(
best_perturbation,
mask_val)
self.validator((best_perturbation or perturbation)(var_examples),
var_labels, iter_no=iter_no)
# Stop early if loss didn't go down too much
            if (iter_no >= min_iterations and prev_loss is not None and
                float(loss) >= loss_convergence * prev_loss):
if verbose:
print("Stopping early at %03d iterations" % iter_no)
break
prev_loss = float(loss)
perturbation.zero_grad()
self.loss_fxn.cleanup_attack_batch()
perturbation.attach_originals(examples)
return perturbation
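# Usage sketch (illustrative; hyperparameters are assumptions): a 20-step
# signed PGD with a random start, reusing `threat` and `loss_fxn` from the
# FGSM sketch above.
#
#   attack = PGD(classifier, normalizer, threat, loss_fxn)
#   perturbation = attack.attack(examples, labels,
#                                step_size=2.0 / 255.0, num_iterations=20,
#                                random_init=True, signed=True)
#   adversarials = perturbation.adversarial_tensors()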
##############################################################################
# #
# CARLINI WAGNER #
# #
##############################################################################
"""
General class of CW attacks: these aim to solve optim problems of the form
Adv(x) = argmin_{x'} D(x, x')
s.t. f(x) != f(x')
x' is a valid attack (e.g., meets LP bounds)
Which is typically relaxed to solving
Adv(x) = argmin_{x'} D(x, x') + lambda * L_adv(x')
where L_adv(x') is only nonpositive when f(x) != f(x').
Binary search is performed on a per-example basis to find the appropriate
lambda.
The distance function is backpropagated through in each bin search step, so it
needs to be differentiable. It does not need to be a true distance metric,
though.
"""
class CarliniWagner(AdversarialAttack):
def __init__(self, classifier_net, normalizer, threat_model,
distance_fxn, carlini_loss, manual_gpu=None):
""" This is a different init than the other style attacks because the
loss function is separated into two arguments here
ARGS:
classifier_net: standard attack arg
normalizer: standard attack arg
threat_model: standard attack arg
distance_fxn: lf.ReferenceRegularizer subclass (CLASS NOT INSTANCE)
- is a loss function
that stores originals so it can be used to create a
RegularizedLoss object with the carlini loss object
carlini_loss: lf.PartialLoss subclass (CLASS NOT INSTANCE) - is the
loss term that is
a function on images and labels that only
returns zero when the images are adversarial
"""
super(CarliniWagner, self).__init__(classifier_net, normalizer,
threat_model, manual_gpu=manual_gpu)
assert issubclass(distance_fxn, lf.ReferenceRegularizer)
assert issubclass(carlini_loss, lf.CWLossF6)
self.loss_classes = {'distance_fxn': distance_fxn,
'carlini_loss': carlini_loss}
def _construct_loss_fxn(self, initial_lambda, confidence):
""" Uses the distance_fxn and carlini_loss to create a loss function to
be optimized
ARGS:
initial_lambda : float - which lambda to use initially
in the regularization of the carlini loss
confidence : float - how great the difference in the logits must be
for the carlini_loss to be zero. Overwrites the
self.carlini_loss.kappa value
RETURNS:
RegularizedLoss OBJECT to be used as the loss for this optimization
"""
losses = {'distance_fxn': self.loss_classes['distance_fxn'](None,
use_gpu=self.use_gpu),
'carlini_loss': self.loss_classes['carlini_loss'](
self.classifier_net,
self.normalizer,
kappa=confidence)}
scalars = {'distance_fxn': 1.0,
'carlini_loss': initial_lambda}
return lf.RegularizedLoss(losses, scalars)
def _optimize_step(self, optimizer, perturbation, var_examples,
var_targets, var_scale, loss_fxn, targeted=False):
""" Does one step of optimization """
assert not targeted
optimizer.zero_grad()
loss = loss_fxn.forward(perturbation(var_examples), var_targets)
if torch.numel(loss) > 1:
loss = loss.sum()
loss.backward()
optimizer.step()
# return a loss 'average' to determine if we need to stop early
return loss.item()
def _batch_compare(self, example_logits, targets, confidence=0.0,
targeted=False):
""" Returns a list of indices of valid adversarial examples
ARGS:
example_logits: Variable/Tensor (Nx#Classes) - output logits for a
batch of images
targets: Variable/Tensor (N) - each element is a class index for the
target class for the i^th example.
confidence: float - how much the logits must differ by for an
attack to be considered valid
targeted: bool - if True, the 'targets' arg should be the targets
we want to hit. If False, 'targets' arg should be
the targets we do NOT want to hit
RETURNS:
            Variable ByteTensor of length (N) on the same device as
            example_logits/targets with 1's for successful adversarial
            examples, 0's for unsuccessful
"""
# check if the max val is the targets
target_vals = example_logits.gather(1, targets.view(-1, 1))
max_vals, max_idxs = torch.max(example_logits, 1)
max_eq_targets = torch.eq(targets, max_idxs)
# check margins between max and target_vals
if targeted:
            # take the second-LARGEST logit here; kthvalue(2) picks the
            # second-smallest, which was a bug
            max_2_vals = torch.topk(example_logits, 2, dim=1)[0][:, 1]
            good_confidence = torch.gt(max_vals - confidence, max_2_vals)
one_hot_indices = max_eq_targets * good_confidence
else:
good_confidence = torch.gt(max_vals.view(-1, 1),
target_vals + confidence)
one_hot_indices = ((1 - max_eq_targets.data).view(-1, 1) *
good_confidence.data)
return one_hot_indices.squeeze()
# return [idx for idx, el in enumerate(one_hot_indices) if el[0] == 1]
@classmethod
def tweak_lambdas(cls, var_scale_lo, var_scale_hi, var_scale,
successful_mask):
""" Modifies the constant scaling that we keep to weight f_adv vs D(.)
in our loss function.
IF the attack was successful
THEN hi -> lambda
lambda -> (lambda + lo) /2
ELSE
lo -> lambda
lambda -> (lambda + hi) / 2
ARGS:
var_scale_lo : Variable (N) - variable that holds the running lower
bounds in our binary search
var_scale_hi: Variable (N) - variable that holds the running upper
bounds in our binary search
var_scale : Variable (N) - variable that holds the lambdas we
actually use
successful_mask : Variable (ByteTensor N) - mask that holds the
indices of the successful attacks
RETURNS:
(var_scale_lo, var_scale_hi, var_scale) but modified according to
the rule describe in the spec of this method
"""
downweights = (var_scale_lo.data + var_scale.data) / 2.0
upweights = (var_scale_hi.data + var_scale.data) / 2.0
scale_hi = utils.fold_mask(var_scale.data, var_scale_hi.data,
successful_mask.data)
scale_lo = utils.fold_mask(var_scale_lo.data, var_scale.data,
successful_mask.data)
scale = utils.fold_mask(downweights, upweights,
successful_mask.data)
return (Variable(scale_lo), Variable(scale_hi), Variable(scale))
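    # Worked example (illustrative): with lo = 0, hi = 128, lambda = 1 and a
    # successful attack, fold_mask gives (lo, hi, lambda) = (0, 1, 0.5): the
    # attack worked, so we try a smaller adversarial weight. On failure we
    # would get (1, 128, 64.5), pushing lambda upward instead.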
def attack(self, examples, labels, targets=None, initial_lambda=1.0,
num_bin_search_steps=10, num_optim_steps=1000,
confidence=0.0, verbose=True):
""" Performs Carlini Wagner attack on provided examples to make them
not get classified as the labels.
ARGS:
examples : Tensor (NxCxHxW) - input images to be made adversarial
labels : Tensor (N) - correct labels of the examples
initial_lambda : float - which lambda to use initially
in the regularization of the carlini loss
num_bin_search_steps : int - how many binary search steps we perform
to optimize the lambda
num_optim_steps : int - how many optimizer steps we perform during
each binary search step (we may stop early)
confidence : float - how great the difference in the logits must be
for the carlini_loss to be zero. Overwrites the
self.carlini_loss.kappa value
RETURNS:
            AdversarialPerturbation object with correct parameters.
            Calling perturbation() gets Variable of output and
            calling perturbation().data gets tensor of output.
            The returned perturbation also carries a 'distances' attribute
            holding the best (smallest) distance found per example
"""
######################################################################
# First perform some setups #
######################################################################
if targets is not None:
raise NotImplementedError("Targeted attacks aren't built yet")
if self.use_gpu:
examples = examples.cuda()
labels = labels.cuda()
self.classifier_net.eval() # ALWAYS EVAL FOR BUILDING ADV EXAMPLES
var_examples = Variable(examples, requires_grad=False)
var_labels = Variable(labels, requires_grad=False)
loss_fxn = self._construct_loss_fxn(initial_lambda, confidence)
loss_fxn.setup_attack_batch(var_examples)
distance_fxn = loss_fxn.losses['distance_fxn']
num_examples = examples.shape[0]
best_results = {'best_dist': torch.ones(num_examples) \
.type(examples.type()) \
* MAXFLOAT,
'best_perturbation': self.threat_model(examples)}
######################################################################
# Now start the binary search #
######################################################################
var_scale_lo = Variable(torch.zeros(num_examples) \
.type(self._dtype).squeeze())
var_scale = Variable(torch.ones(num_examples, 1).type(self._dtype) *
initial_lambda).squeeze()
var_scale_hi = Variable(torch.ones(num_examples).type(self._dtype)
* 128).squeeze() # HARDCODED UPPER LIMIT
for bin_search_step in range(num_bin_search_steps):
perturbation = self.threat_model(examples)
##################################################################
# Optimize with a given scale constant #
##################################################################
if verbose:
print("Starting binary_search_step %02d..." % bin_search_step)
prev_loss = MAXFLOAT
optimizer = optim.Adam(perturbation.parameters(), lr=0.001)
for optim_step in range(num_optim_steps):
if verbose and optim_step > 0 and optim_step % 25 == 0:
print("Optim search: %s, Loss: %s" %
(optim_step, prev_loss))
loss_sum = self._optimize_step(optimizer, perturbation,
var_examples, var_labels,
var_scale, loss_fxn)
if loss_sum + 1e-10 > prev_loss * 0.99999 and optim_step >= 100:
if verbose:
print(("...stopping early on binary_search_step %02d "
" after %03d iterations") % (bin_search_step,
optim_step))
break
prev_loss = loss_sum
# End inner optimize loop
################################################################
# Update with results from optimization #
################################################################
# We only keep this round's perturbations if two things occur:
# 1) the perturbation fools the classifier
# 2) the perturbation is closer to original than the best-so-far
bin_search_perts = perturbation(var_examples)
bin_search_out = self.classifier_net.forward(bin_search_perts)
successful_attack_idxs = self._batch_compare(bin_search_out,
var_labels)
batch_dists = distance_fxn.forward(bin_search_perts).data
            successful_dist_idxs = batch_dists < best_results['best_dist']
successful_mask = successful_attack_idxs * successful_dist_idxs
# And then generate a new 'best distance' and 'best perturbation'
best_results['best_dist'] = utils.fold_mask(batch_dists,
best_results['best_dist'],
successful_mask)
best_results['best_perturbation'] = \
perturbation.merge_perturbation(
best_results['best_perturbation'],
successful_mask)
# And then adjust the scale variables (lambdas)
new_scales = self.tweak_lambdas(var_scale_lo, var_scale_hi,
var_scale,
Variable(successful_mask))
var_scale_lo, var_scale_hi, var_scale = new_scales
# End binary search loop
if verbose:
num_successful = len([_ for _ in best_results['best_dist']
if _ < MAXFLOAT])
print("\n Ending attack")
print("Successful attacks for %03d/%03d examples in CONTINUOUS" % \
(num_successful, num_examples))
loss_fxn.cleanup_attack_batch()
perturbation.attach_originals(examples)
perturbation.attach_attr('distances', best_results['best_dist'])
return perturbation
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/__init__.py
#
# The license for the original version of this file can be
# found in the `recoloradv` directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/adversarial_training.py
#
# The license for the original version of this file can be
# found in the `recoloradv` directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
""" Contains training code for adversarial training """
from __future__ import print_function
import torch
import torch.cuda as cuda
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
import random
from .utils import pytorch_utils as utils, checkpoints
##############################################################################
# #
# ATTACK PARAMETERS OBJECT #
# #
##############################################################################
class AdversarialAttackParameters(object):
""" Wrapper to store an adversarial attack object as well as some extra
parameters for how to use it in training
"""
def __init__(self, adv_attack_obj, proportion_attacked=1.0,
attack_specific_params=None):
""" Stores params for how to use adversarial attacks in training
ARGS:
adv_attack_obj : AdversarialAttack subclass -
thing that actually does the attack
proportion_attacked: float between [0.0, 1.0] - what proportion of
the minibatch we build adv examples for
attack_specific_params: possibly None dict, but possibly dict with
specific parameters for attacks
"""
self.adv_attack_obj = adv_attack_obj
self.proportion_attacked = proportion_attacked
attack_specific_params = attack_specific_params or {}
self.attack_specific_params = attack_specific_params
self.attack_kwargs = attack_specific_params.get('attack_kwargs', {})
def set_gpu(self, use_gpu):
""" Propagates changes of the 'use_gpu' parameter down to the attack
ARGS:
use_gpu : bool - if True, the attack uses the GPU, ow it doesn't
RETURNS:
None
"""
self.adv_attack_obj.use_gpu = use_gpu
def attack(self, inputs, labels):
""" Builds some adversarial examples given real inputs and labels
ARGS:
inputs : torch.Tensor (NxCxHxW) - tensor with examples needed
            labels : torch.Tensor (N) - tensor with the corresponding labels
RETURNS:
some sample of (self.proportion_attacked * N ) examples that are
adversarial, and the corresponding NONADVERSARIAL LABELS
            output is a tuple with five elements:
adv_examples: Tensor with shape (N'xCxHxW) [the perturbed outputs]
pre_adv_labels: Tensor with shape (N') [original labels]
selected_idxs : Tensor with shape (N') [idxs selected]
adv_inputs : Tensor with shape (N') [examples used to make advs]
perturbation: Adversarial Perturbation Object
"""
num_elements = inputs.shape[0]
selected_idxs = sorted(random.sample(list(range(num_elements)),
int(self.proportion_attacked * num_elements)))
selected_idxs = inputs.new(selected_idxs).long()
if selected_idxs.numel() == 0:
            return (None, None, None, None, None)
adv_inputs = Variable(inputs.index_select(0, selected_idxs))
pre_adv_labels = labels.index_select(0, selected_idxs)
perturbation = self.adv_attack_obj.attack(adv_inputs.data,
pre_adv_labels,
**self.attack_kwargs)
adv_examples = perturbation(adv_inputs)
return (adv_examples, pre_adv_labels, selected_idxs, adv_inputs,
perturbation)
def eval(self, ground_inputs, adv_inputs, labels, idxs, topk=1):
""" Outputs the accuracy of the adversarial examples
NOTE: notice the difference between N and N' in the argument
ARGS:
ground_inputs: Variable (NxCxHxW) - examples before we did
adversarial perturbation. Vals in [0, 1] range
            adv_inputs: Variable (N'xCxHxW) - examples after we did
                        adversarial perturbation. Should be same shape and
                        in same order as ground_inputs
labels: Variable (longTensor N) - correct labels of classification
output
idxs: Variable (longtensor N') - indices of ground_inputs/labels
used for adversarials.
RETURNS:
tuple of (% of correctly classified original examples,
% of correctly classified adversarial examples)
"""
selected_grounds = ground_inputs.index_select(0, idxs)
selected_labels = labels.index_select(0, idxs)
return self.adv_attack_obj.eval(selected_grounds, adv_inputs,
selected_labels, topk=topk)
def eval_attack_only(self, adv_inputs, labels, topk=1):
""" Outputs the accuracy of the adv_inputs only
ARGS:
adv_inputs: Variable NxCxHxW - examples after we did adversarial
perturbation
labels: Variable (longtensor N) - correct labels of classification
output
topk: int - criterion for 'correct' classification
RETURNS:
(int) number of correctly classified examples
"""
return self.adv_attack_obj.eval_attack_only(adv_inputs, labels,
topk=topk)
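# Illustrative usage sketch (not part of the original source): wrapping an
# existing attack so that only half of each minibatch is perturbed during
# training. `adv_attack_obj` is assumed to be an AdversarialAttack subclass
# instance built elsewhere; the empty kwargs dict is illustrative.
def _example_attack_parameters(adv_attack_obj):
    return AdversarialAttackParameters(
        adv_attack_obj,
        proportion_attacked=0.5,
        attack_specific_params={'attack_kwargs': {}})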
##############################################################################
# #
# TRAINING OBJECT #
# #
##############################################################################
class AdversarialTraining(object):
""" Wrapper for training of a NN with adversarial examples cooked in
"""
def __init__(self, classifier_net, normalizer,
experiment_name, architecture_name,
manual_gpu=None):
"""
ARGS:
        classifier_net : nn.Module subclass - instance of the neural net that
                         classifies images. May or may not already be trained
normalizer : DifferentiableNormalize - object to convert to zero-mean
unit-variance domain
experiment_name : String - human-readable name of the 'trained_model'
(this is helpful for identifying checkpoints later)
manual_gpu : None or bool - if not None is a manual override of whether
or not to use the GPU. If left None, we use the GPU if we
can
ON NOMENCLATURE:
Depending on verbosity levels, training checkpoints are saved after
some training epochs. These are saved as
'<experiment_name>/<architecture_name>/<epoch>.path.tar'
Best practice is to keep architecture_name consistent across
adversarially trained models built off the same architecture and having
a descriptive experiment_name for each training instance
"""
self.classifier_net = classifier_net
self.normalizer = normalizer
self.experiment_name = experiment_name
self.architecture_name = architecture_name
if manual_gpu is not None:
self.use_gpu = manual_gpu
else:
self.use_gpu = utils.use_gpu()
self.verbosity_level = None
self.verbosity_minibatch = None
self.verbosity_adv = None
self.verbosity_epoch = None
self.logger = utils.TrainingLogger()
self.log_level = None
self.log_minibatch = None
self.log_adv = None
self.log_epoch = None
def reset_logger(self):
""" Clears the self.logger instance - useful occasionally """
self.logger = utils.TrainingLogger()
def set_verbosity_loglevel(self, level,
verbosity_or_loglevel='verbosity'):
""" Sets the verbosity or loglevel for training.
Is called in .train method so this method doesn't need to be
explicitly called.
Verbosity is mapped from a string to a comparable int 'level'.
<val>_level : int - comparable value of verbosity
<val>_minibatch: int - we do a printout every this many
minibatches
<val>_adv: int - we evaluate the efficacy of our attack every
this many minibatches
<val>_epoch: int - we printout/log and checkpoint every this many
epochs
ARGS:
level : string ['low', 'medium', 'high', 'snoop'],
varying levels of verbosity/logging in increasing order
RETURNS: None
"""
assert level in ['low', 'medium', 'high', 'snoop']
assert verbosity_or_loglevel in ['verbosity', 'loglevel']
setattr(self, verbosity_or_loglevel, level)
_level = {'low': 0,
'medium': 1,
'high': 2,
'snoop': 420}[level]
setattr(self, verbosity_or_loglevel + '_level', _level)
_minibatch = {'medium': 2000,
'high': 100,
'snoop': 1}.get(level)
setattr(self, verbosity_or_loglevel + '_minibatch', _minibatch)
_adv = {'medium': 2000,
'high': 100,
'snoop': 1}.get(level)
        setattr(self, verbosity_or_loglevel + '_adv', _adv)
_epoch = {'low': 100,
'medium': 10,
'high': 1,
'snoop': 1}.get(level)
setattr(self, verbosity_or_loglevel + '_epoch', _epoch)
def _attack_subroutine(self, attack_parameters, inputs, labels,
epoch_num, minibatch_num, adv_saver,
logger):
""" Subroutine to run the specified attack on a minibatch and append
the results to inputs/labels.
NOTE: THIS DOES NOT MUTATE inputs/labels !!!!
ARGS:
attack_parameters: {k: AdversarialAttackParameters} (or None) -
if not None, contains info on how to do adv
attacks. If None, we don't train adversarially
inputs : Tensor (NxCxHxW) - minibatch of data we build adversarial
examples for
labels : Tensor (longtensor N) - minibatch of labels
epoch_num : int - number of which epoch we're working on.
Is helpful for printing
minibatch_num : int - number of which minibatch we're working on.
Is helpful for printing
adv_saver : None or checkpoints.CustomDataSaver -
if not None, we save the adversarial images for later
use, else we don't save them.
logger : utils.TrainingLogger instance - logger instance to keep
track of logging data if we need data for this instance
RETURNS:
(inputs, labels, adv_inputs, coupled_inputs)
where inputs = <arg inputs> ++ adv_inputs
labels is original labels
adv_inputs is the (Variable) adversarial examples generated,
coupled_inputs is the (Variable) inputs used to generate the
adversarial examples (useful for when we don't
augment 1:1).
"""
if attack_parameters is None:
return inputs, labels, None, None
assert isinstance(attack_parameters, dict)
adv_inputs_total, adv_labels_total, coupled_inputs = [], [], []
for (key, param) in attack_parameters.items():
adv_data = param.attack(inputs, labels)
adv_inputs, adv_labels, adv_idxs, og_adv_inputs, _ = adv_data
needs_print = (self.verbosity_level >= 1 and
minibatch_num % self.verbosity_adv == self.verbosity_adv - 1)
needs_log = (self.loglevel_level >= 1 and
minibatch_num % self.loglevel_adv == self.loglevel_adv - 1)
if needs_print or needs_log:
accuracy = param.eval(inputs, adv_inputs, labels, adv_idxs)
if needs_print:
print('[%d, %5d] accuracy: (%.3f, %.3f)' %
(epoch_num, minibatch_num + 1, accuracy[1], accuracy[0]))
if needs_log:
logger.log(key, epoch_num, minibatch_num + 1,
(accuracy[1], accuracy[0]))
if adv_saver is not None: # Save the adversarial examples
adv_saver.save_minibatch(adv_inputs, adv_labels)
adv_inputs_total.append(adv_inputs)
adv_labels_total.append(adv_labels)
coupled_inputs.append(og_adv_inputs)
inputs = torch.cat([inputs] + [_.data for _ in adv_inputs_total], dim=0)
labels = torch.cat([labels] + adv_labels_total, dim=0)
coupled = torch.cat(coupled_inputs, dim=0)
return inputs, labels, torch.cat(adv_inputs_total, dim=0), coupled
def train(self, data_loader, num_epochs, train_loss,
optimizer=None, attack_parameters=None,
verbosity='medium', loglevel='medium', logger=None,
starting_epoch=0, adversarial_save_dir=None,
regularize_adv_scale=None):
""" Modifies the NN weights of self.classifier_net by training with
        the specified parameters
ARGS:
data_loader: torch.utils.data.DataLoader OR
checkpoints.CustomDataLoader - object that loads the
data
            num_epochs: int - number of epochs to train on
            train_loss: loss object with a .forward(logits, labels) method
                        (invoked below as train_loss.forward(outputs, labels))
optimizer: torch.Optimizer subclass - defaults to Adam with some
decent default params. Pass this in as an actual argument
to do anything different
attack_parameters: AdversarialAttackParameters obj | None |
{key: AdversarialAttackParameters} -
if not None, is either an object or dict of
objects containing names and info on how to do
adv attacks. If None, we don't train
adversarially
            verbosity : string - one of 'low', 'medium', 'high', 'snoop',
                        describing how much to print
            loglevel : string - one of 'low', 'medium', 'high', 'snoop',
                       describing how much to log
logger : if not None, is a utils.TrainingLogger instance. Otherwise
we use this instance's self.logger object to log
starting_epoch : int - which epoch number we start on. Is useful
for correct labeling of checkpoints and figuring
out how many epochs we actually need to run for
(i.e., num_epochs - starting_epoch)
adversarial_save_dir: string or None - if not None is the name of
the directory we save adversarial images to.
If None, we don't save adversarial images
regularize_adv_scale : float > 0 or None - if not None we do L1 loss
between the logits of the adv examples and
the inputs used to generate them. This is the
scale constant of that loss
        RETURNS:
            the TrainingLogger instance used, after modifying the
            classifier_net's weights in place
"""
######################################################################
# Setup/ input validations #
######################################################################
self.classifier_net.train() # in training mode
assert isinstance(num_epochs, int)
if attack_parameters is not None:
if not isinstance(attack_parameters, dict):
attack_parameters = {'attack': attack_parameters}
# assert that the adv attacker uses the NN that's being trained
for param in attack_parameters.values():
assert (param.adv_attack_obj.classifier_net ==
self.classifier_net)
assert not (self.use_gpu and not cuda.is_available())
if self.use_gpu:
self.classifier_net.cuda()
if attack_parameters is not None:
for param in attack_parameters.values():
param.set_gpu(self.use_gpu)
# Verbosity parameters
assert verbosity in ['low', 'medium', 'high', 'snoop', None]
self.set_verbosity_loglevel(verbosity,
verbosity_or_loglevel='verbosity')
verbosity_level = self.verbosity_level
verbosity_minibatch = self.verbosity_minibatch
verbosity_epoch = self.verbosity_epoch
# Loglevel parameters and logger initialization
assert loglevel in ['low', 'medium', 'high', 'snoop', None]
if logger is None:
logger = self.logger
if logger.data_count() > 0:
print("WARNING: LOGGER IS NOT EMPTY! BE CAREFUL!")
logger.add_series('training_loss')
for key in (attack_parameters or {}).keys():
logger.add_series(key)
self.set_verbosity_loglevel(loglevel, verbosity_or_loglevel='loglevel')
loglevel_level = self.loglevel_level
loglevel_minibatch = self.loglevel_minibatch
loglevel_epoch = self.loglevel_epoch
# Adversarial image saver:
adv_saver = None
if adversarial_save_dir is not None and attack_parameters is not None:
adv_saver = checkpoints.CustomDataSaver(adversarial_save_dir)
# setup loss fxn, optimizer
optimizer = optimizer or optim.Adam(self.classifier_net.parameters(),
lr=0.001)
# setup regularize adv object
if regularize_adv_scale is not None:
regularize_adv_criterion = nn.L1Loss()
######################################################################
# Training loop #
######################################################################
for epoch in range(starting_epoch + 1, num_epochs + 1):
running_loss_print, running_loss_print_mb = 0.0, 0
running_loss_log, running_loss_log_mb = 0.0, 0
for i, data in enumerate(data_loader, 0):
inputs, labels = data
if self.use_gpu:
inputs = inputs.cuda()
labels = labels.cuda()
# Build adversarial examples
attack_out = self._attack_subroutine(attack_parameters,
inputs, labels,
epoch, i, adv_saver,
logger)
inputs, labels, adv_examples, adv_inputs = attack_out
# Now proceed with standard training
self.normalizer.differentiable_call()
self.classifier_net.train()
inputs, labels = Variable(inputs), Variable(labels)
optimizer.zero_grad()
# forward step
outputs = self.classifier_net.forward(self.normalizer(inputs))
loss = train_loss.forward(outputs, labels)
if regularize_adv_scale is not None:
# BE SURE TO 'DETACH' THE ADV_INPUTS!!!
reg_adv_loss = regularize_adv_criterion(adv_examples,
Variable(adv_inputs.data))
print(float(loss), regularize_adv_scale * float(reg_adv_loss))
loss = loss + regularize_adv_scale * reg_adv_loss
# backward step
loss.backward()
optimizer.step()
# print things
running_loss_print += float(loss.data)
running_loss_print_mb += 1
if (verbosity_level >= 1 and
i % verbosity_minibatch == verbosity_minibatch - 1):
print('[%d, %5d] loss: %.6f' %
(epoch, i + 1, running_loss_print /
float(running_loss_print_mb)))
running_loss_print = 0.0
running_loss_print_mb = 0
# log things
running_loss_log += float(loss.data)
running_loss_log_mb += 1
if (loglevel_level >= 1 and
i % loglevel_minibatch == loglevel_minibatch - 1):
logger.log('training_loss', epoch, i + 1,
running_loss_log / float(running_loss_log_mb))
running_loss_log = 0.0
running_loss_log_mb = 0
# end_of_epoch
if epoch % verbosity_epoch == 0:
print("COMPLETED EPOCH %04d... checkpointing here" % epoch)
checkpoints.save_state_dict(self.experiment_name,
self.architecture_name,
epoch, self.classifier_net,
k_highest=3)
if verbosity_level >= 1:
print('Finished Training')
return logger
def train_from_checkpoint(self, data_loader, num_epochs, loss_fxn,
optimizer=None, attack_parameters=None,
verbosity='medium',
starting_epoch='max',
adversarial_save_dir=None):
""" Resumes training from a saved checkpoint with the same architecture.
i.e. loads weights from specified checkpoint, figures out which
epoch we checkpointed on and then continues training until
we reach num_epochs epochs
ARGS:
same as in train
starting_epoch: 'max' or int - which epoch we start training from.
'max' means the highest epoch we can find,
an int means a specified int epoch exactly.
RETURNS:
None
"""
######################################################################
# Checkpoint handling block #
######################################################################
# which epoch to load
valid_epochs = checkpoints.list_saved_epochs(self.experiment_name,
self.architecture_name)
assert valid_epochs != []
if starting_epoch == 'max':
epoch = max(valid_epochs)
else:
assert starting_epoch in valid_epochs
epoch = starting_epoch
        # modify the classifier to use these weights
self.classifier_net = checkpoints.load_state_dict(self.experiment_name,
self.architecture_name,
epoch,
self.classifier_net)
######################################################################
# Training block #
######################################################################
self.train(data_loader, num_epochs, loss_fxn,
optimizer=optimizer,
attack_parameters=attack_parameters,
verbosity=verbosity,
starting_epoch=epoch,
adversarial_save_dir=adversarial_save_dir)
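# Illustrative usage sketch (not part of the original source): one plausible
# end-to-end adversarial training call. `train_loss` is only documented as a
# loss object above; nn.CrossEntropyLoss is an assumption that matches how it
# is invoked (train_loss.forward(outputs, labels)). All values illustrative.
def _example_adversarial_training(classifier_net, normalizer, data_loader,
                                  attack_params):
    trainer = AdversarialTraining(classifier_net, normalizer,
                                  experiment_name='demo_experiment',
                                  architecture_name='demo_arch')
    criterion = nn.CrossEntropyLoss()
    return trainer.train(data_loader, num_epochs=2, train_loss=criterion,
                         attack_parameters=attack_params,
                         verbosity='low', loglevel='low')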
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/spatial_transformers.py
#
# The license for the original version of this file can be
# found in the `recoloradv` directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
""" File that contains various parameterizations for spatial transformation
styles. At its simplest, spatial transforms can be affine grids,
parameterized by 6 values. At their most complex, for a CxHxW type image
grids can be parameterized by CxHxWx2 parameters.
This file will define subclasses of nn.Module that will have parameters
corresponding to the transformation parameters and will take in an image
and output a transformed image.
Further we'll also want a method to initialize each set to be the identity
initially
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import pytorch_utils as utils
from torch.autograd import Variable
##############################################################################
# #
# SKELETON CLASS #
# #
##############################################################################
class ParameterizedTransformation(nn.Module):
""" General class of transformations.
All subclasses need the following methods:
- norm: no args -> scalar variable
- identity_params: shape -> TENSOR : takes an input shape and outputs
the subclass-specific parameter for the identity
transformation
- forward : Variable -> Variable - is the transformation
"""
def __init__(self, **kwargs):
super(ParameterizedTransformation, self).__init__()
if kwargs.get('manual_gpu', None) is not None:
self.use_gpu = kwargs['manual_gpu']
else:
self.use_gpu = utils.use_gpu()
def norm(self, lp='inf'):
raise NotImplementedError("Need to call subclass's norm!")
@classmethod
def identity_params(self, shape):
raise NotImplementedError("Need to call subclass's identity_params!")
def merge_xform(self, other, self_mask):
""" Takes in an other instance of this same class with the same
shape of parameters (NxSHAPE) and a self_mask bytetensor of length
N and outputs the merge between self's parameters for the indices
of 1s in the self_mask and other's parameters for the indices of 0's
ARGS:
other: instance of same class as self with params of shape NxSHAPE -
the thing we merge with this one
self_mask : ByteTensor (length N) - which indices of parameters we
keep from self, and which we keep from other
RETURNS:
New instance of this class that's merged between the self and other
(same shaped params)
"""
# JUST DO ASSERTS IN THE SKELETON CLASS
assert self.__class__ == other.__class__
self_params = self.xform_params.data
other_params = other.xform_params.data
assert self_params.shape == other_params.shape
assert self_params.shape[0] == self_mask.shape[0]
assert other_params.shape[0] == self_mask.shape[0]
new_xform = self.__class__(shape=self.img_shape)
new_params = utils.fold_mask(self.xform_params.data,
other.xform_params.data, self_mask)
new_xform.xform_params = nn.Parameter(new_params)
new_xform.use_gpu = self.use_gpu
return new_xform
def forward(self, examples):
raise NotImplementedError("Need to call subclass's forward!")
###############################################################################
# #
# FULLY PARAMETERIZED SPATIAL TRANSFORMATION NETWORK #
# #
###############################################################################
class FullSpatial(ParameterizedTransformation):
def __init__(self, *args, **kwargs):
""" FullSpatial just has parameters that are the grid themselves.
Forward then will just call grid sample using these params directly
"""
super(FullSpatial, self).__init__(**kwargs)
img_shape = kwargs['shape']
self.img_shape = img_shape
self.xform_params = nn.Parameter(self.identity_params(img_shape))
def identity_params(self, shape):
""" Returns some grid parameters such that the minibatch of images isn't
changed when forward is called on it
ARGS:
shape: torch.Size - shape of the minibatch of images we'll be
transforming. First index should be num examples
RETURNS:
torch TENSOR (not variable!!!)
if shape arg has shape NxCxHxW, this has shape NxCxHxWx2
"""
        # Work smarter not harder -- use identity affine transforms here
num_examples = shape[0]
identity_affine_transform = torch.zeros(num_examples, 2, 3)
if self.use_gpu:
identity_affine_transform = identity_affine_transform.cuda()
identity_affine_transform[:,0,0] = 1
identity_affine_transform[:,1,1] = 1
return F.affine_grid(identity_affine_transform, shape).data
def stAdv_norm(self):
""" Computes the norm used in
"Spatially Transformed Adversarial Examples"
"""
# ONLY WORKS FOR SQUARE MATRICES
dtype = self.xform_params.data.type()
num_examples, height, width = tuple(self.xform_params.shape[0:3])
assert height == width
######################################################################
# Build permutation matrices #
######################################################################
def id_builder():
x = torch.zeros(height, width).type(dtype)
for i in range(height):
x[i,i] = 1
return x
col_permuts = []
row_permuts = []
# torch.matmul(foo, col_permut)
for col in ['left', 'right']:
col_val = {'left': -1, 'right': 1}[col]
idx = ((torch.arange(width) - col_val) % width)
idx = idx.type(dtype).type(torch.LongTensor)
if self.xform_params.is_cuda:
idx = idx.cuda()
col_permut = torch.zeros(height, width).index_copy_(1, idx.cpu(),
id_builder().cpu())
col_permut = col_permut.type(dtype)
if col == 'left':
col_permut[-1][0] = 0
col_permut[0][0] = 1
else:
col_permut[0][-1] = 0
col_permut[-1][-1] = 1
col_permut = Variable(col_permut)
col_permuts.append(col_permut)
row_permuts.append(col_permut.transpose(0, 1))
######################################################################
# Build delta_u, delta_v grids #
######################################################################
id_params = Variable(self.identity_params(self.img_shape))
delta_grids = self.xform_params - id_params
delta_grids = delta_grids.permute(0, 3, 1, 2)
######################################################################
# Compute the norm #
######################################################################
output = Variable(torch.zeros(num_examples).type(dtype))
for row_or_col, permutes in zip(['row', 'col'],
[row_permuts, col_permuts]):
for permute in permutes:
if row_or_col == 'row':
temp = delta_grids - torch.matmul(permute, delta_grids)
else:
temp = delta_grids - torch.matmul(delta_grids, permute)
temp = temp.pow(2)
temp = temp.sum(1)
temp = (temp + 1e-10).pow(0.5)
output.add_(temp.sum((1, 2)))
return output
def norm(self, lp='inf'):
""" Returns the 'norm' of this transformation in terms of an LP norm on
the parameters, summed across each transformation per minibatch
ARGS:
lp : int or 'inf' - which lp type norm we use
"""
if isinstance(lp, int) or lp == 'inf':
identity_params = Variable(self.identity_params(self.img_shape))
return utils.batchwise_norm(self.xform_params - identity_params, lp,
dim=0)
else:
assert lp == 'stAdv'
            return self.stAdv_norm()
def clip_params(self):
""" Clips the parameters to be between -1 and 1 as required for
grid_sample
"""
clamp_params = torch.clamp(self.xform_params, -1, 1).data
change_in_params = clamp_params - self.xform_params.data
self.xform_params.data.add_(change_in_params)
def merge_xform(self, other, self_mask):
""" Takes in an other instance of this same class with the same
shape of parameters (NxSHAPE) and a self_mask bytetensor of length
N and outputs the merge between self's parameters for the indices
of 1s in the self_mask and other's parameters for the indices of 0's
"""
super(FullSpatial, self).merge_xform(other, self_mask)
new_xform = FullSpatial(shape=self.img_shape,
manual_gpu=self.use_gpu)
new_params = utils.fold_mask(self.xform_params.data,
other.xform_params.data, self_mask)
new_xform.xform_params = nn.Parameter(new_params)
return new_xform
def project_params(self, lp, lp_bound):
""" Projects the params to be within lp_bound (according to an lp)
of the identity map. First thing we do is clip the params to be
valid, too
ARGS:
lp : int or 'inf' - which LP norm we use. Must be an int or the
string 'inf'
lp_bound : float - how far we're allowed to go in LP land
RETURNS:
None, but modifies self.xform_params
"""
assert isinstance(lp, int) or lp == 'inf'
# clip first
self.clip_params()
# then project back
if lp == 'inf':
identity_params = self.identity_params(self.img_shape)
clamp_params = utils.clamp_ref(self.xform_params.data,
identity_params, lp_bound)
change_in_params = clamp_params - self.xform_params.data
self.xform_params.data.add_(change_in_params)
else:
raise NotImplementedError("Only L-infinity bounds working for now ")
def forward(self, x):
# usual forward technique
return F.grid_sample(x, self.xform_params)
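# Illustrative sanity check (not part of the original source): a freshly
# constructed FullSpatial is the identity map, so its forward pass should
# approximately reproduce the input (up to grid_sample interpolation). The
# batch shape below is illustrative.
def _example_full_spatial_identity():
    x = torch.rand(2, 3, 8, 8)
    xform = FullSpatial(shape=x.shape, manual_gpu=False)
    return xform.forward(Variable(x))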
###############################################################################
# #
# AFFINE TRANSFORMATION NETWORK #
# #
###############################################################################
class AffineTransform(ParameterizedTransformation):
""" Affine transformation -- just has 6 parameters per example: 4 for 2d
rotation, and 1 for translation in each direction
"""
def __init__(self, *args, **kwargs):
super(AffineTransform, self).__init__(**kwargs)
img_shape = kwargs['shape']
self.img_shape = img_shape
self.xform_params = nn.Parameter(self.identity_params(img_shape))
def norm(self, lp='inf'):
identity_params = Variable(self.identity_params(self.img_shape))
return utils.batchwise_norm(self.xform_params - identity_params, lp,
dim=0)
def identity_params(self, shape):
""" Returns parameters for identity affine transformation
ARGS:
shape: torch.Size - shape of the minibatch of images we'll be
transforming. First index should be num examples
RETURNS:
torch TENSOR (not variable!!!)
if shape arg has shape NxCxHxW, this has shape Nx2x3
"""
        # Work smarter not harder -- use identity affine transforms here
num_examples = shape[0]
identity_affine_transform = torch.zeros(num_examples, 2, 3)
if self.use_gpu:
identity_affine_transform = identity_affine_transform.cuda()
identity_affine_transform[:,0,0] = 1
identity_affine_transform[:,1,1] = 1
return identity_affine_transform
def project_params(self, lp, lp_bound):
""" Projects the params to be within lp_bound (according to an lp)
of the identity map. First thing we do is clip the params to be
valid, too
ARGS:
lp : int or 'inf' - which LP norm we use. Must be an int or the
string 'inf'
lp_bound : float - how far we're allowed to go in LP land
RETURNS:
None, but modifies self.xform_params
"""
assert isinstance(lp, int) or lp == 'inf'
diff = self.xform_params.data - self.identity_params(self.img_shape)
new_diff = utils.batchwise_lp_project(diff, lp, lp_bound)
self.xform_params.data.add_(new_diff - diff)
def forward(self, x):
# usual forward technique with affine grid
grid = F.affine_grid(self.xform_params, x.shape)
return F.grid_sample(x, grid)
class RotationTransform(AffineTransform):
""" Rotations only -- only has one parameter, the angle by which we rotate
"""
def __init__(self, *args, **kwargs):
super(RotationTransform, self).__init__(**kwargs)
'''
img_shape = kwargs['shape']
self.img_shape = img_shape
self.xform_params = nn.Parameter(self.identity_params(img_shape))
'''
def identity_params(self, shape):
num_examples = shape[0]
params = torch.zeros(num_examples)
if self.use_gpu:
params = params.cuda()
return params
def make_grid(self, x):
assert isinstance(x, Variable)
cos_xform = self.xform_params.cos()
sin_xform = self.xform_params.sin()
zeros = torch.zeros_like(self.xform_params)
affine_xform = torch.stack([cos_xform, -sin_xform, zeros,
sin_xform, cos_xform, zeros])
affine_xform = affine_xform.transpose(0, 1).contiguous().view(-1, 2, 3)
return F.affine_grid(affine_xform, x.shape)
def forward(self, x):
return F.grid_sample(x, self.make_grid(x))
class TranslationTransform(AffineTransform):
""" Rotations only -- only has one parameter, the angle by which we rotate
"""
def __init__(self, *args, **kwargs):
super(TranslationTransform, self).__init__(**kwargs)
def identity_params(self, shape):
num_examples = shape[0]
params = torch.zeros(num_examples, 2) # x and y translation only
if self.use_gpu:
params = params.cuda()
return params
def make_grid(self, x):
assert isinstance(x, Variable)
ones = Variable(torch.ones(self.xform_params.shape[0]))
zeros = Variable(torch.zeros(self.xform_params.shape[0]))
        if self.xform_params.is_cuda:
ones = ones.cuda()
zeros = zeros.cuda()
affine_xform = torch.stack([ones, zeros, self.xform_params[:,0],
zeros, ones, self.xform_params[:,1]])
affine_xform = affine_xform.transpose(0, 1).contiguous().view(-1, 2, 3)
return F.affine_grid(affine_xform, x.shape)
def forward(self, x):
return F.grid_sample(x, self.make_grid(x))
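# Illustrative usage sketch (not part of the original source): rotate a batch
# by writing the single per-example angle parameter directly. The shape and
# the 0.1-radian angle are illustrative.
def _example_rotation_sketch():
    x = torch.rand(4, 3, 16, 16)
    rot = RotationTransform(shape=x.shape, manual_gpu=False)
    rot.xform_params.data.fill_(0.1)  # ~5.7 degrees for every example
    return rot.forward(Variable(x))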
##############################################################################
# #
# BARREL + PINCUSHION TRANSFORMATIONS #
# #
##############################################################################
class PointScaleTransform(ParameterizedTransformation):
""" Point Scale transformations are pincushion/barrel distortions.
We pick a point to anchor the image and optimize a distortion size to
either dilate or contract
"""
def __init__(self, *args, **kwargs):
super(PointScaleTransform, self).__init__(**kwargs)
img_shape = kwargs['shape']
self.img_shape = img_shape
self.xform_params = nn.Parameter(self.identity_params(img_shape))
def norm(self, lp='inf'):
return utils.batchwise_norm(self.xform_params, lp, dim=0)
def project_params(self, lp, lp_bound):
""" Projects the params to be within lp_bound (according to an lp)
of the identity map. First thing we do is clip the params to be
valid, too
ARGS:
lp : int or 'inf' - which LP norm we use. Must be an int or the
string 'inf'
lp_bound : float - how far we're allowed to go in LP land
RETURNS:
None, but modifies self.xform_params
"""
assert isinstance(lp, int) or lp == 'inf'
diff = self.xform_params.data
new_diff = utils.batchwise_lp_project(diff, lp, lp_bound)
self.xform_params.data.add_(new_diff - diff)
def identity_params(self, shape):
num_examples = shape[0]
identity_param = torch.zeros(num_examples)
if self.use_gpu:
identity_param = identity_param.cuda()
return identity_param
def make_grid(self):
######################################################################
# Compute identity flow grid first #
######################################################################
num_examples = self.img_shape[0]
identity_affine_transform = torch.zeros(num_examples, 2, 3)
if self.use_gpu:
identity_affine_transform = identity_affine_transform.cuda()
identity_affine_transform[:,0,0] = 1
identity_affine_transform[:,1,1] = 1
basic_grid = F.affine_grid(identity_affine_transform, self.img_shape)
######################################################################
# Compute scaling based on parameters #
######################################################################
radii_squared = basic_grid.pow(2).sum(-1)
new_radii = (radii_squared + 1e-20).pow(0.5) *\
(1 + self.xform_params.view(-1, 1, 1) * radii_squared)
thetas = torch.atan2(basic_grid[:,:,:,1], (basic_grid[:,:,:, 0]))
cosines = torch.cos(thetas) * new_radii
sines = torch.sin(thetas) * new_radii
return torch.stack([cosines, sines], -1)
def forward(self, x):
return F.grid_sample(x, self.make_grid())
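# Illustrative usage sketch (not part of the original source): a positive
# distortion parameter dilates pixels outward (barrel), a negative one
# contracts them (pincushion). Shape and magnitude are illustrative.
def _example_point_scale_sketch():
    x = torch.rand(2, 3, 16, 16)
    xform = PointScaleTransform(shape=x.shape, manual_gpu=False)
    xform.xform_params.data.fill_(0.3)  # barrel distortion
    return xform.forward(Variable(x))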
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/utils/image_utils.py
#
# The license for the original version of this file can be
# found in the `recoloradv` directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
""" Specific utilities for image classification
(i.e. RGB images i.e. tensors of the form NxCxHxW )
"""
from __future__ import print_function
import torch
import numpy as np
import matplotlib.pyplot as plt
import random
def nhwc255_xform(img_np_array):
""" Takes in a numpy array and transposes it so that the channel is the last
axis. Also multiplies all values by 255.0
ARGS:
img_np_array : np.ndarray - array of shape (NxHxWxC) or (NxCxHxW)
[assumes that we're in NCHW by default,
but if not ambiguous will handle NHWC too ]
RETURNS:
array of form NHWC
"""
assert isinstance(img_np_array, np.ndarray)
shape = img_np_array.shape
assert len(shape) == 4
# determine which configuration we're in
    ambiguous = (shape[1] == shape[3] == 3)
    nhwc = (shape[3] == 3)  # channels-last iff the final axis holds RGB
# transpose unless we're unambiguously in nhwc case
if nhwc and not ambiguous:
return img_np_array * 255.0
else:
return np.transpose(img_np_array, (0, 2, 3, 1)) * 255.0
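# Illustrative sanity check (not part of the original source): an unambiguous
# NCHW float array in [0, 1] comes back channels-last with values in [0, 255].
def _example_nhwc255():
    out = nhwc255_xform(np.random.rand(2, 3, 4, 5))
    assert out.shape == (2, 4, 5, 3)
    return out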
def show_images(images, normalize=None, ipython=True,
margin_height=2, margin_color='red',
figsize=(18, 16)):
""" Shows pytorch tensors/variables as images """
# first format the first arg to be hz-stacked numpy arrays
if not isinstance(images, list):
images = [images]
images = [np.dstack(image.cpu().numpy()) for image in images]
image_shape = images[0].shape
assert all(image.shape == image_shape for image in images)
assert all(image.ndim == 3 for image in images) # CxHxW
# now build the list of final rows
rows = []
if margin_height > 0:
assert margin_color in ['red', 'black']
margin_shape = list(image_shape)
margin_shape[1] = margin_height
margin = np.zeros(margin_shape)
if margin_color == 'red':
margin[0] = 1
else:
margin = None
for image_row in images:
rows.append(margin)
rows.append(image_row)
rows = [_ for _ in rows[1:] if _ is not None]
plt.figure(figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
cat_rows = np.concatenate(rows, 1).transpose(1, 2, 0)
imshow_kwargs = {}
if cat_rows.shape[-1] == 1: # 1 channel: greyscale
cat_rows = cat_rows.squeeze()
imshow_kwargs['cmap'] = 'gray'
plt.imshow(cat_rows, **imshow_kwargs)
plt.show()
def display_adversarial_2row(classifier_net, normalizer, original_images,
adversarial_images, num_to_show=4, which='incorrect',
ipython=False, margin_width=2):
""" Displays adversarial images side-by-side with their unperturbed
counterparts. Opens a window displaying two rows: top row is original
images, bottom row is perturbed
ARGS:
classifier_net : nn - with a .forward method that takes normalized
variables and outputs logits
normalizer : object w/ .forward method - should probably be an instance
of utils.DifferentiableNormalize or utils.IdentityNormalize
original_images: Variable or Tensor (NxCxHxW) - original images to
display. Images in [0., 1.] range
adversarial_images: Variable or Tensor (NxCxHxW) - perturbed images to
display. Should be same shape as original_images
num_to_show : int - number of images to show
which : string in ['incorrect', 'random', 'correct'] - which images to
show.
-- 'incorrect' means successfully attacked images,
-- 'random' means some random selection of images
-- 'correct' means unsuccessfully attacked images
ipython: bool - if True, we use in an ipython notebook so slightly
different way to show Images
margin_width - int : height in pixels of the red margin separating top
and bottom rows. Set to 0 for no margin
RETURNS:
None, but displays images
"""
assert which in ['incorrect', 'random', 'correct']
# If not 'random' selection, prune to only the valid things
to_sample_idxs = []
if which != 'random':
classifier_net.eval() # can never be too safe =)
# classify the originals with top1
original_norm_var = normalizer.forward(original_images)
original_out_logits = classifier_net.forward(original_norm_var)
_, original_out_classes = original_out_logits.max(1)
# classify the adversarials with top1
adv_norm_var = normalizer.forward(adversarial_images)
adv_out_logits = classifier_net.forward(adv_norm_var)
_, adv_out_classes = adv_out_logits.max(1)
# collect indices of matching
selector = lambda var: (which == 'correct') == bool(float(var))
for idx, var_el in enumerate(original_out_classes == adv_out_classes):
if selector(var_el):
to_sample_idxs.append(idx)
else:
to_sample_idxs = list(range(original_images.shape[0]))
# Now select some indices to show
if to_sample_idxs == []:
print("Couldn't show anything. Try changing the 'which' argument here")
return
to_show_idxs = random.sample(to_sample_idxs, min([num_to_show,
len(to_sample_idxs)]))
# Now start building up the images : first horizontally, then vertically
top_row = torch.cat([original_images[idx] for idx in to_show_idxs], dim=2)
bottom_row = torch.cat([adversarial_images[idx] for idx in to_show_idxs],
dim=2)
if margin_width > 0:
margin = torch.zeros(3, margin_width, top_row.shape[-1])
margin[0] = 1.0 # make it red
margin = margin.type(type(top_row))
stack = [top_row, margin, bottom_row]
else:
stack = [top_row, bottom_row]
plt.imshow(torch.cat(stack, dim=1).cpu().numpy().transpose(1, 2, 0))
plt.show()
def display_adversarial_notebook():
pass
def nchw_l2(x, y, squared=True):
""" Computes l2 norm between two NxCxHxW images
ARGS:
x, y: Tensor/Variable (NxCxHxW) - x, y must be same type & shape.
squared : bool - if True we return squared loss, otherwise we return
square root of l2
RETURNS:
        ||x - y ||_2 ^2 (no exponent if squared == False),
        shape is (N) after the final squeeze
"""
temp = torch.pow(x - y, 2) # square diff
for i in range(1, temp.dim()): # reduce on all but first dimension
temp = torch.sum(temp, i, keepdim=True)
if not squared:
temp = torch.pow(temp, 0.5)
return temp.squeeze()
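# Illustrative usage sketch (not part of the original source): per-example
# squared l2 distance between two random batches; the output has shape (4,).
def _example_nchw_l2():
    x, y = torch.rand(4, 3, 8, 8), torch.rand(4, 3, 8, 8)
    return nchw_l2(x, y, squared=True)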
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/utils/pytorch_utils.py
#
# The license for the original version of this file can be
# found in the `recoloradv` directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
""" Utilities for general pytorch helpfulness """
from __future__ import print_function
import torch
import numpy as np
import torchvision.transforms as transforms
import torch.cuda as cuda
import gc
import os
import warnings
from torch.autograd import Variable, Function
import subprocess
from functools import reduce
###############################################################################
# #
# SAFETY DANCE #
# #
###############################################################################
# aka things for safer pytorch usage
def use_gpu():
""" The shortcut to retrieve the environment variable 'MISTER_ED_GPU'"""
    try:
        str_val = os.environ['MISTER_ED_GPU']
    except KeyError:
        set_global_gpu()
        str_val = os.environ['MISTER_ED_GPU']
assert str_val in ['True', 'False']
return str_val == 'True'
def set_global_gpu(manual=None):
""" Sets the environment variable 'MISTER_ED_GPU'. Defaults to using gpu
if cuda is available
ARGS:
manual : bool - we set the 'MISTER_ED_GPU' environment var to the string
of whatever this is
RETURNS
None
"""
if manual is None:
val = cuda.is_available()
else:
val = manual
os.environ['MISTER_ED_GPU'] = str(val)
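# Illustrative usage sketch (not part of the original source): pin the global
# GPU flag off for a CPU-only run, then read it back through use_gpu().
def _example_gpu_flag():
    set_global_gpu(manual=False)
    return use_gpu()  # False until set_global_gpu is called again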
def unset_global_gpu():
""" Removes the environment variable 'MISTER_ED_GPU'
# NOTE: this relies on unsetenv, which works on 'most flavors of Unix'
according to the docs
"""
    try:
        os.unsetenv('MISTER_ED_GPU')
    except Exception:
        warnings.warn("os.unsetenv(.) isn't working properly")
def cuda_assert(use_cuda):
assert not (use_cuda and not cuda.is_available())
def safe_var(entity, **kwargs):
""" Returns a variable of an entity, which may or may not already be a
variable
"""
warnings.warn("As of >=pytorch0.4.0 this is no longer necessary",
DeprecationWarning)
if isinstance(entity, Variable):
return entity
elif isinstance(entity, torch._C._TensorBase):
return Variable(entity, **kwargs)
else:
raise Exception("Can't cast %s to a Variable" %
entity.__class__.__name__)
def safe_tensor(entity):
""" Returns a tensor of an entity, which may or may not already be a
tensor
"""
warnings.warn("As of >=pytorch0.4.0 this is no longer necessary",
DeprecationWarning)
if isinstance(entity, Variable):
return entity.data
elif isinstance(entity, torch.tensor._TensorBase):
return entity
elif isinstance(entity, np.ndarray):
return torch.Tensor(entity) # UNSAFE CUDA CASTING
else:
raise Exception("Can't cast %s to a Variable" %
entity.__class__.__name__)
##############################################################################
# #
# CONVENIENCE STORE #
# #
##############################################################################
# aka convenient things that are not builtin to pytorch
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
return str(self.avg)
def tuple_getter(tensor, idx_tuple):
""" access a tensor by a tuple """
tensor_ = tensor
for el in idx_tuple:
tensor_ = tensor_[el]
return tensor_
def tuple_setter(tensor, idx_tuple, val):
""" Sets a tensor element while indexing by a tuple"""
tensor_ = tensor
for el in idx_tuple[:-1]:
tensor_ = tensor_[el]
tensor_[idx_tuple[-1]] = val
return tensor
def torch_argmax(tensor):
""" Returns the idx tuple that corresponds to the max value in the tensor"""
flat_tensor = tensor.view(tensor.numel())
_, argmax = flat_tensor.max(0)
return np.unravel_index(int(argmax), tensor.shape)
def torch_argmin(tensor):
""" Returns the idx tuple that corresponds to the min value in the tensor"""
flat_tensor = tensor.view(tensor.numel())
_, argmin = flat_tensor.min(0)
return np.unravel_index(int(argmin), tensor.shape)
def clamp_ref(x, y, l_inf):
""" Clamps each element of x to be within l_inf of each element of y """
return torch.clamp(x - y, -l_inf, l_inf) + y
def torch_arctanh(x, eps=1e-6):
    # scale away from +/-1 without mutating the caller's tensor in place
    x = x * (1. - eps)
    return (torch.log((1 + x) / (1 - x))) * 0.5
def tanh_rescale(x, x_min=-1., x_max=1.):
return (torch.tanh(x) + 1) * 0.5 * (x_max - x_min) + x_min
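# Illustrative sanity check (not part of the original source): torch_arctanh
# and tanh are approximate inverses on (-1, 1), up to the (1 - eps) factor
# used to keep the log finite.
def _example_tanh_roundtrip():
    x = torch.rand(5) * 2 - 1
    restored = torch.tanh(torch_arctanh(x))
    return (x - restored).abs().max()  # on the order of eps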
def checkpoint_incremental_array(output_file, numpy_list,
return_concat=True):
""" Takes in a string of a filename and a list of numpy arrays and
concatenates them along first axis, saves them to a file, and then
outputs a list containing only that single concatenated array
ARGS:
output_file : string ending in .npy - full path location of the
place we're saving this numpy array
numpy_list : list of numpy arrays (all same shape except for the first
axis) - list of arrays we concat and save to file
return_concat : boolean - if True, we return these concatenated arrays
in a list, else we return nothing
RETURNS:
maybe nothing, maybe the a singleton list containing the concatenated
arrays
"""
concat = np.concatenate(numpy_list, axis=0)
np.save(output_file, concat)
if return_concat:
return [concat]
def sizeof_fmt(num, suffix='B'):
""" https://stackoverflow.com/a/1094933
answer by Sridhar Ratnakumar """
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def clip_0_1(tensorlike):
# Clips tensorlike object into [0., 1.0] range
return torch.clamp(tensorlike, 0.0, 1.0)
def clamp_0_1_delta(x, y):
""" Returns the delta that'd have to be added to (x + y) such that
(x + y) + delta is in the range [0.0, 1.0]
"""
return torch.clamp(x + y, 0.0, 1.0) - (x + y)
def random_linf_pertubation(examples_like, l_inf):
""" Returns an object of the same type/shape as examples_like that holds
a uniformly random pertubation in the l_infinity box of l_inf.
NOTE THAT THIS DOES NOT ADD TO examples_like!
"""
is_var = isinstance(examples_like, Variable)
random_tensor = (torch.rand(*examples_like.shape) * l_inf * 2 -
torch.ones(*examples_like.shape) * l_inf)
    # the original call discarded the result of .type(...), so the cast
    # never took effect; assign it explicitly
    if is_var:
        random_tensor = random_tensor.type(examples_like.data.type())
        return Variable(random_tensor)
    else:
        random_tensor = random_tensor.type(examples_like.type())
        return random_tensor
def batchwise_norm(examples, lp, dim=0):
""" Returns the per-example norm of the examples, keeping along the
specified dimension.
    e.g. if examples is NxCxHxW, applying this fxn with dim=0 will return an
N-length tensor with the lp norm of each example
ARGS:
examples : tensor or Variable - needs more than one dimension
lp : string or int - either 'inf' or an int for which lp norm we use
dim : int - which dimension to keep
RETURNS:
1D object of same type as examples, but with shape examples.shape[dim]
"""
assert isinstance(lp, int) or lp == 'inf'
examples = torch.abs(examples)
example_dim = examples.dim()
if dim != 0:
examples = examples.transpose(dim, 0)
if lp == 'inf':
for reduction in range(1, example_dim):
examples, _ = examples.max(1)
return examples
else:
examples = torch.pow(examples + 1e-10, lp)
for reduction in range(1, example_dim):
examples = examples.sum(1)
return torch.pow(examples, 1.0 / lp)
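# Illustrative usage sketch (not part of the original source): per-example L2
# and L-infinity norms of a random NxCxHxW batch; both results have shape (4,).
def _example_batchwise_norm():
    x = torch.rand(4, 3, 8, 8)
    return batchwise_norm(x, 2, dim=0), batchwise_norm(x, 'inf', dim=0)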
def batchwise_lp_project(x, lp, lp_bound, dim=0):
""" Projects x (a N-by-(...) TENSOR) to be a N-by-(...) TENSOR into the
provided lp ball
ARGS:
x : Tensor (N-by-(...)) - arbitrary style
lp : 'inf' or int - which style of lp we use
lp_bound : float - size of lp ball we project into
dim : int - if not 0 is the dimension we keep and project onto
    RETURNS:
        tensor of the same shape as x, projected into the given lp ball
"""
assert isinstance(lp, int) or lp == 'inf'
if lp == 'inf':
return torch.clamp(x, -lp_bound, lp_bound)
needs_squeeze = False
if len(x.shape) == 1:
x = x.unsqueeze(1)
needs_squeeze = True
output = torch.renorm(x, lp, dim, lp_bound)
if needs_squeeze:
return output.squeeze()
return output
def summed_lp_norm(examples, lp):
""" Returns the sum of the lp norm of each example in examples
ARGS:
examples : tensor or Variable, with first dimension having size N
lp : string or int - either 'inf' or an int for which lp norm we use
RETURNS:
sum of each of the lp norm of each of the N elements in examples
"""
return torch.sum(batchwise_norm(examples, lp, dim=0))
def random_from_lp_ball(tensorlike, lp, lp_bound, dim=0):
""" Returns a new object of the same type/shape as tensorlike that is
randomly samples from the unit ball.
NOTE THIS IS NOT A UNIFORM SAMPLING METHOD!
(that's hard to implement, https://mathoverflow.net/a/9192/123034)
ARGS:
tensorlike : Tensor - reference object for which we generate
a new object of same shape/memory_location
lp : int or 'inf' - which style of lp we use
        lp_bound : float - radius of the lp ball we sample from
dim : int - which dimension is the 'keep dimension'
RETURNS:
new tensorlike where each slice across dim is uniform across the
lp ball of size lp_bound
"""
assert isinstance(lp, int) or lp == 'inf'
rand_direction = torch.rand(tensorlike.shape).type(tensorlike.type())
if lp == 'inf':
return rand_direction * (2 * lp_bound) - lp_bound
else:
rand_direction = rand_direction - 0.5 # allow for sign swapping
# first magnify such that each element is above the ball
min_norm = torch.min(batchwise_norm(rand_direction.abs(), lp, dim=dim))
rand_direction = rand_direction / (min_norm + 1e-6)
rand_magnitudes = torch.rand(tensorlike.shape[dim]).type(
tensorlike.type())
rand_magnitudes = rand_magnitudes.unsqueeze(1)
rand_magnitudes = rand_magnitudes.expand(*rand_direction.shape)
return torch.renorm(rand_direction, lp, dim, lp_bound) * rand_magnitudes
def tanh_transform(tensorlike, forward=True):
""" Takes in Tensor or Variable and converts it between [0, 1] range and
(-inf, +inf) range by performing an invertible tanh transformation.
ARGS:
tensorlike : Tensor or Variable (arbitrary shape) - object to be
modified into or out of tanh space
forward : bool - if True we convert from [0, 1] space to (-inf, +inf)
space
if False we convert from (-inf, +inf) space to [0, 1]
space
RETURNS:
object of the same shape/type as tensorlike, but with the appropriate
transformation
"""
if forward:
assert torch.min(tensorlike) >= 0.0
assert torch.max(tensorlike) <= 1.0
# first convert to [-1, +1] space
temp = (tensorlike * 2 - 1) * (1 - 1e-6)
return torch.log((1 + temp) / (1 - temp)) / 2.0
else:
return (torch.tanh(tensorlike) + 1) / 2.0
def fold_mask(x, y, mask):
""" Creates a new tensor that's the result of masking between x and y
ARGS:
x : Tensor or Variable (NxSHAPE) - tensor that we're selecting where the
masked values are 1
y : Tensor or Variable (NxSHAPE) - tensor that we're selecting where the
masked values are 0
mask: ByteTensor (N) - masked values. Is only one dimensional: we expand
it in the creation of this
RETURNS:
new object of the same shape/type as x and y
"""
assert x.shape == y.shape
assert mask.shape == (x.shape[0],)
assert type(x) == type(y)
is_var = isinstance(x, Variable)
if is_var:
assert isinstance(mask, Variable)
per_example_shape = x.shape[1:]
make_broadcastable = lambda m: m.view(-1, *tuple([1] * (x.dim() - 1)))
broadcast_mask = make_broadcastable(mask)
broadcast_not_mask = make_broadcastable(1 - safe_tensor(mask))
if is_var:
broadcast_not_mask = Variable(broadcast_not_mask)
output = torch.zeros_like(x)
output.add_(x * (broadcast_mask.type(x.type())))
output.add_(y * (broadcast_not_mask.type(y.type())))
return output
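# Illustrative usage sketch (not part of the original source): keep rows of x
# where the mask is 1 and rows of y where it is 0.
def _example_fold_mask():
    x, y = torch.ones(3, 2), torch.zeros(3, 2)
    mask = torch.ByteTensor([1, 0, 1])
    return fold_mask(x, y, mask)  # rows 0 and 2 from x, row 1 from y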
###############################################################################
# #
# CUDA RELATED THINGS #
# #
###############################################################################
# fxn taken from https://discuss.pytorch.org/t/memory-leaks-in-trans-conv/12492
def get_gpu_memory_map():
try:
result = subprocess.check_output(
[
'nvidia-smi', '--query-gpu=memory.used',
'--format=csv,nounits,noheader'
])
except:
result = "<CAN'T GET GPU MEM>"
try:
return float(result)
except:
return result
def rough_gpu_estimate():
""" Roughly estimates the size of the cuda tensors stored on GPUs.
If multiple gpus, returns a dict of {GPU_id: total num elements }
otherwise just returns the total number of elements
"""
cuda_count = {}
listprod = lambda l: reduce(lambda x, y: x * y, l)
for el in gc.get_objects():
if isinstance(el, (torch.tensor._TensorBase, Variable)) and el.is_cuda:
device = el.get_device()
cuda_count[device] = (cuda_count.get(device, 0) +
listprod(el.size()))
if len(cuda_count.keys()) == 0:
return 0
elif len(cuda_count.keys()) == 1:
        # dict.values() is a view in Python 3 and cannot be indexed directly
        return sizeof_fmt(next(iter(cuda_count.values())))
else:
return {k: sizeof_fmt(v) for k, v in cuda_count.items()}
##############################################################################
# #
# CLASSIFICATION HELPERS #
# #
##############################################################################
# aka little utils that are useful for classification
def accuracy_int(output, target, topk=1):
""" Computes the number of correct examples in the output.
RETURNS an int!
"""
_, pred = output.topk(topk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
return int(correct.data.sum())
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
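# Illustrative usage sketch (not part of the original source): top-1 and top-5
# precision on random logits for a 10-class problem.
def _example_accuracy():
    logits = torch.rand(8, 10)
    target = torch.randint(0, 10, (8,)).long()
    return accuracy(logits, target, topk=(1, 5))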
###############################################################################
# #
# NORMALIZERS #
# #
###############################################################################
class IdentityNormalize(Function):
def __init__(self):
pass
def forward(self, var):
return var
def differentiable_call(self):
pass
class DifferentiableNormalize(Function):
def __init__(self, mean, std):
super(DifferentiableNormalize, self).__init__()
self.mean = mean
self.std = std
self.differentiable = True
self.nondiff_normer = transforms.Normalize(mean, std)
def __call__(self, var):
if self.differentiable:
return self.forward(var)
else:
return self.nondiff_normer(var)
def _setter(self, c, mean, std):
""" Modifies params going forward """
if mean is not None:
self.mean = mean
assert len(self.mean) == c
if std is not None:
self.std = std
assert len(self.std) == c
if mean is not None or std is not None:
self.nondiff_normer = transforms.Normalize(self.mean, self.std)
def differentiable_call(self):
""" Sets the __call__ method to be the differentiable version """
self.differentiable = True
def nondifferentiable_call(self):
""" Sets the __call__ method to be the torchvision.transforms version"""
self.differentiable = False
def forward(self, var, mean=None, std=None):
""" Normalizes var by subtracting the mean of each channel and then
dividing each channel by standard dev
ARGS:
self - stores mean and std for later
var - Variable of shape NxCxHxW
mean - if not None is a list of length C for channel-means
std - if not None is a list of length C for channel-stds
RETURNS:
variable of normalized var
"""
c = var.shape[1]
self._setter(c, mean, std)
mean_var = Variable(var.data.new(self.mean).view(1, c, 1, 1))
std_var = Variable(var.data.new(self.std).view(1, c, 1, 1))
return (var - mean_var) / std_var
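# Illustrative usage sketch (not part of the original source), using
# CIFAR-10-style channel statistics; the exact constants are illustrative.
def _example_normalizer():
    normer = DifferentiableNormalize(mean=[0.4914, 0.4822, 0.4465],
                                     std=[0.2470, 0.2435, 0.2616])
    return normer(Variable(torch.rand(2, 3, 32, 32)))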
##############################################################################
# #
# TRAINING LOGGER #
# #
##############################################################################
class TrainingLogger(object):
def __init__(self):
""" Unified object to keep track of training data at a specified logging
level. Namely this tracks ground accuracy, loss and attack accuracy
for each attack incorporated into adversarial training.
Will ultimately contain plotting techniques too (TODO!)
"""
self.series = {}
def data_count(self):
""" Returns the number of data points in this logger instance """
return sum(len(_) for _ in self.series.values())
def add_series(self, name):
""" Adds the name of a 'data series' where each data series is a list
of data-entries, where each data-entry is of the form
((epoch, minibatch), data-value ) [and data-value is a float]
"""
if name not in self.series:
self.series[name] = []
def sort_series(self, name, return_keys=False):
""" Simply returns the series of specified name sorted by epoch and then
minibatch.
ARGS:
            name: string - name of existing series in self.series
return_keys: bool - if True, the output list is like
[((epoch, minibatch), val), ...]
and if False, it's just like [val, ... val...]
RETURNS:
sorted list of outputs, the exact form of which is determined by
the value of return_keys
"""
data_series = self.series[name]
sorted_series = sorted(data_series, key=lambda p: p[0])
if return_keys is False:
return [_[1] for _ in sorted_series]
else:
return sorted_series
def get_series(self, name):
""" simple getter method for the given named data series """
return self.series[name]
def log_datapoint(self, name, data_tuple):
""" Logs the full data point
ARGS:
name: string - name of existing series in self.series
data_tuple : tuple of form ((epoch, minibatch), value)
RETURNS:
None
"""
self.series[name].append(data_tuple)
def log(self, name, epoch, minibatch, value):
""" Logs the data point by specifying each of epoch, minibatch, value
ARGS:
name : string - name of existing series in self.series
epoch: int - which epoch of training we're logging
minibatch : int - which minibatch of training we're logging
value : <unspecified, but preferably float> - value we're logging
"""
self.log_datapoint(name, ((epoch, minibatch), value))
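# Hedged usage sketch (never invoked): logging a loss series keyed by
# (epoch, minibatch) and reading it back in sorted order.
def _example_training_logger():
    logger = TrainingLogger()
    logger.add_series('loss')
    logger.log('loss', epoch=0, minibatch=1, value=2.3)
    logger.log('loss', epoch=0, minibatch=0, value=2.5)
    assert logger.sort_series('loss') == [2.5, 2.3]
    assert logger.data_count() == 2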
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/utils/discretization.py
#
# The license for the original version of this file can be
# found in the `recoloradv` directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
""" File that holds techniques for discretizing images --
In general, images of the form NxCxHxW will with values in the [0.,1.] range
need to be converted to the [0, 255 (int)] range to be displayed as images.
Sometimes the naive rounding scheme can mess up the classification, so this
file holds techniques to discretize these images into tensors with values
of the form i/255.0 for some integers i.
"""
import torch
import scipy.misc as smp  # assumed import: smp.toimage below matches the
                          # old scipy.misc.toimage API (removed in scipy >= 1.2)
from torch.autograd import Variable
from . import pytorch_utils as utils
##############################################################################
# #
# HELPER METHODS #
# #
##############################################################################
def discretize_image(img_tensor, zero_one=False):
""" Discretizes an image tensor into a tensor filled with ints ranging
between 0 and 255
ARGS:
img_tensor : floatTensor (NxCxHxW) - tensor to be discretized
zero_one : bool - if True divides output by 255 before returning it
"""
assert float(torch.min(img_tensor)) >= 0.
assert float(torch.max(img_tensor)) <= 1.0
original_shape = img_tensor.shape
if img_tensor.dim() != 4:
img_tensor = img_tensor.unsqueeze(0)
int_tensors = [] # actually floatTensor, but full of ints
img_shape = original_shape[1:]
for example in img_tensor:
pixel_channel_tuples = zip(*list(smp.toimage(example).getdata()))
int_tensors.append(img_tensor.new(pixel_channel_tuples).view(img_shape))
stacked_tensors = torch.stack(int_tensors)
if zero_one:
return stacked_tensors / 255.0
return stacked_tensors
##############################################################################
# #
# MAIN DISCRETIZATION TECHNIQUES #
# #
##############################################################################
def discretized_adversarial(img_tensor, classifier_net, normalizer,
flavor='greedy'):
""" Takes in an image_tensor and classifier/normalizer pair and outputs a
'discretized' image_tensor [each val is i/255.0 for some integer i]
with the same classification
ARGS:
img_tensor : tensor (NxCxHxW) - tensor of images with values between
0.0 and 1.0.
classifier_net : NN - neural net with .forward method to classify
normalized images
normalizer : differentiableNormalizer object - normalizes 0,1 images
into classifier_domain
flavor : string - either 'random' or 'greedy', determining which
'next_pixel_to_flip' function we use
RETURNS:
img_tensor of the same shape, but now with values of the form i/255.0
for integers i.
"""
img_tensor = utils.safe_tensor(img_tensor)
nptf_map = {'random': flip_random_pixel,
'greedy': flip_greedy_pixel}
next_pixel_to_flip = nptf_map[flavor](classifier_net, normalizer)
##########################################################################
# First figure out 'correct' labels and the 'discretized' labels #
##########################################################################
var_img = utils.safe_var(img_tensor)
norm_var = normalizer.forward(var_img)
norm_output = classifier_net.forward(norm_var)
correct_targets = norm_output.max(1)[1]
og_discretized = utils.safe_var(discretize_image(img_tensor, zero_one=True))
norm_discretized = normalizer.forward(og_discretized)
discretized_output = classifier_net.forward(norm_discretized)
discretized_targets = discretized_output.max(1)[1]
##########################################################################
# Collect idxs for examples affected by discretization #
##########################################################################
incorrect_idxs = set()
for i, el in enumerate(correct_targets.ne(discretized_targets)):
if float(el) != 0:
incorrect_idxs.add(i)
##########################################################################
# Fix all bad images #
##########################################################################
corrected_imgs = []
for idx in incorrect_idxs:
desired_target = correct_targets[idx]
example = og_discretized[idx].data.clone() # tensor
signs = torch.sign(var_img - og_discretized)
bad_discretization = True
pixels_changed_so_far = set() # populated with tuples of idxs
while bad_discretization:
pixel_idx, grad_sign = next_pixel_to_flip(example,
pixels_changed_so_far,
desired_target)
pixels_changed_so_far.add(pixel_idx)
if grad_sign == 0:
grad_sign = utils.tuple_getter(signs[idx], pixel_idx)
new_val = (grad_sign / 255. + utils.tuple_getter(example, pixel_idx))
utils.tuple_setter(example, pixel_idx, float(new_val))
new_out = classifier_net.forward(normalizer.forward( \
Variable(example.unsqueeze(0))))
bad_discretization = (int(desired_target) != int(new_out.max(1)[1]))
corrected_imgs.append(example)
# Stack up results
output = []
for idx in range(len(img_tensor)):
if idx in incorrect_idxs:
output.append(corrected_imgs.pop(0))
else:
output.append(og_discretized[idx].data)
return torch.stack(output) # Variable
#############################################################################
# #
# FLIP TECHNIQUES #
# #
#############################################################################
''' Flip techniques in general have the following specs:
ARGS:
classifier_net : NN - neural net with .forward method to classify
normalized images
normalizer : differentiableNormalizer object - normalizes 0,1 images
into classifier_domain
RETURNS: flip_function
'''
'''
Flip function is a function that takes the following args:
ARGS:
img_tensor : Tensor (CxHxW) - image tensor in range 0.0 to 1.0 and is
already discretized
pixels_changed_so_far: set - set of index_tuples that have already been
modified (we don't want to modify a pixel by
more than 1/255 in any channel)
correct_target : torch.LongTensor (1) - single element in a tensor that
is the target class
(e.g. int between 0 and 9 for CIFAR )
RETURNS: (idx_tuple, sign)
index_tuple is a triple of indices indicating which pixel-channel needs
to be modified, and sign is in {-1, 0, 1}. If +-1, we will modify the
pixel-channel in that direction, otherwise we'll modify in the opposite
of the direction that discretization rounded to.
'''
def flip_random_pixel(classifier_net, normalizer):
def flip_fxn(img_tensor, pixels_changed_so_far, correct_target):
numel = img_tensor.numel()
if len(pixels_changed_so_far) > numel * .9:
raise Exception("WHAT IS GOING ON???")
while True:
pixel_idx, _ = utils.random_element_index(img_tensor)
if pixel_idx not in pixels_changed_so_far:
return pixel_idx, 0
return flip_fxn
def flip_greedy_pixel(classifier_net, normalizer):
def flip_fxn(img_tensor, pixels_changed_so_far, correct_target,
classifier_net=classifier_net, normalizer=normalizer):
# Computes gradient and figures out which px most affects class_out
classifier_net.zero_grad()
img_var = Variable(img_tensor.unsqueeze(0), requires_grad=True)
class_out = classifier_net.forward(normalizer.forward(img_var))
criterion = torch.nn.CrossEntropyLoss()
loss = criterion(class_out, correct_target.view(1))  # target needs shape (1,)
loss.backward()
# Really inefficient algorithm here, can probably do better
new_grad_data = img_var.grad.data.clone().squeeze()
signs = new_grad_data.sign()
for idx_tuple in pixels_changed_so_far:
utils.tuple_setter(new_grad_data, idx_tuple, 0)
argmax = utils.torch_argmax(new_grad_data.abs())
return argmax, -1 * utils.tuple_getter(signs, argmax)
return flip_fxn
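# Hedged usage sketch (never invoked): `classifier_net` and `normalizer` are
# assumed to be supplied by the caller (any model with a .forward method and
# a DifferentiableNormalize-style object). Note discretize_image relies on
# smp.toimage, so this sketch assumes an old scipy is available.
def _example_discretized_adversarial(classifier_net, normalizer):
    adv_images = torch.rand(4, 3, 32, 32)  # stand-in adversarial batch
    discretized = discretized_adversarial(adv_images, classifier_net,
                                          normalizer, flavor='greedy')
    # every value should now be i/255.0 for some integer i
    scaled = discretized * 255.0
    assert torch.allclose(scaled, scaled.round(), atol=1e-4)
    return discretized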
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/utils/__init__.py
#
# The license for the original version of this file can be
# found in the `recoloradv` directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/utils/checkpoints.py
#
# The license for the original version of this file can be
# found in the `recoloradv` directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
""" Code for saving/loading pytorch models and batches of adversarial images
CHECKPOINT NAMING CONVENTIONS:
<unique_experiment_name>.<architecture_abbreviation>.<6 digits of epoch number>path.tar
e.g.
fgsm_def.resnet32.20180301.120000.path.tar
All checkpoints are stored in CHECKPOINT_DIR
Checkpoints are state dicts only!!!
"""
import torch
import os
import re
import glob
from .. import config
import numpy as np
import random
CHECKPOINT_DIR = config.MODEL_PATH
OUTPUT_IMAGE_DIR = config.OUTPUT_IMAGE_PATH
##############################################################################
# #
# CHECKPOINTING MODELS #
# #
##############################################################################
def clear_experiment(experiment_name, architecture):
""" Deletes all saved state dicts for an experiment/architecture pair """
for filename in params_to_filename(experiment_name, architecture):
full_path = os.path.join(*[CHECKPOINT_DIR, filename])
if os.path.exists(full_path):
    os.remove(full_path)
def list_saved_epochs(experiment_name, architecture):
""" Returns a list of int epochs we've checkpointed for this
experiment name and architecture
"""
extract_epoch = lambda f: int(f.split('.')[-3])
filename_list = params_to_filename(experiment_name, architecture)
return [extract_epoch(f) for f in filename_list]
def params_to_filename(experiment_name, architecture, epoch_val=None):
""" Outputs string name of file.
ARGS:
experiment_name : string - name of experiment we're saving
architecture : string - abbreviation for model architecture
epoch_val : int/(intLo, intHi)/None -
- if int we return this int exactly
- if (intLo, intHi) we return all existing filenames with
highest epoch in range (intLo, intHi), in sorted order
- if None, we return all existing filenames with params
in ascending epoch-sorted order
RETURNS:
filenames: string or (possibly empty) string[] of just the base name
of saved models
"""
if isinstance(epoch_val, int):
return '.'.join([experiment_name, architecture, '%06d' % epoch_val,
'path', 'tar'])
glob_prefix = os.path.join(*[CHECKPOINT_DIR,
'%s.%s.*' % (experiment_name, architecture)])
re_prefix = r'%s\.%s\.' % (experiment_name, architecture)
re_suffix = r'\.path\.tar'
valid_name = lambda f: bool(re.match(re_prefix + r'\d{6}' + re_suffix, f))
select_epoch = lambda f: int(re.sub(re_prefix, '',
re.sub(re_suffix, '', f)))
# interpret epoch_val as an inclusive (lo, hi) range, defaulting to (0, inf)
valid_epoch = lambda e: (e >= (epoch_val or (0, 0))[0] and
                         e <= (epoch_val or (0, float('inf')))[1])
filename_epoch_pairs = []
for full_path in glob.glob(glob_prefix):
filename = os.path.basename(full_path)
if not valid_name(filename):
continue
epoch = select_epoch(filename)
if valid_epoch(epoch):
filename_epoch_pairs.append((filename, epoch))
return [_[0] for _ in sorted(filename_epoch_pairs, key=lambda el: el[1])]
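# Hedged sketch (never invoked): the exact filename the convention above
# produces for a single epoch.
def _example_params_to_filename():
    name = params_to_filename('fgsm_def', 'resnet32', epoch_val=42)
    assert name == 'fgsm_def.resnet32.000042.path.tar'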
def save_state_dict(experiment_name, architecture, epoch_val, model,
k_highest=10):
""" Saves the state dict of a model with the given parameters.
ARGS:
experiment_name : string - name of experiment we're saving
architecture : string - abbreviation for model architecture
epoch_val : int - which epoch we're saving
model : model - object we're saving the state dict of
k_highest : int - if not None, we make sure to not include more than
            k state_dicts for the (experiment_name, architecture)
            pair, keeping the k most recent if we overflow
RETURNS:
The model we saved
"""
# First resolve THIS filename
this_filename = params_to_filename(experiment_name, architecture, epoch_val)
# Next clear up memory if too many state dicts
current_filenames = params_to_filename(experiment_name, architecture)
delete_els = []
if k_highest is not None:
num_to_delete = len(current_filenames) - k_highest + 1
if num_to_delete > 0:
delete_els = sorted(current_filenames)[:num_to_delete]
for delete_el in delete_els:
full_path = os.path.join(*[CHECKPOINT_DIR, delete_el])
if os.path.exists(full_path):
    os.remove(full_path)
# Finally save the state dict
torch.save(model.state_dict(), os.path.join(*[CHECKPOINT_DIR,
this_filename]))
return model
def load_state_dict_from_filename(filename, model):
""" Skips the whole parameter argument thing and just loads the whole
state dict from a filename.
ARGS:
filename : string - filename without directories
model : nn.Module - has 'load_state_dict' method
RETURNS:
the model loaded with the weights contained in the file
"""
assert len(glob.glob(os.path.join(*[CHECKPOINT_DIR, filename]))) == 1
# LOAD FILENAME
# If state_dict in keys, use that as the loader
right_dict = lambda d: d.get('state_dict', d)
model.load_state_dict(right_dict(torch.load(
os.path.join(*[CHECKPOINT_DIR, filename]))))
return model
def load_state_dict(experiment_name, architecture, epoch, model):
""" Loads a checkpoint that was previously saved
experiment_name : string - name of experiment we're saving
architecture : string - abbreviation for model architecture
epoch : int - which epoch we're loading
"""
filename = params_to_filename(experiment_name, architecture, epoch)
return load_state_dict_from_filename(filename, model)
###############################################################################
# #
# CHECKPOINTING DATA #
# #
###############################################################################
"""
This is a hacky fix to save batches of adversarial images along with their
labels.
"""
class CustomDataSaver(object):
# TODO: make this more pytorch compliant
def __init__(self, image_subdirectory):
self.image_subdirectory = image_subdirectory
# make this folder if it doesn't exist yet
def save_minibatch(self, examples, labels):
""" Assigns a random name to this minibatch and saves the examples and
labels in two separate files:
<random_name>.examples.npy and <random_name>.labels.npy
ARGS:
examples: Variable or Tensor (NxCxHxW) - examples to be saved
labels : Variable or Tensor (N) - labels matching the examples
"""
# First make both examples and labels into numpy arrays
examples = examples.cpu().numpy()
labels = labels.cpu().numpy()
# Make a name for the files
random_string = str(random.random())[2:]  # crude unique id; collisions unlikely
# Save both files
example_file = '%s.examples.npy' % random_string
example_path = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
example_file)
np.save(example_path, examples)
label_file = '%s.labels.npy' % random_string
label_path = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
label_file)
np.save(label_path, labels)
class CustomDataLoader(object):
# TODO: make this more pytorch compliant
def __init__(self, image_subdirectory, batch_size=128, to_tensor=True,
use_gpu=False):
super(CustomDataLoader, self).__init__()
self.image_subdirectory = image_subdirectory
self.batch_size = batch_size
assert to_tensor >= use_gpu  # loading onto the GPU requires tensor conversion
self.to_tensor = to_tensor
self.use_gpu = use_gpu
def _prepare_data(self, examples, labels):
""" Takes in numpy examples and labels and tensor-ifies and cuda's them
if necessary
"""
if self.to_tensor:
examples = torch.Tensor(examples)
labels = torch.Tensor(labels)
if self.use_gpu:
examples = examples.cuda()
labels = labels.cuda()
return (examples, labels)
def _base_loader(self, prefix, which):
assert which in ['examples', 'labels']
filename = '%s.%s.npy' % (prefix, which)
full_path = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
filename)
return np.load(full_path)
def _example_loader(self, prefix):
""" Loads the numpy array of examples given the random 'prefix' """
return self._base_loader(prefix, 'examples')
def _label_loader(self, prefix):
""" Loads the numpy array of labels given the random 'prefix' """
return self._base_loader(prefix, 'labels')
def __iter__(self):
# First collect all the filenames:
glob_prefix = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
'*')
files = glob.glob(glob_prefix)
valid_random_names = set(os.path.basename(_).split('.')[0]
for _ in files)
# Now loop through filenames and yield out minibatches of correct size
running_examples, running_labels = [], []
running_size = 0
for random_name in valid_random_names:
# Load data from files and append to 'running' lists
loaded_examples = self._example_loader(random_name)
loaded_labels = self._label_loader(random_name)
running_examples.append(loaded_examples)
running_labels.append(loaded_labels)
running_size += loaded_examples.shape[0]
if running_size < self.batch_size:
    # not enough data for a full minibatch yet; filling one
    # might take multiple files, so keep loading
    continue
# Concatenate all images together
merged_examples = np.concatenate(running_examples, axis=0)
merged_labels = np.concatenate(running_labels, axis=0)
# Make minibatches out of concatenated things,
for batch_no in range(running_size // self.batch_size):
index_lo = batch_no * self.batch_size
index_hi = index_lo + self.batch_size
example_batch = merged_examples[index_lo:index_hi]
label_batch = merged_labels[index_lo:index_hi]
yield self._prepare_data(example_batch, label_batch)
# Carry any remainder over for the next files
remainder_idx = (running_size // self.batch_size) * self.batch_size
running_examples = [merged_examples[remainder_idx:]]
running_labels = [merged_labels[remainder_idx:]]
running_size = running_size - remainder_idx
# If we're out of files, yield this last sub-minibatch of data
if running_size > 0:
merged_examples = np.concatenate(running_examples, axis=0)
merged_labels = np.concatenate(running_labels, axis=0)
yield self._prepare_data(merged_examples, merged_labels)
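# Hedged usage sketch (never invoked): round-trips one minibatch through the
# saver/loader pair. Assumes OUTPUT_IMAGE_DIR/<subdir> already exists.
def _example_save_and_reload(subdir='adv_examples'):
    saver = CustomDataSaver(subdir)
    saver.save_minibatch(torch.rand(256, 3, 32, 32),
                         torch.randint(0, 10, (256,)))
    loader = CustomDataLoader(subdir, batch_size=128, to_tensor=True)
    for examples, labels in loader:
        assert examples.shape[0] == labels.shape[0] <= 128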
|
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from recoloradv.
#
# Source:
# https://github.com/cassidylaidlaw/ReColorAdv/blob/master/recoloradv/mister_ed/utils/pytorch_ssim.py
#
# The license for the original version of this file can be
# found in the `recoloradv` directory (LICENSE_RECOLORADV).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
""" Implementation directly lifted from Po-Hsun-Su for pytorch ssim
See github repo here: https://github.com/Po-Hsun-Su/pytorch-ssim
"""
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from math import exp
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss/gauss.sum()
def create_window(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
def _ssim(img1, img2, window, window_size, channel, size_average = True):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
def __init__(self, window_size = 11, size_average = True):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = 1
self.window = create_window(window_size, self.channel)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window(self.window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
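# Hedged usage sketch (never invoked): both the functional and the module
# APIs should report SSIM of ~1 for identical images.
def _example_ssim():
    img = torch.rand(1, 3, 64, 64)
    assert float(ssim(img, img)) > 0.99
    assert float(SSIM()(img, img)) > 0.99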
def ssim(img1, img2, window_size = 11, size_average = True):
(_, channel, _, _) = img1.size()
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average)
|
# ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/respace.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
import numpy as np
import torch as th
from .gaussian_diffusion import GaussianDiffusion
def space_timesteps(num_timesteps, section_counts):
"""
Create a list of timesteps to use from an original diffusion process,
given the number of timesteps we want to take from equally-sized portions
of the original process.
For example, if there are 300 timesteps and the section counts are
[10,15,20], then the first 100 timesteps are strided to be 10 timesteps, the
second 100 are strided to be 15 timesteps, and the final 100 are strided to
be 20.
If the stride is a string starting with "ddim", then the fixed striding
from the DDIM paper is used, and only one section is allowed.
:param num_timesteps: the number of diffusion steps in the original
process to divide up.
:param section_counts: either a list of numbers, or a string containing
comma-separated numbers, indicating the step count
per section. As a special case, use "ddimN" where N
is a number of steps to use the striding from the
DDIM paper.
:return: a set of diffusion steps from the original process to use.
"""
if isinstance(section_counts, str):
if section_counts.startswith("ddim"):
desired_count = int(section_counts[len("ddim") :])
for i in range(1, num_timesteps):
if len(range(0, num_timesteps, i)) == desired_count:
return set(range(0, num_timesteps, i))
raise ValueError(
    f"cannot create exactly {desired_count} steps with an integer stride"
)
section_counts = [int(x) for x in section_counts.split(",")]
size_per = num_timesteps // len(section_counts)
extra = num_timesteps % len(section_counts)
start_idx = 0
all_steps = []
for i, section_count in enumerate(section_counts):
size = size_per + (1 if i < extra else 0)
if size < section_count:
raise ValueError(
f"cannot divide section of {size} steps into {section_count}"
)
if section_count <= 1:
frac_stride = 1
else:
frac_stride = (size - 1) / (section_count - 1)
cur_idx = 0.0
taken_steps = []
for _ in range(section_count):
taken_steps.append(start_idx + round(cur_idx))
cur_idx += frac_stride
all_steps += taken_steps
start_idx += size
return set(all_steps)
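# Hedged sketch (never invoked): concrete outputs for the docstring's
# example and for DDIM-style striding.
def _example_space_timesteps():
    steps = space_timesteps(300, [10, 15, 20])
    assert len(steps) == 45  # 10 + 15 + 20 steps over three 100-step sections
    ddim_steps = space_timesteps(1000, 'ddim50')
    assert ddim_steps == set(range(0, 1000, 20))  # fixed stride of 20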
class SpacedDiffusion(GaussianDiffusion):
"""
A diffusion process which can skip steps in a base diffusion process.
:param use_timesteps: a collection (sequence or set) of timesteps from the
original diffusion process to retain.
:param kwargs: the kwargs to create the base diffusion process.
"""
def __init__(self, use_timesteps, **kwargs):
self.use_timesteps = set(use_timesteps)
self.timestep_map = []
self.original_num_steps = len(kwargs["betas"])
base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa
last_alpha_cumprod = 1.0
new_betas = []
for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
if i in self.use_timesteps:
new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
last_alpha_cumprod = alpha_cumprod
self.timestep_map.append(i)
kwargs["betas"] = np.array(new_betas)
super().__init__(**kwargs)
def p_mean_variance(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
def training_losses(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().training_losses(self._wrap_model(model), *args, **kwargs)
def condition_mean(self, cond_fn, *args, **kwargs):
return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
def condition_score(self, cond_fn, *args, **kwargs):
return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
def _wrap_model(self, model):
if isinstance(model, _WrappedModel):
return model
return _WrappedModel(
model, self.timestep_map, self.rescale_timesteps, self.original_num_steps
)
def _scale_timesteps(self, t):
# Scaling is done by the wrapped model.
return t
class _WrappedModel:
def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
self.model = model
self.timestep_map = timestep_map
self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, ts, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
if self.rescale_timesteps:
new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
return self.model(x, new_ts, **kwargs)
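# Hedged sketch (never invoked): a pure-numpy check that the beta re-spacing
# performed in SpacedDiffusion.__init__ preserves the cumulative alpha
# products at the retained timesteps. The linear beta schedule is an
# illustrative assumption.
def _example_respaced_betas():
    betas = np.linspace(1e-4, 0.02, 100)
    alphas_cumprod = np.cumprod(1.0 - betas)
    keep = sorted(space_timesteps(100, [10]))
    last, new_betas = 1.0, []
    for i in keep:
        new_betas.append(1 - alphas_cumprod[i] / last)
        last = alphas_cumprod[i]
    new_cumprod = np.cumprod(1.0 - np.array(new_betas))
    assert np.allclose(new_cumprod, alphas_cumprod[keep])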
|
# ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/resample.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
from abc import ABC, abstractmethod
import numpy as np
import torch as th
import torch.distributed as dist
def create_named_schedule_sampler(name, diffusion):
"""
Create a ScheduleSampler from a library of pre-defined samplers.
:param name: the name of the sampler.
:param diffusion: the diffusion object to sample for.
"""
if name == "uniform":
return UniformSampler(diffusion)
elif name == "loss-second-moment":
return LossSecondMomentResampler(diffusion)
else:
raise NotImplementedError(f"unknown schedule sampler: {name}")
class ScheduleSampler(ABC):
"""
A distribution over timesteps in the diffusion process, intended to reduce
variance of the objective.
By default, samplers perform unbiased importance sampling, in which the
objective's mean is unchanged.
However, subclasses may override sample() to change how the resampled
terms are reweighted, allowing for actual changes in the objective.
"""
@abstractmethod
def weights(self):
"""
Get a numpy array of weights, one per diffusion step.
The weights needn't be normalized, but must be positive.
"""
def sample(self, batch_size, device):
"""
Importance-sample timesteps for a batch.
:param batch_size: the number of timesteps.
:param device: the torch device to save to.
:return: a tuple (timesteps, weights):
- timesteps: a tensor of timestep indices.
- weights: a tensor of weights to scale the resulting losses.
"""
w = self.weights()
p = w / np.sum(w)
indices_np = np.random.choice(len(p), size=(batch_size,), p=p)
indices = th.from_numpy(indices_np).long().to(device)
weights_np = 1 / (len(p) * p[indices_np])
weights = th.from_numpy(weights_np).float().to(device)
return indices, weights
class UniformSampler(ScheduleSampler):
def __init__(self, diffusion):
self.diffusion = diffusion
self._weights = np.ones([diffusion.num_timesteps])
def weights(self):
return self._weights
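# Hedged sketch (never invoked): with uniform weights, sample() returns
# importance weights of exactly 1, so the loss estimate is the plain mean.
def _example_uniform_sampler():
    class _ToyDiffusion:
        num_timesteps = 10
    sampler = UniformSampler(_ToyDiffusion())
    ts, ws = sampler.sample(batch_size=4, device='cpu')
    assert ts.shape == ws.shape == (4,)
    assert bool((ws == 1.0).all())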
class LossAwareSampler(ScheduleSampler):
def update_with_local_losses(self, local_ts, local_losses):
"""
Update the reweighting using losses from a model.
Call this method from each rank with a batch of timesteps and the
corresponding losses for each of those timesteps.
This method will perform synchronization to make sure all of the ranks
maintain the exact same reweighting.
:param local_ts: an integer Tensor of timesteps.
:param local_losses: a 1D Tensor of losses.
"""
batch_sizes = [
th.tensor([0], dtype=th.int32, device=local_ts.device)
for _ in range(dist.get_world_size())
]
dist.all_gather(
batch_sizes,
th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
)
# Pad all_gather batches to be the maximum batch size.
batch_sizes = [x.item() for x in batch_sizes]
max_bs = max(batch_sizes)
timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
dist.all_gather(timestep_batches, local_ts)
dist.all_gather(loss_batches, local_losses)
timesteps = [
x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
]
losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
self.update_with_all_losses(timesteps, losses)
@abstractmethod
def update_with_all_losses(self, ts, losses):
"""
Update the reweighting using losses from a model.
Sub-classes should override this method to update the reweighting
using losses from the model.
This method directly updates the reweighting without synchronizing
between workers. It is called by update_with_local_losses from all
ranks with identical arguments. Thus, it should have deterministic
behavior to maintain state across workers.
:param ts: a list of int timesteps.
:param losses: a list of float losses, one per timestep.
"""
class LossSecondMomentResampler(LossAwareSampler):
def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
self.diffusion = diffusion
self.history_per_term = history_per_term
self.uniform_prob = uniform_prob
self._loss_history = np.zeros(
[diffusion.num_timesteps, history_per_term], dtype=np.float64
)
self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)  # np.int was removed in NumPy 1.24
def weights(self):
if not self._warmed_up():
return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
weights /= np.sum(weights)
weights *= 1 - self.uniform_prob
weights += self.uniform_prob / len(weights)
return weights
def update_with_all_losses(self, ts, losses):
for t, loss in zip(ts, losses):
if self._loss_counts[t] == self.history_per_term:
# Shift out the oldest loss term.
self._loss_history[t, :-1] = self._loss_history[t, 1:]
self._loss_history[t, -1] = loss
else:
self._loss_history[t, self._loss_counts[t]] = loss
self._loss_counts[t] += 1
def _warmed_up(self):
return (self._loss_counts == self.history_per_term).all()
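# Hedged sketch (never invoked): the resampler stays uniform until every
# timestep has history_per_term recorded losses, then it reweights by the
# root-mean-square of those losses (plus a little uniform smoothing).
def _example_loss_resampler():
    class _ToyDiffusion:
        num_timesteps = 4
    sampler = LossSecondMomentResampler(_ToyDiffusion(), history_per_term=2)
    assert not sampler._warmed_up()
    for _ in range(2):
        sampler.update_with_all_losses([0, 1, 2, 3], [1.0, 2.0, 3.0, 4.0])
    assert sampler._warmed_up()
    w = sampler.weights()
    assert abs(w.sum() - 1.0) < 1e-6 and w[3] > w[0]  # bigger loss, more weight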
|
# ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/image_datasets.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
import math
import random
from PIL import Image
import blobfile as bf
from mpi4py import MPI
import numpy as np
from torch.utils.data import DataLoader, Dataset
def load_data(
*,
data_dir,
batch_size,
image_size,
class_cond=False,
deterministic=False,
random_crop=False,
random_flip=True,
):
"""
For a dataset, create a generator over (images, kwargs) pairs.
The images element is an NCHW float tensor, and the kwargs dict contains
zero or more keys, each of which maps to a batched Tensor of its own.
The kwargs dict can be used for class labels, in which case the key is "y"
and the values are integer tensors of class labels.
:param data_dir: a dataset directory.
:param batch_size: the batch size of each returned pair.
:param image_size: the size to which images are resized.
:param class_cond: if True, include a "y" key in returned dicts for class
label. If classes are not available and this is true, an
exception will be raised.
:param deterministic: if True, yield results in a deterministic order.
:param random_crop: if True, randomly crop the images for augmentation.
:param random_flip: if True, randomly flip the images for augmentation.
"""
if not data_dir:
raise ValueError("unspecified data directory")
all_files = _list_image_files_recursively(data_dir)
classes = None
if class_cond:
# Assume classes are the first part of the filename,
# before an underscore.
class_names = [bf.basename(path).split("_")[0] for path in all_files]
sorted_classes = {x: i for i, x in enumerate(sorted(set(class_names)))}
classes = [sorted_classes[x] for x in class_names]
dataset = ImageDataset(
image_size,
all_files,
classes=classes,
shard=MPI.COMM_WORLD.Get_rank(),
num_shards=MPI.COMM_WORLD.Get_size(),
random_crop=random_crop,
random_flip=random_flip,
)
if deterministic:
loader = DataLoader(
dataset, batch_size=batch_size, shuffle=False, num_workers=1, drop_last=True
)
else:
loader = DataLoader(
dataset, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=True
)
while True:
yield from loader
def _list_image_files_recursively(data_dir):
results = []
for entry in sorted(bf.listdir(data_dir)):
full_path = bf.join(data_dir, entry)
ext = entry.split(".")[-1]
if "." in entry and ext.lower() in ["jpg", "jpeg", "png", "gif"]:
results.append(full_path)
elif bf.isdir(full_path):
results.extend(_list_image_files_recursively(full_path))
return results
class ImageDataset(Dataset):
def __init__(
self,
resolution,
image_paths,
classes=None,
shard=0,
num_shards=1,
random_crop=False,
random_flip=True,
):
super().__init__()
self.resolution = resolution
self.local_images = image_paths[shard:][::num_shards]
self.local_classes = None if classes is None else classes[shard:][::num_shards]
self.random_crop = random_crop
self.random_flip = random_flip
def __len__(self):
return len(self.local_images)
def __getitem__(self, idx):
path = self.local_images[idx]
with bf.BlobFile(path, "rb") as f:
pil_image = Image.open(f)
pil_image.load()
pil_image = pil_image.convert("RGB")
if self.random_crop:
arr = random_crop_arr(pil_image, self.resolution)
else:
arr = center_crop_arr(pil_image, self.resolution)
if self.random_flip and random.random() < 0.5:
arr = arr[:, ::-1]
arr = arr.astype(np.float32) / 127.5 - 1
out_dict = {}
if self.local_classes is not None:
out_dict["y"] = np.array(self.local_classes[idx], dtype=np.int64)
return np.transpose(arr, [2, 0, 1]), out_dict
def center_crop_arr(pil_image, image_size):
# We are not on a new enough PIL to support the `reducing_gap`
# argument, which uses BOX downsampling at powers of two first.
# Thus, we do it by hand to improve downsample quality.
while min(*pil_image.size) >= 2 * image_size:
pil_image = pil_image.resize(
tuple(x // 2 for x in pil_image.size), resample=Image.BOX
)
scale = image_size / min(*pil_image.size)
pil_image = pil_image.resize(
tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
)
arr = np.array(pil_image)
crop_y = (arr.shape[0] - image_size) // 2
crop_x = (arr.shape[1] - image_size) // 2
return arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size]
def random_crop_arr(pil_image, image_size, min_crop_frac=0.8, max_crop_frac=1.0):
min_smaller_dim_size = math.ceil(image_size / max_crop_frac)
max_smaller_dim_size = math.ceil(image_size / min_crop_frac)
smaller_dim_size = random.randrange(min_smaller_dim_size, max_smaller_dim_size + 1)
# We are not on a new enough PIL to support the `reducing_gap`
# argument, which uses BOX downsampling at powers of two first.
# Thus, we do it by hand to improve downsample quality.
while min(*pil_image.size) >= 2 * smaller_dim_size:
pil_image = pil_image.resize(
tuple(x // 2 for x in pil_image.size), resample=Image.BOX
)
scale = smaller_dim_size / min(*pil_image.size)
pil_image = pil_image.resize(
tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
)
arr = np.array(pil_image)
crop_y = random.randrange(arr.shape[0] - image_size + 1)
crop_x = random.randrange(arr.shape[1] - image_size + 1)
return arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size]
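# Hedged sketch (never invoked): both crop helpers return HxWx3 arrays of
# exactly image_size on each side.
def _example_crops():
    pil_image = Image.new("RGB", (300, 200))
    assert center_crop_arr(pil_image, 128).shape == (128, 128, 3)
    assert random_crop_arr(pil_image, 128).shape == (128, 128, 3)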
|
# ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/nn.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
"""
Various utilities for neural networks.
"""
import math
import torch as th
import torch.nn as nn
# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
class SiLU(nn.Module):
def forward(self, x):
return x * th.sigmoid(x)
class GroupNorm32(nn.GroupNorm):
def forward(self, x):
return super().forward(x.float()).type(x.dtype)
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
def linear(*args, **kwargs):
"""
Create a linear module.
"""
return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1d(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2d(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
def update_ema(target_params, source_params, rate=0.99):
"""
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
"""
for targ, src in zip(target_params, source_params):
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
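# Hedged sketch (never invoked): one EMA update with rate 0.9 moves the
# target 10% of the way toward the source.
def _example_update_ema():
    targ, src = [th.zeros(3)], [th.ones(3)]
    update_ema(targ, src, rate=0.9)
    assert th.allclose(targ[0], th.full((3,), 0.1))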
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
def scale_module(module, scale):
"""
Scale the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().mul_(scale)
return module
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(dim=list(range(1, len(tensor.shape))))
def normalization(channels):
"""
Make a standard normalization layer.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNorm32(32, channels)
def timestep_embedding(timesteps, dim, max_period=10000):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
half = dim // 2
freqs = th.exp(
-math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half
).to(device=timesteps.device)
args = timesteps[:, None].float() * freqs[None]
embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)
if dim % 2:
embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
return embedding
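# Hedged sketch (never invoked): embedding shape and the cos/sin layout;
# t = 0 embeds to ones in the cos half and zeros in the sin half.
def _example_timestep_embedding():
    ts = th.arange(4)
    emb = timestep_embedding(ts, dim=8)
    assert emb.shape == (4, 8)
    assert th.allclose(emb[0, :4], th.ones(4))
    assert th.allclose(emb[0, 4:], th.zeros(4))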
def checkpoint(func, inputs, params, flag):
"""
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.
:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
"""
if flag:
args = tuple(inputs) + tuple(params)
return CheckpointFunction.apply(func, len(inputs), *args)
else:
return func(*inputs)
class CheckpointFunction(th.autograd.Function):
@staticmethod
def forward(ctx, run_function, length, *args):
ctx.run_function = run_function
ctx.input_tensors = list(args[:length])
ctx.input_params = list(args[length:])
with th.no_grad():
output_tensors = ctx.run_function(*ctx.input_tensors)
return output_tensors
@staticmethod
def backward(ctx, *output_grads):
ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
with th.enable_grad():
# Fixes a bug where the first op in run_function modifies the
# Tensor storage in place, which is not allowed for detach()'d
# Tensors.
shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
output_tensors = ctx.run_function(*shallow_copies)
input_grads = th.autograd.grad(
output_tensors,
ctx.input_tensors + ctx.input_params,
output_grads,
allow_unused=True,
)
del ctx.input_tensors
del ctx.input_params
del output_tensors
return (None, None) + input_grads
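# Hedged usage sketch (never invoked): a checkpointed call still produces
# gradients for both the input and the parameters func closes over.
def _example_checkpoint():
    layer = nn.Linear(8, 8)
    x = th.randn(2, 8, requires_grad=True)
    out = checkpoint(lambda t: layer(t).sum(), (x,), layer.parameters(), True)
    out.backward()
    assert x.grad is not None and layer.weight.grad is not None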
|
# ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/unet.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
from abc import abstractmethod
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from .fp16_util import convert_module_to_f16, convert_module_to_f32
from .nn import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(
th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5
)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=1
)
else:
assert self.channels == self.out_channels
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
def forward(self, x):
assert x.shape[1] == self.channels
return self.op(x)
class ResBlock(TimestepBlock):
"""
A residual block that can optionally change the number of channels.
:param channels: the number of input channels.
:param emb_channels: the number of timestep embedding channels.
:param dropout: the rate of dropout.
:param out_channels: if specified, the number of out channels.
:param use_conv: if True and out_channels is specified, use a spatial
convolution instead of a smaller 1x1 convolution to change the
channels in the skip connection.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param use_checkpoint: if True, use gradient checkpointing on this module.
:param up: if True, use this block for upsampling.
:param down: if True, use this block for downsampling.
"""
def __init__(
self,
channels,
emb_channels,
dropout,
out_channels=None,
use_conv=False,
use_scale_shift_norm=False,
dims=2,
use_checkpoint=False,
up=False,
down=False,
):
super().__init__()
self.channels = channels
self.emb_channels = emb_channels
self.dropout = dropout
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_checkpoint = use_checkpoint
self.use_scale_shift_norm = use_scale_shift_norm
self.in_layers = nn.Sequential(
normalization(channels),
nn.SiLU(),
conv_nd(dims, channels, self.out_channels, 3, padding=1),
)
self.updown = up or down
if up:
self.h_upd = Upsample(channels, False, dims)
self.x_upd = Upsample(channels, False, dims)
elif down:
self.h_upd = Downsample(channels, False, dims)
self.x_upd = Downsample(channels, False, dims)
else:
self.h_upd = self.x_upd = nn.Identity()
self.emb_layers = nn.Sequential(
nn.SiLU(),
linear(
emb_channels,
2 * self.out_channels if use_scale_shift_norm else self.out_channels,
),
)
self.out_layers = nn.Sequential(
normalization(self.out_channels),
nn.SiLU(),
nn.Dropout(p=dropout),
zero_module(
conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
),
)
if self.out_channels == channels:
self.skip_connection = nn.Identity()
elif use_conv:
self.skip_connection = conv_nd(
dims, channels, self.out_channels, 3, padding=1
)
else:
self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
def forward(self, x, emb):
"""
Apply the block to a Tensor, conditioned on a timestep embedding.
:param x: an [N x C x ...] Tensor of features.
:param emb: an [N x emb_channels] Tensor of timestep embeddings.
:return: an [N x C x ...] Tensor of outputs.
"""
return checkpoint(
self._forward, (x, emb), self.parameters(), self.use_checkpoint
)
def _forward(self, x, emb):
if self.updown:
in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
h = in_rest(x)
h = self.h_upd(h)
x = self.x_upd(x)
h = in_conv(h)
else:
h = self.in_layers(x)
emb_out = self.emb_layers(emb).type(h.dtype)
while len(emb_out.shape) < len(h.shape):
emb_out = emb_out[..., None]
if self.use_scale_shift_norm:
out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
scale, shift = th.chunk(emb_out, 2, dim=1)
h = out_norm(h) * (1 + scale) + shift
h = out_rest(h)
else:
h = h + emb_out
h = self.out_layers(h)
return self.skip_connection(x) + h
class AttentionBlock(nn.Module):
"""
An attention block that allows spatial positions to attend to each other.
Originally ported from here, but adapted to the N-d case.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
"""
def __init__(
self,
channels,
num_heads=1,
num_head_channels=-1,
use_checkpoint=False,
use_new_attention_order=False,
):
super().__init__()
self.channels = channels
if num_head_channels == -1:
self.num_heads = num_heads
else:
assert (
channels % num_head_channels == 0
), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
self.num_heads = channels // num_head_channels
self.use_checkpoint = use_checkpoint
self.norm = normalization(channels)
self.qkv = conv_nd(1, channels, channels * 3, 1)
if use_new_attention_order:
# split qkv before split heads
self.attention = QKVAttention(self.num_heads)
else:
# split heads before split qkv
self.attention = QKVAttentionLegacy(self.num_heads)
self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
def forward(self, x):
return checkpoint(self._forward, (x,), self.parameters(), True)
def _forward(self, x):
b, c, *spatial = x.shape
x = x.reshape(b, c, -1)
qkv = self.qkv(self.norm(x))
h = self.attention(qkv)
h = self.proj_out(h)
return (x + h).reshape(b, c, *spatial)
def count_flops_attn(model, _x, y):
"""
A counter for the `thop` package to count the operations in an
attention operation.
Meant to be used like:
macs, params = thop.profile(
model,
inputs=(inputs, timestamps),
custom_ops={QKVAttention: QKVAttention.count_flops},
)
"""
b, c, *spatial = y[0].shape
num_spatial = int(np.prod(spatial))
# We perform two matmuls with the same number of ops.
# The first computes the weight matrix, the second computes
# the combination of the value vectors.
matmul_ops = 2 * b * (num_spatial ** 2) * c
model.total_ops += th.DoubleTensor([matmul_ops])
class QKVAttentionLegacy(nn.Module):
"""
A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum(
"bct,bcs->bts", q * scale, k * scale
) # More stable with f16 than dividing afterwards
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum("bts,bcs->bct", weight, v)
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
class QKVAttention(nn.Module):
"""
A module which performs QKV attention and splits in a different order.
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.chunk(3, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum(
"bct,bcs->bts",
(q * scale).view(bs * self.n_heads, ch, length),
(k * scale).view(bs * self.n_heads, ch, length),
) # More stable with f16 than dividing afterwards
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
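# Hedged sketch (never invoked): both attention orders map an
# [N x (3 * H * C) x T] qkv tensor to an [N x (H * C) x T] output.
def _example_qkv_attention():
    n, heads, ch, t = 2, 4, 8, 16
    qkv = th.randn(n, 3 * heads * ch, t)
    assert QKVAttention(heads)(qkv).shape == (n, heads * ch, t)
    assert QKVAttentionLegacy(heads)(qkv).shape == (n, heads * ch, t)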
class UNetModel(nn.Module):
"""
The full UNet model with attention and timestep embedding.
:param in_channels: channels in the input Tensor.
:param model_channels: base channel count for the model.
:param out_channels: channels in the output Tensor.
:param num_res_blocks: number of residual blocks per downsample.
:param attention_resolutions: a collection of downsample rates at which
attention will take place. May be a set, list, or tuple.
For example, if this contains 4, then at 4x downsampling, attention
will be used.
:param dropout: the dropout probability.
:param channel_mult: channel multiplier for each level of the UNet.
:param conv_resample: if True, use learned convolutions for upsampling and
downsampling.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param num_classes: if specified (as an int), then this model will be
class-conditional with `num_classes` classes.
:param use_checkpoint: use gradient checkpointing to reduce memory usage.
:param num_heads: the number of attention heads in each attention layer.
:param num_head_channels: if specified, ignore num_heads and instead use
                          a fixed channel width per attention head.
:param num_heads_upsample: works with num_heads to set a different number
of heads for upsampling. Deprecated.
:param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
:param resblock_updown: use residual blocks for up/downsampling.
:param use_new_attention_order: use a different attention pattern for potentially
increased efficiency.
"""
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
num_classes=None,
use_checkpoint=False,
use_fp16=False,
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
):
super().__init__()
if num_heads_upsample == -1:
num_heads_upsample = num_heads
self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.num_classes = num_classes
self.use_checkpoint = use_checkpoint
self.dtype = th.float16 if use_fp16 else th.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
if self.num_classes is not None:
self.label_emb = nn.Embedding(num_classes, time_embed_dim)
ch = input_ch = int(channel_mult[0] * model_channels)
self.input_blocks = nn.ModuleList(
[TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
)
self._feature_size = ch
input_block_chans = [ch]
ds = 1
for level, mult in enumerate(channel_mult):
for _ in range(num_res_blocks):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=int(mult * model_channels),
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = int(mult * model_channels)
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
out_ch = ch
self.input_blocks.append(
TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
)
if resblock_updown
else Downsample(
ch, conv_resample, dims=dims, out_channels=out_ch
)
)
)
ch = out_ch
input_block_chans.append(ch)
ds *= 2
self._feature_size += ch
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
self.output_blocks = nn.ModuleList([])
for level, mult in list(enumerate(channel_mult))[::-1]:
for i in range(num_res_blocks + 1):
ich = input_block_chans.pop()
layers = [
ResBlock(
ch + ich,
time_embed_dim,
dropout,
out_channels=int(model_channels * mult),
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = int(model_channels * mult)
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
)
)
                if level and i == num_res_blocks:  # upsample after the last block of every level except the outermost
out_ch = ch
layers.append(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
up=True,
)
if resblock_updown
else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
)
ds //= 2
self.output_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)),
)
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
self.output_blocks.apply(convert_module_to_f16)
def convert_to_fp32(self):
"""
Convert the torso of the model to float32.
"""
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
self.output_blocks.apply(convert_module_to_f32)
def forward(self, x, timesteps, y=None):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:param y: an [N] Tensor of labels, if class-conditional.
:return: an [N x C x ...] Tensor of outputs.
"""
assert (y is not None) == (
self.num_classes is not None
), "must specify y if and only if the model is class-conditional"
hs = []
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
if self.num_classes is not None:
assert y.shape == (x.shape[0],)
emb = emb + self.label_emb(y)
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb)
hs.append(h)
h = self.middle_block(h, emb)
for module in self.output_blocks:
h = th.cat([h, hs.pop()], dim=1)
h = module(h, emb)
h = h.type(x.dtype)
return self.out(h)
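# ---------------------------------------------------------------
# Hedged usage sketch (not part of the original file): how UNetModel is
# typically driven for 64x64 noise prediction. The hyperparameters below are
# illustrative assumptions; the module's existing `th` import is reused, and
# the helper is never called by the library itself.
# ---------------------------------------------------------------
def _example_unet_usage():
    # attention_resolutions holds downsample *factors* (ds), not pixel sizes;
    # with image_size=64, ds=8 corresponds to the 8x8 feature map.
    model = UNetModel(
        image_size=64,
        in_channels=3,
        model_channels=128,
        out_channels=3,
        num_res_blocks=2,
        attention_resolutions=(4, 8),
        channel_mult=(1, 2, 3, 4),
    )
    x = th.randn(2, 3, 64, 64)     # batch of noisy inputs
    t = th.randint(0, 1000, (2,))  # one diffusion timestep per batch element
    eps = model(x, t)              # predicted noise, shape [2, 3, 64, 64]
    return eps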
class SuperResModel(UNetModel):
"""
A UNetModel that performs super-resolution.
Expects an extra kwarg `low_res` to condition on a low-resolution image.
"""
def __init__(self, image_size, in_channels, *args, **kwargs):
super().__init__(image_size, in_channels * 2, *args, **kwargs)
def forward(self, x, timesteps, low_res=None, **kwargs):
_, _, new_height, new_width = x.shape
upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear")
x = th.cat([x, upsampled], dim=1)
return super().forward(x, timesteps, **kwargs)
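# Hedged sketch (an assumption, not original code): SuperResModel conditions on
# a low-resolution image, which forward() bilinearly upsamples and concatenates
# channel-wise, so in_channels is doubled internally by __init__.
def _example_superres_usage():
    model = SuperResModel(
        image_size=256,
        in_channels=3,                # doubled to 6 inside __init__
        model_channels=128,
        out_channels=3,
        num_res_blocks=2,
        attention_resolutions=(8, 16),
        channel_mult=(1, 1, 2, 2, 4, 4),
    )
    x = th.randn(1, 3, 256, 256)      # noisy high-res sample
    low_res = th.randn(1, 3, 64, 64)  # conditioning image
    t = th.randint(0, 1000, (1,))
    return model(x, t, low_res=low_res)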
class EncoderUNetModel(nn.Module):
"""
The half UNet model with attention and timestep embedding.
    For usage, see UNetModel.
"""
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
use_checkpoint=False,
use_fp16=False,
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
pool="adaptive",
):
super().__init__()
if num_heads_upsample == -1:
num_heads_upsample = num_heads
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.use_checkpoint = use_checkpoint
self.dtype = th.float16 if use_fp16 else th.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
ch = int(channel_mult[0] * model_channels)
self.input_blocks = nn.ModuleList(
[TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
)
self._feature_size = ch
input_block_chans = [ch]
ds = 1
for level, mult in enumerate(channel_mult):
for _ in range(num_res_blocks):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=int(mult * model_channels),
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = int(mult * model_channels)
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
out_ch = ch
self.input_blocks.append(
TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
)
if resblock_updown
else Downsample(
ch, conv_resample, dims=dims, out_channels=out_ch
)
)
)
ch = out_ch
input_block_chans.append(ch)
ds *= 2
self._feature_size += ch
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
self.pool = pool
if pool == "adaptive":
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
nn.AdaptiveAvgPool2d((1, 1)),
zero_module(conv_nd(dims, ch, out_channels, 1)),
nn.Flatten(),
)
elif pool == "attention":
assert num_head_channels != -1
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
AttentionPool2d(
(image_size // ds), ch, num_head_channels, out_channels
),
)
elif pool == "spatial":
self.out = nn.Sequential(
nn.Linear(self._feature_size, 2048),
nn.ReLU(),
nn.Linear(2048, self.out_channels),
)
elif pool == "spatial_v2":
self.out = nn.Sequential(
nn.Linear(self._feature_size, 2048),
normalization(2048),
nn.SiLU(),
nn.Linear(2048, self.out_channels),
)
else:
raise NotImplementedError(f"Unexpected {pool} pooling")
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
def convert_to_fp32(self):
"""
Convert the torso of the model to float32.
"""
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
def forward(self, x, timesteps):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:return: an [N x K] Tensor of outputs.
"""
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
results = []
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb)
if self.pool.startswith("spatial"):
results.append(h.type(x.dtype).mean(dim=(2, 3)))
h = self.middle_block(h, emb)
if self.pool.startswith("spatial"):
results.append(h.type(x.dtype).mean(dim=(2, 3)))
            h = th.cat(results, dim=-1)
return self.out(h)
else:
h = h.type(x.dtype)
return self.out(h)
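# Hedged sketch (assumption): EncoderUNetModel used as a noisy classifier for
# guidance. With pool="attention", num_head_channels must be set (see the
# assert in __init__); values below are illustrative.
def _example_encoder_usage():
    classifier = EncoderUNetModel(
        image_size=64,
        in_channels=3,
        model_channels=128,
        out_channels=1000,             # logits over classes
        num_res_blocks=2,
        attention_resolutions=(4, 8),
        channel_mult=(1, 2, 3, 4),
        num_head_channels=64,
        pool="attention",
    )
    x = th.randn(2, 3, 64, 64)
    t = th.randint(0, 1000, (2,))
    logits = classifier(x, t)          # shape [2, 1000]
    return logits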
|
# ---------------------------------------------------------------
# Taken as-is from the following link:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/script_util.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
import argparse
import inspect
from . import gaussian_diffusion as gd
from .respace import SpacedDiffusion, space_timesteps
from .unet import SuperResModel, UNetModel, EncoderUNetModel
NUM_CLASSES = 1000
def diffusion_defaults():
"""
Defaults for image and classifier training.
"""
return dict(
learn_sigma=False,
diffusion_steps=1000,
noise_schedule="linear",
timestep_respacing="",
use_kl=False,
predict_xstart=False,
rescale_timesteps=False,
rescale_learned_sigmas=False,
)
def classifier_defaults():
"""
Defaults for classifier models.
"""
return dict(
image_size=64,
classifier_use_fp16=False,
classifier_width=128,
classifier_depth=2,
classifier_attention_resolutions="32,16,8", # 16
classifier_use_scale_shift_norm=True, # False
classifier_resblock_updown=True, # False
classifier_pool="attention",
)
def model_and_diffusion_defaults():
"""
Defaults for image training.
"""
res = dict(
image_size=64,
num_channels=128,
num_res_blocks=2,
num_heads=4,
num_heads_upsample=-1,
num_head_channels=-1,
attention_resolutions="16,8",
channel_mult="",
dropout=0.0,
class_cond=False,
use_checkpoint=False,
use_scale_shift_norm=True,
resblock_updown=False,
use_fp16=False,
use_new_attention_order=False,
)
res.update(diffusion_defaults())
return res
def classifier_and_diffusion_defaults():
res = classifier_defaults()
res.update(diffusion_defaults())
return res
def create_model_and_diffusion(
image_size,
class_cond,
learn_sigma,
num_channels,
num_res_blocks,
channel_mult,
num_heads,
num_head_channels,
num_heads_upsample,
attention_resolutions,
dropout,
diffusion_steps,
noise_schedule,
timestep_respacing,
use_kl,
predict_xstart,
rescale_timesteps,
rescale_learned_sigmas,
use_checkpoint,
use_scale_shift_norm,
resblock_updown,
use_fp16,
use_new_attention_order,
):
model = create_model(
image_size,
num_channels,
num_res_blocks,
channel_mult=channel_mult,
learn_sigma=learn_sigma,
class_cond=class_cond,
use_checkpoint=use_checkpoint,
attention_resolutions=attention_resolutions,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
dropout=dropout,
resblock_updown=resblock_updown,
use_fp16=use_fp16,
use_new_attention_order=use_new_attention_order,
)
diffusion = create_gaussian_diffusion(
steps=diffusion_steps,
learn_sigma=learn_sigma,
noise_schedule=noise_schedule,
use_kl=use_kl,
predict_xstart=predict_xstart,
rescale_timesteps=rescale_timesteps,
rescale_learned_sigmas=rescale_learned_sigmas,
timestep_respacing=timestep_respacing,
)
return model, diffusion
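# Hedged sketch (assumption): model_and_diffusion_defaults() matches this
# factory's signature key-for-key, so the dict can be splatted straight in,
# overriding individual entries first as needed.
def _example_create_model_and_diffusion():
    opts = model_and_diffusion_defaults()
    opts.update(image_size=64, class_cond=True, timestep_respacing="250")
    model, diffusion = create_model_and_diffusion(**opts)
    return model, diffusion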
def create_model(
image_size,
num_channels,
num_res_blocks,
channel_mult="",
learn_sigma=False,
class_cond=False,
use_checkpoint=False,
attention_resolutions="16",
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
dropout=0,
resblock_updown=False,
use_fp16=False,
use_new_attention_order=False,
):
if channel_mult == "":
if image_size == 512:
channel_mult = (0.5, 1, 1, 2, 2, 4, 4)
elif image_size == 256:
channel_mult = (1, 1, 2, 2, 4, 4)
elif image_size == 128:
channel_mult = (1, 1, 2, 3, 4)
elif image_size == 64:
channel_mult = (1, 2, 3, 4)
else:
raise ValueError(f"unsupported image size: {image_size}")
else:
channel_mult = tuple(int(ch_mult) for ch_mult in channel_mult.split(","))
attention_ds = []
for res in attention_resolutions.split(","):
attention_ds.append(image_size // int(res))
return UNetModel(
image_size=image_size,
in_channels=3,
model_channels=num_channels,
out_channels=(3 if not learn_sigma else 6),
num_res_blocks=num_res_blocks,
attention_resolutions=tuple(attention_ds),
dropout=dropout,
channel_mult=channel_mult,
num_classes=(NUM_CLASSES if class_cond else None),
use_checkpoint=use_checkpoint,
use_fp16=use_fp16,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
resblock_updown=resblock_updown,
use_new_attention_order=use_new_attention_order,
)
def create_classifier_and_diffusion(
image_size,
classifier_use_fp16,
classifier_width,
classifier_depth,
classifier_attention_resolutions,
classifier_use_scale_shift_norm,
classifier_resblock_updown,
classifier_pool,
learn_sigma,
diffusion_steps,
noise_schedule,
timestep_respacing,
use_kl,
predict_xstart,
rescale_timesteps,
rescale_learned_sigmas,
):
classifier = create_classifier(
image_size,
classifier_use_fp16,
classifier_width,
classifier_depth,
classifier_attention_resolutions,
classifier_use_scale_shift_norm,
classifier_resblock_updown,
classifier_pool,
)
diffusion = create_gaussian_diffusion(
steps=diffusion_steps,
learn_sigma=learn_sigma,
noise_schedule=noise_schedule,
use_kl=use_kl,
predict_xstart=predict_xstart,
rescale_timesteps=rescale_timesteps,
rescale_learned_sigmas=rescale_learned_sigmas,
timestep_respacing=timestep_respacing,
)
return classifier, diffusion
def create_classifier(
image_size,
classifier_use_fp16,
classifier_width,
classifier_depth,
classifier_attention_resolutions,
classifier_use_scale_shift_norm,
classifier_resblock_updown,
classifier_pool,
):
if image_size == 512:
channel_mult = (0.5, 1, 1, 2, 2, 4, 4)
elif image_size == 256:
channel_mult = (1, 1, 2, 2, 4, 4)
elif image_size == 128:
channel_mult = (1, 1, 2, 3, 4)
elif image_size == 64:
channel_mult = (1, 2, 3, 4)
else:
raise ValueError(f"unsupported image size: {image_size}")
attention_ds = []
for res in classifier_attention_resolutions.split(","):
attention_ds.append(image_size // int(res))
return EncoderUNetModel(
image_size=image_size,
in_channels=3,
model_channels=classifier_width,
        out_channels=1000,  # matches NUM_CLASSES (ImageNet)
num_res_blocks=classifier_depth,
attention_resolutions=tuple(attention_ds),
channel_mult=channel_mult,
use_fp16=classifier_use_fp16,
num_head_channels=64,
use_scale_shift_norm=classifier_use_scale_shift_norm,
resblock_updown=classifier_resblock_updown,
pool=classifier_pool,
)
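# Hedged sketch (assumption): classifier_defaults() matches create_classifier's
# signature exactly, so it can be splatted in directly.
def _example_create_classifier():
    return create_classifier(**classifier_defaults())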
def sr_model_and_diffusion_defaults():
res = model_and_diffusion_defaults()
res["large_size"] = 256
res["small_size"] = 64
arg_names = inspect.getfullargspec(sr_create_model_and_diffusion)[0]
for k in res.copy().keys():
if k not in arg_names:
del res[k]
return res
def sr_create_model_and_diffusion(
large_size,
small_size,
class_cond,
learn_sigma,
num_channels,
num_res_blocks,
num_heads,
num_head_channels,
num_heads_upsample,
attention_resolutions,
dropout,
diffusion_steps,
noise_schedule,
timestep_respacing,
use_kl,
predict_xstart,
rescale_timesteps,
rescale_learned_sigmas,
use_checkpoint,
use_scale_shift_norm,
resblock_updown,
use_fp16,
):
model = sr_create_model(
large_size,
small_size,
num_channels,
num_res_blocks,
learn_sigma=learn_sigma,
class_cond=class_cond,
use_checkpoint=use_checkpoint,
attention_resolutions=attention_resolutions,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
dropout=dropout,
resblock_updown=resblock_updown,
use_fp16=use_fp16,
)
diffusion = create_gaussian_diffusion(
steps=diffusion_steps,
learn_sigma=learn_sigma,
noise_schedule=noise_schedule,
use_kl=use_kl,
predict_xstart=predict_xstart,
rescale_timesteps=rescale_timesteps,
rescale_learned_sigmas=rescale_learned_sigmas,
timestep_respacing=timestep_respacing,
)
return model, diffusion
def sr_create_model(
large_size,
small_size,
num_channels,
num_res_blocks,
learn_sigma,
class_cond,
use_checkpoint,
attention_resolutions,
num_heads,
num_head_channels,
num_heads_upsample,
use_scale_shift_norm,
dropout,
resblock_updown,
use_fp16,
):
    _ = small_size  # hack to silence the unused-variable warning
if large_size == 512:
channel_mult = (1, 1, 2, 2, 4, 4)
elif large_size == 256:
channel_mult = (1, 1, 2, 2, 4, 4)
elif large_size == 64:
channel_mult = (1, 2, 3, 4)
else:
raise ValueError(f"unsupported large size: {large_size}")
attention_ds = []
for res in attention_resolutions.split(","):
attention_ds.append(large_size // int(res))
return SuperResModel(
image_size=large_size,
in_channels=3,
model_channels=num_channels,
out_channels=(3 if not learn_sigma else 6),
num_res_blocks=num_res_blocks,
attention_resolutions=tuple(attention_ds),
dropout=dropout,
channel_mult=channel_mult,
num_classes=(NUM_CLASSES if class_cond else None),
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
resblock_updown=resblock_updown,
use_fp16=use_fp16,
)
def create_gaussian_diffusion(
*,
steps=1000,
learn_sigma=False,
sigma_small=False,
noise_schedule="linear",
use_kl=False,
predict_xstart=False,
rescale_timesteps=False,
rescale_learned_sigmas=False,
timestep_respacing="",
):
betas = gd.get_named_beta_schedule(noise_schedule, steps)
if use_kl:
loss_type = gd.LossType.RESCALED_KL
elif rescale_learned_sigmas:
loss_type = gd.LossType.RESCALED_MSE
else:
loss_type = gd.LossType.MSE
if not timestep_respacing:
timestep_respacing = [steps]
return SpacedDiffusion(
use_timesteps=space_timesteps(steps, timestep_respacing),
betas=betas,
model_mean_type=(
gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X
),
model_var_type=(
(
gd.ModelVarType.FIXED_LARGE
if not sigma_small
else gd.ModelVarType.FIXED_SMALL
)
if not learn_sigma
else gd.ModelVarType.LEARNED_RANGE
),
loss_type=loss_type,
rescale_timesteps=rescale_timesteps,
)
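# Hedged sketch (assumption): timestep_respacing="250" samples with 250 evenly
# spaced steps out of the full 1000-step schedule, while an empty string keeps
# every timestep; "cosine" selects the named beta schedule.
def _example_respaced_diffusion():
    return create_gaussian_diffusion(
        steps=1000,
        learn_sigma=True,
        noise_schedule="cosine",
        timestep_respacing="250",
    )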
def add_dict_to_argparser(parser, default_dict):
for k, v in default_dict.items():
v_type = type(v)
if v is None:
v_type = str
elif isinstance(v, bool):
v_type = str2bool
parser.add_argument(f"--{k}", default=v, type=v_type)
def args_to_dict(args, keys):
return {k: getattr(args, k) for k in keys}
def str2bool(v):
"""
https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
"""
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected")
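# Hedged end-to-end sketch (assumption): build a CLI from the defaults dict,
# then turn the parsed flags back into constructor kwargs. Boolean flags accept
# "true"/"false"-style strings via str2bool.
def _example_argparse_pipeline(argv=None):
    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, model_and_diffusion_defaults())
    args = parser.parse_args(argv)
    kwargs = args_to_dict(args, model_and_diffusion_defaults().keys())
    return create_model_and_diffusion(**kwargs)
#
# e.g. _example_argparse_pipeline(["--image_size", "64", "--class_cond", "true"])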
|
# ---------------------------------------------------------------
# Taken as-is from the following link:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/__init__.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
"""
Codebase for "Improved Denoising Diffusion Probabilistic Models".
"""
|