import torch
import typing
from contextlib import nullcontext
from torchbenchmark.util.model import BenchmarkModel
from torch_geometric.nn import GAT, GCN, GraphSAGE
import torch.nn.functional as F
from tqdm import tqdm
from pathlib import Path
from torch_geometric.loader import NeighborLoader
from torchbenchmark.util.framework.gnn.config import parse_tb_args
from typing import List
from torch import Tensor
models_dict = {
'gat': GAT,
'gcn': GCN,
'sage': GraphSAGE,
}
class GNNModel(BenchmarkModel):
# To recognize this is a GNN model
GNN_MODEL = True
# These two variables should be defined by subclasses
DEFAULT_TRAIN_BSIZE = None
DEFAULT_EVAL_BSIZE = None
# Default eval precision on CUDA device is fp16
DEFAULT_EVAL_CUDA_PRECISION = "fp16"
def __init__(self, model_name, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
self.tb_args, self.extra_args = parse_tb_args(self.extra_args)
root = str(Path(__file__).parent.parent.parent.parent)
sparse = self.tb_args.graph_type == "sparse"
if sparse:
data = torch.load(f'{root}/data/.data/Reddit_minimal/sub_reddit_sparse.pt')
else:
data = torch.load(f'{root}/data/.data/Reddit_minimal/sub_reddit.pt')
print(data)
mask = None
sampler = None
kwargs = {
'batch_size': self.batch_size,
'shuffle': False,
'num_workers': 0,
}
self.subgraph_loader = NeighborLoader(
data,
num_neighbors=[-1], # layer-wise inference
input_nodes=mask,
sampler=sampler,
**kwargs,
)
Model = models_dict.get(model_name, None)
num_layers = 1
hidden_channels = 64
input_channels = data.num_features
out_channels = 41 # num_classes
if model_name == "gat":
num_heads = 2
self.model = Model(input_channels, hidden_channels, num_layers, out_channels, heads=num_heads)
else:
self.model = Model(input_channels, hidden_channels, num_layers, out_channels)
self.model = self.model.to(device)
tmp_example_inputs = []
tmp_example_outputs = []
self.num_batch = 0
for batch in self.subgraph_loader:
self.num_batch += 1
if hasattr(batch, 'adj_t'):
edge_index = batch.adj_t.to(device)
else:
edge_index = batch.edge_index.to(device)
tmp_example_inputs.append({"x": batch.x.to(device), "edge_index": edge_index})
tmp_example_outputs.append(batch.y.to(device))
self.example_inputs = tmp_example_inputs
self.example_outputs = tmp_example_outputs
if test == "train":
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=0.001)
self.model.train()
elif test == "eval":
self.model.eval()
self.amp_context = nullcontext
def get_module(self):
return self.model, self.example_inputs[0]
def train(self):
for batch_id in range(self.num_batch):
self.optimizer.zero_grad()
out = self.model(**self.example_inputs[batch_id])
loss = F.cross_entropy(out, self.example_outputs[batch_id])
loss.backward()
self.optimizer.step()
def eval(self) -> typing.Tuple[torch.Tensor]:
with self.amp_context():
xs: List[Tensor] = []
result = self.subgraph_loader.data.x.cpu()
for batch_id in range(self.num_batch):
x = self.model(**self.example_inputs[batch_id])
xs.append(x.cpu())
result = torch.cat(xs, dim=0)
return (result, )
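# Illustrative sketch (not part of the benchmark): a minimal end-to-end run of the
# layer-wise inference pattern used above, on a tiny synthetic graph. Assumes
# torch_geometric plus a neighbor-sampling backend (pyg-lib or torch-sparse) is
# installed; the graph, channel sizes, and batch size are arbitrary examples.
if __name__ == "__main__":
    from torch_geometric.data import Data
    example_data = Data(
        x=torch.randn(8, 16),                                    # 8 nodes, 16 features each
        edge_index=torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]]),   # 4 directed edges
        y=torch.randint(0, 3, (8,)),                             # 3 classes
    )
    example_loader = NeighborLoader(example_data, num_neighbors=[-1], batch_size=4, shuffle=False)
    example_model = GCN(16, 32, num_layers=1, out_channels=3)
    example_model.eval()
    with torch.no_grad():
        outs = [example_model(batch.x, batch.edge_index).cpu() for batch in example_loader]
    # One row per node sampled in each mini-batch (seed nodes plus their neighbors).
    print(torch.cat(outs, dim=0).shape)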
|
""" Hacked from https://github.com/rwightman/pytorch-image-models/blob/f7d210d759beb00a3d0834a3ce2d93f6e17f3d38/train.py
ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniques. It favours canonical PyTorch
and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed
and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit.
This script was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
from contextlib import suppress
import torch
from torch import nn
from torch.nn.parallel import DistributedDataParallel as NativeDDP
from timm.models import create_model
from timm.layers import convert_splitbn_model
from timm.optim import create_optimizer_v2, optimizer_kwargs
from timm.scheduler import create_scheduler
from timm.utils import NativeScaler
from timm.loss import JsdCrossEntropy, SoftTargetCrossEntropy, LabelSmoothingCrossEntropy
from timm.data import create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
from .loader import create_fake_imagenet_dataset
def timm_instantiate_eval(args):
# create eval model
eval_model = create_model(
args.model_name,
pretrained=args.pretrained,
num_classes=args.num_classes,
in_chans=3,
global_pool=args.gp,
scriptable=args.torchscript)
data_config = resolve_data_config(vars(args), model=eval_model, use_test_size=True, verbose=True)
eval_model = eval_model.to(args.device)
# enable channels last layout if set
if args.channels_last:
eval_model = eval_model.to(memory_format=torch.channels_last)
if args.num_gpu > 1:
eval_model = torch.nn.DataParallel(eval_model, device_ids=list(range(args.num_gpu)))
crop_pct = data_config['crop_pct']
# create dataset
dataset_eval = create_fake_imagenet_dataset(size=args.eval_num_batch*args.eval_batch_size)
loader_eval = create_loader(
dataset_eval,
input_size=data_config['input_size'],
batch_size=args.eval_batch_size,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
crop_pct=crop_pct,
pin_memory=args.pin_mem,
tf_preprocessing=args.tf_preprocessing,
persistent_workers=False,
)
return eval_model, loader_eval
def timm_instantiate_train(args):
# create train model
model = create_model(
args.model_name,
pretrained=args.pretrained,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_tf=args.bn_tf,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
scriptable=args.torchscript,
checkpoint_path=args.initial_checkpoint)
data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0)
# setup augmentation batch splits for contrastive loss or split bn
num_aug_splits = 0
if args.aug_splits > 0:
assert args.aug_splits > 1, 'A split of 1 makes no sense'
num_aug_splits = args.aug_splits
# enable split bn (separate bn stats per batch-portion)
if args.split_bn:
assert num_aug_splits > 1 or args.resplit
model = convert_splitbn_model(model, max(num_aug_splits, 2))
model = model.to(args.device)
# enable channels last layout if set
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
# setup synchronized BatchNorm for distributed training
if args.distributed and args.sync_bn:
assert not args.split_bn
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.local_rank == 0:
print(
'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')
# setup optimizer
optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args))
# setup automatic mixed-precision (AMP) loss scaling and op casting
amp_autocast = suppress # do nothing
loss_scaler = None
if args.use_amp == 'native':
amp_autocast = torch.cuda.amp.autocast
loss_scaler = NativeScaler()
# setup distributed training
if args.distributed:
model = NativeDDP(model, device_ids=[args.local_rank], broadcast_buffers=not args.no_ddp_bb)
# NOTE: EMA model does not need to be wrapped by DDP
# setup learning rate schedule and starting epoch
lr_scheduler, _ = create_scheduler(args, optimizer)
# create fake imagenet dataset
fake_dataset = create_fake_imagenet_dataset(size=args.batch_size * args.train_num_batch)
dataset_train = fake_dataset
dataset_eval = fake_dataset
# setup mixup / cutmix
collate_fn = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_args = dict(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.num_classes)
if args.prefetcher:
assert not num_aug_splits # collate conflict (need to support deinterleaving in collate mixup)
collate_fn = FastCollateMixup(**mixup_args)
else:
mixup_fn = Mixup(**mixup_args)
# wrap dataset in AugMix helper
if num_aug_splits > 1:
dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)
# create data loaders w/ augmentation pipeline
train_interpolation = args.train_interpolation
if args.no_aug or not train_interpolation:
train_interpolation = data_config['interpolation']
loader_train = create_loader(
dataset_train,
input_size=data_config['input_size'],
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
no_aug=args.no_aug,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
re_split=args.resplit,
scale=args.scale,
ratio=args.ratio,
hflip=args.hflip,
vflip=args.vflip,
color_jitter=args.color_jitter,
auto_augment=args.aa,
# Not supported by timm 0.4.12
# num_aug_repeats=args.aug_repeats,
num_aug_splits=num_aug_splits,
interpolation=train_interpolation,
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
collate_fn=collate_fn,
pin_memory=args.pin_mem,
use_multi_epochs_loader=args.use_multi_epochs_loader,
# Not supported by timm 0.4.12
# worker_seeding=args.worker_seeding,
persistent_workers=False,
)
loader_validate = create_loader(
dataset_eval,
input_size=data_config['input_size'],
batch_size=args.validation_batch_size or args.batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
crop_pct=data_config['crop_pct'],
pin_memory=args.pin_mem,
persistent_workers=False,
)
# setup loss function
if args.jsd_loss:
assert num_aug_splits > 1 # JSD only valid with aug splits set
train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing)
elif mixup_active:
# NOTE: the latest timm package (0.4.12) doesn't support BinaryCrossEntropy
# smoothing is handled with mixup target transform which outputs sparse, soft targets
# if args.bce_loss:
# train_loss_fn = BinaryCrossEntropy(target_threshold=args.bce_target_thresh)
# else:
train_loss_fn = SoftTargetCrossEntropy()
elif args.smoothing:
# if args.bce_loss:
# train_loss_fn = BinaryCrossEntropy(smoothing=args.smoothing, target_threshold=args.bce_target_thresh)
# else:
train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
train_loss_fn = nn.CrossEntropyLoss()
train_loss_fn = train_loss_fn.to(args.device)
validate_loss_fn = nn.CrossEntropyLoss().to(args.device)
# return all the inputs needed by train and eval loop
return model, loader_train, loader_validate, optimizer, \
train_loss_fn, lr_scheduler, amp_autocast, \
loss_scaler, mixup_fn, validate_loss_fn
|
""" Hacked from https://github.com/rwightman/pytorch-image-models/blob/f7d210d759beb00a3d0834a3ce2d93f6e17f3d38/train.py
ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniques. It favours canonical PyTorch
and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed
and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit.
This script was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
from torchvision.datasets.fakedata import FakeData
def create_fake_imagenet_dataset(size):
fakedata = FakeData(size=size)
return fakedata
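# Usage sketch (assumption, not part of the benchmark): FakeData yields (PIL.Image, int)
# pairs with a default image size of 3x224x224 and 10 classes, which is why the code
# above feeds it through timm's create_loader for tensor conversion and normalization.
if __name__ == "__main__":
    example_dataset = create_fake_imagenet_dataset(size=4)
    example_image, example_target = example_dataset[0]
    print(len(example_dataset), example_image.size, int(example_target))  # e.g. 4 (224, 224) 3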
|
""" Hacked from https://github.com/rwightman/pytorch-image-models/blob/f7d210d759beb00a3d0834a3ce2d93f6e17f3d38/train.py
ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniques. It favours canonical PyTorch
and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed
and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit.
This script was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import torch
from collections import OrderedDict
from contextlib import suppress
from timm.utils import reduce_tensor, dispatch_clip_grad, accuracy
from timm.utils import AverageMeter
from timm.models.helpers import model_parameters
def train_one_epoch(
epoch, model, loader, optimizer, loss_fn, args,
lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress,
loss_scaler=None, model_ema=None, mixup_fn=None):
if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
if args.prefetcher and loader.mixup_enabled:
loader.mixup_enabled = False
elif mixup_fn is not None:
mixup_fn.mixup_enabled = False
second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
# batch_time_m = AverageMeter()
# data_time_m = AverageMeter()
losses_m = AverageMeter()
model.train()
# end = time.time()
last_idx = len(loader) - 1
num_updates = epoch * len(loader)
for batch_idx, (input, target) in zip(range(args.train_num_batch), loader):
last_batch = batch_idx == last_idx
# data_time_m.update(time.time() - end)
if not args.prefetcher and args.device == "cuda":
input, target = input.cuda(), target.cuda()
if mixup_fn is not None:
input, target = mixup_fn(input, target)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
loss = loss_fn(output, target)
if not args.distributed:
losses_m.update(loss.item(), input.size(0))
optimizer.zero_grad()
if loss_scaler is not None:
loss_scaler(
loss, optimizer,
clip_grad=args.clip_grad, clip_mode=args.clip_mode,
parameters=model_parameters(model, exclude_head='agc' in args.clip_mode),
create_graph=second_order)
else:
loss.backward(create_graph=second_order)
if args.clip_grad is not None:
dispatch_clip_grad(
model_parameters(model, exclude_head='agc' in args.clip_mode),
value=args.clip_grad, mode=args.clip_mode)
optimizer.step()
# if model_ema is not None:
# model_ema.update(model)
if args.device == "cuda":
torch.cuda.synchronize()
num_updates += 1
# batch_time_m.update(time.time() - end)
if last_batch or batch_idx % args.log_interval == 0:
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
losses_m.update(reduced_loss.item(), input.size(0))
# if args.local_rank == 0:
# _logger.info(
# 'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
# 'Loss: {loss.val:#.4g} ({loss.avg:#.3g}) '
# 'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
# '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
# 'LR: {lr:.3e} '
# 'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
# epoch,
# batch_idx, len(loader),
# 100. * batch_idx / last_idx,
# loss=losses_m,
# batch_time=batch_time_m,
# rate=input.size(0) * args.world_size / batch_time_m.val,
# rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
# lr=lr,
# data_time=data_time_m))
# if args.save_images and output_dir:
# torchvision.utils.save_image(
# input,
# os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),
# padding=0,
# normalize=True)
# if saver is not None and args.recovery_interval and (
# last_batch or (batch_idx + 1) % args.recovery_interval == 0):
# saver.save_recovery(epoch, batch_idx=batch_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
# end = time.time()
# end for
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
# return OrderedDict([('loss', losses_m.avg)])
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):
batch_time_m = AverageMeter()
losses_m = AverageMeter()
top1_m = AverageMeter()
top5_m = AverageMeter()
model.eval()
# end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
if not args.prefetcher and args.device == "cuda":
input = input.cuda()
target = target.cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
if isinstance(output, (tuple, list)):
output = output[0]
# augmentation reduction
reduce_factor = args.tta
if reduce_factor > 1:
output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
target = target[0:target.size(0):reduce_factor]
loss = loss_fn(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
acc1 = reduce_tensor(acc1, args.world_size)
acc5 = reduce_tensor(acc5, args.world_size)
else:
reduced_loss = loss.data
if args.device == "cuda":
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
top1_m.update(acc1.item(), output.size(0))
top5_m.update(acc5.item(), output.size(0))
# batch_time_m.update(time.time() - end)
# end = time.time()
# if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
# log_name = 'Test' + log_suffix
# _logger.info(
# '{0}: [{1:>4d}/{2}] '
# 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
# 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
# 'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
# 'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(
# log_name, batch_idx, last_idx, batch_time=batch_time_m,
# loss=losses_m, top1=top1_m, top5=top5_m))
metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)])
return metrics
|
""" Hacked from https://github.com/rwightman/pytorch-image-models/blob/f7d210d759beb00a3d0834a3ce2d93f6e17f3d38/train.py
ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniques. It favours canonical PyTorch
and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed
and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit.
This script was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import os
import yaml
import torch
import argparse
def setup_args_distributed(args):
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
args.device = 'cuda:%d' % args.local_rank
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
assert args.rank >= 0
return args
def get_args(config_string="", config_file=None):
def _parse_args():
# Do we have a config file to parse?
if config_file:
with open(config_file, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(config_string)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
# In the original timm script a first parser extracted only the --config argument, which
# pointed to a YAML file of defaults. In this hacked version the YAML path is passed in
# directly via `config_file`, so the parser created just below is never used and is
# immediately replaced by the main parser.
parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset parameters
# parser.add_argument('data_dir', metavar='DIR',
# help='path to dataset')
parser.add_argument('--dataset', '-d', metavar='NAME', default='',
help='dataset type (default: ImageFolder/ImageTar if empty)')
parser.add_argument('--train-split', metavar='NAME', default='train',
help='dataset train split (default: train)')
parser.add_argument('--val-split', metavar='NAME', default='validation',
help='dataset validation split (default: validation)')
parser.add_argument('--dataset-download', action='store_true', default=False,
help='Allow download of dataset for torch/ and tfds/ datasets that support it.')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
# Model parameters
parser.add_argument('--model', default='resnet50', type=str, metavar='MODEL',
help='Name of model to train (default: "resnet50")')
parser.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='Resume full model and optimizer state from checkpoint (default: none)')
parser.add_argument('--no-resume-opt', action='store_true', default=False,
help='prevent resume of optimizer state when resuming model')
parser.add_argument('--num-classes', type=int, default=None, metavar='N',
help='number of label classes (Model default if None)')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--img-size', type=int, default=None, metavar='N',
help='Image patch size (default: None => model default)')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop percent (for validation only)')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('-vb', '--validation-batch-size', type=int, default=None, metavar='N',
help='validation batch size override (default: None)')
# Optimizer parameters
parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "sgd")')
parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: None, use opt default)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='Optimizer momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=2e-5,
help='weight decay (default: 2e-5)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--clip-mode', type=str, default='norm',
help='Gradient clipping mode. One of ("norm", "value", "agc")')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=0.05, metavar='LR',
help='learning rate (default: 0.05)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
help='learning rate cycle len multiplier (default: 1.0)')
parser.add_argument('--lr-cycle-decay', type=float, default=0.5, metavar='MULT',
help='amount to decay each learning rate cycle (default: 0.5)')
parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
help='learning rate cycle limit, cycles enabled if > 1')
parser.add_argument('--lr-k-decay', type=float, default=1.0,
help='learning rate k-decay for cosine/poly (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR',
help='warmup learning rate (default: 0.0001)')
parser.add_argument('--min-lr', type=float, default=1e-6, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (default: 1e-6)')
parser.add_argument('--epochs', type=int, default=300, metavar='N',
help='number of epochs to train (default: 300)')
parser.add_argument('--epoch-repeats', type=float, default=0., metavar='N',
help='epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).')
parser.add_argument('--start-epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--decay-epochs', type=float, default=100, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation & regularization parameters
parser.add_argument('--no-aug', action='store_true', default=False,
help='Disable all training augmentation, override other train aug args')
parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
help='Random resize scale (default: 0.08 1.0)')
parser.add_argument('--ratio', type=float, nargs='+', default=[3./4., 4./3.], metavar='RATIO',
help='Random resize aspect ratio (default: 0.75 1.33)')
parser.add_argument('--hflip', type=float, default=0.5,
help='Horizontal flip training aug probability')
parser.add_argument('--vflip', type=float, default=0.,
help='Vertical flip training aug probability')
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default=None, metavar='NAME',
help='Use AutoAugment policy. "v0" or "original". (default: None)')
parser.add_argument('--aug-repeats', type=int, default=0,
help='Number of augmentation repetitions (distributed training only) (default: 0)')
parser.add_argument('--aug-splits', type=int, default=0,
help='Number of augmentation splits (default: 0, valid: 0 or >=2)')
parser.add_argument('--jsd-loss', action='store_true', default=False,
help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.')
parser.add_argument('--bce-loss', action='store_true', default=False,
help='Enable BCE loss w/ Mixup/CutMix use.')
parser.add_argument('--bce-target-thresh', type=float, default=None,
help='Threshold for binarizing softened BCE targets (default: None, disabled)')
parser.add_argument('--reprob', type=float, default=0., metavar='PCT',
help='Random erase prob (default: 0.)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
parser.add_argument('--mixup', type=float, default=0.0,
help='mixup alpha, mixup enabled if > 0. (default: 0.)')
parser.add_argument('--cutmix', type=float, default=0.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 0.)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
help='Turn off mixup after this epoch, disabled if 0 (default: 0)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='random',
help='Training interpolation (random, bilinear, bicubic; default: "random")')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
parser.add_argument('--drop-path', type=float, default=None, metavar='PCT',
help='Drop path rate (default: None)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
# Batch norm parameters (only works with gen_efficientnet based models currently)
parser.add_argument('--bn-tf', action='store_true', default=False,
help='Use Tensorflow BatchNorm defaults for models that support it (default: False)')
parser.add_argument('--bn-momentum', type=float, default=None,
help='BatchNorm momentum override (if not None)')
parser.add_argument('--bn-eps', type=float, default=None,
help='BatchNorm epsilon override (if not None)')
parser.add_argument('--sync-bn', action='store_true',
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--dist-bn', type=str, default='reduce',
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
parser.add_argument('--split-bn', action='store_true',
help='Enable separate BN layers per augmentation split.')
# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=False,
help='Enable tracking moving average of model weights')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False,
help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
parser.add_argument('--model-ema-decay', type=float, default=0.9998,
help='decay factor for model weights moving average (default: 0.9998)')
# Misc
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--worker-seeding', type=str, default='all',
help='worker seed mode (default: all)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--recovery-interval', type=int, default=0, metavar='N',
help='how many batches to wait before writing recovery checkpoint')
parser.add_argument('--checkpoint-hist', type=int, default=10, metavar='N',
help='number of checkpoints to keep (default: 10)')
parser.add_argument('-j', '--workers', type=int, default=0, metavar='N',
help='how many training processes to use (default: 0)')
parser.add_argument('--save-images', action='store_true', default=False,
help='save images of input batches every log interval for debugging')
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--apex-amp', action='store_true', default=False,
help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
help='Use Native Torch AMP mixed precision')
parser.add_argument('--no-ddp-bb', action='store_true', default=False,
help='Force broadcast buffers for native DDP to off.')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='path to output folder (default: none, current dir)')
parser.add_argument('--experiment', default='', type=str, metavar='NAME',
help='name of train experiment, name of sub-folder for output')
parser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC',
help='Best metric (default: "top1")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--use-multi-epochs-loader', action='store_true', default=False,
help='use the multi-epochs-loader to save time at the beginning of every epoch')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
help='convert model torchscript for inference')
parser.add_argument('--log-wandb', action='store_true', default=False,
help='log training and validation metrics to wandb')
# Inference args
parser.add_argument('--eval-batch-size', type=int, default=256, metavar='N',
help='input batch size for inference (default: 256)')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--tf-preprocessing', action='store_true', default=False,
help='Use Tensorflow preprocessing pipeline (requires CPU TF installed)')
args, _args_text = _parse_args()
return args
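# Usage sketch (assumption, not part of the benchmark): get_args expects the extra
# arguments as a list of tokens, the same form argparse.parse_args takes; the model
# name and batch size below are arbitrary examples.
if __name__ == "__main__":
    example_args = get_args(config_string=["--model", "resnet50", "--batch-size", "32"])
    example_args = setup_args_distributed(example_args)  # no-op when WORLD_SIZE is unset
    print(example_args.model, example_args.batch_size, example_args.distributed)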
|
import torch.nn as nn
import dataclasses
from timm.optim import create_optimizer
@dataclasses.dataclass
class OptimizerOption:
lr: float
opt: str
weight_decay: float
momentum: float
class TimmConfig:
def __init__(self, model, device):
self.model = model
self.device = device
# Configurations
self.num_classes = self.model.num_classes
self.loss = nn.CrossEntropyLoss().to(self.device)
self.target_shape = tuple()
self.input_size = self.model.default_cfg["input_size"]
# Default optimizer configurations borrowed from:
# https://github.com/rwightman/pytorch-image-models/blob/779107b693010934ac87c8cecbeb65796e218488/timm/optim/optim_factory.py#L78
opt_args = OptimizerOption(lr=1e-4, opt="sgd", weight_decay = 0.0001, momentum = 0.9)
self.optimizer = create_optimizer(opt_args, self.model)
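# Usage sketch (assumption): TimmConfig only needs a timm model and a device string;
# "resnet18" is an arbitrary example model and requires timm to be installed.
if __name__ == "__main__":
    import timm
    example_model = timm.create_model("resnet18", pretrained=False)
    example_cfg = TimmConfig(model=example_model, device="cpu")
    print(example_cfg.num_classes, example_cfg.input_size)  # e.g. 1000 (3, 224, 224)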
|
from contextlib import suppress
import torch
import typing
import timm
from torchbenchmark.util.model import BenchmarkModel
from .timm_config import TimmConfig
from typing import Generator, Tuple, Optional
class TimmModel(BenchmarkModel):
# To recognize this is a timm model
TIMM_MODEL = True
# These two variables should be defined by subclasses
DEFAULT_TRAIN_BSIZE = None
DEFAULT_EVAL_BSIZE = None
# Default eval precision on CUDA device is fp16
DEFAULT_EVAL_CUDA_PRECISION = "fp16"
def __init__(self, model_name, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
self.model = timm.create_model(model_name, pretrained=False, scriptable=True)
self.cfg = TimmConfig(model = self.model, device = device)
self.example_inputs = self._gen_input(self.batch_size)
self.model.to(
device=self.device
)
if test == "train":
self.model.train()
elif test == "eval":
self.model.eval()
self.amp_context = suppress
def gen_inputs(self, num_batches:int=1) -> Tuple[Generator, Optional[int]]:
def _gen_inputs():
while True:
result = []
for _i in range(num_batches):
result.append((self._gen_input(self.batch_size), ))
if self.dargs.precision == "fp16":
result = list(map(lambda x: (x[0].half(), ), result))
yield result
return (_gen_inputs(), None)
def _gen_input(self, batch_size):
return torch.randn((batch_size,) + self.cfg.input_size, device=self.device)
def _gen_target(self, batch_size):
return torch.empty(
(batch_size,) + self.cfg.target_shape,
device=self.device, dtype=torch.long).random_(self.cfg.num_classes)
def _step_train(self):
self.cfg.optimizer.zero_grad()
with self.amp_context():
output = self.model(self.example_inputs)
if isinstance(output, tuple):
output = output[0]
target = self._gen_target(output.shape[0])
self.cfg.loss(output, target).backward()
self.cfg.optimizer.step()
def _step_eval(self):
output = self.model(self.example_inputs)
return output
def get_optimizer(self):
return self.cfg.optimizer
def set_optimizer(self, optimizer) -> None:
self.cfg.optimizer = optimizer
def enable_fp16_half(self):
self.model = self.model.half()
self.example_inputs = self.example_inputs.half()
def enable_channels_last(self):
self.model = self.model.to(memory_format=torch.channels_last)
self.example_inputs = self.example_inputs.contiguous(memory_format=torch.channels_last)
def get_module(self):
return self.model, (self.example_inputs,)
def train(self):
self._step_train()
def eval(self) -> typing.Tuple[torch.Tensor]:
with torch.no_grad():
with self.amp_context():
out = self._step_eval()
return (out, )
|
from datasets import load_dataset
def prep_dataset(hf_args):
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if hf_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(hf_args.dataset_name, hf_args.dataset_config_name)
else:
data_files = {}
if hf_args.train_file is not None:
data_files["train"] = hf_args.train_file
if hf_args.validation_file is not None:
data_files["validation"] = hf_args.validation_file
extension = hf_args.train_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
return raw_datasets
def preprocess_dataset(hf_args, raw_datasets, tokenizer, prefix, accelerator):
# Preprocessing the datasets.
# First we tokenize all the texts.
column_names = raw_datasets["train"].column_names
# Get the language codes for input/target.
source_lang = hf_args.source_lang.split("_")[0]
target_lang = hf_args.target_lang.split("_")[0]
padding = "max_length" if hf_args.pad_to_max_length else False
# Temporarily set max_target_length for training.
max_target_length = hf_args.max_target_length
padding = "max_length" if hf_args.pad_to_max_length else False
def preprocess_function(examples):
inputs = [ex[source_lang] for ex in examples["translation"]]
targets = [ex[target_lang] for ex in examples["translation"]]
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=hf_args.max_source_length, padding=padding, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and hf_args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
with accelerator.main_process_first():
processed_datasets = raw_datasets.map(
preprocess_function,
batched=True,
num_proc=hf_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not hf_args.overwrite_cache,
desc="Running tokenizer on dataset",
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation"]
return train_dataset, eval_dataset
|
"""
Hacked from https://github.com/huggingface/transformers/blob/main/examples/pytorch/translation/run_translation_no_trainer.py
It runs HuggingFace transformer models on WMT16 translation tasks
"""
import argparse
from transformers import SchedulerType
task_to_keys = {
# hf args to include for different tasks
# english to romanian
"wmt-en-ro": [
"--dataset_name", "wmt16",
"--dataset_config_name", "ro-en",
"--source_lang", "en",
"--target_lang", "ro",
],
# english to german
"wmt-en-de": [
"--dataset_name", "stas/wmt14-en-de-pre-processed",
"--source_lang", "en",
"--target_lang", "de",
],
}
def parse_torchbench_args(extra_args):
parser = argparse.ArgumentParser()
parser.add_argument("--task_name", default="wmt-en-ro", choices=task_to_keys.keys(), help="Name of task to run")
# validate in train by default
parser.add_argument("--validate_in_train", action="store_false", help="Validate result in train")
# use fp16 mixed precision by default
parser.add_argument("--fp16", default="amp", choices=["amp", "no"], help="Enable mixed precision")
parser.add_argument(
"--distributed", default="none", choices=["ddp", "fsdp", "deepspeed", "none"],
help="distributed training paradigm, by default using DDP"
)
tb_args = parser.parse_args(extra_args)
return tb_args
def parse_args(in_args):
parser = argparse.ArgumentParser(description="Finetune a transformers model on a translation task")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--predict_with_generate",
type=bool,
default=True,
help="",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--train_file", type=str, default=None, help="A csv or a json file containing the training data."
)
parser.add_argument(
"--num_beams",
type=int,
default=None,
help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
),
)
parser.add_argument(
"--max_source_length",
type=int,
default=1024,
help=(
"The maximum total input sequence length after "
"tokenization.Sequences longer than this will be truncated, sequences shorter will be padded."
),
)
parser.add_argument(
"--max_target_length",
type=int,
default=128,
help=(
"The maximum total sequence length for target text after "
"tokenization. Sequences longer than this will be truncated, sequences shorter will be padded."
"during ``evaluate`` and ``predict``."
),
)
parser.add_argument(
"--val_max_target_length",
type=int,
default=None,
help=(
"The maximum total sequence length for validation "
"target text after tokenization.Sequences longer than this will be truncated, sequences shorter will be "
"padded. Will default to `max_target_length`.This argument is also used to override the ``max_length`` "
"param of ``model.generate``, which is used during ``evaluate`` and ``predict``."
),
)
parser.add_argument(
"--pad_to_max_length",
type=bool,
default=False,
help=(
"Whether to pad all samples to model maximum sentence "
"length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More"
"efficient on GPU but very bad for TPU."
),
)
parser.add_argument(
"--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
)
parser.add_argument(
"--ignore_pad_token_for_loss",
type=bool,
default=True,
help="Whether to ignore the tokens corresponding to padded labels in the loss computation or not.",
)
parser.add_argument("--source_lang", type=str, default=None, help="Source language id for translation.")
parser.add_argument("--target_lang", type=str, default=None, help="Target language id for translation.")
parser.add_argument(
"--source_prefix",
type=str,
default=None,
help="A prefix to add before every source text (useful for T5 models).",
)
parser.add_argument(
"--preprocessing_num_workers",
type=int,
default=None,
help="The number of processes to use for the preprocessing.",
)
parser.add_argument(
"--overwrite_cache", type=bool, default=None, help="Overwrite the cached training and evaluation sets"
)
# seems to be unused, commenting out
# parser.add_argument(
# "--max_length",
# type=int,
# default=128,
# help=(
# "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
# " sequences shorter will be padded if `--pad_to_max_lengh` is passed."
# ),
# )
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=False,
)
parser.add_argument(
"--config_name",
type=str,
default=None,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--model_type",
type=str,
default=None,
help="Model type to use if training from scratch.",
# choices=MODEL_TYPES, # unused, commented out for simplicity
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument(
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
)
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--checkpointing_steps",
type=str,
default=None,
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help="If the training should continue from a checkpoint folder.",
)
parser.add_argument(
"--with_tracking",
action="store_true",
help="Whether to enable experiment trackers for logging.",
)
parser.add_argument(
"--report_to",
type=str,
default="all",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.'
"Only applicable when `--with_tracking` is passed."
),
)
args = parser.parse_args(in_args)
# Sanity checks
if args.dataset_name is None and args.train_file is None and args.validation_file is None:
raise ValueError("Need either a task name or a training/validation file.")
if args.train_file is not None:
extension = args.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if args.validation_file is not None:
extension = args.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if args.push_to_hub:
assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."
return args
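# Usage sketch (assumption, not part of the benchmark): the task presets in task_to_keys
# can be concatenated with extra tokens and passed straight to parse_args; "t5-small" is
# an arbitrary example checkpoint name.
if __name__ == "__main__":
    example_hf_args = parse_args(task_to_keys["wmt-en-ro"] + ["--model_name_or_path", "t5-small"])
    print(example_hf_args.dataset_name, example_hf_args.source_lang, example_hf_args.target_lang)
    # expected: wmt16 en ro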
|
from datasets import load_dataset
from transformers import PretrainedConfig
from .args import task_to_keys
def preprocess_dataset(hf_args, config, model, tokenizer, raw_datasets, num_labels, label_list, is_regression, accelerator):
# Preprocessing the raw_datasets
if hf_args.task_name is not None:
sentence1_key, sentence2_key = task_to_keys[hf_args.task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"]
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and hf_args.task_name is not None
and not is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
else:
# logger.warning(
# "Your model seems to have been trained with labels, but they don't match the dataset: ",
# f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
# "\nIgnoring the model labels as a result.",
# )
pass
elif hf_args.task_name is None and not is_regression:
label_to_id = {v: i for i, v in enumerate(label_list)}
if label_to_id is not None:
model.config.label2id = label_to_id
model.config.id2label = {id: label for label, id in config.label2id.items()}
elif hf_args.task_name is not None and not is_regression:
model.config.label2id = {l: i for i, l in enumerate(label_list)}
model.config.id2label = {id: label for label, id in config.label2id.items()}
padding = "max_length" if hf_args.pad_to_max_length else False
def preprocess_function(examples):
# Tokenize the texts
texts = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*texts, padding=padding, max_length=hf_args.max_length, truncation=True)
if "label" in examples:
if label_to_id is not None:
# Map labels to IDs (not necessary for GLUE tasks)
result["labels"] = [label_to_id[l] for l in examples["label"]]
else:
# In all cases, rename the column to labels because the model will expect that.
result["labels"] = examples["label"]
return result
with accelerator.main_process_first():
processed_datasets = raw_datasets.map(
preprocess_function,
batched=True,
remove_columns=raw_datasets["train"].column_names,
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation_matched" if hf_args.task_name == "mnli" else "validation"]
if hf_args.task_name == "mnli":
mnli_eval_dataset = raw_datasets["validation_mismatched"]
else:
mnli_eval_dataset = None
return train_dataset, eval_dataset, mnli_eval_dataset
def prep_dataset(hf_args):
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if hf_args.task_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset("glue", hf_args.task_name)
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
data_files = {"train": hf_args.train_file, "validation": hf_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if hf_args.do_predict:
if hf_args.test_file is not None:
train_extension = hf_args.train_file.split(".")[-1]
test_extension = hf_args.test_file.split(".")[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
data_files["test"] = hf_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
# for key in data_files.keys():
# logger.info(f"load a local file for {key}: {data_files[key]}")
if hf_args.train_file.endswith(".csv"):
# Loading a dataset from local csv files
raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=hf_args.cache_dir)
else:
# Loading a dataset from local json files
raw_datasets = load_dataset("json", data_files=data_files, cache_dir=hf_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
return raw_datasets
def prep_labels(hf_args, raw_datasets):
# Labels
if hf_args.task_name is not None:
is_regression = hf_args.task_name == "stsb"
if not is_regression:
label_list = raw_datasets["train"].features["label"].names
num_labels = len(label_list)
else:
num_labels = 1
else:
# Trying to have good defaults here, don't hesitate to tweak to your needs.
is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
label_list = raw_datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
return num_labels, label_list, is_regression |
"""
Hacked from https://github.com/huggingface/transformers/blob/6fc38adff272ea3148e05888edf67eeb00170453/examples/pytorch/text-classification/run_glue.py
It runs HuggingFace transformer models on the GLUE benchmark
"""
import argparse
from transformers import SchedulerType
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
def parse_torchbench_args(extra_args):
parser = argparse.ArgumentParser()
parser.add_argument("--task_name", default="cola", choices=task_to_keys.keys(), help="Name of task to run")
# validate in train by default
parser.add_argument("--validate_in_train", action="store_false", help="Validate result in train")
# use fp16 mixed precision by default
parser.add_argument("--fp16", default="amp", choices=["amp", "no"], help="Enable mixed precision")
parser.add_argument(
"--distributed", default="none", choices=["ddp", "fsdp", "deepspeed", "none"],
help="distributed training paradigm, by default using DDP"
)
tb_args = parser.parse_args(extra_args)
return tb_args
def parse_args(in_args):
parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task")
parser.add_argument(
"--task_name",
type=str,
default=None,
help="The name of the glue task to train on.",
choices=list(task_to_keys.keys()),
)
parser.add_argument(
"--train_file", type=str, default=None, help="A csv or a json file containing the training data."
)
parser.add_argument(
"--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
)
parser.add_argument(
"--max_length",
type=int,
default=128,
help=(
"The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
" sequences shorter will be padded if `--pad_to_max_lengh` is passed."
),
)
parser.add_argument(
"--pad_to_max_length",
action="store_true",
help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=True,
)
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument(
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
)
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
args = parser.parse_args(in_args)
# Sanity checks
if args.task_name is None and args.train_file is None and args.validation_file is None:
raise ValueError("Need either a task name or a training/validation file.")
else:
if args.train_file is not None:
extension = args.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if args.validation_file is not None:
extension = args.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
return args |
import argparse
def parse_tb_args(args):
parser = argparse.ArgumentParser()
# default resolution: 800x1333
parser.add_argument("--resize", choices=["default", "448x608"], default="default", help="Resize the image to specified size")
args, unknown_args = parser.parse_known_args(args)
return args, unknown_args |
import os
import shutil
import sys
import subprocess
from pathlib import Path
from urllib import request
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
# Load pre-trained weights
# copied from https://github.com/facebookresearch/detectron2/blob/5934a1452801e669bbf9479ae222ce1a8a51f52e/MODEL_ZOO.md
MODEL_WEIGHTS_MAP = {
"detectron2_fasterrcnn_r_50_c4": "https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_C4_1x/137257644/model_final_721ade.pkl",
"detectron2_fasterrcnn_r_50_dc5": "https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_DC5_1x/137847829/model_final_51d356.pkl",
"detectron2_fasterrcnn_r_50_fpn": "https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_FPN_1x/137257794/model_final_b275ba.pkl",
"detectron2_fasterrcnn_r_101_c4": "https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_101_C4_3x/138204752/model_final_298dad.pkl",
"detectron2_fasterrcnn_r_101_dc5": "https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_101_DC5_3x/138204841/model_final_3e0943.pkl",
"detectron2_fasterrcnn_r_101_fpn": "https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_101_FPN_3x/137851257/model_final_f6e8b1.pkl",
"detectron2_maskrcnn_r_50_c4": "https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x/137259246/model_final_9243eb.pkl",
"detectron2_maskrcnn_r_50_fpn": "https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x/137260431/model_final_a54504.pkl",
"detectron2_maskrcnn_r_101_c4": "https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x/138363239/model_final_a2914c.pkl",
"detectron2_maskrcnn_r_101_fpn": "https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x/138205316/model_final_a3ec72.pkl",
"detectron2_maskrcnn": "https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x/137260431/model_final_a54504.pkl",
"detectron2_fcos_r_50_fpn": None,
}
def check_data_dir():
coco2017_data_dir = os.path.join(CURRENT_DIR.parent.parent.parent, "data", ".data", "coco2017-minimal")
assert os.path.exists(coco2017_data_dir), "Couldn't find coco2017 minimal data dir, please run install.py again."
def install_model_weights(model_name, model_dir):
assert model_name in MODEL_WEIGHTS_MAP, f"Model {model_name} is not in MODEL_WEIGHTS_MAP. Cannot download the model weights file."
model_full_path = Path(os.path.join(model_dir, ".data", f"{model_name}.pkl"))
if model_name in MODEL_WEIGHTS_MAP and MODEL_WEIGHTS_MAP[model_name]:
# download the file if not exists
# TODO: verify the model file integrity
if os.path.exists(model_full_path):
return
model_full_path.parent.mkdir(parents=True, exist_ok=True)
request.urlretrieve(MODEL_WEIGHTS_MAP[model_name], model_full_path)
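# A hedged sketch for the integrity TODO above (not wired into the install flow): if a
# known sha256 digest were available for each entry, the downloaded .pkl could be checked
# like this. `expected_sha256` is a hypothetical argument, not part of MODEL_WEIGHTS_MAP.
def _verify_model_weights(model_full_path, expected_sha256):
    import hashlib
    digest = hashlib.sha256()
    with open(model_full_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256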
def pip_install_requirements():
requirements_file = os.path.join(CURRENT_DIR, "requirements.txt")
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', requirements_file])
# This is to workaround https://github.com/facebookresearch/detectron2/issues/3934
def remove_tools_directory():
try:
import tools
import detectron2
d2_dir_path = Path(detectron2.__file__).parent
assumed_tools_path = d2_dir_path.parent.joinpath("tools")
if tools.__file__ and assumed_tools_path.exists():
shutil.rmtree(str(assumed_tools_path))
except ImportError:
# if the "tools" package doesn't exist, do nothing
pass
def install_detectron2(model_name, model_dir):
check_data_dir()
install_model_weights(model_name, model_dir)
pip_install_requirements()
remove_tools_directory()
|
from torchbenchmark.util.framework.detectron2.config import parse_tb_args
from torchbenchmark.util.model import BenchmarkModel
import itertools
import os
from pathlib import Path
import torch
# setup environment variable
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
DATA_DIR = os.path.join(CURRENT_DIR.parent.parent.parent, "data", ".data", "coco2017-minimal")
assert os.path.exists(DATA_DIR), "Couldn't find coco2017 minimal data dir, please run install.py again."
if 'DETECTRON2_DATASETS' not in os.environ:
os.environ['DETECTRON2_DATASETS'] = DATA_DIR
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from detectron2.engine import default_argument_parser
from detectron2.solver import build_optimizer
from detectron2.config import LazyConfig, get_cfg, instantiate
from detectron2 import model_zoo
from detectron2.modeling import build_model
from detectron2.utils.events import EventStorage
from torch.utils._pytree import tree_map
from detectron2.checkpoint import DetectionCheckpointer
import detectron2.data.transforms as T
from detectron2.config import LazyCall as L
from detectron2.data import build_detection_test_loader, build_detection_train_loader
from typing import Tuple
def setup(args):
if args.config_file.endswith(".yaml"):
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.SOLVER.BASE_LR = 0.001 # Avoid NaNs. Not useful in this script anyway.
# set images per batch to 1
cfg.SOLVER.IMS_PER_BATCH = 1
cfg.MODEL.WEIGHTS = args.model_file
if args.resize == "448x608":
cfg.MODEL.RPN.POST_NMS_TOPK_TEST = 300
cfg.INPUT.MIN_SIZE_TEST = 448
cfg.INPUT.MAX_SIZE_TEST = 608
cfg.merge_from_list(args.opts)
cfg.freeze()
else:
cfg = LazyConfig.load(args.config_file)
cfg = LazyConfig.apply_overrides(cfg, args.opts)
if args.fcos_use_bn:
cfg.model.head.norm = "BN"
return cfg
def prefetch(dataloader, device, precision="fp32"):
r = []
dtype = torch.float16 if precision == "fp16" else torch.float32
for batch in dataloader:
r.append(tree_map(lambda x: x.to(device, dtype=dtype) if isinstance(x, torch.Tensor) else x, batch))
return r
def get_abs_path(config):
import detectron2
detectron2_root = os.path.abspath(os.path.dirname(detectron2.__file__))
return os.path.join(detectron2_root, "model_zoo", "configs", config)
class Detectron2Model(BenchmarkModel):
# To recognize this is a detectron2 model
DETECTRON2_MODEL = True
# Default eval precision on CUDA device is fp16
DEFAULT_EVAL_CUDA_PRECISION = "fp16"
# Default batch sizes
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
    # Skip correctness check, because the output tensor can't be verified using
    # cosine similarity or torch.allclose()
SKIP_CORRECTNESS_CHECK = True
def __init__(self, variant, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
self.tb_args, self.extra_args = parse_tb_args(self.extra_args)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
# load model file
assert hasattr(self, "model_file"), f"Detectron2 models must specify its model_file."
if self.model_file:
assert (os.path.exists(self.model_file)), f"Detectron2 model file specified {self.model_file} doesn't exist."
parser = default_argument_parser()
args = parser.parse_args(["--config-file", get_abs_path(variant)])
# setup pre-trained model weights
args.model_file = self.model_file
args.resize = self.tb_args.resize
if hasattr(self, "FCOS_USE_BN") and self.FCOS_USE_BN:
args.fcos_use_bn = True
cfg = setup(args)
if hasattr(cfg, "MODEL") and cfg.MODEL.DEVICE != self.device:
cfg.defrost()
cfg.MODEL.DEVICE = self.device
cfg.freeze()
if args.config_file.endswith(".yaml"):
self.model = build_model(cfg).to(self.device)
else:
self.model = instantiate(cfg.model).to(self.device)
# setup model and return the dataloader
if self.test == "train":
if hasattr(self, "FCOS_USE_BN") and self.FCOS_USE_BN:
raise NotImplementedError("FCOS train is not supported by upstream detectron2. " \
"See GH Issue: https://github.com/facebookresearch/detectron2/issues/4369.")
self.optimizer = build_optimizer(cfg, self.model)
loader = self.setup_train()
elif self.test == "eval":
loader = self.setup_eval(cfg, args)
self.example_inputs = prefetch(itertools.islice(loader, 100), self.device)
# torchbench: only run 1 batch
self.NUM_BATCHES = 1
def setup_train(self):
if hasattr(self, "FCOS_USE_BN") and self.FCOS_USE_BN:
raise NotImplementedError("FCOS train is not supported by upstream detectron2. " \
"See GH Issue: https://github.com/facebookresearch/detectron2/issues/4369.")
checkpointer = DetectionCheckpointer(self.model, optimizer=self.optimizer)
checkpointer.load(self.model_file)
self.model.train()
# Always use coco.py to initialize train data
# setup train dataset
data_cfg = model_zoo.get_config("common/data/coco.py").dataloader
data_cfg.train.dataset.names = "coco_2017_val_100"
data_cfg.train.total_batch_size = self.batch_size
if self.tb_args.resize == "448x608":
data_cfg.train.mapper.augmentations = [L(T.ResizeShortestEdge)(short_edge_length=448, max_size=608)]
loader = instantiate(data_cfg.train)
return loader
def setup_eval(self, cfg, args):
# load model from pretrained checkpoint
DetectionCheckpointer(self.model).load(self.model_file)
self.model.eval()
if args.config_file.endswith(".yaml"):
cfg.defrost()
cfg.DATASETS.TEST = ("coco_2017_val_100", )
loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0], batch_size=self.batch_size)
else:
data_cfg = model_zoo.get_config("common/data/coco.py").dataloader
data_cfg.test.dataset.names = "coco_2017_val_100"
data_cfg.test.batch_size = self.batch_size
if self.tb_args.resize == "448x608":
data_cfg.test.mapper.augmentations = [L(T.ResizeShortestEdge)(short_edge_length=448, max_size=608)]
loader = instantiate(data_cfg.test)
return loader
def get_module(self):
return self.model, (self.example_inputs[0], )
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
self.setup_train()
def enable_fp16_half(self):
assert self.dargs.precision == "fp16", f"Expected precision fp16, get {self.dargs.precision}"
self.model = self.model.half()
self.example_inputs = prefetch(self.example_inputs, self.device, self.dargs.precision)
def train(self):
with EventStorage():
for batch_id in range(self.NUM_BATCHES):
loss_dict = self.model(self.example_inputs[batch_id])
if isinstance(loss_dict, torch.Tensor):
losses = loss_dict
loss_dict = {"total_loss": loss_dict}
else:
losses = sum(loss_dict.values())
self.optimizer.zero_grad()
losses.backward()
self.optimizer.step()
def eval(self) -> Tuple[torch.Tensor]:
with torch.no_grad():
for batch_id in range(self.NUM_BATCHES):
out = self.model(self.example_inputs[batch_id])
# retrieve output tensors
outputs = []
for item in out:
fields = list(map(lambda x: list(x.get_fields().values()), item.values()))
for boxes in fields:
tensor_box = list(filter(lambda x: isinstance(x, torch.Tensor), boxes))
outputs.extend(tensor_box)
return tuple(outputs)
|
"""
Patch the transformer source code to enable optimizations.
"""
import os
import subprocess
import sys
from .model_factory import class_models
from transformers import AutoConfig, ReformerConfig, BigBirdConfig, BertConfig
PATCH_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "patches")
def cache_model(name: str):
import transformers
model_config = eval(class_models[name][2])
model_ctor = getattr(transformers, class_models[name][3])
model_ctor.from_config(model_config)
def patch_transformers():
import transformers
transformers_dir = os.path.dirname(transformers.__file__)
if not os.path.exists(PATCH_DIR):
return
for patch_file in os.listdir(PATCH_DIR):
patch_file_fullpatch = os.path.join(PATCH_DIR, patch_file)
if not patch_file_fullpatch.endswith(".patch"):
continue
try:
subprocess.check_output(["patch", "-p1", "--forward", "-i", patch_file_fullpatch, "-r", "/tmp/rej"], cwd=transformers_dir)
except subprocess.SubprocessError as e:
output_str = str(e.output)
if "previously applied" in output_str:
return
else:
print(str(output_str))
sys.exit(1)
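# A hedged usage sketch of this module: install scripts typically apply the bundled
# patches first and then pre-build one model so later benchmark runs start warm.
# The model name is illustrative and must be a key of `class_models` in model_factory.
def _demo_patch_and_cache(model_name="hf_Bert"):
    patch_transformers()
    cache_model(model_name)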
|
import argparse
import torch
from torchbenchmark.util.model import BenchmarkModel
from typing import List, Dict, Tuple
def add_bool_arg(parser: argparse.ArgumentParser, name: str, default_value: bool=True):
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--' + name, dest=name, action='store_true')
group.add_argument('--no-' + name, dest=name, action='store_false')
parser.set_defaults(**{name: default_value})
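# A small usage example of the helper above: add_bool_arg() registers a --name / --no-name
# pair, so a flag that defaults to True can still be turned off explicitly on the command line.
def _demo_add_bool_arg():
    parser = argparse.ArgumentParser()
    add_bool_arg(parser, "eval_fp16", default_value=True)
    assert parser.parse_args([]).eval_fp16 is True
    assert parser.parse_args(["--no-eval_fp16"]).eval_fp16 is False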
def parse_args(model: BenchmarkModel, extra_args: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser()
# by default, enable half precision for inference
add_bool_arg(parser, "eval_fp16", default_value=True)
args = parser.parse_args(extra_args)
args.device = model.device
args.jit = model.jit
# disable fp16 when device is CPU
if args.device == "cpu":
args.eval_fp16 = False
return args
def apply_args(model: BenchmarkModel, args: argparse.Namespace):
# apply eval_fp16
if args.eval_fp16:
model.model, model.example_inputs = enable_eval_fp16(model.model, model.example_inputs)
def enable_eval_fp16(model: torch.nn.Module, example_input: Dict[str, torch.Tensor]) -> Tuple[torch.nn.Module, Dict[str, torch.Tensor]]:
return model.half(), {'input_ids': example_input['input_ids'].half()} |
import math
import random
import os
import torch
from contextlib import nullcontext
from torch import optim
from torchbenchmark.util.model import BenchmarkModel
import transformers
from transformers import AutoConfig, ReformerConfig, BertConfig
from typing import Tuple
class_models = {
# 'name': (train_max_length, eval_max_length, config, model)
'hf_GPT2': (512, 1024, 'AutoConfig.from_pretrained("gpt2")', 'AutoModelForCausalLM'),
'hf_GPT2_large': (512, 1024, 'AutoConfig.from_pretrained("gpt2-large")', 'AutoModelForCausalLM'),
'hf_T5': (1024, 2048, 'AutoConfig.from_pretrained("t5-small")', 'AutoModelForSeq2SeqLM'),
'hf_T5_base': (1024, 2048, 'AutoConfig.from_pretrained("t5-base")', 'AutoModelForSeq2SeqLM'),
'hf_T5_large': (512, 512, 'AutoConfig.from_pretrained("t5-large")', 'AutoModelForSeq2SeqLM'),
'hf_Bart': (512, 512, 'AutoConfig.from_pretrained("facebook/bart-base")', 'AutoModelForSeq2SeqLM'),
'hf_Reformer': (4096, 4096, 'ReformerConfig()', 'AutoModelForMaskedLM'),
'hf_BigBird': (1024, 4096, 'BigBirdConfig(attention_type="block_sparse",)', 'AutoModelForMaskedLM'),
'hf_Albert': (512, 512, 'AutoConfig.from_pretrained("albert-base-v2")', 'AutoModelForMaskedLM'),
'hf_DistilBert': (512, 512, 'AutoConfig.from_pretrained("distilbert-base-uncased")', 'AutoModelForMaskedLM'),
'hf_Longformer': (1024, 4096, 'AutoConfig.from_pretrained("allenai/longformer-base-4096")', 'AutoModelForMaskedLM'),
'hf_Bert': (512, 512, 'BertConfig()', 'AutoModelForMaskedLM'),
# see https://huggingface.co/bert-large-cased
'hf_Bert_large': (512, 512, 'BertConfig(hidden_size=1024, num_hidden_layers=24, num_attention_heads=16)', 'AutoModelForMaskedLM'),
}
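# A hedged illustration of how one `class_models` entry is consumed (mirroring the
# constructor below): element [2] is eval'ed into a config object and element [3] names
# the transformers auto-class that builds the model from it. Note that 'hf_BigBird'
# additionally needs BigBirdConfig imported, which the constructor handles lazily.
def _build_from_class_models_entry(name: str):
    train_max_length, eval_max_length, config_expr, ctor_name = class_models[name]
    config = eval(config_expr)                  # e.g. BertConfig() for 'hf_Bert'
    ctor = getattr(transformers, ctor_name)     # e.g. AutoModelForMaskedLM
    return ctor.from_config(config), train_max_length, eval_max_length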
cpu_input_slice = {
'hf_BigBird': 5,
'hf_Longformer': 8,
'hf_T5': 4,
'hf_GPT2': 4,
'hf_Reformer': 2,
}
class ArgsToKwargsWrapper(torch.nn.Module):
def __init__(self, model):
super(ArgsToKwargsWrapper, self).__init__()
self.model = model
def forward(self, input_ids, decoder_input_ids):
return self.model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
class HuggingFaceModel(BenchmarkModel):
HF_MODEL = True
# Default eval precision on CUDA device is fp16(half mode)
DEFAULT_EVAL_CUDA_PRECISION = "fp16"
def __init__(self, name, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
self.name = name
if test == "train":
self.max_length = class_models[name][0]
elif test == "eval":
self.max_length = class_models[name][1]
# workaround the bigbird config import
if name == "hf_BigBird":
from transformers import BigBirdConfig
config = eval(class_models[name][2])
if class_models[name][2] == "ReformerConfig()" and not config.num_buckets:
# silence "config.num_buckets is not set. Setting config.num_buckets to 128"
config.num_buckets = 128
class_ctor = getattr(transformers, class_models[name][3])
self.model = class_ctor.from_config(config).to(device)
self.optimizer = optim.Adam(
self.model.parameters(),
lr=0.001,
# TODO resolve https://github.com/pytorch/torchdynamo/issues/1083
            capturable=bool(int(os.getenv("ADAM_CAPTURABLE", 0))),
        )
# populate these on-demand to avoid wasting memory when not used
self.vocab_size = config.vocab_size
self.dynamic_example_inputs = None
if test == "train":
input_ids = torch.randint(0, config.vocab_size, (self.batch_size, self.max_length)).to(device)
decoder_ids = torch.randint(0, config.vocab_size, (self.batch_size, self.max_length)).to(device)
self.example_inputs = {'input_ids': input_ids, 'labels': decoder_ids}
self.model.train()
elif test == "eval":
# Cut the length of sentence when running on CPU, to reduce test time
if self.device == "cpu" and self.name in cpu_input_slice:
self.max_length = int(self.max_length / cpu_input_slice[self.name])
eval_context = torch.randint(0, config.vocab_size, (self.batch_size, self.max_length)).to(device)
self.example_inputs = {'input_ids': eval_context, }
if class_models[name][3] == 'AutoModelForSeq2SeqLM':
self.example_inputs['decoder_input_ids'] = eval_context
self.model.eval()
self.amp_context = nullcontext
def get_module(self, wrap_model=True):
if class_models[self.name][3] == 'AutoModelForSeq2SeqLM':
k = 'labels' if self.test == 'train' else 'decoder_input_ids'
if not wrap_model:
return self.model, (
self.example_inputs['input_ids'], self.example_inputs[k])
return ArgsToKwargsWrapper(self.model), (
self.example_inputs['input_ids'], self.example_inputs[k])
return self.model, (self.example_inputs["input_ids"], )
def get_dynamic_shapes_module(self):
if self.dynamic_example_inputs is None:
nbuckets = 8
nsamples = 32
n = int(math.log2(self.max_length))
buckets = [2**n for n in range(n - nbuckets, n)]
self.dynamic_example_inputs = [
{
'input_ids': torch.randint(0, self.vocab_size, (self.batch_size, bucket_len)).to(self.device),
'labels': torch.randint(0, self.vocab_size, (self.batch_size, bucket_len)).to(self.device)}
for bucket_len in random.choices(buckets, k=nsamples)
]
if class_models[self.name][3] == 'AutoModelForSeq2SeqLM':
raise NotImplementedError("Not yet supported")
# TODO(whc) why is labels not passed through?
return self.model, [(i['input_ids'],) for i in self.dynamic_example_inputs]
def enable_fp16_half(self):
self.model = self.model.half()
def train(self):
with self.amp_context():
outputs = self.model(**self.example_inputs)
loss = outputs.loss
loss.backward()
self.optimizer.step()
def eval(self) -> Tuple[torch.Tensor]:
with torch.no_grad():
with self.amp_context():
out = self.model(**self.example_inputs)
# logits: prediction scores of language modeling head
# https://github.com/huggingface/transformers/blob/v4.16.2/src/transformers/modeling_outputs.py#L455
# transformations such as fx2trt will cast the original output type to dict
if isinstance(out, tuple):
return out
elif hasattr(out, 'logits'):
return (out.logits, )
else:
return (out["logits"], )
|
import argparse
import importlib
import os
import submitit
import sys
import torch
import uuid
from pathlib import Path
from typing import List
def parse_args(args: List[str]=None):
parser = argparse.ArgumentParser(description='Submitit for PyTorch Distributed Benchmark', add_help=False)
parser.add_argument(
"--ngpus",
default=2,
type=int,
help="Number of gpus to request on each node"
)
parser.add_argument(
"--nodes",
default=1,
type=int,
help="Number of nodes to request"
)
parser.add_argument(
"--timeout",
default=1440,
type=int,
help="Duration of the job"
)
parser.add_argument(
"--profiler",
default=False,
type=bool,
help="Measure with PyTorch Profiler. Disabled by default, as it crashes on AWS"
)
parser.add_argument(
"--partition",
default="train",
type=str,
help="The Slurm partition to submit to"
)
parser.add_argument(
"--cluster",
default=None,
help="Which slurm cluster to target. Use 'local' to run jobs locally, 'debug' to run jobs in process"
)
parser.add_argument(
"--job_dir",
default=os.getcwd(),
type=str,
help="A shared folder across all worker processes"
)
parser.add_argument(
"--model",
type=str,
default="torchbenchmark.e2e_models.hf_bert.Model",
help="specify the model to experiment with, by default uses e2e_models.hf_bert"
)
parser.add_argument(
"--trainer",
type=str,
default="torchbenchmark.util.distributed.trainer.Trainer",
help="trainer loop class, can be customized for specific behavior",
)
parser.add_argument(
"--distributed",
type=str,
choices=["ddp", "ddp_no_static_graph", "fsdp", "deepspeed", "none"],
default="ddp",
help="distributed training paradigm, by default using DDP",
)
parser.add_argument(
"--exclude",
type=str,
default="",
help="comma-separated list of nodes to exclude from the slurm allocation",
)
try:
if args:
return parser.parse_known_args(args)
else:
return parser.parse_known_args()
except:
parser.print_help()
sys.exit(0)
def get_init_file(args):
    # Init file must not exist, but its parent dir must exist.
os.makedirs(args.job_dir, exist_ok=True)
init_file = Path(args.job_dir) / f"{uuid.uuid4().hex}_init"
print(init_file)
if init_file.exists():
os.remove(str(init_file))
return init_file
class TrainerWrapper(object):
def __init__(self, args, model_args=None):
self.args = args
self.model_args = model_args
self.args.output_dir = args.job_dir
def __call__(self):
self._setup_gpu_args()
pos = self.args.model.rfind(".")
module = importlib.import_module(self.args.model[:pos])
model_class = getattr(module, self.args.model[(pos+1):])
pos = self.args.trainer.rfind(".")
module = importlib.import_module(self.args.trainer[:pos])
trainer_class = getattr(module, self.args.trainer[(pos+1):])
return trainer_class(self.args, model_class, model_args=self.model_args).measure()
def checkpoint(self):
self.args.dist_url = get_init_file(self.args).as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
os.environ["LOCAL_RANK"] = str(job_env.local_rank)
os.environ["RANK"] = str(job_env.global_rank)
os.environ["WORLD_SIZE"] = str(job_env.num_tasks)
def main():
args, model_args, = parse_args()
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, cluster=args.cluster, slurm_max_num_timeout=3000)
executor.update_parameters(
gpus_per_node=args.ngpus,
# one task per GPU
tasks_per_node=args.ngpus,
cpus_per_task=10,
nodes=args.nodes,
timeout_min=args.timeout,
# Below are cluster dependent parameters
slurm_partition=args.partition,
slurm_signal_delay_s=120,
slurm_exclude=args.exclude,
)
executor.update_parameters(name="distbench", slurm_array_parallelism=1, timeout_min=1000)
args.dist_url = get_init_file(args).as_uri()
args.output_dir = args.job_dir
job = executor.submit(TrainerWrapper(args, model_args))
# print ID of the Slurm job
print(job.job_id)
# waits for completion and returns output
print(job.results())
if __name__=="__main__":
main()
|
from datetime import datetime
import os
from pathlib import Path
from statistics import stdev
import torch
from torch.cuda import Event
from torch.profiler import profile, ProfilerActivity, tensorboard_trace_handler
from torchbenchmark.util.e2emodel import E2EBenchmarkModel, nested
import torch.distributed as dist
class Trainer():
DEFAULT_MEASURE_ITERATIONS = 10
def __init__(self, args, model_class, mode="SPMD", model_args=None):
self.args = args
self.model_args = model_args
self.model_class = model_class
self.mode = mode
self.local_rank = int(os.getenv("LOCAL_RANK", -1))
self.setup()
extra_args = [
"--distributed",
self.args.distributed,
]
extra_args.extend(self.model_args)
# create model instance after Trainer setup, so that
# visible devices won't be revised in model constructor
self.e2e_benchmark: E2EBenchmarkModel = model_class("train", batch_size=None, extra_args=extra_args)
expected_attrs = ["model", "optimizer", "train_dataloader", "accelerator", "run_contexts"]
assert all(attr in dir(self.e2e_benchmark) for attr in expected_attrs), (
"Missing attributes in the input E2EBenchmarkModel implementation: "
f"{[attr for attr in expected_attrs if attr not in dir(self.e2e_benchmark)]}"
)
self.rank = dist.get_rank()
def setup(self):
if self.mode == "SPMD":
# set the visible devices so that each SPMD process only sees one
# CUDA device
# N.B.: this has to be done before using any CUDA API from torch
# N.B.: Remove the following block as HF Accelerator by default puts
# the model to the device corresponding to LOCAL_RANK. It's better
# to use CUDA_VISIBLE_DEVICES and cuda:0 if HF Accelerator can avoid
# using local_rank as the device id.
"""
os.environ["CUDA_VISIBLE_DEVICES"] = f"{local_rank}"
assert torch.cuda.device_count() == 1, (
"SPMD Trainer expects 1 visible device per process, but saw "
f"{torch.cuda.device_count()} devices."
)
"""
torch.cuda.set_device(self.local_rank)
world_size = int(os.getenv("WORLD_SIZE", -1))
rank = int(os.getenv("RANK", -1))
assert self.local_rank != -1 and world_size != -1 and rank != -1, (
"Failed to retrieve SPMD configurations from environment "
f"variables. local_rank={self.local_rank}, world_size={world_size}, "
f"rank={rank}."
)
# TODO: hardcode NCCL for now, make this configurable if necessary
dist.init_process_group("nccl", init_method=self.args.dist_url, rank=rank, world_size=world_size)
else:
raise ValueError(f"Unrecognized distributed training mode {self.mode}")
def measure(self):
niters = self.DEFAULT_MEASURE_ITERATIONS
# TODO: using dummy data for now to rule out dataloader delays
batch = self.e2e_benchmark.next_batch()
######################################
# 1. warming up CUDACachingAllocator #
######################################
for _ in range(self.DEFAULT_MEASURE_ITERATIONS):
with nested(*self.e2e_benchmark.run_contexts):
loss = self.e2e_benchmark.run_forward(batch)
self.e2e_benchmark.run_backward(loss)
self.e2e_benchmark.run_optimizer_step()
# wait for all pending CUDA ops to finish
torch.cuda.synchronize(device=self.local_rank)
now = datetime.now()
name = f"{type(self).__name__}_{now.strftime('%Y_%m_%d_%H_%M_%S')}"
##################################################################
# 2. measure raw delays and memory to rule out profiler overhead #
##################################################################
events_pre_fwd = [Event(enable_timing=True) for _ in range(niters)]
events_pre_bwd = [Event(enable_timing=True) for _ in range(niters)]
events_pre_opt = [Event(enable_timing=True) for _ in range(niters)]
events_post_opt = [Event(enable_timing=True) for _ in range(niters)]
with nested(*self.e2e_benchmark.run_contexts):
for i in range(niters):
events_pre_fwd[i].record()
loss = self.e2e_benchmark.run_forward(batch)
events_pre_bwd[i].record()
self.e2e_benchmark.run_backward(loss)
events_pre_opt[i].record()
self.e2e_benchmark.run_optimizer_step()
events_post_opt[i].record()
# wait for all pending CUDA ops to finish
torch.cuda.synchronize(device=self.local_rank)
delays_fwd = [pre.elapsed_time(post) for pre, post in zip(events_pre_fwd, events_pre_bwd)]
delays_bwd = [pre.elapsed_time(post) for pre, post in zip(events_pre_bwd, events_pre_opt)]
delays_opt = [pre.elapsed_time(post) for pre, post in zip(events_pre_opt, events_post_opt)]
mean_fwd = float(sum(delays_fwd)) / len(delays_fwd)
stdev_fwd = stdev(delays_fwd)
mean_bwd = float(sum(delays_bwd)) / len(delays_bwd)
stdev_bwd = stdev(delays_bwd)
mean_opt = float(sum(delays_opt)) / len(delays_opt)
stdev_opt = stdev(delays_opt)
iter_time = events_pre_fwd[0].elapsed_time(events_post_opt[-1]) / niters
# write results
delay_dir = f"{self.args.job_dir}/delay"
Path(delay_dir).mkdir(parents=True, exist_ok=True)
fout = open(f"{delay_dir}/{name}.log", "w")
fout.write(
f"{mean_fwd:.2f}, {stdev_fwd:.2f}, "
f"{mean_bwd:.2f}, {stdev_bwd:.2f}, "
f"{mean_opt:.2f}, {stdev_opt:.2f}\n"
)
fout.close()
if self.args.profiler:
# N.B.: disable PyTorch Profiler by default due to
# https://github.com/pytorch/pytorch/issues/75369
################################################
            # 3. measure complete metrics through profiler #
################################################
with profile(
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
record_shapes=True, # Causes seg fault in export_chrome_trace
with_stack=True, # Causes seg fault with EFA
with_flops=True, # Causes seg fault in export_chrome_trace
on_trace_ready=tensorboard_trace_handler(
f"{self.args.job_dir}/tb/{name}",
self.rank,
use_gzip=True,
)
):
for i in range(niters):
loss = self.e2e_benchmark.run_forward(batch)
self.e2e_benchmark.run_backward(loss)
self.e2e_benchmark.run_optimizer_step()
# wait for all pending CUDA ops to finish
torch.cuda.synchronize(device=self.local_rank)
# wait for all peers to finish
dist.barrier(device_ids=[self.local_rank])
return {
"iter" : iter_time,
"fwd_mean" : mean_fwd,
"fwd_stdev" : stdev_fwd,
"bwd_mean" : mean_bwd,
"bwd_stdev" : stdev_bwd,
"opt_mean" : mean_opt,
"opt_stdev" : stdev_opt,
}
def teardown(self):
if self.mode == "SPMD":
dist.destroy_process_group()
|
from datetime import datetime
import os
from pathlib import Path
from statistics import stdev
from typing import Optional
import numpy as np
import torch
from torch.cuda import Event
from torch.profiler import profile, ProfilerActivity, schedule, tensorboard_trace_handler
from torchbenchmark.util.env_check import same
from torchbenchmark.util.model import BenchmarkModel
import torch.distributed as dist
class Trainer():
DEFAULT_MEASURE_ITERATIONS = 10
PROFILE_ITERATIONS = 2
def __init__(self, args, model_class, mode="SPMD", model_args=None):
self.args = args
self.model_args = model_args
self.model_class = model_class
self.mode = mode
self.local_rank = int(os.getenv("LOCAL_RANK", -1))
self.global_rank = int(os.getenv("RANK", -1))
self.setup()
# specify the name of the distributed trainer
extra_args = [
"--distributed",
self.args.distributed,
]
extra_args.extend(model_args)
batch_size = getattr(args, "batch_size", None)
# create model instance after Trainer setup, so that
# visible devices won't be revised in model constructor
self.benchmark: BenchmarkModel = model_class(test="train", device="cuda", batch_size=batch_size, extra_args=extra_args)
# options: "reference" or "test"
self.check_correctness_distributed : Optional[str] = getattr(args, "check_correctness_distributed", None)
self.reference_data_path : Optional[str] = getattr(args, "reference_data_path", None)
# reduce iterations to speed up the tests
if self.check_correctness_distributed:
self.DEFAULT_MEASURE_ITERATIONS = 2
self.rank = dist.get_rank()
def setup(self):
if self.mode == "SPMD":
# set the visible devices so that each SPMD process only sees one
# CUDA device
# N.B.: this has to be done before using any CUDA API from torch
# N.B.: Remove the following block as HF Accelerator by default puts
# the model to the device corresponding to LOCAL_RANK. It's better
# to use CUDA_VISIBLE_DEVICES and cuda:0 if HF Accelerator can avoid
# using local_rank as the device id.
"""
os.environ["CUDA_VISIBLE_DEVICES"] = f"{local_rank}"
assert torch.cuda.device_count() == 1, (
"SPMD Trainer expects 1 visible device per process, but saw "
f"{torch.cuda.device_count()} devices."
)
"""
torch.cuda.set_device(self.local_rank)
world_size = int(os.getenv("WORLD_SIZE", -1))
rank = int(os.getenv("RANK", -1))
assert self.local_rank != -1 and world_size != -1 and rank != -1, (
"Failed to retrieve SPMD configurations from environment "
f"variables. local_rank={self.local_rank}, world_size={world_size}, "
f"rank={rank}."
)
# TODO: hardcode NCCL for now, make this configurable if necessary
dist.init_process_group("nccl", init_method=self.args.dist_url, rank=rank, world_size=world_size)
else:
raise ValueError(f"Unrecognized distributed training mode {self.mode}")
def measure(self):
niters = self.DEFAULT_MEASURE_ITERATIONS
correctness = None
if self.check_correctness_distributed is not None:
self.benchmark.invoke()
if self.global_rank == 0:
grad_params = {}
for name, param in self.benchmark.model.named_parameters():
if param.requires_grad:
if param.grad is not None:
grad_params[name + ".grad"] = param.grad.cpu()
else:
grad_params[name + ".grad"] = None
if self.check_correctness_distributed == "reference":
with open(self.reference_data_path, "wb") as f:
torch.save(grad_params, f)
elif self.check_correctness_distributed == "test":
with open(self.reference_data_path, "rb") as f:
ref_params = torch.load(f)
                    def do_correctness_check():
                        correctness = True
                        for ref_name, ref_param in ref_params.items():
                            if ref_name not in grad_params:
                                correctness = False
                                print(f"correctness failure: {ref_name} in reference params but not in test params")
                                continue
                            test_param = grad_params[ref_name]
                            atol = rtol = 1e-4
                            if not same(test_param, ref_param, cos_similarity=False, atol=atol*40, rtol=rtol*40):
                                correctness = False
                                print(f"correctness failure: Test model differs from reference model in parameter: {ref_name}")
                        for test_name, test_param in grad_params.items():
                            if test_name not in ref_params:
                                correctness = False
                                print(f"correctness failure: {test_name} in test params but not in reference params")
                        return correctness
                    correctness = do_correctness_check()
######################################
# 1. warming up CUDACachingAllocator #
######################################
for _ in range(self.DEFAULT_MEASURE_ITERATIONS):
self.benchmark.invoke()
torch.cuda.reset_peak_memory_stats()
self.benchmark.invoke()
# wait for all pending CUDA ops to finish
torch.cuda.synchronize(device=self.local_rank)
max_memory = torch.cuda.max_memory_allocated(device=self.local_rank)
now = datetime.now()
name = f"{type(self).__name__}_{now.strftime('%Y_%m_%d_%H_%M_%S')}"
##################################################################
# 2. measure raw delays and memory to rule out profiler overhead #
##################################################################
events_pre_train = [Event(enable_timing=True) for _ in range(niters)]
events_post_train = [Event(enable_timing=True) for _ in range(niters)]
for i in range(niters):
events_pre_train[i].record()
self.benchmark.invoke()
events_post_train[i].record()
# wait for all pending CUDA ops to finish
torch.cuda.synchronize(device=self.local_rank)
latency_train = [pre.elapsed_time(post) for pre, post in zip(events_pre_train, events_post_train)]
median_latency = np.median(latency_train)
stdev_latency = stdev(latency_train)
if self.args.profiler:
# N.B.: disable PyTorch Profiler by default due to
# https://github.com/pytorch/pytorch/issues/75369
################################################
            # 3. measure complete metrics through profiler #
################################################
wait_runs = 2
warmup_runs = 2
with profile(
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
record_shapes=True, # Causes seg fault in export_chrome_trace
with_stack=True, # Causes seg fault with EFA
with_flops=True, # Causes seg fault in export_chrome_trace
on_trace_ready=tensorboard_trace_handler(
f"{self.args.job_dir}/tb/{name}",
self.rank,
use_gzip=True,
),
schedule=schedule(wait=wait_runs, warmup=warmup_runs, active=self.PROFILE_ITERATIONS),
) as profiler:
for i in range(self.PROFILE_ITERATIONS + warmup_runs + wait_runs):
self.benchmark.invoke()
profiler.step()
# wait for all pending CUDA ops to finish
torch.cuda.synchronize(device=self.local_rank)
# wait for all peers to finish
dist.barrier(device_ids=[self.local_rank])
return {
"latency_median" : median_latency,
"latency_stdev" : stdev_latency,
"max_memory" : max_memory,
**({"correctness": correctness} if correctness is not None else {}),
}
def teardown(self):
if self.mode == "SPMD":
dist.destroy_process_group()
|
from io import UnsupportedOperation
import os
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
def apply_trainer(model, trainer):
local_rank = int(os.getenv("LOCAL_RANK", -1))
if trainer == "ddp" or trainer == "ddp_no_static_graph":
static_graph = (trainer == "ddp")
ddp_model = DDP(
model,
device_ids=[local_rank],
            # Buffer broadcast is disabled by default; if it is needed, additional
            # tuning may be required to keep performance.
            broadcast_buffers=False,
# Set gradient as bucket view to avoid unnecessary copies
gradient_as_bucket_view=True,
# TODO: tune bucket_cap_mb
static_graph=static_graph,
)
return ddp_model
elif trainer == "fsdp":
fsdp_model = FSDP(
model,
device_id = torch.cuda.current_device()
)
return fsdp_model
raise UnsupportedOperation(f"Only DDP, FSDP are currently supported, but tried to use {trainer}")
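# A hedged usage sketch: the distributed launcher scripts call apply_trainer() after
# init_process_group() has run and LOCAL_RANK is set, e.g. model = apply_trainer(model, "ddp").
# The toy module below is illustrative only and assumes a CUDA device is visible.
def _demo_apply_trainer(trainer="ddp"):
    toy_model = torch.nn.Linear(8, 8).cuda()
    return apply_trainer(toy_model, trainer)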
|
import torch
import argparse
from torchbenchmark.util.backends import create_backend
from typing import List
def parse_torchscript_args(args: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser()
# enable ofi by default
parser.add_argument("--no-ofi", action='store_true', help="disable optimize_for_inference")
parser.add_argument("--fuser", type=str, default="", choices=["fuser0", "fuser1", "fuser2", "fuser3"], help="enable fuser")
args, unknown_args = parser.parse_known_args(args)
return args, unknown_args
@create_backend
def torchscript(model: 'torchbenchmark.util.model.BenchmarkModel', backend_args: List[str]):
model.jit = True
backend_args, extra_args = parse_torchscript_args(backend_args)
if model.device == "cpu" and backend_args.fuser == "fuser2":
raise NotImplementedError(f"{backend_args.fuser} only works with GPU.")
if model.test != "eval" and backend_args.fuser == "fuser3":
raise NotImplementedError(f"{backend_args.fuser} only works with eval mode.")
if backend_args.fuser:
model.add_context(lambda: torch.jit.fuser(backend_args.fuser))
def _torchscript():
# customized jit callback function
if hasattr(model, 'jit_callback'):
if backend_args.no_ofi:
raise NotImplementedError("Customized jit callback doesn't support options.")
model.jit_callback()
return
module, example_inputs = model.get_module()
if hasattr(torch.jit, '_script_pdt'):
module = torch.jit._script_pdt(module, example_inputs=[example_inputs, ])
else:
module = torch.jit.script(module, example_inputs=[example_inputs, ])
if model.test == "eval" and not backend_args.no_ofi:
if backend_args.fuser != "fuser3":
module = torch.jit.optimize_for_inference(module)
else:
module = torch.jit.freeze(module)
model.set_module(module)
return _torchscript, extra_args
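# A hedged standalone sketch of what _torchscript() does for eval tests: script the module,
# then run optimize_for_inference on it (unless --no-ofi is passed). The toy module is
# illustrative only; real benchmarks are scripted through model.get_module().
def _demo_script_and_optimize():
    toy_model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU()).eval()
    scripted = torch.jit.script(toy_model)
    return torch.jit.optimize_for_inference(scripted)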
|
import os
import argparse
import torch
from torchbenchmark.util.backends import create_backend
from typing import List, Tuple
try:
from fx2ait.acc_tracer import acc_tracer
from fx2ait.ait_module import AITModule
from fx2ait.fx2ait import AITInterpreter
except ImportError:
# if fx2ait is not available, skip it.
pass
def parse_ait_args(args: List[str]) -> Tuple[argparse.Namespace, List[str]]:
parser = argparse.ArgumentParser()
parser.add_argument("--use_cuda_graph", action='store_true', help="enable CUDA Graph")
args, unknown_args = parser.parse_known_args(args)
return args, unknown_args
@create_backend
def fx2ait(model: 'torchbenchmark.util.model.BenchmarkModel', backend_args: List[str]):
AIT_WORK_PATH = os.path.join("/tmp", ".torchbench", "ait")
    assert model.dargs.precision == "fp16", f"AITemplate only supports float16 precision, but got {model.dargs.precision}"
OSS_AITModel = False
try:
# Load Non-OSS
torch.ops.load_library("//deeplearning/ait:AITModel")
except Exception:
torch.ops.load_library("build/libait_model.so")
OSS_AITModel = True
ait_options, extra_args = parse_ait_args(backend_args)
def _ait():
mod, inputs = model.get_module()
traced = acc_tracer.trace(mod, inputs)
interp = AITInterpreter(traced, inputs, AIT_WORK_PATH, "logs")
interp_result = interp.run()
ctor = torch.classes.ait.AITModel if OSS_AITModel else torch.classes.fb.AITModel
ait_mod = AITModule(
ctor(
interp_result.engine.lib_path,
interp_result.input_names,
interp_result.output_names,
torch.float16,
torch.float16,
1, # num_runtimes
),
interp_result,
)
ait_mod.engine.use_cuda_graph = ait_options.use_cuda_graph
return _ait, extra_args
|
"""
Support TorchDynamo(https://github.com/facebookresearch/torchdynamo) backends
"""
import argparse
import contextlib
import distutils.util
import warnings
from typing import List
import torch
import torch._dynamo as torchdynamo
from torchbenchmark.util.model import is_staged_train_test
def parse_torchdynamo_args(model: 'torchbenchmark.util.model.BenchmarkModel', dynamo_args: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser()
available_backends = torchdynamo.list_backends(exclude_tags=None)
parser.add_argument(
"--torchdynamo", choices=available_backends, help="Specify torchdynamo backends"
)
parser.add_argument(
"--tritonmm", type=str, help="torchinductor.config.triton.mm configuration"
)
parser.add_argument(
"--dynamic_shapes",
action='store_true',
help="dynamic shape and symbolic tracing",
)
parser.add_argument(
"--pt2_debug_log",
action='store_true',
help="enable debug log for PT2 (dynamo, inductor, AOTAutograd)",
)
parser.add_argument(
"--full_graph",
action='store_true',
help="capture full graph and no python",
)
parser.add_argument(
"--optimize_dynamo_ddp",
action='store_true',
help="enable extra optimizations for DDP + dynamo"
)
parser.add_argument(
"--torchinductor_cudagraph",
type=distutils.util.strtobool,
default="true",
)
parser.add_argument(
"--torchinductor_fallback_random",
type=distutils.util.strtobool,
default="false",
)
parser.add_argument(
"--dynamo_disable_optimizer_step",
type=distutils.util.strtobool,
default="false",
)
args, extra_args = parser.parse_known_args(dynamo_args)
return args, extra_args
def apply_torchdynamo_args(model: 'torchbenchmark.util.model.BenchmarkModel', args: argparse.Namespace, precision: str):
if args.torchdynamo == "fx2trt" and precision == "fp16":
dynamo_optimizer = torchdynamo.optimize(torchdynamo.optimizations.backends.fx2trt_compiler_fp16)
else:
dynamo_kwargs = {}
if args.dynamic_shapes:
dynamo_kwargs["dynamic"] = True
if args.full_graph:
dynamo_kwargs["nopython"] = True
dynamo_optimizer = torchdynamo.optimize(args.torchdynamo, **dynamo_kwargs)
if args.pt2_debug_log:
import logging
torch._logging.set_logs(dynamo=logging.DEBUG, inductor=logging.DEBUG, aot=logging.DEBUG)
if args.torchdynamo == "inductor":
import torch._inductor as torchinductor
torchinductor.config.triton.cudagraphs = bool(args.torchinductor_cudagraph)
# Setup torchinductor.config.triton.mm
if args.tritonmm == "triton":
torchinductor.config.triton.mm = "triton"
# currently can't pass correctness with use_bmm = True
# torchinductor.config.triton.use_bmm = True
# used for correctness checks, to avoid triton rand() behaving differently from torch rand().
torchinductor.config.fallback_random = bool(args.torchinductor_fallback_random)
if bool(args.dynamo_disable_optimizer_step):
found_optimizer_step = False
try:
model.cfg.optimizer.step = torch._dynamo.disable(model.cfg.optimizer.step)
found_optimizer_step = True
except AttributeError:
pass
try:
model.optimizer.step = torch._dynamo.disable(model.optimizer.step)
found_optimizer_step = True
except AttributeError:
pass
if not found_optimizer_step:
warnings.warn("--dynamo_disable_optimizer_step is set to True, but the optimizer could not be found on this model")
if model.test == "train":
if is_staged_train_test(model):
model.forward = dynamo_optimizer(model.forward)
else:
model.train = dynamo_optimizer(model.train)
else:
model.eval = dynamo_optimizer(model.eval)
if args.optimize_dynamo_ddp:
@contextlib.contextmanager
def optimize_ddp_ctx(val: bool):
old_value = torchdynamo.config.optimize_ddp
try:
torchdynamo.config.optimize_ddp = val
yield
finally:
torchdynamo.config.optimize_ddp = old_value
model.add_context(lambda: optimize_ddp_ctx(True))
torchdynamo.reset()
|
"""
Utils for managing backends
"""
import functools
BACKENDS = dict()
def create_backend(fn):
@functools.wraps(fn)
def inner(model: 'torchbenchmark.util.model.BenchmarkModel', **kwargs):
if model is None:
return None
try:
return fn(model, **kwargs)
except KeyboardInterrupt:
raise
except Exception as e:
print(f"{fn.__name__} error: {e}")
raise
BACKENDS[fn.__name__] = inner
return inner
def list_backends():
"""
Return valid strings that can be passed to:
@torchdynamo.optimize(<backend>)
def foo(...):
....
"""
return sorted(BACKENDS.keys())
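# A hedged example of the registration contract used below: a backend function accepts the
# model plus its backend args and returns a (compile_fn, remaining_args) pair, where
# compile_fn mutates the model in place when invoked. The no-op backend below is
# illustrative only and is not registered by torchbench itself.
def _noop_backend(model: 'torchbenchmark.util.model.BenchmarkModel', backend_args):
    def _noop():
        pass  # leave the model untouched
    return _noop, backend_args
# Registering it would simply be: noop = create_backend(_noop_backend)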
# register the backends
from .jit import torchscript
from .ait import fx2ait
from .trt import fx2trt, torch_trt
from .cudagraph import cudagraph
__all__ = ["list_backends", "create_backend"]
|
import torch
from torchbenchmark.util.backends import create_backend
from typing import List
WARMUP_ITER = 3
@create_backend
def cudagraph(model: 'torchbenchmark.util.model.BenchmarkModel', backend_args: List[str]):
cudagraph_func_name = f"cudagraph_{model.test}"
    assert hasattr(model, cudagraph_func_name), f"CUDA Graph only works on models that implement {cudagraph_func_name}()"
if model.test == "train":
assert hasattr(model, "SKIP_ZERO_GRAD"), f"The model must support skipping zero grad in its train test."
def _cudagraph():
# CUDAGraph can't be copied/pickled, disable copying in correctness checking
model.DEEPCOPY = False
model.SKIP_ZERO_GRAD = True
# warmup
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
for _ in range(WARMUP_ITER):
if model.test == "train":
model.opt.zero_grad(set_to_none=True)
model.invoke()
torch.cuda.current_stream().wait_stream(s)
# capture
cuda_graph = torch.cuda.CUDAGraph()
if model.test == "train":
model.opt.zero_grad(set_to_none=True)
with torch.cuda.graph(cuda_graph):
model.invoke()
model.g = cuda_graph
if model.test == "train":
model.train = getattr(model, cudagraph_func_name)
elif model.test == "eval":
model.eval = getattr(model, cudagraph_func_name)
else:
assert False, f"Expected model test train or eval, get {model.test}"
return _cudagraph, backend_args
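# A hedged standalone sketch of the capture pattern above (warm up on a side stream,
# capture once, replay with new data): shown on a toy eval module, since the backend
# itself only stores the captured graph on `model.g` and leaves replay to
# cudagraph_{train,eval}(). Assumes a CUDA device is available.
def _demo_capture_and_replay():
    toy_model = torch.nn.Linear(16, 16).cuda().eval()
    static_input = torch.randn(8, 16, device="cuda")
    with torch.no_grad():
        # warmup on a side stream so allocations don't pollute the capture
        s = torch.cuda.Stream()
        s.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(s):
            for _ in range(WARMUP_ITER):
                toy_model(static_input)
        torch.cuda.current_stream().wait_stream(s)
        # capture one iteration into a CUDA graph
        g = torch.cuda.CUDAGraph()
        with torch.cuda.graph(g):
            static_output = toy_model(static_input)
    # replay with fresh data copied into the captured input buffer
    static_input.copy_(torch.randn(8, 16, device="cuda"))
    g.replay()
    return static_output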
|
# By default, FlopCountAnalysis count one fused-mult-add (FMA) as one flop.
# However, in our context, we count 1 FMA as 2 flops instead of 1.
# https://github.com/facebookresearch/fvcore/blob/7a0ef0c0839fa0f5e24d2ef7f5d48712f36e7cd7/fvcore/nn/flop_count.py
def enable_fvcore_flops(model: 'torchbenchmark.util.model.BenchmarkModel', flops_fma=2.0):
assert hasattr(model, 'TORCHVISION_MODEL') and model.TORCHVISION_MODEL, "fvcore flops is only available on torchvision models!"
assert model.test == "eval", "fvcore flops is only available on inference tests, as it doesn't measure backward pass."
from fvcore.nn import FlopCountAnalysis
model.flops = FlopCountAnalysis(model.model, tuple(model.example_inputs)).total()
model.flops = model.flops / model.batch_size * flops_fma
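# A hedged standalone sketch of the fvcore call above: FlopCountAnalysis counts one FMA as
# one flop, so the per-sample total is scaled by flops_fma (2.0 by default) to count the
# multiply and the add separately. The toy model and input shape are illustrative only.
def _demo_fvcore_flops(flops_fma=2.0):
    import torch
    from fvcore.nn import FlopCountAnalysis
    toy_model = torch.nn.Linear(128, 64)
    example_input = torch.randn(4, 128)
    total_flops = FlopCountAnalysis(toy_model, (example_input,)).total()
    return total_flops / example_input.shape[0] * flops_fma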
|
from typing import List
import torch
from torchbenchmark.util.backends import create_backend
from torchbenchmark.util.env_check import is_hf_model
@create_backend
def fx2trt(model: 'torchbenchmark.util.model.BenchmarkModel', backend_args: List[str]):
FP16 = True if model.dargs.precision == "fp16" else False
HF_MODEL = True if is_hf_model(model) else False
assert model.device == "cuda" and model.test == "eval", f"fx2trt only works on CUDA inference tests."
def _fx2trt():
from torch_tensorrt.fx import compile
from torch_tensorrt.fx.utils import LowerPrecision
module, example_inputs = model.get_module()
precision = LowerPrecision.FP16 if FP16 else LowerPrecision.FP32
if HF_MODEL:
from transformers.utils.fx import symbolic_trace as hf_symbolic_trace
traced_model = hf_symbolic_trace(
module,
                batch_size=model.batch_size,
                sequence_length=model.max_length
)
trt_model = compile(
traced_model,
example_inputs,
max_batch_size=model.batch_size,
lower_precision=precision,
explicit_batch_dimension=True,
max_workspace_size=20 << 30,
)
else:
trt_model = compile(module=module,
input=example_inputs,
max_batch_size=model.batch_size,
lower_precision=precision)
model.set_module(trt_model)
return _fx2trt, backend_args
@create_backend
def torch_trt(model: 'torchbenchmark.util.model.BenchmarkModel', backend_args: List[str]):
FP16 = True if model.dargs.precision == "fp16" else False
assert model.device == "cuda" and model.test == "eval", f"fx2trt only works on CUDA inference tests."
def _torch_trt():
import torch_tensorrt
module, example_inputs = model.get_module()
if FP16:
torchtrt_dtype = torch_tensorrt.dtype.half
torch_dtype = torch.half
else:
torchtrt_dtype = torch_tensorrt.dtype.float
torch_dtype = torch.float32
trt_input = [torch_tensorrt.Input(shape=example_inputs[0].shape, dtype=torch_dtype)]
trt_module = torch_tensorrt.compile(module, inputs=trt_input, enabled_precisions=torchtrt_dtype)
model.set_module(trt_module)
return _torch_trt, backend_args
|
"""
Utilities to measure metrics of a model.
"""
import torch
import time
import dataclasses
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark import ModelTask
from typing import List, Union, Tuple, Optional
WARMUP_ROUNDS = 10
BENCHMARK_ITERS = 15
MEMPROF_ITER = 2
NANOSECONDS_PER_MILLISECONDS = 1_000_000.0
@dataclasses.dataclass
class TorchBenchModelMetrics:
latencies: List[float]
cpu_peak_mem: Optional[float]
gpu_peak_mem: Optional[float]
def get_latencies(func, device: str, nwarmup=WARMUP_ROUNDS, num_iter=BENCHMARK_ITERS) -> List[float]:
"Run one step of the model, and return the latency in milliseconds."
# Warm-up `nwarmup` rounds
for _i in range(nwarmup):
func()
result_summary = []
for _i in range(num_iter):
if device == "cuda":
torch.cuda.synchronize()
            # Use time_ns() instead of time(), which may not provide precision better than
            # 1 second according to https://docs.python.org/3/library/time.html#time.time.
t0 = time.time_ns()
func()
torch.cuda.synchronize() # Wait for the events to be recorded!
t1 = time.time_ns()
else:
t0 = time.time_ns()
func()
t1 = time.time_ns()
result_summary.append((t1 - t0) / NANOSECONDS_PER_MILLISECONDS)
return result_summary
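# A hedged usage sketch: get_latencies() times an argument-free callable, which is why the
# benchmark harness passes `model.invoke`; any zero-argument function works for illustration.
def _demo_get_latencies():
    toy_model = torch.nn.Linear(32, 32)
    example_input = torch.randn(64, 32)
    return get_latencies(lambda: toy_model(example_input), device="cpu", nwarmup=2, num_iter=5)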
def get_peak_memory(func, device: str, num_iter=MEMPROF_ITER, export_metrics_file='', metrics_needed=[], metrics_gpu_backend='dcgm', cpu_monitored_pid=None) -> Tuple[Optional[float], Optional[str], Optional[float]]:
"Run one step of the model, and return the peak memory in MB."
from components.model_analyzer.TorchBenchAnalyzer import ModelAnalyzer
new_metrics_needed = [_ for _ in metrics_needed if _ in ['cpu_peak_mem', 'gpu_peak_mem']]
if not new_metrics_needed:
raise ValueError(f"Expected metrics_needed to be non-empty, get: {metrics_needed}")
mem_model_analyzer = ModelAnalyzer(export_metrics_file, new_metrics_needed, metrics_gpu_backend, cpu_monitored_pid)
continue_num_iter = BENCHMARK_ITERS - num_iter
def work_func():
if device == "cuda":
torch.cuda.synchronize()
func()
torch.cuda.synchronize()
else:
func()
t0 = time.time_ns()
work_func()
t1 = time.time_ns()
# if total execution time is less than 15ms, we run the model for BENCHMARK_ITERS times
# to get more accurate peak memory
if (t1 - t0) < 15 * NANOSECONDS_PER_MILLISECONDS:
num_iter = BENCHMARK_ITERS
else:
num_iter = MEMPROF_ITER
mem_model_analyzer.start_monitor()
for _i in range(num_iter):
work_func()
mem_model_analyzer.stop_monitor()
mem_model_analyzer.aggregate()
device_id = None
gpu_peak_mem = None
cpu_peak_mem = None
if 'gpu_peak_mem' in metrics_needed:
device_id, gpu_peak_mem = mem_model_analyzer.calculate_gpu_peak_mem()
if 'cpu_peak_mem' in metrics_needed:
cpu_peak_mem = mem_model_analyzer.calculate_cpu_peak_mem()
if export_metrics_file:
mem_model_analyzer.update_export_name("_peak_memory")
mem_model_analyzer.export_all_records_to_csv()
return cpu_peak_mem, device_id, gpu_peak_mem
def get_model_test_metrics(model: Union[BenchmarkModel, ModelTask], metrics=[], export_metrics_file=False, metrics_gpu_backend='nvml') -> TorchBenchModelMetrics:
latencies = None
cpu_peak_mem = None
gpu_peak_mem = None
if not (isinstance(model, BenchmarkModel) or isinstance(model, ModelTask)):
raise ValueError(f"Expected BenchmarkModel or ModelTask, get type: {type(model)}")
device = model.device if isinstance(model, BenchmarkModel) else model.get_model_attribute("device")
if 'latencies' in metrics:
latencies = get_latencies(model.invoke, device)
if 'cpu_peak_mem' in metrics or 'gpu_peak_mem' in metrics:
        # model.worker only exists when the model runs in a subprocess (ModelTask)
        monitored_pid = model.worker.proc_pid() if isinstance(model, ModelTask) else None
        cpu_peak_mem, _device_id, gpu_peak_mem = get_peak_memory(model.invoke, device, export_metrics_file=export_metrics_file,
                                                                 metrics_needed=metrics, metrics_gpu_backend=metrics_gpu_backend,
                                                                 cpu_monitored_pid=monitored_pid)
return TorchBenchModelMetrics(latencies, cpu_peak_mem, gpu_peak_mem)
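# Example (hypothetical usage; assumes `model` is an already-instantiated BenchmarkModel or ModelTask):
#   metrics = get_model_test_metrics(model, metrics=['latencies'])
#   print(metrics.latencies)  # per-iteration latencies in milliseconds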
|
"""
Utilities to instantiate TorchBench models in the same process or child process.
Functions in this file don't handle exceptions.
They expect callers handle all exceptions.
"""
import os
import importlib
import dataclasses
from typing import Optional, List, Dict
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark import _list_model_paths, ModelTask
WORKER_TIMEOUT = 600 # seconds
BS_FIELD_NAME = "batch_size"
@dataclasses.dataclass
class TorchBenchModelConfig:
name: str
device: str
test: str
batch_size: Optional[int]
jit: bool
extra_args: List[str]
extra_env: Optional[Dict[str, str]] = None
def _set_extra_env(extra_env):
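    """Set extra environment variables in the current process, if any are provided."""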
if not extra_env:
return
for env_key in extra_env:
os.environ[env_key] = extra_env[env_key]
def inject_model_invoke(model_task: ModelTask, inject_function):
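    """Replace the model task's invoke function with `inject_function`, identified by its module and name."""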
model_task.replace_invoke(inject_function.__module__, inject_function.__name__)
def load_model_isolated(config: TorchBenchModelConfig, timeout: float=WORKER_TIMEOUT) -> ModelTask:
""" Load and return the model in a subprocess. """
task = ModelTask(config.name, timeout=timeout, extra_env=config.extra_env)
if not task.model_details.exists:
raise ValueError(f"Failed to import model task: {config.name}. Please run the model manually to make sure it succeeds, or report a bug.")
task.make_model_instance(test=config.test, device=config.device, jit=config.jit, batch_size=config.batch_size, extra_args=config.extra_args)
task_batch_size = task.get_model_attribute(BS_FIELD_NAME)
# check batch size
if config.batch_size and (not config.batch_size == task_batch_size):
raise ValueError(f"User specify batch size {config.batch_size}," +
f"but model {task.name} runs with batch size {task_batch_size}. Please report a bug.")
return task
def load_model(config: TorchBenchModelConfig) -> BenchmarkModel:
"""Load and return a model instance in the same process. """
package = "torchbenchmark"
module = importlib.import_module(f'.models.{config.name}', package=package)
Model = getattr(module, 'Model', None)
if not Model:
raise ValueError(f"Error: {module} does not define attribute Model.")
model_instance = Model(test=config.test, device=config.device, batch_size=config.batch_size, jit=config.jit, extra_args=config.extra_args)
# check name
if not model_instance.name == config.name:
raise ValueError(f"Required model {config.name}, loaded {model_instance.name}.")
# check batch size
if config.batch_size and (not config.batch_size == model_instance.batch_size):
raise ValueError(f"User specify batch size {config.batch_size}," +
f"but model {model_instance.name} runs with batch size {model_instance.batch_size}. Please report a bug.")
_set_extra_env(config.extra_env)
return model_instance
def list_devices() -> List[str]:
"""Return a list of available devices."""
devices = ["cpu"]
import torch
if torch.cuda.is_available():
devices.append("cuda")
return devices
def list_tests() -> List[str]:
"""Return a list of available tests."""
return ["train", "eval"]
def list_models() -> List[str]:
"""Return a list of names of all TorchBench models"""
model_paths = _list_model_paths()
model_names = list(map(lambda x: os.path.basename(x), model_paths))
return model_names
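# Example (hypothetical): enumerate one config per (model, device, test) combination.
#   configs = [TorchBenchModelConfig(name=n, device=d, test=t, batch_size=None, jit=False, extra_args=[])
#              for n in list_models() for d in list_devices() for t in list_tests()]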
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 2
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(name="hf_BigBird", test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
    cache_model(model_name)
|
import dataclasses
@dataclasses.dataclass
class DRQConfig:
env = "cartpole_swingup"
# IMPORTANT: if action_repeat is used the effective number of env steps needs to be
# multiplied by action_repeat in the result graphs.
# This is a common practice for a fair comparison.
# See the 2nd paragraph in Appendix C of SLAC: https://arxiv.org/pdf/1907.00953.pdf
# See Dreamer TF2's implementation: https://github.com/danijar/dreamer/blob/02f0210f5991c7710826ca7881f19c64a012290c/dreamer.py#L340
action_repeat = 4
# train
num_train_steps = 1
num_train_iters = 1
    # num_seed_steps can't be zero,
    # and the number of training steps must be greater than num_seed_steps
num_seed_steps = 1
replay_buffer_capacity = 100000
seed = 1
# eval
eval_frequency = 5000
# observation
image_size = 84
image_pad = 4
frame_stack = 3
# global params
lr = 1e-3
    # IMPORTANT: please use a batch size of 512 to reproduce the results in the paper. However, it still works well with a smaller batch size.
batch_size = 128
# Agent configurations
discount = 0.99
init_temperature = 0.1
actor_update_frequency = 2
critic_tau = 0.01
critic_target_update_frequency = 2
# Actor configurations
hidden_dim = 1024
hidden_depth = 2
log_std_bounds = [-10, 2]
# Encoder configurations
feature_dim = 50
# obs data, relative to __file__
obs_path = "obs.pkl"
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import math
from . import utils
class Encoder(nn.Module):
"""Convolutional encoder for image-based observations."""
def __init__(self, obs_shape, feature_dim):
super().__init__()
assert len(obs_shape) == 3
self.num_layers = 4
self.num_filters = 32
self.output_dim = 35
self.output_logits = False
self.feature_dim = feature_dim
self.convs = nn.ModuleList([
nn.Conv2d(obs_shape[0], self.num_filters, 3, stride=2),
nn.Conv2d(self.num_filters, self.num_filters, 3, stride=1),
nn.Conv2d(self.num_filters, self.num_filters, 3, stride=1),
nn.Conv2d(self.num_filters, self.num_filters, 3, stride=1)
])
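        # With 84x84 observations (DRQConfig.image_size), the conv stack above yields 35x35
        # feature maps (84 -> 41 -> 39 -> 37 -> 35), matching self.output_dim and the
        # num_filters * 35 * 35 flatten used by the head below.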
self.head = nn.Sequential(
nn.Linear(self.num_filters * 35 * 35, self.feature_dim),
nn.LayerNorm(self.feature_dim))
self.outputs = dict()
def forward_conv(self, obs):
obs = obs / 255.
self.outputs['obs'] = obs
conv = torch.relu(self.convs[0](obs))
self.outputs['conv1'] = conv
for i in range(1, self.num_layers):
conv = torch.relu(self.convs[i](conv))
self.outputs['conv%s' % (i + 1)] = conv
# Changed view to reshape here to support channels last input
# TODO: upstream this change to https://github.com/denisyarats/drq/blob/master/drq.py#L48
h = conv.reshape(conv.size(0), -1)
return h
def forward(self, obs, detach=False):
h = self.forward_conv(obs)
if detach:
h = h.detach()
out = self.head(h)
if not self.output_logits:
out = torch.tanh(out)
self.outputs['out'] = out
return out
def copy_conv_weights_from(self, source):
"""Tie convolutional layers"""
for i in range(self.num_layers):
utils.tie_weights(src=source.convs[i], trg=self.convs[i])
def log(self, logger, step):
pass
class Actor(nn.Module):
"""torch.distributions implementation of an diagonal Gaussian policy."""
def __init__(self, encoder_cfg, action_shape, hidden_dim, hidden_depth,
log_std_bounds):
super().__init__()
self.encoder = Encoder(*encoder_cfg)
self.log_std_bounds = log_std_bounds
self.trunk = utils.mlp(self.encoder.feature_dim, hidden_dim,
2 * action_shape[0], hidden_depth)
self.outputs = dict()
self.apply(utils.weight_init)
def forward(self, obs, detach_encoder=False):
obs = self.encoder(obs, detach=detach_encoder)
mu, log_std = self.trunk(obs).chunk(2, dim=-1)
# constrain log_std inside [log_std_min, log_std_max]
log_std = torch.tanh(log_std)
log_std_min, log_std_max = self.log_std_bounds
log_std = log_std_min + 0.5 * (log_std_max - log_std_min) * (log_std +
1)
std = log_std.exp()
self.outputs['mu'] = mu
self.outputs['std'] = std
dist = utils.SquashedNormal(mu, std)
return dist
def log(self, logger, step):
pass
class Critic(nn.Module):
"""Critic network, employes double Q-learning."""
def __init__(self, encoder_cfg, action_shape, hidden_dim, hidden_depth):
super().__init__()
self.encoder = Encoder(*encoder_cfg)
self.Q1 = utils.mlp(self.encoder.feature_dim + action_shape[0],
hidden_dim, 1, hidden_depth)
self.Q2 = utils.mlp(self.encoder.feature_dim + action_shape[0],
hidden_dim, 1, hidden_depth)
self.outputs = dict()
self.apply(utils.weight_init)
def forward(self, obs, action, detach_encoder=False):
assert obs.size(0) == action.size(0)
obs = self.encoder(obs, detach=detach_encoder)
obs_action = torch.cat([obs, action], dim=-1)
q1 = self.Q1(obs_action)
q2 = self.Q2(obs_action)
self.outputs['q1'] = q1
self.outputs['q2'] = q2
return q1, q2
def log(self, logger, step):
pass
class DRQAgent(object):
"""Data regularized Q: actor-critic method for learning from pixels."""
def __init__(self, cfg, device, obs_shape, action_shape, action_range):
self.action_range = action_range
self.device = torch.device(device)
self.discount = cfg.discount
self.critic_tau = cfg.critic_tau
self.actor_update_frequency = cfg.actor_update_frequency
self.critic_target_update_frequency = cfg.critic_target_update_frequency
self.batch_size = cfg.batch_size
encoder_cfg = (obs_shape, cfg.feature_dim)
self.actor = Actor(encoder_cfg=encoder_cfg,
action_shape=action_shape,
hidden_dim=cfg.hidden_dim,
hidden_depth=cfg.hidden_depth,
log_std_bounds=cfg.log_std_bounds).to(self.device)
self.critic = Critic(encoder_cfg=encoder_cfg,
action_shape=action_shape,
hidden_dim=cfg.hidden_dim,
hidden_depth=cfg.hidden_depth).to(self.device)
self.critic_target = Critic(encoder_cfg=encoder_cfg,
action_shape=action_shape,
hidden_dim=cfg.hidden_dim,
hidden_depth=cfg.hidden_depth).to(self.device)
self.critic_target.load_state_dict(self.critic.state_dict())
# tie conv layers between actor and critic
self.actor.encoder.copy_conv_weights_from(self.critic.encoder)
self.log_alpha = torch.tensor(np.log(cfg.init_temperature)).to(device)
self.log_alpha.requires_grad = True
# set target entropy to -|A|
self.target_entropy = -action_shape[0]
# optimizers
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(),
lr=cfg.lr)
self.log_alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=cfg.lr)
self.train()
self.critic_target.train()
def train(self, training=True):
self.training = training
self.actor.train(training)
self.critic.train(training)
@property
def alpha(self):
return self.log_alpha.exp()
def act(self, obs, sample=False):
obs = torch.FloatTensor(obs).to(self.device)
obs = obs.unsqueeze(0)
dist = self.actor(obs)
action = dist.sample() if sample else dist.mean
action = action.clamp(*self.action_range)
assert action.ndim == 2 and action.shape[0] == 1
return utils.to_np(action[0])
def update_critic(self, obs, obs_aug, action, reward, next_obs,
next_obs_aug, not_done, logger, step):
with torch.no_grad():
dist = self.actor(next_obs)
next_action = dist.rsample()
log_prob = dist.log_prob(next_action).sum(-1, keepdim=True)
target_Q1, target_Q2 = self.critic_target(next_obs, next_action)
target_V = torch.min(target_Q1,
target_Q2) - self.alpha.detach() * log_prob
target_Q = reward + (not_done * self.discount * target_V)
dist_aug = self.actor(next_obs_aug)
next_action_aug = dist_aug.rsample()
log_prob_aug = dist_aug.log_prob(next_action_aug).sum(-1,
keepdim=True)
target_Q1, target_Q2 = self.critic_target(next_obs_aug,
next_action_aug)
target_V = torch.min(
target_Q1, target_Q2) - self.alpha.detach() * log_prob_aug
target_Q_aug = reward + (not_done * self.discount * target_V)
target_Q = (target_Q + target_Q_aug) / 2
# get current Q estimates
current_Q1, current_Q2 = self.critic(obs, action)
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(
current_Q2, target_Q)
Q1_aug, Q2_aug = self.critic(obs_aug, action)
critic_loss += F.mse_loss(Q1_aug, target_Q) + F.mse_loss(
Q2_aug, target_Q)
# logger.log('train_critic/loss', critic_loss, step)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
self.critic.log(logger, step)
def update_actor_and_alpha(self, obs, logger, step):
# detach conv filters, so we don't update them with the actor loss
dist = self.actor(obs, detach_encoder=True)
action = dist.rsample()
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
# detach conv filters, so we don't update them with the actor loss
actor_Q1, actor_Q2 = self.critic(obs, action, detach_encoder=True)
actor_Q = torch.min(actor_Q1, actor_Q2)
actor_loss = (self.alpha.detach() * log_prob - actor_Q).mean()
# optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
self.actor.log(logger, step)
self.log_alpha_optimizer.zero_grad()
alpha_loss = (self.alpha *
(-log_prob - self.target_entropy).detach()).mean()
alpha_loss.backward()
self.log_alpha_optimizer.step()
def update(self, replay_buffer, logger, step):
obs, action, reward, next_obs, not_done, obs_aug, next_obs_aug = replay_buffer.sample(
self.batch_size)
self.update_critic(obs, obs_aug, action, reward, next_obs,
next_obs_aug, not_done, logger, step)
if step % self.actor_update_frequency == 0:
self.update_actor_and_alpha(obs, logger, step)
if step % self.critic_target_update_frequency == 0:
utils.soft_update_params(self.critic, self.critic_target,
self.critic_tau)
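# Example (hypothetical; assumes `from .config import DRQConfig` and a ReplayBuffer `buffer`
# already holding at least cfg.batch_size transitions):
#   agent = DRQAgent(DRQConfig(), 'cuda', obs_shape=(9, 84, 84), action_shape=(1,), action_range=(-1.0, 1.0))
#   agent.update(buffer, logger=None, step=0)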
|
import copy
import math
import pickle as pkl
import numpy as np
import torch
import os
import sys
import torch.nn as nn
from typing import Tuple
import torch.nn.functional as F
from gym import spaces
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import REINFORCEMENT_LEARNING
from .utils import FrameStack, set_seed_everywhere, eval_mode
from .drq import DRQAgent
from .config import DRQConfig
from .replay_buffer import ReplayBuffer
class MockEnv:
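    """Minimal gym-style stand-in environment that replays a fixed observation with zero reward."""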
def __init__(self, obs):
self._norm_action_space = spaces.Box(
low=-1.0,
high=1.0,
shape=[1],
dtype=np.float32)
self._observation_space = spaces.Box(
low=0,
high=255,
shape=[9, 84, 84],
dtype=np.uint8
)
self.obs = obs
self._max_episode_steps = 250
self.metadata = {'render.modes': []}
self.reward_range = (-float('inf'), float('inf'))
def step(self, action):
reward = 0.0
done = False
info_state = [0.016243, 3.1355, -0.0052817, -0.01073]
info = dict()
info["internal_state"] = info_state
info["discount"] = 1.0
return (self.obs, reward, done, info)
def seed(self, seed=None):
self._norm_action_space.seed(seed)
self._observation_space.seed(seed)
def reset(self):
return self.obs
@property
def observation_space(self):
return self._observation_space
@property
def action_space(self):
return self._norm_action_space
def make_env(cfg):
if cfg.env == 'ball_in_cup_catch':
domain_name = 'ball_in_cup'
task_name = 'catch'
elif cfg.env == 'point_mass_easy':
domain_name = 'point_mass'
task_name = 'easy'
else:
domain_name = cfg.env.split('_')[0]
task_name = '_'.join(cfg.env.split('_')[1:])
# per dreamer: https://github.com/danijar/dreamer/blob/02f0210f5991c7710826ca7881f19c64a012290c/wrappers.py#L26
camera_id = 2 if domain_name == 'quadruped' else 0
current_dir = os.path.dirname(os.path.realpath(__file__))
mockobs = pkl.load(open(os.path.join(current_dir, cfg.obs_path), "rb"))
low = np.amin(mockobs)
high = np.amax(mockobs)
mockobs = np.random.randint(low=11, high=228, size=mockobs.shape, dtype=np.uint8)
env = MockEnv(mockobs)
env = FrameStack(env, k=cfg.frame_stack)
env.seed(cfg.seed)
assert env.action_space.low.min() >= -1
assert env.action_space.high.max() <= 1
return env
class Model(BenchmarkModel):
task = REINFORCEMENT_LEARNING.OTHER_RL
# Batch size is not adjustable in this model
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
ALLOW_CUSTOMIZE_BSIZE = False
CANNOT_SET_CUSTOM_OPTIMIZER = True
    # this model will cause an infinite loop if deep-copied
DEEPCOPY = False
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
self.cfg = DRQConfig()
set_seed_everywhere(self.cfg.seed)
self.env = make_env(self.cfg)
obs_shape = self.env.observation_space.shape
action_shape = self.env.action_space.shape
action_range = [
float(self.env.action_space.low.min()),
float(self.env.action_space.high.max())
]
self.agent = DRQAgent(self.cfg, self.device, obs_shape, action_shape, action_range)
self.replay_buffer = ReplayBuffer(self.env.observation_space.shape,
self.env.action_space.shape,
self.cfg.replay_buffer_capacity,
self.cfg.image_pad, self.device)
self.step = 0
def get_module(self):
obs = self.env.reset()
obs = torch.FloatTensor(obs).to(self.device)
obs = obs.unsqueeze(0)
return self.agent.actor, (obs, )
def set_module(self, new_model):
self.agent.actor = new_model
def train(self):
episode, episode_reward, episode_step, done = 0, 0, 1, True
if True:
obs = self.env.reset()
done = False
episode_reward = 0
episode_step = 0
episode += 1
if self.step < self.cfg.num_seed_steps:
action = self.env.action_space.sample()
else:
with eval_mode(self.agent):
action = self.agent.act(obs, sample=True)
# run training update
if self.step >= self.cfg.num_seed_steps:
for _ in range(self.cfg.num_train_iters):
self.agent.update(self.replay_buffer, None,
self.step)
next_obs, reward, done, info = self.env.step(action)
# allow infinite bootstrap
done = float(done)
done_no_max = 0 if episode_step + 1 == self.env._max_episode_steps else done
episode_reward += reward
self.replay_buffer.add(obs, action, reward, next_obs, done,
done_no_max)
obs = next_obs
episode_step += 1
self.step += 1
def eval(self) -> Tuple[torch.Tensor]:
average_episode_reward = 0
steps = 0
if True:
obs = self.env.reset()
episode_reward = 0
episode_step = 0
with eval_mode(self.agent):
action = self.agent.act(obs, sample=False)
obs, reward, done, info = self.env.step(action)
episode_reward += reward
episode_step += 1
average_episode_reward += episode_reward
steps += 1
average_episode_reward /= float(steps)
return (torch.Tensor(action), )
|
import math
import os
import random
from collections import deque
import numpy as np
import scipy.linalg as sp_la
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
from skimage.util.shape import view_as_windows
from torch import distributions as pyd
class eval_mode(object):
def __init__(self, *models):
self.models = models
def __enter__(self):
self.prev_states = []
for model in self.models:
self.prev_states.append(model.training)
model.train(False)
def __exit__(self, *args):
for model, state in zip(self.models, self.prev_states):
model.train(state)
return False
def soft_update_params(net, target_net, tau):
for param, target_param in zip(net.parameters(), target_net.parameters()):
target_param.data.copy_(tau * param.data +
(1 - tau) * target_param.data)
def set_seed_everywhere(seed):
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
def make_dir(*path_parts):
dir_path = os.path.join(*path_parts)
try:
os.mkdir(dir_path)
except OSError:
pass
return dir_path
def tie_weights(src, trg):
assert type(src) == type(trg)
trg.weight = src.weight
trg.bias = src.bias
def weight_init(m):
"""Custom weight init for Conv2D and Linear layers."""
if isinstance(m, nn.Linear):
nn.init.orthogonal_(m.weight.data)
if hasattr(m.bias, 'data'):
m.bias.data.fill_(0.0)
elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
gain = nn.init.calculate_gain('relu')
nn.init.orthogonal_(m.weight.data, gain)
if hasattr(m.bias, 'data'):
m.bias.data.fill_(0.0)
def mlp(input_dim, hidden_dim, output_dim, hidden_depth, output_mod=None):
if hidden_depth == 0:
mods = [nn.Linear(input_dim, output_dim)]
else:
mods = [nn.Linear(input_dim, hidden_dim), nn.ReLU(inplace=True)]
for i in range(hidden_depth - 1):
mods += [nn.Linear(hidden_dim, hidden_dim), nn.ReLU(inplace=True)]
mods.append(nn.Linear(hidden_dim, output_dim))
if output_mod is not None:
mods.append(output_mod)
trunk = nn.Sequential(*mods)
return trunk
def to_np(t):
if t is None:
return None
elif t.nelement() == 0:
return np.array([])
else:
return t.cpu().detach().numpy()
class FrameStack(gym.Wrapper):
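    """Gym wrapper that stacks the last k observations along the channel dimension."""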
def __init__(self, env, k):
gym.Wrapper.__init__(self, env)
self._k = k
self._frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=1,
shape=((shp[0] * k,) + shp[1:]),
dtype=env.observation_space.dtype)
self._max_episode_steps = env._max_episode_steps
def reset(self):
obs = self.env.reset()
for _ in range(self._k):
self._frames.append(obs)
return self._get_obs()
def step(self, action):
obs, reward, done, info = self.env.step(action)
self._frames.append(obs)
return self._get_obs(), reward, done, info
def _get_obs(self):
assert len(self._frames) == self._k
return np.concatenate(list(self._frames), axis=0)
class TanhTransform(pyd.transforms.Transform):
domain = pyd.constraints.real
codomain = pyd.constraints.interval(-1.0, 1.0)
bijective = True
sign = +1
def __init__(self, cache_size=1):
super().__init__(cache_size=cache_size)
@staticmethod
def atanh(x):
return 0.5 * (x.log1p() - (-x).log1p())
def __eq__(self, other):
return isinstance(other, TanhTransform)
def _call(self, x):
return x.tanh()
def _inverse(self, y):
        # We do not clamp to the boundary here because it may degrade the performance of certain algorithms;
        # one should rely on `cache_size=1` instead.
return self.atanh(y)
def log_abs_det_jacobian(self, x, y):
# We use a formula that is more numerically stable, see details in the following link
# https://github.com/tensorflow/probability/commit/ef6bb176e0ebd1cf6e25c6b5cecdd2428c22963f#diff-e120f70e92e6741bca649f04fcd907b7
return 2. * (math.log(2.) - x - F.softplus(-2. * x))
class SquashedNormal(pyd.transformed_distribution.TransformedDistribution):
def __init__(self, loc, scale):
self.loc = loc
self.scale = scale
self.base_dist = pyd.Normal(loc, scale)
transforms = [TanhTransform()]
super().__init__(self.base_dist, transforms)
@property
def mean(self):
mu = self.loc
for tr in self.transforms:
mu = tr(mu)
return mu
|
import numpy as np
import kornia
import torch
import torch.nn as nn
import torch.nn.functional as F
class ReplayBuffer(object):
"""Buffer to store environment transitions."""
def __init__(self, obs_shape, action_shape, capacity, image_pad, device):
self.capacity = capacity
self.device = device
self.aug_trans = nn.Sequential(
nn.ReplicationPad2d(image_pad),
kornia.augmentation.RandomCrop((obs_shape[-1], obs_shape[-1])))
self.obses = np.empty((capacity, *obs_shape), dtype=np.uint8)
self.next_obses = np.empty((capacity, *obs_shape), dtype=np.uint8)
self.actions = np.empty((capacity, *action_shape), dtype=np.float32)
self.rewards = np.empty((capacity, 1), dtype=np.float32)
self.not_dones = np.empty((capacity, 1), dtype=np.float32)
self.not_dones_no_max = np.empty((capacity, 1), dtype=np.float32)
self.idx = 0
self.full = False
def __len__(self):
return self.capacity if self.full else self.idx
def add(self, obs, action, reward, next_obs, done, done_no_max):
np.copyto(self.obses[self.idx], obs)
np.copyto(self.actions[self.idx], action)
np.copyto(self.rewards[self.idx], reward)
np.copyto(self.next_obses[self.idx], next_obs)
np.copyto(self.not_dones[self.idx], not done)
np.copyto(self.not_dones_no_max[self.idx], not done_no_max)
self.idx = (self.idx + 1) % self.capacity
self.full = self.full or self.idx == 0
def sample(self, batch_size):
x = self.capacity if self.full else self.idx
idxs = np.random.randint(0,
self.capacity if self.full else self.idx,
size=batch_size)
obses = self.obses[idxs]
next_obses = self.next_obses[idxs]
obses_aug = obses.copy()
next_obses_aug = next_obses.copy()
obses = torch.as_tensor(obses, device=self.device).float()
next_obses = torch.as_tensor(next_obses, device=self.device).float()
obses_aug = torch.as_tensor(obses_aug, device=self.device).float()
next_obses_aug = torch.as_tensor(next_obses_aug,
device=self.device).float()
actions = torch.as_tensor(self.actions[idxs], device=self.device)
rewards = torch.as_tensor(self.rewards[idxs], device=self.device)
not_dones_no_max = torch.as_tensor(self.not_dones_no_max[idxs],
device=self.device)
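        # Pad-and-random-crop augmentation is applied independently to each sampled view.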
obses = self.aug_trans(obses)
next_obses = self.aug_trans(next_obses)
obses_aug = self.aug_trans(obses_aug)
next_obses_aug = self.aug_trans(next_obses_aug)
return obses, actions, rewards, next_obses, not_dones_no_max, obses_aug, next_obses_aug
|
import os
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
from torchbenchmark.util.framework.timm.model_factory import TimmModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(TimmModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, model_name='regnety_120', device=device,
jit=jit, batch_size=batch_size, extra_args=extra_args)
|
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel
from torchbenchmark.tasks import COMPUTER_VISION
import torchvision.models as models
class Model(TorchVisionModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(model_name="resnet50", test=test, device=device, jit=jit,
batch_size=batch_size, weights=models.ResNet50_Weights.IMAGENET1K_V1,
extra_args=extra_args)
|
# Generated by gen_torchvision_benchmark.py
import torch
import torch.optim as optim
import torchvision.models as models
from torch.quantization import quantize_fx
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
from typing import Tuple
class Model(BenchmarkModel):
task = COMPUTER_VISION.CLASSIFICATION
# Train batch size: 32
# Source: https://openreview.net/pdf?id=B1Yy1BxCZ
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
if test == "eval" and device != "cpu":
raise NotImplementedError("The eval test only supports CPU.")
if jit and test == "train":
raise NotImplementedError("torchscript operations should only be applied after quantization operations")
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
self.model = models.resnet50().to(self.device)
self.example_inputs = (torch.randn((self.batch_size, 3, 224, 224)).to(self.device),)
self.prep_qat_train()
if self.test == "eval":
self.prep_qat_eval()
self.optimizer = None
def prep_qat_train(self):
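        # Prepare the float model for quantization-aware training with FX graph mode quantization (fbgemm qconfig).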
qconfig_dict = {"": torch.quantization.get_default_qat_qconfig('fbgemm')}
self.model.train()
self.model = quantize_fx.prepare_qat_fx(self.model, qconfig_dict, self.example_inputs)
def get_module(self):
return self.model, self.example_inputs
def prep_qat_eval(self):
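        # Convert the QAT-prepared model into a quantized model for inference.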
self.model = quantize_fx.convert_fx(self.model)
self.model.eval()
def train(self):
if self.get_optimizer() is None:
self.set_optimizer(optim.Adam(self.model.parameters()))
loss = torch.nn.CrossEntropyLoss()
self.optimizer.zero_grad()
pred = self.model(*self.example_inputs)
y = torch.empty(pred.shape[0], dtype=torch.long, device=self.device).random_(pred.shape[1])
loss(pred, y).backward()
self.optimizer.step()
def eval(self) -> Tuple[torch.Tensor]:
model = self.model
example_inputs = self.example_inputs
example_inputs = example_inputs[0][0].unsqueeze(0)
out = model(example_inputs)
return (out, )
|
import os
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
class Model(Detectron2Model):
task = COMPUTER_VISION.DETECTION
model_file = os.path.join(MODEL_DIR, ".data", f"{MODEL_NAME}.pkl")
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(variant="COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml", test=test, device=device,
jit=jit, batch_size=batch_size, extra_args=extra_args)
|
import os
from torchbenchmark.util.framework.detectron2 import install_detectron2
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
if __name__ == '__main__':
install_detectron2(MODEL_NAME, MODEL_DIR)
|
"""
https://github.com/phlippe/uvadlc_notebooks_benchmarking/blob/main/PyTorch/Tutorial5_Inception_ResNet_DenseNet.py
"""
from types import SimpleNamespace
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.model import BenchmarkModel
import torch.nn as nn
import torch
import torch.optim as optim
import torch.utils.data as data
class ResNetBlock(nn.Module):
def __init__(self, c_in, act_fn, subsample=False, c_out=-1):
"""
Inputs:
c_in - Number of input features
act_fn - Activation class constructor (e.g. nn.ReLU)
subsample - If True, we want to apply a stride inside the block and reduce the output shape by 2 in height and width
c_out - Number of output features. Note that this is only relevant if subsample is True, as otherwise, c_out = c_in
"""
super().__init__()
if not subsample:
c_out = c_in
# Network representing F
self.net = nn.Sequential(
# No bias needed as the Batch Norm handles it
nn.Conv2d(c_in, c_out, kernel_size=3, padding=1,
stride=1 if not subsample else 2, bias=False),
nn.BatchNorm2d(c_out),
act_fn(),
nn.Conv2d(c_out, c_out, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(c_out)
)
# 1x1 convolution with stride 2 means we take the upper left value, and transform it to new output size
self.downsample = nn.Conv2d(
c_in, c_out, kernel_size=1, stride=2) if subsample else None
self.act_fn = act_fn()
def forward(self, x):
z = self.net(x)
if self.downsample is not None:
x = self.downsample(x)
out = z + x
out = self.act_fn(out)
return out
class ResNetModel(nn.Module):
def __init__(self, num_classes=10, num_blocks=[3, 3, 3], c_hidden=[16, 32, 64], act_fn_name="relu", **kwargs):
"""
Inputs:
num_classes - Number of classification outputs (10 for CIFAR10)
            num_blocks - List with the number of ResNet blocks to use. The first block of each group uses downsampling, except the first group.
c_hidden - List with the hidden dimensionalities in the different blocks. Usually multiplied by 2 the deeper we go.
act_fn_name - Name of the activation function to use, looked up in "act_fn_by_name"
block_name - Name of the ResNet block, looked up in "resnet_blocks_by_name"
"""
super().__init__()
act_fn_by_name = {
"tanh": nn.Tanh,
"relu": nn.ReLU,
"leakyrelu": nn.LeakyReLU,
"gelu": nn.GELU
}
self.hparams = SimpleNamespace(num_classes=num_classes,
c_hidden=c_hidden,
num_blocks=num_blocks,
act_fn_name=act_fn_name,
act_fn=act_fn_by_name[act_fn_name],
block_class=ResNetBlock)
self._create_network()
self._init_params()
def _create_network(self):
c_hidden = self.hparams.c_hidden
self.input_net = nn.Sequential(
nn.Conv2d(3, c_hidden[0], kernel_size=3,
padding=1, bias=False),
nn.BatchNorm2d(c_hidden[0]),
self.hparams.act_fn()
)
# Creating the ResNet blocks
blocks = []
for block_idx, block_count in enumerate(self.hparams.num_blocks):
for bc in range(block_count):
# Subsample the first block of each group, except the very first one.
subsample = (bc == 0 and block_idx > 0)
blocks.append(
self.hparams.block_class(c_in=c_hidden[block_idx if not subsample else (block_idx - 1)],
act_fn=self.hparams.act_fn,
subsample=subsample,
c_out=c_hidden[block_idx])
)
self.blocks = nn.Sequential(*blocks)
# Mapping to classification output
self.output_net = nn.Sequential(
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(),
nn.Linear(c_hidden[-1], self.hparams.num_classes)
)
def _init_params(self):
# Based on our discussion in Tutorial 4, we should initialize the convolutions according to the activation function
# Fan-out focuses on the gradient distribution, and is commonly used in ResNets
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity=self.hparams.act_fn_name)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.input_net(x)
x = self.blocks(x)
x = self.output_net(x)
return x
class Model(BenchmarkModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 128
DEFAULT_EVAL_BSIZE = 128
def __init__(self, test, device, jit=False, batch_size=DEFAULT_TRAIN_BSIZE, extra_args=[]):
super().__init__(test=test, device=device, jit=jit,
batch_size=batch_size, extra_args=extra_args)
self.model = ResNetModel()
self.model.to(device)
self.example_inputs = (
torch.randn((self.batch_size, 3, 32, 32), device=self.device),
)
self.example_target = torch.randint(0, 10, (self.batch_size,), device=self.device)
dataset = data.TensorDataset(self.example_inputs[0], self.example_target)
self.optimizer = optim.SGD(self.model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
self.criterion = nn.CrossEntropyLoss()
(self.images, ) = self.example_inputs
def get_module(self):
return self.model, self.example_inputs
def train(self):
self.model.train()
targets = self.example_target
output = self.model(self.images)
loss = self.criterion(output, targets)
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
def eval(self):
self.model.eval()
with torch.no_grad():
            out = self.model(self.images)
return (out,)
|
import torch
import os
import itertools
import random
from pathlib import Path
from typing import Tuple
from detectron2.checkpoint import DetectionCheckpointer
# TorchBench imports
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
# setup environment variable
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
DATA_DIR = os.path.join(CURRENT_DIR.parent.parent, "data", ".data", "coco2017-minimal")
assert os.path.exists(DATA_DIR), "Couldn't find coco2017 minimal data dir, please run install.py again."
if not 'DETECTRON2_DATASETS' in os.environ:
os.environ['DETECTRON2_DATASETS'] = DATA_DIR
from detectron2.config import instantiate
from detectron2 import model_zoo
from detectron2.utils.events import EventStorage
from torch.utils._pytree import tree_map
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
def prefetch(dataloader, device, precision="fp32"):
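    """Materialize batches from the dataloader onto the target device, casting tensors to the requested precision."""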
r = []
dtype = torch.float16 if precision == "fp16" else torch.float32
for batch in dataloader:
r.append(tree_map(lambda x: x.to(device, dtype=dtype) if isinstance(x, torch.Tensor) else x, batch))
return r
class Model(BenchmarkModel):
task = COMPUTER_VISION.DETECTION
model_file = os.path.join(MODEL_DIR, ".data", f"{MODEL_NAME}.pkl")
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
    # Skip correctness check, because the output tensor can't be verified using
    # cosine similarity or torch.allclose()
SKIP_CORRECTNESS_CHECK = True
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
model_cfg = model_zoo.get_config("common/models/mask_rcnn_fpn.py").model
data_cfg = model_zoo.get_config("common/data/coco.py").dataloader
if test == "train":
# use a mini dataset
data_cfg.train.dataset.names = "coco_2017_val_100"
data_cfg.train.total_batch_size = self.batch_size
self.model = instantiate(model_cfg).to(self.device)
train_loader = instantiate(data_cfg.train)
self.example_inputs = prefetch(itertools.islice(train_loader, 100), self.device)
self.optimizer = torch.optim.SGD(self.model.parameters(), 0.)
elif test == "eval":
data_cfg.test.dataset.names = "coco_2017_val_100"
data_cfg.test.batch_size = self.batch_size
self.model = instantiate(model_cfg).to(self.device)
# load model from checkpoint
DetectionCheckpointer(self.model).load(self.model_file)
self.model.eval()
test_loader = instantiate(data_cfg.test)
self.example_inputs = prefetch(itertools.islice(test_loader, 100), self.device)
self.NUM_BATCHES = len(self.example_inputs)
def get_module(self):
return self.model, (self.example_inputs[0], )
def train(self):
self.model.train()
with EventStorage():
for idx in range(self.NUM_BATCHES):
losses = self.model(self.example_inputs[idx])
loss = sum(losses.values())
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
def eval(self) -> Tuple[torch.Tensor]:
self.model.eval()
with torch.no_grad():
for idx in range(self.NUM_BATCHES):
out = self.model(self.example_inputs[idx])
# retrieve output tensors
outputs = []
for item in out:
fields = list(map(lambda x: list(x.get_fields().values()), item.values()))
for boxes in fields:
tensor_box = list(filter(lambda x: isinstance(x, torch.Tensor), boxes))
outputs.extend(tensor_box)
return tuple(outputs)
|
import os
from torchbenchmark.util.framework.detectron2 import install_detectron2
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
if __name__ == '__main__':
install_detectron2(MODEL_NAME, MODEL_DIR)
|
"""General-purpose training script for image-to-image translation.
This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and
different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization).
You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model').
It first creates the model, dataset, and visualizer given the options.
It then does standard network training. During training, it also visualizes/saves the images, prints/saves the loss plot, and saves the models.
The script supports continue/resume training. Use '--continue_train' to resume your previous training.
Example:
Train a CycleGAN model:
python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Train a pix2pix model:
python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/train_options.py for more training options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import time
from .options.train_options import TrainOptions
from .data import create_dataset
from .models import create_model
import torch
from torch.utils._pytree import tree_map
from .util.visualizer import Visualizer
def prefetch_device(example_inputs, device):
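    """Recursively move all tensors in a (possibly nested) input structure to the target device."""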
if isinstance(example_inputs, torch.Tensor):
return example_inputs.to(device=device)
elif isinstance(example_inputs, (tuple, list, dict)):
return tree_map(lambda x: prefetch_device(x, device), example_inputs)
elif isinstance(example_inputs, (str, int, float)):
return example_inputs
assert False, f"Unsupported data type: {type(example_inputs)}"
def prepare_training_loop(args):
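    """Parse the CycleGAN training options, build the model and dataset, prefetch data to the device, and return a training_loop(niteration) callable."""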
new_dataset = []
opt = TrainOptions().parse(args) # get training options
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
# prefetch the dataset to the device
for data in dataset:
new_dataset.append(prefetch_device(data, opt.tb_device))
dataset = new_dataset
dataset_size = len(dataset) # get the number of images in the dataset.
model = create_model(opt) # create a model given opt.model and other options
model.setup(opt) # regular setup: load and print networks; create schedulers
visualizer = Visualizer(opt) # create a visualizer that display/save images and plots
def training_loop(niteration):
total_iters = 0 # the total number of training iterations
if niteration is None:
niteration = opt.n_epochs + opt.n_epochs_decay + 1
for epoch in range(opt.epoch_count, niteration): # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
epoch_start_time = time.time() # timer for entire epoch
iter_data_time = time.time() # timer for data loading per iteration
epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch
visualizer.reset() # reset the visualizer: make sure it saves the results to HTML at least once every epoch
for i, data in enumerate(dataset): # inner loop within one epoch
iter_start_time = time.time() # timer for computation per iteration
if total_iters % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
total_iters += opt.batch_size
epoch_iter += opt.batch_size
model.set_input(data) # unpack data from dataset and apply preprocessing
model.optimize_parameters() # calculate loss functions, get gradients, update network weights
if total_iters % opt.display_freq == 0: # display images on visdom and save images to a HTML file
save_result = total_iters % opt.update_html_freq == 0
model.compute_visuals()
visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
if total_iters % opt.print_freq == 0: # print training losses and save logging information to the disk
losses = model.get_current_losses()
t_comp = (time.time() - iter_start_time) / opt.batch_size
visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
if opt.display_id > 0:
visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
if total_iters % opt.save_latest_freq == 0: # cache our latest model every <save_latest_freq> iterations
save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
model.save_networks(save_suffix)
# only run 1 batch in torchbench
break
iter_data_time = time.time()
# only run 1 iter in torchbench
break
model.update_learning_rate() # update learning rates at the end of every epoch.
return training_loop
|
#!/usr/bin/env python
import torch
import os
from pathlib import Path
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
from typing import Tuple
from torchbenchmark import DATA_PATH
from .train_cyclegan import prepare_training_loop
from .test_cyclegan import get_model
def _create_data_dir(suffix):
data_dir = Path(__file__).parent.joinpath(".data", suffix)
data_dir.mkdir(parents=True, exist_ok=True)
return data_dir
class Model(BenchmarkModel):
task = COMPUTER_VISION.GENERATION
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
ALLOW_CUSTOMIZE_BSIZE = False
# TODO: Customizing the optimizer is nontrivial, perhaps a next step.
CANNOT_SET_CUSTOM_OPTIMIZER = True
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
checkpoints_dir = _create_data_dir("checkpoints")
results_dir = _create_data_dir("results")
checkpoints_arg = f"--checkpoints_dir {checkpoints_dir}"
results_arg = f"--results_dir {results_dir}"
data_root = os.path.join(DATA_PATH, "pytorch_CycleGAN_and_pix2pix_inputs")
device_arg = ""
if self.device == "cpu":
device_arg = "--gpu_ids -1"
elif self.device == "cuda":
device_arg = "--gpu_ids 0"
if self.test == "train":
train_args = f"--tb_device {self.device} --dataroot {data_root}/datasets/horse2zebra --name horse2zebra --model cycle_gan --display_id 0 --n_epochs 3 " + \
f"--n_epochs_decay 3 {device_arg} {checkpoints_arg}"
self.training_loop = prepare_training_loop(train_args.split(' '))
args = f"--dataroot {data_root}/datasets/horse2zebra/testA --name horse2zebra_pretrained --model test " + \
f"--no_dropout {device_arg} {checkpoints_arg} {results_arg}"
self.model, self.input = get_model(args, self.device)
def get_module(self):
return self.model, self.input
def set_train(self):
# another model instance is used for training
# and the train mode is on by default
pass
def train(self):
# the training process is not patched to use scripted models
# training_loop has its own count logic inside. It actually runs 7 epochs
# (with each 'epoch' being limited to a small set of data)
        # it would be more consistent with the rest of torchbenchmark if it ran just one step
        # rather than 7 epochs, but changing it now would potentially cause
        # a discontinuity with existing/historical measurements
self.training_loop(None)
def eval(self) -> Tuple[torch.Tensor]:
model, example_inputs = self.get_module()
out = model(*example_inputs)
return (out, )
|
"""General-purpose test script for image-to-image translation.
Once you have trained your model with train.py, you can use this script to test the model.
It will load a saved model from '--checkpoints_dir' and save the results to '--results_dir'.
It first creates the model and dataset given the options, hard-coding some parameters.
It then runs inference for '--num_test' images and saves the results to an HTML file.
Example (You need to train models first or download pre-trained models from our website):
Test a CycleGAN model (both sides):
python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Test a CycleGAN model (one side only):
python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout
The option '--model test' is used for generating CycleGAN results only for one side.
This option will automatically set '--dataset_mode single', which only loads the images from one set.
In contrast, using '--model cycle_gan' requires loading and generating results in both directions,
which is sometimes unnecessary. The results will be saved at ./results/.
Use '--results_dir <directory_path_to_save_result>' to specify the results directory.
Test a pix2pix model:
python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/test_options.py for more test options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import os
from .options.test_options import TestOptions
from .data import create_dataset
from .models import create_model
from .util.visualizer import save_images
from .util import html
import torch
from pathlib import Path
def get_model(args, device):
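    """Build the CycleGAN test generator (netG) from the given args and return it together with one example input tensor."""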
opt = TestOptions().parse(args.split(' '))
opt.num_threads = 0 # test code only supports num_threads = 1
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1 # no visdom display; the test code saves the results to an HTML file.
model = create_model(opt) # create a model given opt.model and other options
if len(opt.gpu_ids) > 0:
# When opt.gpu_ids > 0, netG is converted to torch.nn.DataParallel
model = model.netG.module
else:
model = model.netG
root = str(Path(__file__).parent)
data = torch.load(f'{root}/example_input.pt')
input = data['A'].to(device)
return model, (input,)
if __name__ == '__main__':
opt = TestOptions().parse() # get test options
# hard-code some parameters for test
opt.num_threads = 0 # test code only supports num_threads = 1
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1 # no visdom display; the test code saves the results to an HTML file.
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
model = create_model(opt) # create a model given opt.model and other options
# model.setup(opt) # regular setup: load and print networks; create schedulers
# create a website
web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(opt.phase, opt.epoch)) # define the website directory
if opt.load_iter > 0: # load_iter is 0 by default
web_dir = '{:s}_iter{:d}'.format(web_dir, opt.load_iter)
print('creating web directory', web_dir)
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
# test with eval mode. This only affects layers like batchnorm and dropout.
# For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment it with and without eval() mode.
# For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
if opt.eval:
model.eval()
for i, data in enumerate(dataset):
if i >= opt.num_test: # only apply our model to opt.num_test images.
break
model.set_input(data) # unpack data from data loader
torch.save(data, 'example_input.pt')
model.test() # run inference
visuals = model.get_current_visuals() # get image results
img_path = model.get_image_paths() # get image paths
if i % 5 == 0: # save images to an HTML file
print('processing (%04d)-th image... %s' % (i, img_path))
save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)
webpage.save() # save the HTML
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
from .base_options import BaseOptions
class TestOptions(BaseOptions):
"""This class includes test options.
It also includes shared options defined in BaseOptions.
"""
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser) # define shared options
parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        # Dropout and BatchNorm have different behavior during training and test.
parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')
        # rewrite default values
parser.set_defaults(model='test')
# To avoid cropping, the load_size should be the same as crop_size
parser.set_defaults(load_size=parser.get_default('crop_size'))
self.isTrain = False
return parser
|
from .base_options import BaseOptions
class TrainOptions(BaseOptions):
"""This class includes training options.
It also includes shared options defined in BaseOptions.
"""
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser)
# add torchbench options
parser.add_argument('--tb_device', type=str, required=True, help="TorchBench device")
# visdom and HTML visualization parameters
parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
# network saving and loading parameters
parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration')
parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
# training parameters
parser.add_argument('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')
parser.add_argument('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero')
parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
self.isTrain = True
return parser
|
"""This package options includes option modules: training options, test options, and basic options (used in both training and test)."""
|
import argparse
import os
from ..util import util
import torch
from ..models import get_option_setter
from ..data import get_option_setter as get_option_setter_data
class BaseOptions():
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
"""
def __init__(self):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
# basic parameters
parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0, or 0,1,2, or 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=0, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        parser.add_argument('--load_iter', type=int, default=0, help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
self.initialized = True
return parser
def gather_options(self, args=None):
"""Initialize our parser with basic options(only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
"""
if not self.initialized: # check if it has been initialized
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args(args)
# modify model-related parser options
model_name = opt.model
model_option_setter = get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args(args) # parse again with new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = get_option_setter_data(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
# save and return the parser
self.parser = parser
return parser.parse_args(args)
def print_options(self, opt):
"""Print and save options
        It will print both current options and default values (if different).
It will save options into a text file / [checkpoints_dir] / opt.txt
"""
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self, args=None):
"""Parse our options, create checkpoints directory suffix, and set up gpu device."""
opt = self.gather_options(args)
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
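# Illustrative usage sketch (added for clarity; not part of the original file).
# Assuming the training-options subclass whose initialize() tail appears at the top of
# this section (it sets isTrain = True; called TrainOptions in the original
# CycleGAN/pix2pix code), options can also be parsed programmatically, e.g.:
#
#     opt = TrainOptions().parse(['--dataroot', './datasets/facades', '--name', 'demo'])
#     print(opt.lr, opt.gan_mode, opt.gpu_ids)   # 0.0002 lsgan [0] with the defaults above
#
# The print_options() method can then echo the resolved options and write them to
# <checkpoints_dir>/<name>/<phase>_opt.txt.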
|
import random
import torch
class ImagePool():
"""This class implements an image buffer that stores previously generated images.
This buffer enables us to update discriminators using a history of generated images
rather than the ones produced by the latest generators.
"""
def __init__(self, pool_size):
"""Initialize the ImagePool class
Parameters:
pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created
"""
self.pool_size = pool_size
if self.pool_size > 0: # create an empty pool
self.num_imgs = 0
self.images = []
def query(self, images):
"""Return an image from the pool.
Parameters:
images: the latest generated images from the generator
Returns images from the buffer.
        With probability 0.5, the buffer returns the input image.
        With probability 0.5, the buffer returns an image previously stored in the buffer
        and inserts the current image into the buffer.
"""
if self.pool_size == 0: # if the buffer size is 0, do nothing
return images
return_images = []
for image in images:
image = torch.unsqueeze(image.data, 0)
if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer
self.num_imgs = self.num_imgs + 1
self.images.append(image)
return_images.append(image)
else:
p = random.uniform(0, 1)
if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer
random_id = random.randint(0, self.pool_size - 1) # randint is inclusive
tmp = self.images[random_id].clone()
self.images[random_id] = image
return_images.append(tmp)
else: # by another 50% chance, the buffer will return the current image
return_images.append(image)
return_images = torch.cat(return_images, 0) # collect all the images and return
return return_images
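if __name__ == '__main__':
    # Illustrative sanity check (added for clarity; a sketch, not part of the original file).
    # With pool_size=2 the first two generated images are stored and returned unchanged;
    # every later image is either returned as-is or swapped with a stored one, each with
    # probability 0.5, which is what lets the discriminator see a history of fakes.
    pool = ImagePool(pool_size=2)
    fake_batch = torch.randn(4, 3, 8, 8)      # pretend these came from a generator
    out = pool.query(fake_batch)
    assert out.shape == fake_batch.shape      # query() returns one image per input image
    print('buffer now holds %d images' % pool.num_imgs)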
|
"""This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
def tensor2im(input_image, imtype=np.uint8):
""""Converts a Tensor array into a numpy image array.
Parameters:
input_image (tensor) -- the input image tensor array
imtype (type) -- the desired type of the converted numpy array
"""
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array
if image_numpy.shape[0] == 1: # grayscale to RGB
image_numpy = np.tile(image_numpy, (3, 1, 1))
        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
"""Calculate and print the mean of average absolute(gradients)
Parameters:
net (torch network) -- Torch network
name (str) -- the name of the network
"""
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path, aspect_ratio=1.0):
"""Save a numpy image to the disk
Parameters:
image_numpy (numpy array) -- input numpy array
image_path (str) -- the path of the image
"""
image_pil = Image.fromarray(image_numpy)
h, w, _ = image_numpy.shape
if aspect_ratio > 1.0:
image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
if aspect_ratio < 1.0:
image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
"""Print the mean, min, max, median, std, and size of a numpy array
Parameters:
        val (bool) -- whether to print the values of the numpy array
        shp (bool) -- whether to print the shape of the numpy array
"""
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
"""create empty directories if they don't exist
Parameters:
paths (str list) -- a list of directory paths
"""
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
"""create a single empty directory if it didn't exist
Parameters:
path (str) -- a single directory path
"""
if not os.path.exists(path):
os.makedirs(path)
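if __name__ == '__main__':
    # Illustrative sketch (added for clarity; not part of the original module).
    # tensor2im() expects a 4-D tensor in [-1, 1] (e.g. the output of a tanh generator)
    # and returns an HxWx3 uint8 array that save_image() can write to disk.
    fake = torch.rand(1, 3, 4, 4) * 2.0 - 1.0     # a pretend generator output
    im = tensor2im(fake)
    print_numpy(im, val=True, shp=True)           # prints shape (4, 4, 3) and value statistics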
|
import dominate
from dominate.tags import meta, h3, table, tr, td, p, a, img, br
import os
class HTML:
"""This HTML class allows us to save images and write texts into a single HTML file.
It consists of functions such as <add_header> (add a text header to the HTML file),
<add_images> (add a row of images to the HTML file), and <save> (save the HTML to the disk).
It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API.
"""
def __init__(self, web_dir, title, refresh=0):
"""Initialize the HTML classes
Parameters:
            web_dir (str) -- a directory that stores the webpage. HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir>/images/
title (str) -- the webpage name
            refresh (int) -- how often the website refreshes itself; if 0, no refreshing
"""
self.title = title
self.web_dir = web_dir
self.img_dir = os.path.join(self.web_dir, 'images')
if not os.path.exists(self.web_dir):
os.makedirs(self.web_dir)
if not os.path.exists(self.img_dir):
os.makedirs(self.img_dir)
self.doc = dominate.document(title=title)
if refresh > 0:
with self.doc.head:
meta(http_equiv="refresh", content=str(refresh))
def get_image_dir(self):
"""Return the directory that stores images"""
return self.img_dir
def add_header(self, text):
"""Insert a header to the HTML file
Parameters:
text (str) -- the header text
"""
with self.doc:
h3(text)
def add_images(self, ims, txts, links, width=400):
"""add images to the HTML file
Parameters:
ims (str list) -- a list of image paths
txts (str list) -- a list of image names shown on the website
links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page
"""
self.t = table(border=1, style="table-layout: fixed;") # Insert a table
self.doc.add(self.t)
with self.t:
with tr():
for im, txt, link in zip(ims, txts, links):
with td(style="word-wrap: break-word;", halign="center", valign="top"):
with p():
with a(href=os.path.join('images', link)):
img(style="width:%dpx" % width, src=os.path.join('images', im))
br()
p(txt)
def save(self):
"""save the current content to the HMTL file"""
html_file = '%s/index.html' % self.web_dir
f = open(html_file, 'wt')
f.write(self.doc.render())
f.close()
if __name__ == '__main__': # we show an example usage here.
html = HTML('web/', 'test_html')
html.add_header('hello world')
ims, txts, links = [], [], []
for n in range(4):
ims.append('image_%d.png' % n)
txts.append('text_%d' % n)
links.append('image_%d.png' % n)
html.add_images(ims, txts, links)
html.save()
|
"""This package includes a miscellaneous collection of useful helper functions."""
|
from __future__ import print_function
import os
import tarfile
import requests
from warnings import warn
from zipfile import ZipFile
from bs4 import BeautifulSoup
from os.path import abspath, isdir, join, basename
class GetData(object):
"""A Python script for downloading CycleGAN or pix2pix datasets.
Parameters:
technique (str) -- One of: 'cyclegan' or 'pix2pix'.
verbose (bool) -- If True, print additional information.
Examples:
>>> from util.get_data import GetData
>>> gd = GetData(technique='cyclegan')
>>> new_data_path = gd.get(save_path='./datasets') # options will be displayed.
    Alternatively, you can use the bash scripts 'scripts/download_pix2pix_model.sh'
and 'scripts/download_cyclegan_model.sh'.
"""
def __init__(self, technique='cyclegan', verbose=True):
url_dict = {
'pix2pix': 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/',
'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets'
}
self.url = url_dict.get(technique.lower())
self._verbose = verbose
def _print(self, text):
if self._verbose:
print(text)
@staticmethod
def _get_options(r):
soup = BeautifulSoup(r.text, 'lxml')
options = [h.text for h in soup.find_all('a', href=True)
if h.text.endswith(('.zip', 'tar.gz'))]
return options
def _present_options(self):
r = requests.get(self.url)
options = self._get_options(r)
print('Options:\n')
for i, o in enumerate(options):
print("{0}: {1}".format(i, o))
choice = input("\nPlease enter the number of the "
"dataset above you wish to download:")
return options[int(choice)]
def _download_data(self, dataset_url, save_path):
if not isdir(save_path):
os.makedirs(save_path)
base = basename(dataset_url)
temp_save_path = join(save_path, base)
with open(temp_save_path, "wb") as f:
r = requests.get(dataset_url)
f.write(r.content)
if base.endswith('.tar.gz'):
obj = tarfile.open(temp_save_path)
elif base.endswith('.zip'):
obj = ZipFile(temp_save_path, 'r')
else:
raise ValueError("Unknown File Type: {0}.".format(base))
self._print("Unpacking Data...")
obj.extractall(save_path)
obj.close()
os.remove(temp_save_path)
def get(self, save_path, dataset=None):
"""
Download a dataset.
Parameters:
save_path (str) -- A directory to save the data to.
dataset (str) -- (optional). A specific dataset to download.
Note: this must include the file extension.
If None, options will be presented for you
to choose from.
Returns:
save_path_full (str) -- the absolute path to the downloaded data.
"""
if dataset is None:
selected_dataset = self._present_options()
else:
selected_dataset = dataset
save_path_full = join(save_path, selected_dataset.split('.')[0])
if isdir(save_path_full):
warn("\n'{0}' already exists. Voiding Download.".format(
save_path_full))
else:
self._print('Downloading Data...')
url = "{0}/{1}".format(self.url, selected_dataset)
self._download_data(url, save_path=save_path)
return abspath(save_path_full)
|
import numpy as np
import os
import sys
import ntpath
import time
from . import util, html
from subprocess import Popen, PIPE
if sys.version_info[0] == 2:
VisdomExceptionBase = Exception
else:
VisdomExceptionBase = ConnectionError
def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
"""Save images to the disk.
Parameters:
        webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
image_path (str) -- the string is used to create image paths
aspect_ratio (float) -- the aspect ratio of saved images
width (int) -- the images will be resized to width x width
This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
"""
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
webpage.add_header(name)
ims, txts, links = [], [], []
for label, im_data in visuals.items():
im = util.tensor2im(im_data)
image_name = '%s_%s.png' % (name, label)
save_path = os.path.join(image_dir, image_name)
util.save_image(im, save_path, aspect_ratio=aspect_ratio)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=width)
class Visualizer():
"""This class includes several functions that can display/save images and print/save logging information.
It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.
"""
def __init__(self, opt):
"""Initialize the Visualizer class
Parameters:
opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
Step 1: Cache the training/test options
Step 2: connect to a visdom server
        Step 3: create an HTML object for saving HTML files
Step 4: create a logging file to store training losses
"""
self.opt = opt # cache the option
self.display_id = opt.display_id
self.use_html = opt.isTrain and not opt.no_html
self.win_size = opt.display_winsize
self.name = opt.name
self.port = opt.display_port
self.saved = False
if self.display_id > 0: # connect to a visdom server given <display_port> and <display_server>
import visdom
self.ncols = opt.display_ncols
self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)
if not self.vis.check_connection():
self.create_visdom_connections()
if self.use_html: # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/
self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
self.img_dir = os.path.join(self.web_dir, 'images')
util.mkdirs([self.web_dir, self.img_dir])
# create a logging file to store training losses
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
def reset(self):
"""Reset the self.saved status"""
self.saved = False
def create_visdom_connections(self):
"""If the program could not connect to Visdom server, this function will start a new server at port < self.port > """
cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port
print('\n\nCould not connect to Visdom server. \n Trying to start a server....')
print('Command: %s' % cmd)
Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
def display_current_results(self, visuals, epoch, save_result):
"""Display current results on visdom; save current results to an HTML file.
Parameters:
            visuals (OrderedDict) -- dictionary of images to display or save
            epoch (int) -- the current epoch
            save_result (bool) -- whether to save the current results to an HTML file
"""
if self.display_id > 0: # show images in the browser using visdom
ncols = self.ncols
if ncols > 0: # show all the images in one visdom panel
ncols = min(ncols, len(visuals))
h, w = next(iter(visuals.values())).shape[:2]
table_css = """<style>
table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}
table td {width: % dpx; height: % dpx; padding: 4px; outline: 4px solid black}
</style>""" % (w, h) # create a table css
# create a table of images.
title = self.name
label_html = ''
label_html_row = ''
images = []
idx = 0
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
label_html_row += '<td>%s</td>' % label
images.append(image_numpy.transpose([2, 0, 1]))
idx += 1
if idx % ncols == 0:
label_html += '<tr>%s</tr>' % label_html_row
label_html_row = ''
white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
while idx % ncols != 0:
images.append(white_image)
label_html_row += '<td></td>'
idx += 1
if label_html_row != '':
label_html += '<tr>%s</tr>' % label_html_row
try:
self.vis.images(images, nrow=ncols, win=self.display_id + 1,
padding=2, opts=dict(title=title + ' images'))
label_html = '<table>%s</table>' % label_html
self.vis.text(table_css + label_html, win=self.display_id + 2,
opts=dict(title=title + ' labels'))
except VisdomExceptionBase:
self.create_visdom_connections()
else: # show each image in a separate visdom panel;
idx = 1
try:
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),
win=self.display_id + idx)
idx += 1
except VisdomExceptionBase:
self.create_visdom_connections()
if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.
self.saved = True
# save images to the disk
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
util.save_image(image_numpy, img_path)
# update website
webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1)
for n in range(epoch, 0, -1):
webpage.add_header('epoch [%d]' % n)
ims, txts, links = [], [], []
for label, image_numpy in visuals.items():
                    image_numpy = util.tensor2im(image_numpy)
img_path = 'epoch%.3d_%s.png' % (n, label)
ims.append(img_path)
txts.append(label)
links.append(img_path)
webpage.add_images(ims, txts, links, width=self.win_size)
webpage.save()
def plot_current_losses(self, epoch, counter_ratio, losses):
"""display the current losses on visdom display: dictionary of error labels and values
Parameters:
epoch (int) -- current epoch
            counter_ratio (float) -- progress (percentage) in the current epoch, between 0 and 1
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
"""
if not hasattr(self, 'plot_data'):
self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}
self.plot_data['X'].append(epoch + counter_ratio)
self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])
try:
self.vis.line(
X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
Y=np.array(self.plot_data['Y']),
opts={
'title': self.name + ' loss over time',
'legend': self.plot_data['legend'],
'xlabel': 'epoch',
'ylabel': 'loss'},
win=self.display_id)
except VisdomExceptionBase:
self.create_visdom_connections()
# losses: same format as |losses| of plot_current_losses
def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
"""print current losses on console; also save the losses to the disk
Parameters:
epoch (int) -- current epoch
iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
t_comp (float) -- computational time per data point (normalized by batch_size)
t_data (float) -- data loading time per data point (normalized by batch_size)
"""
message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
for k, v in losses.items():
message += '%s: %.3f ' % (k, v)
print(message) # print the message
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message) # save the message
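# Illustrative call pattern (added for clarity; a sketch of how the original
# CycleGAN/pix2pix train.py drives this class -- the loop variables total_iters,
# epoch_iter, t_comp, t_data and the flags display_freq/print_freq come from that
# script and are not defined in this file):
#
#     visualizer = Visualizer(opt)
#     for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1):
#         visualizer.reset()                       # allow saving results to HTML once per epoch
#         for i, data in enumerate(dataset):
#             ...one optimization step...
#             if total_iters % opt.display_freq == 0:
#                 visualizer.display_current_results(model.get_current_visuals(), epoch, save_result=True)
#             if total_iters % opt.print_freq == 0:
#                 losses = model.get_current_losses()
#                 visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
#                 if opt.display_id > 0:
#                     visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)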
|
from .base_model import BaseModel
from . import networks
class TestModel(BaseModel):
""" This TesteModel can be used to generate CycleGAN results for only one direction.
This model will automatically set '--dataset_mode single', which only loads the images from one collection.
See the test instruction for more details.
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
The model can only be used during test time. It requires '--dataset_mode single'.
You need to specify the network using the option '--model_suffix'.
"""
assert not is_train, 'TestModel cannot be used during training time'
parser.set_defaults(dataset_mode='single')
parser.add_argument('--model_suffix', type=str, default='', help='In checkpoints_dir, [epoch]_net_G[model_suffix].pth will be loaded as the generator.')
return parser
def __init__(self, opt):
"""Initialize the pix2pix class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
assert(not opt.isTrain)
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = []
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
self.visual_names = ['real', 'fake']
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
self.model_names = ['G' + opt.model_suffix] # only generator is needed.
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
# assigns the model to self.netG_[suffix] so that it can be loaded
# please see <BaseModel.load_networks>
setattr(self, 'netG' + opt.model_suffix, self.netG) # store netG in self.
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input: a dictionary that contains the data itself and its metadata information.
        We need to use the 'single_dataset' dataset mode. It only loads images from one domain.
"""
self.real = input['A'].to(self.device)
self.image_paths = input['A_paths']
def forward(self):
"""Run forward pass."""
self.fake = self.netG(self.real) # G(real)
def optimize_parameters(self):
"""No optimization for test model."""
pass
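# Illustrative test-time flow (added for clarity; a sketch, not part of the original file):
#
#     model = TestModel(opt)          # requires opt.isTrain == False and dataset_mode 'single'
#     model.setup(opt)                # loads [epoch]_net_G[model_suffix].pth from the checkpoint dir
#     model.eval()
#     for data in dataset:            # each item is a dict like {'A': tensor, 'A_paths': [...]}
#         model.set_input(data)
#         model.test()                # runs forward() under torch.no_grad()
#         visuals = model.get_current_visuals()   # OrderedDict with 'real' and 'fake'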
|
"""Model class template
This module provides a template for users to implement custom models.
You can specify '--model template' to use this model.
The class name should be consistent with both the filename and its model option.
The filename should be <model>_model.py
The class name should be <Model>Model
It implements a simple image-to-image translation baseline based on regression loss.
Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss:
min_<netG> ||netG(data_A) - data_B||_1
You need to implement the following functions:
<modify_commandline_options>: Add model-specific options and rewrite default values for existing options.
<__init__>: Initialize this model class.
<set_input>: Unpack input data and perform data pre-processing.
<forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>.
<optimize_parameters>: Update network weights; it will be called in every training iteration.
"""
import torch
from .base_model import BaseModel
from . import networks
class TemplateModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new model-specific options and rewrite default values for existing options.
Parameters:
parser -- the option parser
is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
parser.set_defaults(dataset_mode='aligned') # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset.
if is_train:
parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss') # You can define new arguments for this model.
return parser
def __init__(self, opt):
"""Initialize this model class.
Parameters:
opt -- training/test options
A few things can be done here.
- (required) call the initialization function of BaseModel
- define loss function, visualization images, model names, and optimizers
"""
BaseModel.__init__(self, opt) # call the initialization method of BaseModel
# specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
        self.loss_names = ['G']  # <BaseModel.get_current_losses> prepends 'loss_' to each name, so this refers to self.loss_G
# specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
self.visual_names = ['data_A', 'data_B', 'output']
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
# you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
self.model_names = ['G']
# define networks; you can use opt.isTrain to specify different behaviors for training and test.
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids)
if self.isTrain: # only defined during training time
# define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
# We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device)
self.criterionLoss = torch.nn.L1Loss()
# define and initialize optimizers. You can define one optimizer for each network.
# If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers = [self.optimizer]
# Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input: a dictionary that contains the data itself and its metadata information.
"""
AtoB = self.opt.direction == 'AtoB' # use <direction> to swap data_A and data_B
self.data_A = input['A' if AtoB else 'B'].to(self.device) # get image data A
self.data_B = input['B' if AtoB else 'A'].to(self.device) # get image data B
self.image_paths = input['A_paths' if AtoB else 'B_paths'] # get image paths
def forward(self):
"""Run forward pass. This will be called by both functions <optimize_parameters> and <test>."""
self.output = self.netG(self.data_A) # generate output image given the input data_A
def backward(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
        # calculate the intermediate results if necessary; here self.output has been computed during function <forward>
# calculate loss given the input and intermediate results
self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression
self.loss_G.backward() # calculate gradients of network G w.r.t. loss_G
def optimize_parameters(self):
"""Update network weights; it will be called in every training iteration."""
self.forward() # first call forward to calculate intermediate results
self.optimizer.zero_grad() # clear network G's existing gradients
self.backward() # calculate gradients for network G
        self.optimizer.step()        # update network G's weights
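# Illustrative single training epoch (added for clarity; a sketch assuming an aligned
# dataloader that yields dicts with 'A', 'B', 'A_paths' and 'B_paths' keys):
#
#     model = TemplateModel(opt)
#     model.setup(opt)                         # create LR schedulers / load weights if requested
#     for data in dataloader:
#         model.set_input(data)                # move data_A / data_B to the right device
#         model.optimize_parameters()          # forward -> zero_grad -> backward -> step
#     print(model.get_current_losses())        # e.g. OrderedDict([('G', 0.123)])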
|
import torch
import itertools
from ..util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
class CycleGANModel(BaseModel):
"""
This class implements the CycleGAN model, for learning image-to-image translation without paired data.
The model training requires '--dataset_mode unaligned' dataset.
By default, it uses a '--netG resnet_9blocks' ResNet generator,
a '--netD basic' discriminator (PatchGAN introduced by pix2pix),
and a least-square GANs objective ('--gan_mode lsgan').
CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.
A (source domain), B (target domain).
Generators: G_A: A -> B; G_B: B -> A.
Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.
Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)
Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)
Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper)
Dropout is not used in the original CycleGAN paper.
"""
parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout
if is_train:
parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
return parser
def __init__(self, opt):
"""Initialize the CycleGAN class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
visual_names_A = ['real_A', 'fake_B', 'rec_A']
visual_names_B = ['real_B', 'fake_A', 'rec_B']
        if self.isTrain and self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_A=G_A(B) and idt_B=G_B(A)
visual_names_A.append('idt_B')
visual_names_B.append('idt_A')
self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
if self.isTrain:
self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
else: # during test time, only load Gs
self.model_names = ['G_A', 'G_B']
# define networks (both Generators and discriminators)
# The naming is different from those used in the paper.
# Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain: # define discriminators
self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels
assert(opt.input_nc == opt.output_nc)
self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss.
self.criterionCycle = torch.nn.L1Loss()
self.criterionIdt = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.fake_B = self.netG_A(self.real_A) # G_A(A)
self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A))
self.fake_A = self.netG_B(self.real_B) # G_B(B)
self.rec_B = self.netG_A(self.fake_A) # G_A(G_B(B))
def backward_D_basic(self, netD, real, fake):
"""Calculate GAN loss for the discriminator
Parameters:
netD (network) -- the discriminator D
real (tensor array) -- real images
fake (tensor array) -- images generated by a generator
Return the discriminator loss.
We also call loss_D.backward() to calculate the gradients.
"""
# Real
pred_real = netD(real)
loss_D_real = self.criterionGAN(pred_real, True)
# Fake
pred_fake = netD(fake.detach())
loss_D_fake = self.criterionGAN(pred_fake, False)
# Combined loss and calculate gradients
loss_D = (loss_D_real + loss_D_fake) * 0.5
loss_D.backward()
return loss_D
def backward_D_A(self):
"""Calculate GAN loss for discriminator D_A"""
fake_B = self.fake_B_pool.query(self.fake_B)
self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
def backward_D_B(self):
"""Calculate GAN loss for discriminator D_B"""
fake_A = self.fake_A_pool.query(self.fake_A)
self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
def backward_G(self):
"""Calculate the loss for generators G_A and G_B"""
lambda_idt = self.opt.lambda_identity
lambda_A = self.opt.lambda_A
lambda_B = self.opt.lambda_B
# Identity loss
if lambda_idt > 0:
# G_A should be identity if real_B is fed: ||G_A(B) - B||
self.idt_A = self.netG_A(self.real_B)
self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
# G_B should be identity if real_A is fed: ||G_B(A) - A||
self.idt_B = self.netG_B(self.real_A)
self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
else:
self.loss_idt_A = 0
self.loss_idt_B = 0
# GAN loss D_A(G_A(A))
self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
# GAN loss D_B(G_B(B))
self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
# Forward cycle loss || G_B(G_A(A)) - A||
self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
# Backward cycle loss || G_A(G_B(B)) - B||
self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
# combined loss and calculate gradients
self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
self.loss_G.backward()
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
# forward
self.forward() # compute fake images and reconstruction images.
# G_A and G_B
self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs
self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero
self.backward_G() # calculate gradients for G_A and G_B
self.optimizer_G.step() # update G_A and G_B's weights
# D_A and D_B
self.set_requires_grad([self.netD_A, self.netD_B], True)
self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero
self.backward_D_A() # calculate gradients for D_A
        self.backward_D_B()      # calculate gradients for D_B
self.optimizer_D.step() # update D_A and D_B's weights
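# Worked example of the generator objective computed in backward_G() above (added for
# clarity), using the default weights lambda_A = lambda_B = 10.0 and lambda_identity = 0.5:
#
#     loss_G = GAN(D_A(G_A(A)), real)                 # loss_G_A
#            + GAN(D_B(G_B(B)), real)                 # loss_G_B
#            + 10.0 * ||G_B(G_A(A)) - A||_1           # forward cycle loss (loss_cycle_A)
#            + 10.0 * ||G_A(G_B(B)) - B||_1           # backward cycle loss (loss_cycle_B)
#            + 10.0 * 0.5 * ||G_A(B) - B||_1          # identity loss for G_A (loss_idt_A)
#            + 10.0 * 0.5 * ||G_B(A) - A||_1          # identity loss for G_B (loss_idt_B)
#
# i.e. the identity terms are weighted by lambda_{A,B} * lambda_identity, so with the
# defaults each counts half as much as the corresponding cycle-consistency term.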
|
import torch
from .base_model import BaseModel
from . import networks
class Pix2PixModel(BaseModel):
""" This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.
The model training requires '--dataset_mode aligned' dataset.
    By default, it uses a '--netG unet_256' U-Net generator,
a '--netD basic' discriminator (PatchGAN),
    and a '--gan_mode vanilla' GAN loss (the cross-entropy objective used in the original GAN paper).
pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For pix2pix, we do not use image buffer
The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
"""
# changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')
if is_train:
parser.set_defaults(pool_size=0, gan_mode='vanilla')
parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
return parser
def __init__(self, opt):
"""Initialize the pix2pix class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
self.visual_names = ['real_A', 'fake_B', 'real_B']
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
if self.isTrain:
self.model_names = ['G', 'D']
else: # during test time, only load G
self.model_names = ['G']
# define networks (both generator and discriminator)
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc
self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
self.criterionL1 = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap images in domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.fake_B = self.netG(self.real_A) # G(A)
def backward_D(self):
"""Calculate GAN loss for the discriminator"""
# Fake; stop backprop to the generator by detaching fake_B
fake_AB = torch.cat((self.real_A, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator
pred_fake = self.netD(fake_AB.detach())
self.loss_D_fake = self.criterionGAN(pred_fake, False)
# Real
real_AB = torch.cat((self.real_A, self.real_B), 1)
pred_real = self.netD(real_AB)
self.loss_D_real = self.criterionGAN(pred_real, True)
# combine loss and calculate gradients
self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
self.loss_D.backward()
def backward_G(self):
"""Calculate GAN and L1 loss for the generator"""
# First, G(A) should fake the discriminator
fake_AB = torch.cat((self.real_A, self.fake_B), 1)
pred_fake = self.netD(fake_AB)
self.loss_G_GAN = self.criterionGAN(pred_fake, True)
# Second, G(A) = B
self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
# combine loss and calculate gradients
self.loss_G = self.loss_G_GAN + self.loss_G_L1
self.loss_G.backward()
def optimize_parameters(self):
self.forward() # compute fake images: G(A)
# update D
self.set_requires_grad(self.netD, True) # enable backprop for D
self.optimizer_D.zero_grad() # set D's gradients to zero
self.backward_D() # calculate gradients for D
self.optimizer_D.step() # update D's weights
# update G
self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G
self.optimizer_G.zero_grad() # set G's gradients to zero
        self.backward_G()                   # calculate gradients for G
        self.optimizer_G.step()             # update G's weights
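# Worked example of the pix2pix objective above (added for clarity), with the default
# lambda_L1 = 100.0 and the 'vanilla' GAN mode set in modify_commandline_options.
# Assuming networks.GANLoss('vanilla') is a sigmoid cross-entropy loss, as in the
# original pix2pix code, and writing BCE(pred, target) for it:
#
#     the discriminator sees channel-concatenated pairs, D(A, B) vs. D(A, G(A))
#     loss_D = 0.5 * [ BCE(D(A, G(A).detach()), fake) + BCE(D(A, B), real) ]
#     loss_G = BCE(D(A, G(A)), real) + 100.0 * ||G(A) - B||_1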
|
"""This package contains modules related to objective functions, optimizations, and network architectures.
To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
You need to implement the following five functions:
-- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-- <set_input>: unpack data from dataset and apply preprocessing.
-- <forward>: produce intermediate results.
-- <optimize_parameters>: calculate loss, gradients, and update network weights.
-- <modify_commandline_options>: (optionally) add model-specific options and set default options.
In the function <__init__>, you need to define four lists:
-- self.loss_names (str list): specify the training losses that you want to plot and save.
-- self.model_names (str list): define networks used in our training.
-- self.visual_names (str list): specify the images that you want to display and save.
    -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for a usage example.
Now you can use the model class by specifying flag '--model dummy'.
See our template model class 'template_model.py' for more details.
"""
import importlib
from .base_model import BaseModel
def find_model_using_name(model_name):
"""Import the module "models/[model_name]_model.py".
    In the file, the class called [model_name]Model() will
be instantiated. It has to be a subclass of BaseModel,
and it is case-insensitive.
"""
model_filename = f'{__package__}.{model_name}_model'
modellib = importlib.import_module(model_filename)
model = None
target_model_name = model_name.replace('_', '') + 'model'
for name, cls in modellib.__dict__.items():
if name.lower() == target_model_name.lower() \
and issubclass(cls, BaseModel):
model = cls
if model is None:
print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
exit(0)
return model
def get_option_setter(model_name):
"""Return the static method <modify_commandline_options> of the model class."""
model_class = find_model_using_name(model_name)
return model_class.modify_commandline_options
def create_model(opt):
"""Create a model given the option.
    This function instantiates the model class found by <find_model_using_name>.
This is the main interface between this package and 'train.py'/'test.py'
Example:
>>> from models import create_model
>>> model = create_model(opt)
"""
model = find_model_using_name(opt.model)
instance = model(opt)
return instance
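# Illustrative usage (added for clarity; mirrors the docstring example above):
#
#     model = create_model(opt)       # e.g. opt.model == 'cycle_gan' resolves to CycleGANModel(opt)
#     model.setup(opt)                # create schedulers and/or load saved weights
#
# find_model_using_name('cycle_gan') imports <this package>.cycle_gan_model and returns the
# CycleGANModel class; get_option_setter() exposes its modify_commandline_options so that
# BaseOptions.gather_options() can extend the command-line parser with model-specific flags.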
|
from .pix2pix_model import Pix2PixModel
import torch
from skimage import color # used for lab2rgb
import numpy as np
class ColorizationModel(Pix2PixModel):
"""This is a subclass of Pix2PixModel for image colorization (black & white image -> colorful images).
    The model training requires the '--dataset_mode colorization' dataset.
It trains a pix2pix model, mapping from L channel to ab channels in Lab color space.
By default, the colorization dataset will automatically set '--input_nc 1' and '--output_nc 2'.
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
By default, we use 'colorization' dataset for this model.
See the original pix2pix paper (https://arxiv.org/pdf/1611.07004.pdf) and colorization results (Figure 9 in the paper)
"""
Pix2PixModel.modify_commandline_options(parser, is_train)
parser.set_defaults(dataset_mode='colorization')
return parser
def __init__(self, opt):
"""Initialize the class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
For visualization, we set 'visual_names' as 'real_A' (input real image),
'real_B_rgb' (ground truth RGB image), and 'fake_B_rgb' (predicted RGB image)
        We convert the Lab image 'real_B' (inherited from Pix2PixModel) to an RGB image 'real_B_rgb',
        and the Lab image 'fake_B' (inherited from Pix2PixModel) to an RGB image 'fake_B_rgb'.
"""
# reuse the pix2pix model
Pix2PixModel.__init__(self, opt)
# specify the images to be visualized.
self.visual_names = ['real_A', 'real_B_rgb', 'fake_B_rgb']
def lab2rgb(self, L, AB):
"""Convert an Lab tensor image to a RGB numpy output
Parameters:
L (1-channel tensor array): L channel images (range: [-1, 1], torch tensor array)
AB (2-channel tensor array): ab channel images (range: [-1, 1], torch tensor array)
Returns:
rgb (RGB numpy image): rgb output images (range: [0, 255], numpy array)
"""
AB2 = AB * 110.0
L2 = (L + 1.0) * 50.0
Lab = torch.cat([L2, AB2], dim=1)
Lab = Lab[0].data.cpu().float().numpy()
Lab = np.transpose(Lab.astype(np.float64), (1, 2, 0))
rgb = color.lab2rgb(Lab) * 255
return rgb
def compute_visuals(self):
"""Calculate additional output images for visdom and HTML visualization"""
self.real_B_rgb = self.lab2rgb(self.real_A, self.real_B)
self.fake_B_rgb = self.lab2rgb(self.real_A, self.fake_B)
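# Standalone sketch of the Lab -> RGB mapping used in lab2rgb() above (added for clarity;
# illustrative values only): L in [-1, 1] is rescaled to [0, 100] and ab in [-1, 1] is
# rescaled to [-110, 110] before skimage's color.lab2rgb() is applied, e.g.
#
#     L, AB = torch.zeros(1, 1, 4, 4), torch.zeros(1, 2, 4, 4)     # mid-grey, no chroma
#     Lab = torch.cat([(L + 1.0) * 50.0, AB * 110.0], dim=1)
#     rgb = color.lab2rgb(np.transpose(Lab[0].numpy().astype(np.float64), (1, 2, 0)))
#     # every pixel is roughly (0.47, 0.47, 0.47), i.e. a neutral mid grey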
|
import os
import torch
from collections import OrderedDict
from abc import ABC, abstractmethod
from . import networks
class BaseModel(ABC):
"""This class is an abstract base class (ABC) for models.
To create a subclass, you need to implement the following five functions:
-- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-- <set_input>: unpack data from dataset and apply preprocessing.
-- <forward>: produce intermediate results.
-- <optimize_parameters>: calculate losses, gradients, and update network weights.
-- <modify_commandline_options>: (optionally) add model-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the BaseModel class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
When creating your custom class, you need to implement your own initialization.
In this function, you should first call <BaseModel.__init__(self, opt)>
Then, you need to define four lists:
-- self.loss_names (str list): specify the training losses that you want to plot and save.
-- self.model_names (str list): define networks used in our training.
-- self.visual_names (str list): specify the images that you want to display and save.
-- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
"""
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.optimizers = []
self.image_paths = []
self.metric = 0 # used for learning rate policy 'plateau'
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new model-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): includes the data itself and its metadata information.
"""
pass
@abstractmethod
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
pass
@abstractmethod
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
pass
def setup(self, opt):
"""Load and print networks; create schedulers
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
if self.isTrain:
self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
if not self.isTrain or opt.continue_train:
load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
self.load_networks(load_suffix)
def eval(self):
"""Make models eval mode during test time"""
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
net.eval()
def test(self):
"""Forward function used in test time.
This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
It also calls <compute_visuals> to produce additional visualization results
"""
with torch.no_grad():
self.forward()
self.compute_visuals()
def compute_visuals(self):
"""Calculate additional output images for visdom and HTML visualization"""
pass
def get_image_paths(self):
""" Return image paths that are used to load current data"""
return self.image_paths
def update_learning_rate(self):
"""Update learning rates for all the networks; called at the end of every epoch"""
for scheduler in self.schedulers:
if self.opt.lr_policy == 'plateau':
scheduler.step(self.metric)
else:
scheduler.step()
        lr = self.optimizers[0].param_groups[0]['lr']
        print('learning rate = %.7f' % lr)
def get_current_visuals(self):
"""Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
visual_ret[name] = getattr(self, name)
return visual_ret
def get_current_losses(self):
"""Return traning losses / errors. train.py will print out these errors on console, and save them to a file"""
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number
return errors_ret
def save_networks(self, epoch):
"""Save all the networks to the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net' + name)
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
torch.save(net.cpu().state_dict(), save_path)
def __patch_instance_norm_state_dict(self, state_dict, net):
"""Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
modules = dict(net.named_modules())
for key in list(state_dict.keys()):
path, field = key.rsplit('.', 1)
if modules[path].__class__.__name__.startswith('InstanceNorm'):
if field in ['running_mean', 'running_var']:
if getattr(modules[path], field) is None:
state_dict.pop(key)
if field == 'num_batches_tracked':
state_dict.pop(key)
def load_networks(self, epoch):
"""Load all the networks from the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_net_%s.pth' % (epoch, name)
load_path = os.path.join(self.save_dir, load_filename)
net = getattr(self, 'net' + name)
if isinstance(net, torch.nn.DataParallel):
net = net.module
# if you are using PyTorch newer than 0.4 (e.g., built from
# GitHub source), you can remove str() on self.device
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
self.__patch_instance_norm_state_dict(state_dict, net)
net.load_state_dict(state_dict)
def print_networks(self, verbose):
"""Print the total number of parameters in the network and (if verbose) network architecture
Parameters:
verbose (bool) -- if verbose: print the network architecture
"""
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
def set_requires_grad(self, nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
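# Illustrative sketch (not part of the original file): the typical way set_requires_grad
# is used inside one GAN training iteration -- freeze the discriminator while the
# generator is updated, then unfreeze it for its own update. `netD`, `optimizer_G`,
# `optimizer_D`, `backward_G` and `backward_D` below are hypothetical attributes of a
# concrete BaseModel subclass; this is not executed anywhere in the repository.
def _example_alternating_update(model):
    # update D: gradients are allowed to flow through netD
    model.set_requires_grad(model.netD, True)
    model.optimizer_D.zero_grad()
    model.backward_D()            # assumed to compute and backprop the D loss
    model.optimizer_D.step()
    # update G: freeze D so no unnecessary gradients are computed for it
    model.set_requires_grad(model.netD, False)
    model.optimizer_G.zero_grad()
    model.backward_G()            # assumed to compute and backprop the G loss
    model.optimizer_G.step()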
|
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x): return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
return scheduler
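# Illustrative sketch (not part of the original file): building a linear-decay scheduler
# for a toy network. The `opt` namespace only mimics the handful of BaseOptions flags
# that get_scheduler actually reads; the field names are assumptions for this example.
def _example_get_scheduler():
    from argparse import Namespace
    net = nn.Linear(8, 8)
    optimizer = torch.optim.Adam(net.parameters(), lr=0.0002)
    opt = Namespace(lr_policy='linear', epoch_count=1, n_epochs=100, n_epochs_decay=100)
    scheduler = get_scheduler(optimizer, opt)
    for _ in range(3):                 # one optimizer/scheduler step per "epoch"
        optimizer.step()
        scheduler.step()
    return optimizer.param_groups[0]['lr']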
def init_weights(net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
init_weights(net, init_type, init_gain=init_gain)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
    The generator has been initialized by <init_net>. It uses ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
[n_layers]: With this mode, you can specify the number of conv layers in the discriminator
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
    The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids)
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
            prediction (tensor) - - typically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
return loss
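# Illustrative sketch (not part of the original file): how GANLoss is typically called
# on discriminator outputs. The random tensors below stand in for netD(real_images) and
# netD(fake_images.detach()); shapes are arbitrary.
def _example_gan_loss():
    criterion = GANLoss('lsgan')
    pred_real = torch.randn(4, 1, 30, 30)   # stand-in for netD(real_images)
    pred_fake = torch.randn(4, 1, 30, 30)   # stand-in for netD(fake_images.detach())
    loss_D = 0.5 * (criterion(pred_real, True) + criterion(pred_fake, False))
    return loss_D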
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
constant (float) -- the constant used in formula ( ||gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
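# Illustrative sketch (not part of the original file): a WGAN-GP style discriminator
# loss that combines the critic scores with the gradient penalty computed above.
# `netD` is any discriminator defined in this file; `real` and `fake` are image batches
# of the same shape on `device`.
def _example_wgangp_d_loss(netD, real, fake, device):
    loss_real = -netD(real).mean()
    loss_fake = netD(fake.detach()).mean()
    gp, _ = cal_gradient_penalty(netD, real, fake.detach(), device, type='mixed', lambda_gp=10.0)
    return loss_real + loss_fake + gp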
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                                an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
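# Illustrative sketch (not part of the original file): wiring a generator and a
# discriminator together on the CPU, as the pix2pix/CycleGAN models do internally.
# Shapes are arbitrary; the empty gpu_ids list keeps everything on the CPU.
def _example_define_networks():
    netG = define_G(input_nc=3, output_nc=3, ngf=64, netG='resnet_9blocks',
                    norm='instance', use_dropout=False, gpu_ids=[])
    netD = define_D(input_nc=3, ndf=64, netD='basic', norm='instance', gpu_ids=[])
    x = torch.randn(1, 3, 256, 256)
    fake = netG(x)                 # 1 x 3 x 256 x 256 image in [-1, 1] (Tanh output)
    pred = netD(fake)              # PatchGAN prediction map
    return fake.shape, pred.shape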
|
# Simple script to make sure basic usage
# such as training, testing, saving and loading
# runs without errors.
import os
def run(command):
print(command)
exit_status = os.system(command)
if exit_status > 0:
exit(1)
if __name__ == '__main__':
# download mini datasets
if not os.path.exists('./datasets/mini'):
run('bash ./datasets/download_cyclegan_dataset.sh mini')
if not os.path.exists('./datasets/mini_pix2pix'):
run('bash ./datasets/download_cyclegan_dataset.sh mini_pix2pix')
# pretrained cyclegan model
if not os.path.exists('./checkpoints/horse2zebra_pretrained/latest_net_G.pth'):
run('bash ./scripts/download_cyclegan_model.sh horse2zebra')
    run('python test.py --model test --dataroot ./datasets/mini --name horse2zebra_pretrained --no_dropout --num_test 1')
# pretrained pix2pix model
if not os.path.exists('./checkpoints/facades_label2photo_pretrained/latest_net_G.pth'):
run('bash ./scripts/download_pix2pix_model.sh facades_label2photo')
if not os.path.exists('./datasets/facades'):
run('bash ./datasets/download_pix2pix_dataset.sh facades')
run('python test.py --dataroot ./datasets/facades/ --direction BtoA --model pix2pix --name facades_label2photo_pretrained --num_test 1')
# cyclegan train/test
run('python train.py --model cycle_gan --name temp_cyclegan --dataroot ./datasets/mini --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 10 --print_freq 1 --display_id -1')
run('python test.py --model test --name temp_cyclegan --dataroot ./datasets/mini --num_test 1 --model_suffix "_A" --no_dropout')
# pix2pix train/test
run('python train.py --model pix2pix --name temp_pix2pix --dataroot ./datasets/mini_pix2pix --n_epochs 1 --n_epochs_decay 5 --save_latest_freq 10 --display_id -1')
run('python test.py --model pix2pix --name temp_pix2pix --dataroot ./datasets/mini_pix2pix --num_test 1')
# template train/test
run('python train.py --model template --name temp2 --dataroot ./datasets/mini_pix2pix --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 10 --display_id -1')
run('python test.py --model template --name temp2 --dataroot ./datasets/mini_pix2pix --num_test 1')
# colorization train/test (optional)
if not os.path.exists('./datasets/mini_colorization'):
run('bash ./datasets/download_cyclegan_dataset.sh mini_colorization')
run('python train.py --model colorization --name temp_color --dataroot ./datasets/mini_colorization --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 5 --display_id -1')
run('python test.py --model colorization --name temp_color --dataroot ./datasets/mini_colorization --num_test 1')
|
# The following code is modified from https://github.com/shelhamer/clockwork-fcn
import sys
import os
import glob
import numpy as np
from PIL import Image
class cityscapes:
def __init__(self, data_path):
# data_path something like /data2/cityscapes
self.dir = data_path
self.classes = ['road', 'sidewalk', 'building', 'wall', 'fence',
'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain',
'sky', 'person', 'rider', 'car', 'truck',
'bus', 'train', 'motorcycle', 'bicycle']
self.mean = np.array((72.78044, 83.21195, 73.45286), dtype=np.float32)
# import cityscapes label helper and set up label mappings
sys.path.insert(0, '{}/scripts/helpers/'.format(self.dir))
labels = __import__('labels')
self.id2trainId = {label.id: label.trainId for label in labels.labels} # dictionary mapping from raw IDs to train IDs
self.trainId2color = {label.trainId: label.color for label in labels.labels} # dictionary mapping train IDs to colors as 3-tuples
def get_dset(self, split):
'''
List images as (city, id) for the specified split
TODO(shelhamer) generate splits from cityscapes itself, instead of
relying on these separately made text files.
'''
if split == 'train':
dataset = open('{}/ImageSets/segFine/train.txt'.format(self.dir)).read().splitlines()
else:
dataset = open('{}/ImageSets/segFine/val.txt'.format(self.dir)).read().splitlines()
return [(item.split('/')[0], item.split('/')[1]) for item in dataset]
def load_image(self, split, city, idx):
im = Image.open('{}/leftImg8bit_sequence/{}/{}/{}_leftImg8bit.png'.format(self.dir, split, city, idx))
return im
def assign_trainIds(self, label):
"""
Map the given label IDs to the train IDs appropriate for training
Use the label mapping provided in labels.py from the cityscapes scripts
"""
label = np.array(label, dtype=np.float32)
if sys.version_info[0] < 3:
for k, v in self.id2trainId.iteritems():
label[label == k] = v
else:
for k, v in self.id2trainId.items():
label[label == k] = v
return label
def load_label(self, split, city, idx):
"""
Load label image as 1 x height x width integer array of label indices.
The leading singleton dimension is required by the loss.
"""
label = Image.open('{}/gtFine/{}/{}/{}_gtFine_labelIds.png'.format(self.dir, split, city, idx))
label = self.assign_trainIds(label) # get proper labels for eval
label = np.array(label, dtype=np.uint8)
label = label[np.newaxis, ...]
return label
def preprocess(self, im):
"""
Preprocess loaded image (by load_image) for Caffe:
- cast to float
- switch channels RGB -> BGR
- subtract mean
- transpose to channel x height x width order
"""
in_ = np.array(im, dtype=np.float32)
in_ = in_[:, :, ::-1]
in_ -= self.mean
in_ = in_.transpose((2, 0, 1))
return in_
def palette(self, label):
'''
Map trainIds to colors as specified in labels.py
'''
if label.ndim == 3:
label = label[0]
color = np.empty((label.shape[0], label.shape[1], 3))
if sys.version_info[0] < 3:
for k, v in self.trainId2color.iteritems():
color[label == k, :] = v
else:
for k, v in self.trainId2color.items():
color[label == k, :] = v
return color
def make_boundaries(label, thickness=None):
"""
Input is an image label, output is a numpy array mask encoding the boundaries of the objects
Extract pixels at the true boundary by dilation - erosion of label.
Don't just pick the void label as it is not exclusive to the boundaries.
"""
assert(thickness is not None)
import skimage.morphology as skm
void = 255
mask = np.logical_and(label > 0, label != void)[0]
selem = skm.disk(thickness)
boundaries = np.logical_xor(skm.dilation(mask, selem),
skm.erosion(mask, selem))
return boundaries
def list_label_frames(self, split):
"""
Select labeled frames from a split for evaluation
collected as (city, shot, idx) tuples
"""
def file2idx(f):
"""Helper to convert file path into frame ID"""
city, shot, frame = (os.path.basename(f).split('_')[:3])
return "_".join([city, shot, frame])
frames = []
cities = [os.path.basename(f) for f in glob.glob('{}/gtFine/{}/*'.format(self.dir, split))]
for c in cities:
files = sorted(glob.glob('{}/gtFine/{}/{}/*labelIds.png'.format(self.dir, split, c)))
frames.extend([file2idx(f) for f in files])
return frames
def collect_frame_sequence(self, split, idx, length):
"""
Collect sequence of frames preceding (and including) a labeled frame
as a list of Images.
Note: 19 preceding frames are provided for each labeled frame.
"""
SEQ_LEN = length
city, shot, frame = idx.split('_')
frame = int(frame)
frame_seq = []
for i in range(frame - SEQ_LEN, frame + 1):
frame_path = '{0}/leftImg8bit_sequence/val/{1}/{1}_{2}_{3:0>6d}_leftImg8bit.png'.format(
self.dir, city, shot, i)
frame_seq.append(Image.open(frame_path))
return frame_seq
|
# The following code is modified from https://github.com/shelhamer/clockwork-fcn
import numpy as np
def get_out_scoremap(net):
return net.blobs['score'].data[0].argmax(axis=0).astype(np.uint8)
def feed_net(net, in_):
"""
Load prepared input into net.
"""
net.blobs['data'].reshape(1, *in_.shape)
net.blobs['data'].data[...] = in_
def segrun(net, in_):
feed_net(net, in_)
net.forward()
return get_out_scoremap(net)
def fast_hist(a, b, n):
k = np.where((a >= 0) & (a < n))[0]
bc = np.bincount(n * a[k].astype(int) + b[k], minlength=n**2)
if len(bc) != n**2:
# ignore this example if dimension mismatch
return 0
return bc.reshape(n, n)
def get_scores(hist):
# Mean pixel accuracy
acc = np.diag(hist).sum() / (hist.sum() + 1e-12)
# Per class accuracy
cl_acc = np.diag(hist) / (hist.sum(1) + 1e-12)
# Per class IoU
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist) + 1e-12)
return acc, np.nanmean(cl_acc), np.nanmean(iu), cl_acc, iu
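# Illustrative sketch (not part of the original file): evaluating dummy label/prediction
# maps with fast_hist and get_scores, the same way evaluate.py accumulates per-frame
# histograms for Cityscapes. The arrays below are random stand-ins for real label maps.
def _example_scores():
    n_cl = 19
    gt = np.random.randint(0, n_cl, size=(2, 64, 64))
    pred = np.random.randint(0, n_cl, size=(2, 64, 64))
    hist = fast_hist(gt.flatten(), pred.flatten(), n_cl)
    mean_pixel_acc, mean_class_acc, mean_iou, _, _ = get_scores(hist)
    return mean_pixel_acc, mean_class_acc, mean_iou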
|
import os
import caffe
import argparse
import numpy as np
import scipy.misc
from PIL import Image
from util import segrun, fast_hist, get_scores
from cityscapes import cityscapes
parser = argparse.ArgumentParser()
parser.add_argument("--cityscapes_dir", type=str, required=True, help="Path to the original cityscapes dataset")
parser.add_argument("--result_dir", type=str, required=True, help="Path to the generated images to be evaluated")
parser.add_argument("--output_dir", type=str, required=True, help="Where to save the evaluation results")
parser.add_argument("--caffemodel_dir", type=str, default='./scripts/eval_cityscapes/caffemodel/', help="Where the FCN-8s caffemodel stored")
parser.add_argument("--gpu_id", type=int, default=0, help="Which gpu id to use")
parser.add_argument("--split", type=str, default='val', help="Data split to be evaluated")
parser.add_argument("--save_output_images", type=int, default=0, help="Whether to save the FCN output images")
args = parser.parse_args()
def main():
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
if args.save_output_images > 0:
output_image_dir = args.output_dir + 'image_outputs/'
if not os.path.isdir(output_image_dir):
os.makedirs(output_image_dir)
CS = cityscapes(args.cityscapes_dir)
n_cl = len(CS.classes)
label_frames = CS.list_label_frames(args.split)
caffe.set_device(args.gpu_id)
caffe.set_mode_gpu()
net = caffe.Net(args.caffemodel_dir + '/deploy.prototxt',
args.caffemodel_dir + 'fcn-8s-cityscapes.caffemodel',
caffe.TEST)
hist_perframe = np.zeros((n_cl, n_cl))
for i, idx in enumerate(label_frames):
if i % 10 == 0:
print('Evaluating: %d/%d' % (i, len(label_frames)))
city = idx.split('_')[0]
# idx is city_shot_frame
label = CS.load_label(args.split, city, idx)
im_file = args.result_dir + '/' + idx + '_leftImg8bit.png'
im = np.array(Image.open(im_file))
im = scipy.misc.imresize(im, (label.shape[1], label.shape[2]))
out = segrun(net, CS.preprocess(im))
hist_perframe += fast_hist(label.flatten(), out.flatten(), n_cl)
if args.save_output_images > 0:
label_im = CS.palette(label)
pred_im = CS.palette(out)
scipy.misc.imsave(output_image_dir + '/' + str(i) + '_pred.jpg', pred_im)
scipy.misc.imsave(output_image_dir + '/' + str(i) + '_gt.jpg', label_im)
scipy.misc.imsave(output_image_dir + '/' + str(i) + '_input.jpg', im)
mean_pixel_acc, mean_class_acc, mean_class_iou, per_class_acc, per_class_iou = get_scores(hist_perframe)
with open(args.output_dir + '/evaluation_results.txt', 'w') as f:
f.write('Mean pixel accuracy: %f\n' % mean_pixel_acc)
f.write('Mean class accuracy: %f\n' % mean_class_acc)
f.write('Mean class IoU: %f\n' % mean_class_iou)
f.write('************ Per class numbers below ************\n')
for i, cl in enumerate(CS.classes):
while len(cl) < 15:
cl = cl + ' '
f.write('%s: acc = %f, iou = %f\n' % (cl, per_class_acc[i], per_class_iou[i]))
main()
|
# HED batch processing script; modified from https://github.com/s9xie/hed/blob/master/examples/hed/HED-tutorial.ipynb
# Step 1: download the hed repo: https://github.com/s9xie/hed
# Step 2: download the models and protoxt, and put them under {caffe_root}/examples/hed/
# Step 3: put this script under {caffe_root}/examples/hed/
# Step 4: run the following script:
# python batch_hed.py --images_dir=/data/to/path/photos/ --hed_mat_dir=/data/to/path/hed_mat_files/
# The code sometimes crashes after the computation is done, with an error like "Check failed: ... driver shutting down"; you can simply kill the job.
# Large images may run out of GPU memory, so resize the images before running this script.
# Step 5: run the MATLAB post-processing script "PostprocessHED.m"
import caffe
import numpy as np
from PIL import Image
import os
import argparse
import sys
import scipy.io as sio
def parse_args():
    parser = argparse.ArgumentParser(description='batch processing: photos->edges')
parser.add_argument('--caffe_root', dest='caffe_root', help='caffe root', default='../../', type=str)
parser.add_argument('--caffemodel', dest='caffemodel', help='caffemodel', default='./hed_pretrained_bsds.caffemodel', type=str)
parser.add_argument('--prototxt', dest='prototxt', help='caffe prototxt file', default='./deploy.prototxt', type=str)
parser.add_argument('--images_dir', dest='images_dir', help='directory to store input photos', type=str)
parser.add_argument('--hed_mat_dir', dest='hed_mat_dir', help='directory to store output hed edges in mat file', type=str)
parser.add_argument('--border', dest='border', help='padding border', type=int, default=128)
parser.add_argument('--gpu_id', dest='gpu_id', help='gpu id', type=int, default=1)
args = parser.parse_args()
return args
args = parse_args()
for arg in vars(args):
print('[%s] =' % arg, getattr(args, arg))
# Make sure that caffe is on the python path:
caffe_root = args.caffe_root # this file is expected to be in {caffe_root}/examples/hed/
sys.path.insert(0, caffe_root + 'python')
if not os.path.exists(args.hed_mat_dir):
print('create output directory %s' % args.hed_mat_dir)
os.makedirs(args.hed_mat_dir)
imgList = os.listdir(args.images_dir)
nImgs = len(imgList)
print('#images = %d' % nImgs)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
# load net
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
# pad border
border = args.border
for i in range(nImgs):
if i % 500 == 0:
print('processing image %d/%d' % (i, nImgs))
im = Image.open(os.path.join(args.images_dir, imgList[i]))
in_ = np.array(im, dtype=np.float32)
in_ = np.pad(in_, ((border, border), (border, border), (0, 0)), 'reflect')
in_ = in_[:, :, 0:3]
in_ = in_[:, :, ::-1]
in_ -= np.array((104.00698793, 116.66876762, 122.67891434))
in_ = in_.transpose((2, 0, 1))
    # GPU mode is selected above via caffe.set_mode_gpu(); use caffe.set_mode_cpu() instead when running on CPU
# shape for input (data blob is N x C x H x W), set data
net.blobs['data'].reshape(1, *in_.shape)
net.blobs['data'].data[...] = in_
# run net and take argmax for prediction
net.forward()
fuse = net.blobs['sigmoid-fuse'].data[0][0, :, :]
# get rid of the border
fuse = fuse[(border+35):(-border+35), (border+35):(-border+35)]
# save hed file to the disk
name, ext = os.path.splitext(imgList[i])
sio.savemat(os.path.join(args.hed_mat_dir, name + '.mat'), {'edge_predict': fuse})
|
import os.path
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from skimage import color # require skimage
from PIL import Image
import numpy as np
import torchvision.transforms as transforms
class ColorizationDataset(BaseDataset):
"""This dataset class can load a set of natural images in RGB, and convert RGB format into (L, ab) pairs in Lab color space.
This dataset is required by pix2pix-based colorization model ('--model colorization')
"""
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
By default, the number of channels for input image is 1 (L) and
the number of channels for output image is 2 (ab). The direction is from A to B
"""
parser.set_defaults(input_nc=1, output_nc=2, direction='AtoB')
return parser
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir = os.path.join(opt.dataroot, opt.phase)
self.AB_paths = sorted(make_dataset(self.dir, opt.max_dataset_size))
assert(opt.input_nc == 1 and opt.output_nc == 2 and opt.direction == 'AtoB')
self.transform = get_transform(self.opt, convert=False)
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) - - the L channel of an image
B (tensor) - - the ab channels of the same image
A_paths (str) - - image paths
B_paths (str) - - image paths (same as A_paths)
"""
path = self.AB_paths[index]
im = Image.open(path).convert('RGB')
im = self.transform(im)
im = np.array(im)
lab = color.rgb2lab(im).astype(np.float32)
lab_t = transforms.ToTensor()(lab)
A = lab_t[[0], ...] / 50.0 - 1.0
B = lab_t[[1, 2], ...] / 110.0
return {'A': A, 'B': B, 'A_paths': path, 'B_paths': path}
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.AB_paths)
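# Illustrative sketch (not part of the original file): the same RGB -> (L, ab) conversion
# and scaling that __getitem__ performs, applied to a random array instead of a file on
# disk, so the resulting value ranges (L and ab both roughly in [-1, 1]) are easy to inspect.
def _example_rgb_to_lab_pair():
    rgb = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
    lab = color.rgb2lab(rgb).astype(np.float32)
    lab_t = transforms.ToTensor()(lab)
    A = lab_t[[0], ...] / 50.0 - 1.0      # L channel, normalized
    B = lab_t[[1, 2], ...] / 110.0        # ab channels, normalized
    return A.shape, B.shape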
|
"""This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
"""
import random
import numpy as np
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from abc import ABC, abstractmethod
class BaseDataset(data.Dataset, ABC):
"""This class is an abstract base class (ABC) for datasets.
To create a subclass, you need to implement the following four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the class; save the options in the class
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
self.opt = opt
self.root = opt.dataroot
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def __len__(self):
"""Return the total number of images in the dataset."""
return 0
@abstractmethod
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns:
            a dictionary of data with their names. It usually contains the data itself and its metadata information.
"""
pass
def get_params(opt, size):
w, h = size
new_h = h
new_w = w
if opt.preprocess == 'resize_and_crop':
new_h = new_w = opt.load_size
elif opt.preprocess == 'scale_width_and_crop':
new_w = opt.load_size
new_h = opt.load_size * h // w
x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
flip = random.random() > 0.5
return {'crop_pos': (x, y), 'flip': flip}
def get_transform(opt, params=None, grayscale=False, method=transforms.InterpolationMode.BICUBIC, convert=True):
transform_list = []
if grayscale:
transform_list.append(transforms.Grayscale(1))
if 'resize' in opt.preprocess:
osize = [opt.load_size, opt.load_size]
transform_list.append(transforms.Resize(osize, method))
elif 'scale_width' in opt.preprocess:
transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))
if 'crop' in opt.preprocess:
if params is None:
transform_list.append(transforms.RandomCrop(opt.crop_size))
else:
transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))
if opt.preprocess == 'none':
transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))
if not opt.no_flip:
if params is None:
transform_list.append(transforms.RandomHorizontalFlip())
elif params['flip']:
transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
if convert:
transform_list += [transforms.ToTensor()]
if grayscale:
transform_list += [transforms.Normalize((0.5,), (0.5,))]
else:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
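# Illustrative sketch (not part of the original file): building the default train-time
# pipeline with a minimal argparse-style namespace. Only the fields that get_params and
# get_transform actually read are provided; their names follow BaseOptions.
def _example_build_transform():
    from argparse import Namespace
    opt = Namespace(preprocess='resize_and_crop', load_size=286, crop_size=256, no_flip=False)
    params = get_params(opt, (512, 384))          # (width, height) of the raw image
    transform = get_transform(opt, params=params, grayscale=False)
    img = Image.new('RGB', (512, 384))
    return transform(img).shape                   # -> torch.Size([3, 256, 256])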
def __transforms2pil_resize(method):
    """Map a torchvision InterpolationMode to the corresponding PIL resampling filter."""
    mapper = {transforms.InterpolationMode.BILINEAR: Image.Resampling.BILINEAR,
              transforms.InterpolationMode.BICUBIC: Image.Resampling.BICUBIC,
              transforms.InterpolationMode.NEAREST: Image.Resampling.NEAREST,
              transforms.InterpolationMode.LANCZOS: Image.Resampling.LANCZOS}
    return mapper.get(method, method)
def __make_power_2(img, base, method=transforms.InterpolationMode.BICUBIC):
    method = __transforms2pil_resize(method)
    ow, oh = img.size
    h = int(round(oh / base) * base)
    w = int(round(ow / base) * base)
    if h == oh and w == ow:
        return img
    __print_size_warning(ow, oh, w, h)
    return img.resize((w, h), method)
def __scale_width(img, target_size, crop_size, method=transforms.InterpolationMode.BICUBIC):
    method = __transforms2pil_resize(method)
    ow, oh = img.size
    if ow == target_size and oh >= crop_size:
        return img
    w = target_size
    h = int(max(target_size * oh / ow, crop_size))
    return img.resize((w, h), method)
def __crop(img, pos, size):
ow, oh = img.size
x1, y1 = pos
tw = th = size
if (ow > tw or oh > th):
return img.crop((x1, y1, x1 + tw, y1 + th))
return img
def __flip(img, flip):
if flip:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img
def __print_size_warning(ow, oh, w, h):
"""Print warning information about image size(only print once)"""
if not hasattr(__print_size_warning, 'has_printed'):
print("The image size needs to be a multiple of 4. "
"The loaded image size was (%d, %d), so it was adjusted to "
"(%d, %d). This adjustment will be done to all images "
"whose sizes are not multiples of 4" % (ow, oh, w, h))
__print_size_warning.has_printed = True
|
"""Dataset class template
This module provides a template for users to implement custom datasets.
You can specify '--dataset_mode template' to use this dataset.
The class name should be consistent with both the filename and its dataset_mode option.
The filename should be <dataset_mode>_dataset.py
The class name should be <Dataset_mode>Dataset
You need to implement the following functions:
-- <modify_commandline_options>: Add dataset-specific options and rewrite default values for existing options.
-- <__init__>: Initialize this dataset class.
-- <__getitem__>: Return a data point and its metadata information.
-- <__len__>: Return the number of images.
"""
from data.base_dataset import BaseDataset, get_transform
# from data.image_folder import make_dataset
# from PIL import Image
class TemplateDataset(BaseDataset):
"""A template dataset class for you to implement custom datasets."""
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option')
parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0) # specify dataset-specific default values
return parser
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
A few things can be done here.
- save the options (have been done in BaseDataset)
- get image paths and meta information of the dataset.
- define the image transformation.
"""
# save the option and dataset root
BaseDataset.__init__(self, opt)
# get the image paths of your dataset;
self.image_paths = [] # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root
# define the default transform function. You can use <base_dataset.get_transform>; You can also define your custom transform function
self.transform = get_transform(opt)
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index -- a random integer for data indexing
Returns:
a dictionary of data with their names. It usually contains the data itself and its metadata information.
Step 1: get a random image path: e.g., path = self.image_paths[index]
Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB').
        Step 3: convert your data to a PyTorch tensor. You can use helper functions such as self.transform. e.g., data = self.transform(image)
Step 4: return a data point as a dictionary.
"""
path = 'temp' # needs to be a string
data_A = None # needs to be a tensor
data_B = None # needs to be a tensor
return {'data_A': data_A, 'data_B': data_B, 'path': path}
def __len__(self):
"""Return the total number of images."""
return len(self.image_paths)
|
"""This package includes all the modules related to data loading and preprocessing
To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
You need to implement four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point from data loader.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
See our template dataset class 'template_dataset.py' for more details.
"""
import importlib
import torch.utils.data
from .base_dataset import BaseDataset
def find_dataset_using_name(dataset_name):
"""Import the module "data/[dataset_name]_dataset.py".
In the file, the class called DatasetNameDataset() will
be instantiated. It has to be a subclass of BaseDataset,
and it is case-insensitive.
"""
dataset_filename = f'{__package__}.{dataset_name}_dataset'
datasetlib = importlib.import_module(dataset_filename)
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in datasetlib.__dict__.items():
if name.lower() == target_dataset_name.lower() \
and issubclass(cls, BaseDataset):
dataset = cls
if dataset is None:
raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
return dataset
def get_option_setter(dataset_name):
"""Return the static method <modify_commandline_options> of the dataset class."""
dataset_class = find_dataset_using_name(dataset_name)
return dataset_class.modify_commandline_options
def create_dataset(opt):
"""Create a dataset given the option.
This function wraps the class CustomDatasetDataLoader.
This is the main interface between this package and 'train.py'/'test.py'
Example:
>>> from data import create_dataset
>>> dataset = create_dataset(opt)
"""
data_loader = CustomDatasetDataLoader(opt)
dataset = data_loader.load_data()
return dataset
class CustomDatasetDataLoader():
"""Wrapper class of Dataset class that performs multi-threaded data loading"""
def __init__(self, opt):
"""Initialize this class
Step 1: create a dataset instance given the name [dataset_mode]
Step 2: create a multi-threaded data loader.
"""
self.opt = opt
dataset_class = find_dataset_using_name(opt.dataset_mode)
self.dataset = dataset_class(opt)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batch_size,
shuffle=not opt.serial_batches,
num_workers=int(opt.num_threads)
)
def load_data(self):
return self
def __len__(self):
"""Return the number of data in the dataset"""
return min(len(self.dataset), self.opt.max_dataset_size)
def __iter__(self):
"""Return a batch of data"""
for i, data in enumerate(self.dataloader):
if i * self.opt.batch_size >= self.opt.max_dataset_size:
break
yield data
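# Illustrative sketch (not part of the original file): what the dataset_mode naming
# convention resolves to. Assuming the usual repository layout, 'aligned' maps to
# data/aligned_dataset.py and the class AlignedDataset; create_dataset would then wrap
# an instance of it in CustomDatasetDataLoader.
def _example_lookup():
    dataset_class = find_dataset_using_name('aligned')     # -> AlignedDataset
    option_setter = get_option_setter('aligned')           # -> its modify_commandline_options
    return dataset_class.__name__, option_setter.__name__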
|
"""A modified image folder class
We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
so that this class can load images from both current directory and its subdirectories.
"""
import torch.utils.data as data
from PIL import Image
import os
import os.path
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
'.tif', '.TIF', '.tiff', '.TIFF',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(dir, max_dataset_size=float("inf")):
images = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
return images[:min(max_dataset_size, len(images))]
def default_loader(path):
return Image.open(path).convert('RGB')
class ImageFolder(data.Dataset):
def __init__(self, root, transform=None, return_paths=False,
loader=default_loader):
imgs = make_dataset(root)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " +
",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.transform = transform
self.return_paths = return_paths
self.loader = loader
def __getitem__(self, index):
path = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.return_paths:
return img, path
else:
return img
def __len__(self):
return len(self.imgs)
|
from .base_dataset import BaseDataset, get_transform
from .image_folder import make_dataset
from PIL import Image
class SingleDataset(BaseDataset):
"""This dataset class can load a set of images specified by the path --dataroot /path/to/data.
    It can be used for generating CycleGAN results only for one side with the model option '--model test'.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
self.transform = get_transform(opt, grayscale=(input_nc == 1))
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary that contains A and A_paths
A(tensor) - - an image in one domain
A_paths(str) - - the path of the image
"""
A_path = self.A_paths[index]
A_img = Image.open(A_path).convert('RGB')
A = self.transform(A_img)
return {'A': A, 'A_paths': A_path}
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.A_paths)
|
import os.path
from data.base_dataset import BaseDataset, get_params, get_transform
from data.image_folder import make_dataset
from PIL import Image
class AlignedDataset(BaseDataset):
"""A dataset class for paired image dataset.
It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
During test time, you need to prepare a directory '/path/to/data/test'.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_AB = os.path.join(opt.dataroot, opt.phase) # get the image directory
self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size)) # get image paths
assert(self.opt.load_size >= self.opt.crop_size) # crop_size should be smaller than the size of loaded image
self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
            index (int) -- a random integer for data indexing
        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor) -- an image in the input domain
            B (tensor) -- its corresponding image in the target domain
            A_paths (str) -- image paths
            B_paths (str) -- image paths (same as A_paths)
"""
        # read an image given a random integer index
AB_path = self.AB_paths[index]
AB = Image.open(AB_path).convert('RGB')
# split AB image into A and B
w, h = AB.size
w2 = int(w / 2)
A = AB.crop((0, 0, w2, h))
B = AB.crop((w2, 0, w, h))
# apply the same transform to both A and B
transform_params = get_params(self.opt, A.size)
A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
A = A_transform(A)
B = B_transform(B)
return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.AB_paths)
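# --- Illustration (not part of the original file) ---
# How __getitem__ splits one side-by-side {A,B} image: the left half becomes the
# input-domain image A and the right half the target-domain image B. The 512x256
# size below is a made-up stand-in for a real pair image.
if __name__ == '__main__':
    AB = Image.new('RGB', (512, 256))
    w, h = AB.size
    w2 = int(w / 2)
    A = AB.crop((0, 0, w2, h))   # left half  -> A
    B = AB.crop((w2, 0, w, h))   # right half -> B
    assert A.size == B.size == (256, 256)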
|
import os.path
from .base_dataset import BaseDataset, get_transform
from .image_folder import make_dataset
from PIL import Image
import random
class UnalignedDataset(BaseDataset):
"""
This dataset class can load unaligned/unpaired datasets.
It requires two directories to host training images from domain A '/path/to/data/trainA'
and from domain B '/path/to/data/trainB' respectively.
You can train the model with the dataset flag '--dataroot /path/to/data'.
Similarly, you need to prepare two directories:
'/path/to/data/testA' and '/path/to/data/testB' during test time.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA'
self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B') # create a path '/path/to/data/trainB'
self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
self.A_size = len(self.A_paths) # get the size of dataset A
self.B_size = len(self.B_paths) # get the size of dataset B
btoA = self.opt.direction == 'BtoA'
input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image
output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image
self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) -- an image in the input domain
B (tensor) -- its corresponding image in the target domain
A_paths (str) -- image paths
B_paths (str) -- image paths
"""
        A_path = self.A_paths[index % self.A_size]  # make sure index is within the range of dataset A
        if self.opt.serial_batches:   # use a fixed, deterministic pairing for domain B
index_B = index % self.B_size
else: # randomize the index for domain B to avoid fixed pairs.
index_B = random.randint(0, self.B_size - 1)
B_path = self.B_paths[index_B]
A_img = Image.open(A_path).convert('RGB')
B_img = Image.open(B_path).convert('RGB')
# apply image transformation
A = self.transform_A(A_img)
B = self.transform_B(B_img)
return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
def __len__(self):
"""Return the total number of images in the dataset.
As we have two datasets with potentially different number of images,
we take a maximum of
"""
return max(self.A_size, self.B_size)
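# --- Illustration (not part of the original file) ---
# How A/B indices are paired when the two domains have different sizes
# (sizes below are made up): A always wraps with modulo, while B is drawn at
# random unless --serial_batches forces the same deterministic wrapping.
if __name__ == '__main__':
    A_size, B_size = 5, 3
    for index in range(7):
        index_A = index % A_size                        # wraps: 0,1,2,3,4,0,1
        index_B_serial = index % B_size                 # serial_batches pairing
        index_B_random = random.randint(0, B_size - 1)  # default random pairing
        print(index, index_A, index_B_serial, index_B_random)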
|
from torchbenchmark.util.framework.timm.model_factory import TimmModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(TimmModel):
task = COMPUTER_VISION.GENERATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, model_name='vit_small_patch16_224', device=device,
jit=jit, batch_size=batch_size, extra_args=extra_args)
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
# Original train batch size per device: 8
# Source: https://github.com/huggingface/transformers/blob/master/examples/flax/language-modeling/run_t5_mlm_flax.py#L83
DEFAULT_TRAIN_BSIZE = 8
# Original eval batch size per device: 8
# Downscale to 1 to fit in Nvidia T4 of the infra
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(name="hf_T5_base", test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|
from torchbenchmark.util.framework.timm.model_factory import TimmModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(TimmModel):
task = COMPUTER_VISION.CLASSIFICATION
# Original train batch size 128, hardware Nvidia rtx 3090
# Source: https://gist.github.com/rwightman/bb59f9e245162cee0e38bd66bd8cd77f#file-bench_by_train-csv-L147
# Eval batch size 256, hardware Nvidia rtx 3090
# Source: https://github.com/rwightman/pytorch-image-models/blob/f7d210d759beb00a3d0834a3ce2d93f6e17f3d38/results/model_benchmark_amp_nchw_rtx3090.csv
# Downscale to 128 to fit T4
DEFAULT_TRAIN_BSIZE = 128
DEFAULT_EVAL_BSIZE = 128
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, model_name='dm_nfnet_f0', device=device,
jit=jit, batch_size=batch_size, extra_args=extra_args)
|
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from functorch import make_functional_with_buffers, vmap, grad
import functools
from pathlib import Path
from typing import Tuple
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import OTHER
def loss_for_task(net, n_inner_iter, x_spt, y_spt, x_qry, y_qry):
params, buffers, fnet = net
querysz = x_qry.size(0)
def compute_loss(new_params, buffers, x, y):
logits = fnet(new_params, buffers, x)
loss = F.cross_entropy(logits, y)
return loss
new_params = params
for _ in range(n_inner_iter):
grads = grad(compute_loss)(new_params, buffers, x_spt, y_spt)
        new_params = [p - g * 1e-1 for p, g in zip(new_params, grads)]
# The final set of adapted parameters will induce some
# final loss and accuracy on the query dataset.
# These will be used to update the model's meta-parameters.
qry_logits = fnet(new_params, buffers, x_qry)
qry_loss = F.cross_entropy(qry_logits, y_qry)
    qry_acc = (qry_logits.argmax(dim=1) == y_qry).sum() / querysz
return qry_loss, qry_acc
class Model(BenchmarkModel):
task = OTHER.OTHER_TASKS
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
ALLOW_CUSTOMIZE_BSIZE = False
# TODO: There _should_ be a way to plug in an optim here, but this
# can be a next step. For now, the optim is not customizable.
CANNOT_SET_CUSTOM_OPTIMIZER = True
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
n_way = 5
inplace_relu = True
net = nn.Sequential(
nn.Conv2d(1, 64, 3),
nn.BatchNorm2d(64, affine=True, track_running_stats=False),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, affine=True, track_running_stats=False),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, affine=True, track_running_stats=False),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(64, n_way)).to(device)
self.model = net
root = str(Path(__file__).parent.parent)
self.meta_inputs = torch.load(f'{root}/maml_omniglot/batch.pt')
self.meta_inputs = tuple([torch.from_numpy(i).to(self.device) for i in self.meta_inputs])
self.example_inputs = (self.meta_inputs[0][0],)
def get_module(self):
return self.model, self.example_inputs
def train(self):
model = self.model
model.train()
fnet, params, buffers = make_functional_with_buffers(self.model)
net = (params, buffers, fnet)
meta_opt = optim.Adam(params, lr=1e-3)
# Sample a batch of support and query images and labels.
x_spt, y_spt, x_qry, y_qry = self.meta_inputs
task_num, setsz, c_, h, w = x_spt.size()
n_inner_iter = 5
meta_opt.zero_grad()
# In parallel, trains one model per task. There is a support (x, y)
# for each task and a query (x, y) for each task.
compute_loss_for_task = functools.partial(loss_for_task, net, n_inner_iter)
qry_losses, qry_accs = vmap(compute_loss_for_task)(x_spt, y_spt, x_qry, y_qry)
# Compute the maml loss by summing together the returned losses.
qry_losses.sum().backward()
meta_opt.step()
def eval(self) -> Tuple[torch.Tensor]:
model, (example_input,) = self.get_module()
model.eval()
with torch.no_grad():
out = model(example_input)
return (out, )
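# --- Shape sketch (assumptions: Omniglot-style 1x28x28 inputs, 5-way tasks) ---
# loss_for_task is vmapped over the leading task dimension, so the support and
# query tensors loaded from batch.pt are expected to be stacked per task as
# (task_num, set_size, C, H, W) / (task_num, set_size). The sizes here are
# illustrative, not read from the benchmark data.
if __name__ == '__main__':
    task_num, setsz, qrysz = 4, 5, 15
    x_spt = torch.randn(task_num, setsz, 1, 28, 28)
    y_spt = torch.randint(0, 5, (task_num, setsz))
    x_qry = torch.randn(task_num, qrysz, 1, 28, 28)
    y_qry = torch.randint(0, 5, (task_num, qrysz))
    # vmap(functools.partial(loss_for_task, net, 5))(x_spt, y_spt, x_qry, y_qry)
    # would return per-task (loss, acc) tensors, each of shape (task_num,).
    print(x_spt.shape, y_spt.shape, x_qry.shape, y_qry.shape)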
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
import torch
from dalle2_pytorch import DALLE2, Unet, Decoder, DiffusionPriorNetwork, DiffusionPrior, OpenAIClipAdapter
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(BenchmarkModel):
task = COMPUTER_VISION.GENERATION
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
CANNOT_SET_CUSTOM_OPTIMIZER = True
def __init__(self, test, device, batch_size=None, jit=False, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
if self.device == "cpu":
raise NotImplementedError("DALL-E 2 Not Supported on CPU")
self.clip = OpenAIClipAdapter().to(self.device)
self.sample_text = self.example_input = torch.randint(0, 49408, (self.batch_size, 256)).to(self.device)
self.sample_images = torch.randn(self.batch_size, 3, 256, 256).to(self.device)
prior_network = DiffusionPriorNetwork(
dim = 512,
depth = 6,
dim_head = 64,
heads = 8
).to(self.device)
diffusion_prior = DiffusionPrior(
net = prior_network,
clip = self.clip,
timesteps = 1,
cond_drop_prob = 0.2
).to(self.device)
unet1 = Unet(
dim = 128,
image_embed_dim = 512,
cond_dim = 128,
channels = 3,
dim_mults=(1, 2, 4, 8),
text_embed_dim = 512,
cond_on_text_encodings = True # set to True for any unets that need to be conditioned on text encodings (ex. first unet in cascade)
).to(self.device)
unet2 = Unet(
dim = 16,
image_embed_dim = 512,
cond_dim = 128,
channels = 3,
dim_mults = (1, 2, 4, 8, 16)
).to(self.device)
decoder = Decoder(
unet = (unet1, unet2),
image_sizes = (128, 256),
clip = self.clip,
timesteps = 1,
sample_timesteps = (1, 1),
image_cond_drop_prob = 0.1,
text_cond_drop_prob = 0.5
).to(self.device)
self.model = DALLE2(prior=diffusion_prior, decoder=decoder).to(self.device)
if test == "train":
self.model.prior.train()
self.model.decoder.train()
elif test == "eval":
self.model.prior.eval()
self.model.decoder.eval()
def get_module(self):
return self.model, (self.example_input,)
def set_module(self, new_model):
self.model = new_model
def eval(self):
model, inputs = self.get_module()
with torch.no_grad():
images = model(*inputs)
return (images,)
def train(self):
# openai pretrained clip - defaults to ViT-B/32
clip = self.clip
# prior networks (with transformer)
diffusion_prior = self.model.prior
loss = diffusion_prior(self.sample_text, self.sample_images)
loss.backward()
# decoder (with unet)
decoder = self.model.decoder
loss = decoder(self.sample_images, self.sample_text, unet_number=1)
loss.backward()
loss = decoder(self.sample_images, self.sample_text, unet_number=2)
loss.backward()
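# --- Hedged shape sketch (not part of the original file) ---
# The benchmark feeds random data: CLIP BPE token ids of shape (batch, 256) for
# text and (batch, 3, 256, 256) images. train() optimizes the prior and each
# decoder unet on these, while eval() runs the full text -> prior -> decoder chain.
if __name__ == '__main__':
    batch = 1
    sample_text = torch.randint(0, 49408, (batch, 256))
    sample_images = torch.randn(batch, 3, 256, 256)
    print(sample_text.shape, sample_images.shape)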
|
import os
import patch
import subprocess
import sys
def patch_dalle2():
import dalle2_pytorch
current_dir = os.path.dirname(os.path.abspath(__file__))
dalle2_dir = os.path.dirname(dalle2_pytorch.__file__)
dalle2_patch = patch.fromfile(os.path.join(current_dir, "dalle2_pytorch.patch"))
if not dalle2_patch.apply(strip=1, root=dalle2_dir):
print("Failed to patch dalle2_pytorch/dalle2_pytorch.py. Exit.")
exit(1)
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
# DALLE2_pytorch requires embedding-reader
# https://github.com/lucidrains/DALLE2-pytorch/blob/00e07b7d61e21447d55e6d06d5c928cf8b67601d/setup.py#L34
# embedding-reader requires an old version of pandas and pyarrow
# https://github.com/rom1504/embedding-reader/blob/a4fd55830a502685600ed8ef07947cd1cb92b083/requirements.txt#L5
# So we need to reinstall a newer version of pandas and pyarrow, to be compatible with other models
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-U', 'pandas', 'pyarrow'])
if __name__ == '__main__':
pip_install_requirements()
    patch_dalle2()
|
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel
from torchbenchmark.tasks import COMPUTER_VISION
import torchvision.models as models
class Model(TorchVisionModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(model_name="mobilenet_v3_large", test=test, device=device, jit=jit,
batch_size=batch_size, weights=models.MobileNet_V3_Large_Weights.IMAGENET1K_V1, extra_args=extra_args)
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 8
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(name="hf_Albert", test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|