"""
Support TorchDynamo (https://github.com/facebookresearch/torchdynamo) backends
"""
import argparse
import contextlib
import distutils.util
import warnings
from typing import List, Tuple
import torch
import torch._dynamo as torchdynamo
from torchbenchmark.util.model import is_staged_train_test
def parse_torchdynamo_args(dynamo_args: List[str]) -> Tuple[argparse.Namespace, List[str]]:
parser = argparse.ArgumentParser()
available_backends = torchdynamo.list_backends(exclude_tags=None)
parser.add_argument(
"--torchdynamo", choices=available_backends, help="Specify torchdynamo backends"
)
parser.add_argument(
"--tritonmm", type=str, help="torchinductor.config.triton.mm configuration"
)
parser.add_argument(
"--dynamic_shapes",
action='store_true',
help="dynamic shape and symbolic tracing",
)
parser.add_argument(
"--pt2_debug_log",
action='store_true',
help="enable debug log for PT2 (dynamo, inductor, AOTAutograd)",
)
parser.add_argument(
"--full_graph",
action='store_true',
help="capture full graph and no python",
)
parser.add_argument(
"--optimize_dynamo_ddp",
action='store_true',
help="enable extra optimizations for DDP + dynamo"
)
parser.add_argument(
"--torchinductor_cudagraph",
type=distutils.util.strtobool,
default="true",
)
parser.add_argument(
"--torchinductor_fallback_random",
type=distutils.util.strtobool,
default="false",
)
parser.add_argument(
"--torchinductor_enable_group_fusion",
action='store_true',
help="enable group fusion in Inductor"
)
parser.add_argument(
"--torchinductor_enable_batch_fusion",
action='store_true',
help="enable batch fusion in Inductor"
)
parser.add_argument(
"--torchinductor_enable_split_cat_fx_pass",
action='store_true',
help="enable split_cat_fx_pass in Inductor"
)
parser.add_argument(
"--dynamo_disable_optimizer_step",
type=distutils.util.strtobool,
default="false",
)
args, extra_args = parser.parse_known_args(dynamo_args)
return args, extra_args
def apply_torchdynamo_args(model: 'torchbenchmark.util.model.BenchmarkModel', args: argparse.Namespace, precision: str):
if args.torchdynamo == "fx2trt" and precision == "fp16":
dynamo_optimizer = torchdynamo.optimize(torchdynamo.optimizations.backends.fx2trt_compiler_fp16)
else:
dynamo_kwargs = {}
if args.dynamic_shapes:
dynamo_kwargs["dynamic"] = True
if args.full_graph:
dynamo_kwargs["nopython"] = True
dynamo_optimizer = torchdynamo.optimize(args.torchdynamo, **dynamo_kwargs)
if args.pt2_debug_log:
import logging
torch._logging.set_logs(dynamo=logging.DEBUG, inductor=logging.DEBUG, aot=logging.DEBUG)
if args.torchdynamo == "inductor":
import torch._inductor as torchinductor
torchinductor.config.triton.cudagraphs = bool(args.torchinductor_cudagraph)
# Setup torchinductor.config.triton.mm
if args.tritonmm == "triton":
torchinductor.config.triton.mm = "triton"
# currently can't pass correctness with use_bmm = True
# torchinductor.config.triton.use_bmm = True
if args.torchinductor_enable_group_fusion:
torchinductor.config.group_fusion = True
if args.torchinductor_enable_batch_fusion:
torchinductor.config.pattern_matcher = True
torchinductor.config.batch_fusion = True
if args.torchinductor_enable_split_cat_fx_pass:
torchinductor.config.split_cat_fx_passes = True
# used for correctness checks, to avoid triton rand() behaving differently from torch rand().
torchinductor.config.fallback_random = bool(args.torchinductor_fallback_random)
if bool(args.dynamo_disable_optimizer_step):
found_optimizer_step = False
try:
model.cfg.optimizer.step = torch._dynamo.disable(model.cfg.optimizer.step)
found_optimizer_step = True
except AttributeError:
pass
try:
model.optimizer.step = torch._dynamo.disable(model.optimizer.step)
found_optimizer_step = True
except AttributeError:
pass
if not found_optimizer_step:
warnings.warn("--dynamo_disable_optimizer_step is set to True, but the optimizer could not be found on this model")
if model.test == "train":
if is_staged_train_test(model):
model.forward = dynamo_optimizer(model.forward)
else:
model.train = dynamo_optimizer(model.train)
else:
model.eval = dynamo_optimizer(model.eval)
if args.optimize_dynamo_ddp:
@contextlib.contextmanager
def optimize_ddp_ctx(val: bool):
old_value = torchdynamo.config.optimize_ddp
try:
torchdynamo.config.optimize_ddp = val
yield
finally:
torchdynamo.config.optimize_ddp = old_value
model.add_context(lambda: optimize_ddp_ctx(True))
torchdynamo.reset()
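# Illustrative sketch (not part of this module; the function name `step` is hypothetical):
# torchdynamo.optimize(backend, **kwargs) returns a wrapper, and the wrapped callable is
# traced and compiled lazily on its first call.
#
# def step(x):
#     return torch.relu(x) * 2
#
# compiled_step = torchdynamo.optimize("inductor", nopython=False)(step)
# compiled_step(torch.randn(8))  # first call triggers graph capture and compilation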
|
"""
Utils for managing backends
"""
import functools
BACKENDS = dict()
def create_backend(fn):
@functools.wraps(fn)
def inner(model: 'torchbenchmark.util.model.BenchmarkModel', **kwargs):
if model is None:
return None
try:
return fn(model, **kwargs)
except KeyboardInterrupt:
raise
except Exception as e:
print(f"{fn.__name__} error: {e}")
raise
BACKENDS[fn.__name__] = inner
return inner
def list_backends():
"""
Return valid strings that can be passed to:
@torchdynamo.optimize(<backend>)
def foo(...):
....
"""
return sorted(BACKENDS.keys())
# register the backends
from .jit import torchscript
from .ait import fx2ait
from .trt import fx2trt, torch_trt
from .cudagraph import cudagraph
__all__ = ["list_backends", "create_backend"]
|
import torch
from torchbenchmark.util.backends import create_backend
from typing import List
WARMUP_ITER = 3
@create_backend
def cudagraph(model: 'torchbenchmark.util.model.BenchmarkModel', backend_args: List[str]):
cudagraph_func_name = f"cudagraph_{model.test}"
assert hasattr(model, cudagraph_func_name), f"CUDA Graph only works on models that implement {cudagraph_func_name}()"
if model.test == "train":
assert hasattr(model, "SKIP_ZERO_GRAD"), f"The model must support skipping zero grad in its train test."
def _cudagraph():
# CUDAGraph can't be copied/pickled, disable copying in correctness checking
model.DEEPCOPY = False
model.SKIP_ZERO_GRAD = True
# warmup
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
for _ in range(WARMUP_ITER):
if model.test == "train":
model.opt.zero_grad(set_to_none=True)
model.invoke()
torch.cuda.current_stream().wait_stream(s)
# capture
cuda_graph = torch.cuda.CUDAGraph()
if model.test == "train":
model.opt.zero_grad(set_to_none=True)
with torch.cuda.graph(cuda_graph):
model.invoke()
model.g = cuda_graph
if model.test == "train":
model.train = getattr(model, cudagraph_func_name)
elif model.test == "eval":
model.eval = getattr(model, cudagraph_func_name)
else:
assert False, f"Expected model test to be train or eval, got {model.test}"
return _cudagraph, backend_args
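# Illustrative sketch (assumes a CUDA device; `net` and `static_input` are hypothetical
# names, not part of this backend): the same warmup -> capture -> replay pattern as above,
# applied to a standalone module.
#
# net = torch.nn.Linear(64, 64).cuda()
# static_input = torch.randn(8, 64, device="cuda")
# s = torch.cuda.Stream()
# s.wait_stream(torch.cuda.current_stream())
# with torch.cuda.stream(s):
#     for _ in range(WARMUP_ITER):
#         net(static_input)
# torch.cuda.current_stream().wait_stream(s)
# g = torch.cuda.CUDAGraph()
# with torch.cuda.graph(g):
#     static_output = net(static_input)
# # To replay: copy fresh data into static_input, then call g.replay();
# # static_output is updated in place.
# static_input.copy_(torch.randn(8, 64, device="cuda"))
# g.replay()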
|
from typing import List
import torch
import argparse
from torchbenchmark.util.backends import create_backend
from torchbenchmark.util.env_check import is_hf_model
def parse_torch_trt_args(backend_args: List[str]):
"""Parses CLI-provided backend arguments to extract Torch-TRT keywords
Returns kwargs dictionary and remainder arguments which were unrecognized
"""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
"--truncate_long_and_double",
default=None,
action="store_true",
help="Whether to automatically truncate long and double operations",
)
arg_parser.add_argument(
"--workspace_size", type=int, help="Size of workspace allotted to TensorRT"
)
arg_parser.add_argument(
"--min_block_size",
type=int,
help="Minimum number of operations in an accelerated TRT block",
)
arg_parser.add_argument(
"--ir",
type=str,
help="Which internal representation to use: {'ts', 'dynamo_compile', 'fx_ts_compat', ...}",
)
args, unknown = arg_parser.parse_known_args(backend_args)
# Remove unspecified arguments from the args dictionary
# (Only pass through user-specified args)
parsed_args = vars(args)
for key in list(parsed_args.keys()):
if parsed_args[key] is None:
del parsed_args[key]
return parsed_args, unknown
@create_backend
def fx2trt(model: "torchbenchmark.util.model.BenchmarkModel", backend_args: List[str]):
FP16 = model.dargs.precision == "fp16"
HF_MODEL = is_hf_model(model)
assert (
model.device == "cuda" and model.test == "eval"
), f"fx2trt only works on CUDA inference tests."
def _fx2trt():
from torch_tensorrt.fx import compile
from torch_tensorrt.fx.utils import LowerPrecision
module, example_inputs = model.get_module()
precision = LowerPrecision.FP16 if FP16 else LowerPrecision.FP32
if HF_MODEL:
from transformers.utils.fx import symbolic_trace as hf_symbolic_trace
traced_model = hf_symbolic_trace(
module, batch_size=model.batch_size, sequence_length=model.max_length
)
trt_model = compile(
traced_model,
example_inputs,
max_batch_size=model.batch_size,
lower_precision=precision,
explicit_batch_dimension=True,
max_workspace_size=20 << 30,
)
else:
trt_model = compile(
module=module,
input=example_inputs,
max_batch_size=model.batch_size,
lower_precision=precision,
)
model.set_module(trt_model)
return _fx2trt, backend_args
@create_backend
def torch_trt(
model: "torchbenchmark.util.model.BenchmarkModel", backend_args: List[str]
):
"""Backend for Torch-TRT
Can be directly invoked from the command line, for example via:
python run.py resnet18 -d cuda -t eval --backend torch_trt --precision fp32 --truncate_long_and_double
Options include:
--truncate_long_and_double: Whether to automatically truncate long and double operations
--min_block_size: Minimum number of operations in an accelerated TRT block
--workspace_size: Size of workspace allotted to TensorRT
--ir: Which internal representation to use: {"ts", "dynamo_compile", "fx_ts_compat", ...}
"""
FP16 = model.dargs.precision == "fp16"
assert (
model.device == "cuda" and model.test == "eval"
), f"Torch-TRT only works on CUDA inference tests."
# Extract relevant Torch-TRT arguments from the provided CLI arguments
torch_trt_kwargs, backend_args = parse_torch_trt_args(backend_args)
def _torch_trt():
"""Helper function for invoking Torch-TRT"""
import torch_tensorrt
module, example_inputs = model.get_module()
torch_dtype_precision = torch.half if FP16 else torch.float32
print(
f"Compiling {model.name} with batch size {model.batch_size}, precision {model.dargs.precision}, "
+ f"and {'default' if 'ir' not in torch_trt_kwargs else torch_trt_kwargs['ir']} IR"
)
trt_module = torch_tensorrt.compile(
module,
inputs=example_inputs,
enabled_precisions={torch_dtype_precision},
**torch_trt_kwargs,
)
model.set_module(trt_module)
return _torch_trt, backend_args
|
"""
Utilities to measure metrics of a model.
"""
import torch
import time
import dataclasses
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.util.experiment.instantiator import TorchBenchModelConfig
from torchbenchmark import ModelTask
from typing import List, Union, Tuple, Optional
WARMUP_ROUNDS = 10
BENCHMARK_ITERS = 15
MEMPROF_ITER = 2
NANOSECONDS_PER_MILLISECONDS = 1_000_000.0
@dataclasses.dataclass
class TorchBenchModelMetrics:
latencies: List[float]
throughputs: List[float]
cpu_peak_mem: Optional[float]
gpu_peak_mem: Optional[float]
pt2_compilation_time: Optional[float]
pt2_graph_breaks: Optional[float]
model_flops: Optional[float]
def get_latencies(func, device: str, nwarmup=WARMUP_ROUNDS, num_iter=BENCHMARK_ITERS) -> List[float]:
"Run one step of the model, and return the latency in milliseconds."
# Warm-up `nwarmup` rounds
for _i in range(nwarmup):
func()
result_summary = []
for _i in range(num_iter):
if device == "cuda":
torch.cuda.synchronize()
# Use time_ns() instead of time(), whose precision is not guaranteed to be better than
# 1 second according to https://docs.python.org/3/library/time.html#time.time.
t0 = time.time_ns()
func()
torch.cuda.synchronize() # Wait for the events to be recorded!
t1 = time.time_ns()
else:
t0 = time.time_ns()
func()
t1 = time.time_ns()
result_summary.append((t1 - t0) / NANOSECONDS_PER_MILLISECONDS)
return result_summary
def get_peak_memory(func, device: str, num_iter=MEMPROF_ITER, export_metrics_file='', metrics_needed=[], metrics_gpu_backend='dcgm', cpu_monitored_pid=None) -> Tuple[Optional[float], Optional[str], Optional[float]]:
"Run one step of the model, and return the peak memory in MB."
from components.model_analyzer.TorchBenchAnalyzer import ModelAnalyzer
new_metrics_needed = [_ for _ in metrics_needed if _ in ['cpu_peak_mem', 'gpu_peak_mem']]
if not new_metrics_needed:
raise ValueError(f"Expected metrics_needed to be non-empty, get: {metrics_needed}")
mem_model_analyzer = ModelAnalyzer(export_metrics_file, new_metrics_needed, metrics_gpu_backend, cpu_monitored_pid)
continue_num_iter = BENCHMARK_ITERS - num_iter
def work_func():
if device == "cuda":
torch.cuda.synchronize()
func()
torch.cuda.synchronize()
else:
func()
t0 = time.time_ns()
work_func()
t1 = time.time_ns()
# if total execution time is less than 15ms, we run the model for BENCHMARK_ITERS times
# to get more accurate peak memory
if (t1 - t0) < 15 * NANOSECONDS_PER_MILLISECONDS:
num_iter = BENCHMARK_ITERS
else:
num_iter = MEMPROF_ITER
mem_model_analyzer.start_monitor()
for _i in range(num_iter):
work_func()
mem_model_analyzer.stop_monitor()
mem_model_analyzer.aggregate()
device_id = None
gpu_peak_mem = None
cpu_peak_mem = None
if 'gpu_peak_mem' in metrics_needed:
device_id, gpu_peak_mem = mem_model_analyzer.calculate_gpu_peak_mem()
if 'cpu_peak_mem' in metrics_needed:
cpu_peak_mem = mem_model_analyzer.calculate_cpu_peak_mem()
if export_metrics_file:
mem_model_analyzer.update_export_name("_peak_memory")
mem_model_analyzer.export_all_records_to_csv()
return cpu_peak_mem, device_id, gpu_peak_mem
def get_model_flops(model: Union[BenchmarkModel, ModelTask]) -> float:
"Run one step of the model, and return the model total flops."
from torch.utils.flop_counter import FlopCounterMode
flop_counter = FlopCounterMode()
def work_func():
if model.device == "cuda":
torch.cuda.synchronize()
model.invoke()
torch.cuda.synchronize()
else:
model.invoke()
with flop_counter:
work_func()
total_flops = sum(flop_counter.flop_counts["Global"].values())
return total_flops
def get_model_test_metrics(model: Union[BenchmarkModel, ModelTask], metrics=[], export_metrics_file=False, metrics_gpu_backend='nvml', nwarmup=WARMUP_ROUNDS, num_iter=BENCHMARK_ITERS) -> TorchBenchModelMetrics:
import os
latencies = None
throughputs = None
cpu_peak_mem = None
gpu_peak_mem = None
pt2_compilation_time = None
pt2_graph_breaks = None
model_flops = None
if not (isinstance(model, BenchmarkModel) or isinstance(model, ModelTask)):
raise ValueError(f"Expected BenchmarkModel or ModelTask, get type: {type(model)}")
model_pid = os.getpid() if isinstance(model, BenchmarkModel) else model.worker.proc_pid()
device = model.device if isinstance(model, BenchmarkModel) else model.get_model_attribute("device")
if 'latencies' in metrics or 'throughputs' in metrics:
latencies = get_latencies(model.invoke, device, nwarmup=nwarmup, num_iter=num_iter)
if 'cpu_peak_mem' in metrics or 'gpu_peak_mem' in metrics:
cpu_peak_mem, _device_id, gpu_peak_mem = get_peak_memory(model.invoke, device, export_metrics_file=export_metrics_file, metrics_needed=metrics, metrics_gpu_backend=metrics_gpu_backend, cpu_monitored_pid=model_pid)
if 'throughputs' in metrics:
throughputs = [model.batch_size * 1000 / latency for latency in latencies]
if 'pt2_compilation_time' in metrics:
pt2_compilation_time = model.get_model_attribute('pt2_compilation_time') \
if isinstance(model, ModelTask) else model.pt2_compilation_time
if 'pt2_graph_breaks' in metrics:
pt2_graph_breaks = model.get_model_attribute('pt2_graph_breaks') \
if isinstance(model, ModelTask) else model.pt2_graph_breaks
if 'model_flops' in metrics:
model_flops = get_model_flops(model)
return TorchBenchModelMetrics(latencies, throughputs, cpu_peak_mem, gpu_peak_mem, pt2_compilation_time, pt2_graph_breaks, model_flops)
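# Illustrative usage (a minimal sketch; "resnet50" is just an example model name):
# load a model in the current process and collect latency/throughput metrics.
#
# from torchbenchmark.util.experiment.instantiator import load_model
# config = TorchBenchModelConfig(name="resnet50", test="eval", device="cuda",
#                                batch_size=None, extra_args=[])
# model = load_model(config)
# metrics = get_model_test_metrics(model, metrics=["latencies", "throughputs"])
# print(metrics.latencies, metrics.throughputs)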
def get_model_accuracy(model_config: TorchBenchModelConfig, isolated: bool=True) -> str:
import copy
from torchbenchmark.util.experiment.instantiator import load_model_isolated, load_model
# Try to load the minimal batch size; if that fails, load the default batch size
accuracy_model_config = copy.deepcopy(model_config)
if not "--accuracy" in accuracy_model_config.extra_args:
accuracy_model_config.extra_args = ["--accuracy"] + accuracy_model_config.extra_args
if isolated:
model = load_model_isolated(accuracy_model_config)
accuracy = model.get_model_attribute("accuracy")
del model
return accuracy
else:
model = load_model(model_config)
return model.accuracy
|
"""
Utilities to instantiate TorchBench models in the same process or child process.
Functions in this file don't handle exceptions.
They expect callers to handle all exceptions.
"""
import os
import importlib
import dataclasses
from typing import Optional, List, Dict
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark import _list_model_paths, load_model_by_name, ModelTask
WORKER_TIMEOUT = 3600 # seconds
BS_FIELD_NAME = "batch_size"
@dataclasses.dataclass
class TorchBenchModelConfig:
name: str
test: str
device: str
batch_size: Optional[int]
extra_args: List[str]
extra_env: Optional[Dict[str, str]] = None
def _set_extra_env(extra_env):
if not extra_env:
return
for env_key in extra_env:
os.environ[env_key] = extra_env[env_key]
def inject_model_invoke(model_task: ModelTask, inject_function):
model_task.replace_invoke(inject_function.__module__, inject_function.__name__)
def load_model_isolated(config: TorchBenchModelConfig, timeout: float=WORKER_TIMEOUT) -> ModelTask:
""" Load and return the model in a subprocess. """
task = ModelTask(config.name, timeout=timeout, extra_env=config.extra_env)
if not task.model_details.exists:
raise ValueError(f"Failed to import model task: {config.name}. Please run the model manually to make sure it succeeds, or report a bug.")
task.make_model_instance(test=config.test, device=config.device, batch_size=config.batch_size, extra_args=config.extra_args)
task_batch_size = task.get_model_attribute(BS_FIELD_NAME)
# check batch size if not measuring accuracy
if config.batch_size and config.batch_size != task_batch_size and not task.get_model_attribute('accuracy'):
    raise ValueError(f"User specified batch size {config.batch_size}, " +
                     f"but model {task.name} runs with batch size {task_batch_size}. Please report a bug.")
return task
def load_model(config: TorchBenchModelConfig) -> BenchmarkModel:
"""Load and return a model instance in the same process. """
Model = load_model_by_name(config.name)
model_instance = Model(test=config.test, device=config.device, batch_size=config.batch_size, extra_args=config.extra_args)
# check name
if model_instance.name != config.name:
raise ValueError(f"Required model {config.name}, loaded {model_instance.name}.")
# check batch size if not measuring accuracy
if config.batch_size and config.batch_size != model_instance.batch_size and not model_instance.dargs.accuracy:
    raise ValueError(f"User specified batch size {config.batch_size}, " +
                     f"but model {model_instance.name} runs with batch size {model_instance.batch_size}. Please report a bug.")
_set_extra_env(config.extra_env)
return model_instance
def list_devices() -> List[str]:
"""Return a list of available devices."""
devices = ["cpu"]
import torch
if torch.cuda.is_available():
devices.append("cuda")
return devices
def list_tests() -> List[str]:
"""Return a list of available tests."""
return ["train", "eval"]
def list_models() -> List[str]:
"""Return a list of names of all TorchBench models"""
model_paths = _list_model_paths()
model_names = [os.path.basename(path) for path in model_paths]
return model_names
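# Illustrative usage (a minimal sketch): instantiate the first available model in a
# child process and read one of its attributes back through the ModelTask handle.
#
# config = TorchBenchModelConfig(name=list_models()[0], test="eval",
#                                device=list_devices()[-1], batch_size=None, extra_args=[])
# task = load_model_isolated(config)
# task.invoke()
# print(task.get_model_attribute("batch_size"))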
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 2
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="hf_BigBird", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|
import dataclasses
@dataclasses.dataclass
class DRQConfig:
env = "cartpole_swingup"
# IMPORTANT: if action_repeat is used the effective number of env steps needs to be
# multiplied by action_repeat in the result graphs.
# This is a common practice for a fair comparison.
# See the 2nd paragraph in Appendix C of SLAC: https://arxiv.org/pdf/1907.00953.pdf
# See Dreamer TF2's implementation: https://github.com/danijar/dreamer/blob/02f0210f5991c7710826ca7881f19c64a012290c/dreamer.py#L340
action_repeat = 4
# train
num_train_steps = 1
num_train_iters = 1
# num_seed_steps can't be zero
# and steps in train must be bigger than num_seed_steps
num_seed_steps = 1
replay_buffer_capacity = 100000
seed = 1
# eval
eval_frequency = 5000
# observation
image_size = 84
image_pad = 4
frame_stack = 3
# global params
lr = 1e-3
# IMPORTANT: please use a batch size of 512 to reproduce the results in the paper. However, it still works well with a smaller batch size.
batch_size = 128
# Agent configurations
discount = 0.99
init_temperature = 0.1
actor_update_frequency = 2
critic_tau = 0.01
critic_target_update_frequency = 2
# Actor configurations
hidden_dim = 1024
hidden_depth = 2
log_std_bounds = [-10, 2]
# Encoder configurations
feature_dim = 50
# obs data, relative to __file__
obs_path = "obs.pkl"
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import math
from . import drqutils
class Encoder(nn.Module):
"""Convolutional encoder for image-based observations."""
def __init__(self, obs_shape, feature_dim):
super().__init__()
assert len(obs_shape) == 3
self.num_layers = 4
self.num_filters = 32
self.output_dim = 35
self.output_logits = False
self.feature_dim = feature_dim
self.convs = nn.ModuleList([
nn.Conv2d(obs_shape[0], self.num_filters, 3, stride=2),
nn.Conv2d(self.num_filters, self.num_filters, 3, stride=1),
nn.Conv2d(self.num_filters, self.num_filters, 3, stride=1),
nn.Conv2d(self.num_filters, self.num_filters, 3, stride=1)
])
self.head = nn.Sequential(
nn.Linear(self.num_filters * 35 * 35, self.feature_dim),
nn.LayerNorm(self.feature_dim))
self.outputs = dict()
def forward_conv(self, obs):
obs = obs / 255.
self.outputs['obs'] = obs
conv = torch.relu(self.convs[0](obs))
self.outputs['conv1'] = conv
for i in range(1, self.num_layers):
conv = torch.relu(self.convs[i](conv))
self.outputs['conv%s' % (i + 1)] = conv
# Changed view to reshape here to support channels last input
# TODO: upstream this change to https://github.com/denisyarats/drq/blob/master/drq.py#L48
h = conv.reshape(conv.size(0), -1)
return h
def forward(self, obs, detach=False):
h = self.forward_conv(obs)
if detach:
h = h.detach()
out = self.head(h)
if not self.output_logits:
out = torch.tanh(out)
self.outputs['out'] = out
return out
def copy_conv_weights_from(self, source):
"""Tie convolutional layers"""
for i in range(self.num_layers):
drqutils.tie_weights(src=source.convs[i], trg=self.convs[i])
def log(self, logger, step):
pass
class Actor(nn.Module):
"""torch.distributions implementation of an diagonal Gaussian policy."""
def __init__(self, encoder_cfg, action_shape, hidden_dim, hidden_depth,
log_std_bounds):
super().__init__()
self.encoder = Encoder(*encoder_cfg)
self.log_std_bounds = log_std_bounds
self.trunk = drqutils.mlp(self.encoder.feature_dim, hidden_dim,
2 * action_shape[0], hidden_depth)
self.outputs = dict()
self.apply(drqutils.weight_init)
def forward(self, obs, detach_encoder=False):
obs = self.encoder(obs, detach=detach_encoder)
mu, log_std = self.trunk(obs).chunk(2, dim=-1)
# constrain log_std inside [log_std_min, log_std_max]
log_std = torch.tanh(log_std)
log_std_min, log_std_max = self.log_std_bounds
log_std = log_std_min + 0.5 * (log_std_max - log_std_min) * (log_std +
1)
std = log_std.exp()
self.outputs['mu'] = mu
self.outputs['std'] = std
dist = drqutils.SquashedNormal(mu, std)
return dist
def log(self, logger, step):
pass
class Critic(nn.Module):
"""Critic network, employes double Q-learning."""
def __init__(self, encoder_cfg, action_shape, hidden_dim, hidden_depth):
super().__init__()
self.encoder = Encoder(*encoder_cfg)
self.Q1 = drqutils.mlp(self.encoder.feature_dim + action_shape[0],
hidden_dim, 1, hidden_depth)
self.Q2 = drqutils.mlp(self.encoder.feature_dim + action_shape[0],
hidden_dim, 1, hidden_depth)
self.outputs = dict()
self.apply(drqutils.weight_init)
def forward(self, obs, action, detach_encoder=False):
assert obs.size(0) == action.size(0)
obs = self.encoder(obs, detach=detach_encoder)
obs_action = torch.cat([obs, action], dim=-1)
q1 = self.Q1(obs_action)
q2 = self.Q2(obs_action)
self.outputs['q1'] = q1
self.outputs['q2'] = q2
return q1, q2
def log(self, logger, step):
pass
class DRQAgent:
"""Data regularized Q: actor-critic method for learning from pixels."""
def __init__(self, cfg, device, obs_shape, action_shape, action_range):
self.action_range = action_range
self.device = torch.device(device)
self.discount = cfg.discount
self.critic_tau = cfg.critic_tau
self.actor_update_frequency = cfg.actor_update_frequency
self.critic_target_update_frequency = cfg.critic_target_update_frequency
self.batch_size = cfg.batch_size
encoder_cfg = (obs_shape, cfg.feature_dim)
self.actor = Actor(encoder_cfg=encoder_cfg,
action_shape=action_shape,
hidden_dim=cfg.hidden_dim,
hidden_depth=cfg.hidden_depth,
log_std_bounds=cfg.log_std_bounds).to(self.device)
self.critic = Critic(encoder_cfg=encoder_cfg,
action_shape=action_shape,
hidden_dim=cfg.hidden_dim,
hidden_depth=cfg.hidden_depth).to(self.device)
self.critic_target = Critic(encoder_cfg=encoder_cfg,
action_shape=action_shape,
hidden_dim=cfg.hidden_dim,
hidden_depth=cfg.hidden_depth).to(self.device)
self.critic_target.load_state_dict(self.critic.state_dict())
# tie conv layers between actor and critic
self.actor.encoder.copy_conv_weights_from(self.critic.encoder)
self.log_alpha = torch.tensor(np.log(cfg.init_temperature)).to(device)
self.log_alpha.requires_grad = True
# set target entropy to -|A|
self.target_entropy = -action_shape[0]
# optimizers
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(),
lr=cfg.lr)
self.log_alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=cfg.lr)
self.train()
self.critic_target.train()
def train(self, training=True):
self.training = training
self.actor.train(training)
self.critic.train(training)
@property
def alpha(self):
return self.log_alpha.exp()
def act(self, obs, sample=False):
obs = torch.FloatTensor(obs).to(self.device)
obs = obs.unsqueeze(0)
dist = self.actor(obs)
action = dist.sample() if sample else dist.mean
action = action.clamp(*self.action_range)
assert action.ndim == 2 and action.shape[0] == 1
return drqutils.to_np(action[0])
def update_critic(self, obs, obs_aug, action, reward, next_obs,
next_obs_aug, not_done, logger, step):
with torch.no_grad():
dist = self.actor(next_obs)
next_action = dist.rsample()
log_prob = dist.log_prob(next_action).sum(-1, keepdim=True)
target_Q1, target_Q2 = self.critic_target(next_obs, next_action)
target_V = torch.min(target_Q1,
target_Q2) - self.alpha.detach() * log_prob
target_Q = reward + (not_done * self.discount * target_V)
dist_aug = self.actor(next_obs_aug)
next_action_aug = dist_aug.rsample()
log_prob_aug = dist_aug.log_prob(next_action_aug).sum(-1,
keepdim=True)
target_Q1, target_Q2 = self.critic_target(next_obs_aug,
next_action_aug)
target_V = torch.min(
target_Q1, target_Q2) - self.alpha.detach() * log_prob_aug
target_Q_aug = reward + (not_done * self.discount * target_V)
target_Q = (target_Q + target_Q_aug) / 2
# get current Q estimates
current_Q1, current_Q2 = self.critic(obs, action)
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(
current_Q2, target_Q)
Q1_aug, Q2_aug = self.critic(obs_aug, action)
critic_loss += F.mse_loss(Q1_aug, target_Q) + F.mse_loss(
Q2_aug, target_Q)
# logger.log('train_critic/loss', critic_loss, step)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
self.critic.log(logger, step)
def update_actor_and_alpha(self, obs, logger, step):
# detach conv filters, so we don't update them with the actor loss
dist = self.actor(obs, detach_encoder=True)
action = dist.rsample()
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
# detach conv filters, so we don't update them with the actor loss
actor_Q1, actor_Q2 = self.critic(obs, action, detach_encoder=True)
actor_Q = torch.min(actor_Q1, actor_Q2)
actor_loss = (self.alpha.detach() * log_prob - actor_Q).mean()
# optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
self.actor.log(logger, step)
self.log_alpha_optimizer.zero_grad()
alpha_loss = (self.alpha *
(-log_prob - self.target_entropy).detach()).mean()
alpha_loss.backward()
self.log_alpha_optimizer.step()
def update(self, replay_buffer, logger, step):
obs, action, reward, next_obs, not_done, obs_aug, next_obs_aug = replay_buffer.sample(
self.batch_size)
self.update_critic(obs, obs_aug, action, reward, next_obs,
next_obs_aug, not_done, logger, step)
if step % self.actor_update_frequency == 0:
self.update_actor_and_alpha(obs, logger, step)
if step % self.critic_target_update_frequency == 0:
drqutils.soft_update_params(self.critic, self.critic_target,
self.critic_tau)
|
import math
import os
import random
from collections import deque
import numpy as np
import scipy.linalg as sp_la
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
from skimage.util.shape import view_as_windows
from torch import distributions as pyd
class eval_mode:
def __init__(self, *models):
self.models = models
def __enter__(self):
self.prev_states = []
for model in self.models:
self.prev_states.append(model.training)
model.train(False)
def __exit__(self, *args):
for model, state in zip(self.models, self.prev_states):
model.train(state)
return False
def soft_update_params(net, target_net, tau):
for param, target_param in zip(net.parameters(), target_net.parameters()):
target_param.data.copy_(tau * param.data +
(1 - tau) * target_param.data)
def set_seed_everywhere(seed):
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
def make_dir(*path_parts):
dir_path = os.path.join(*path_parts)
try:
os.mkdir(dir_path)
except OSError:
pass
return dir_path
def tie_weights(src, trg):
assert type(src) == type(trg)
trg.weight = src.weight
trg.bias = src.bias
def weight_init(m):
"""Custom weight init for Conv2D and Linear layers."""
if isinstance(m, nn.Linear):
nn.init.orthogonal_(m.weight.data)
if hasattr(m.bias, 'data'):
m.bias.data.fill_(0.0)
elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
gain = nn.init.calculate_gain('relu')
nn.init.orthogonal_(m.weight.data, gain)
if hasattr(m.bias, 'data'):
m.bias.data.fill_(0.0)
def mlp(input_dim, hidden_dim, output_dim, hidden_depth, output_mod=None):
if hidden_depth == 0:
mods = [nn.Linear(input_dim, output_dim)]
else:
mods = [nn.Linear(input_dim, hidden_dim), nn.ReLU(inplace=True)]
for i in range(hidden_depth - 1):
mods += [nn.Linear(hidden_dim, hidden_dim), nn.ReLU(inplace=True)]
mods.append(nn.Linear(hidden_dim, output_dim))
if output_mod is not None:
mods.append(output_mod)
trunk = nn.Sequential(*mods)
return trunk
def to_np(t):
if t is None:
return None
elif t.nelement() == 0:
return np.array([])
else:
return t.cpu().detach().numpy()
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
gym.Wrapper.__init__(self, env)
self._k = k
self._frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=1,
shape=((shp[0] * k,) + shp[1:]),
dtype=env.observation_space.dtype)
self._max_episode_steps = env._max_episode_steps
def reset(self):
obs = self.env.reset()
for _ in range(self._k):
self._frames.append(obs)
return self._get_obs()
def step(self, action):
obs, reward, done, info = self.env.step(action)
self._frames.append(obs)
return self._get_obs(), reward, done, info
def _get_obs(self):
assert len(self._frames) == self._k
return np.concatenate(list(self._frames), axis=0)
class TanhTransform(pyd.transforms.Transform):
domain = pyd.constraints.real
codomain = pyd.constraints.interval(-1.0, 1.0)
bijective = True
sign = +1
def __init__(self, cache_size=1):
super().__init__(cache_size=cache_size)
@staticmethod
def atanh(x):
return 0.5 * (x.log1p() - (-x).log1p())
def __eq__(self, other):
return isinstance(other, TanhTransform)
def _call(self, x):
return x.tanh()
def _inverse(self, y):
# We do not clamp to the boundary here as it may degrade the performance of certain algorithms.
# One should use `cache_size=1` instead.
return self.atanh(y)
def log_abs_det_jacobian(self, x, y):
# We use a formula that is more numerically stable, see details in the following link
# https://github.com/tensorflow/probability/commit/ef6bb176e0ebd1cf6e25c6b5cecdd2428c22963f#diff-e120f70e92e6741bca649f04fcd907b7
return 2. * (math.log(2.) - x - F.softplus(-2. * x))
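# Derivation of the identity used above: for y = tanh(x),
#   log|dy/dx| = log(1 - tanh(x)^2)
#              = log(4 * exp(-2x) / (1 + exp(-2x))^2)
#              = log(4) - 2x - 2 * log(1 + exp(-2x))
#              = 2 * (log(2) - x - softplus(-2x)),
# which avoids computing 1 - tanh(x)^2 directly and stays finite for large |x|.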
class SquashedNormal(pyd.transformed_distribution.TransformedDistribution):
def __init__(self, loc, scale):
self.loc = loc
self.scale = scale
self.base_dist = pyd.Normal(loc, scale)
transforms = [TanhTransform()]
super().__init__(self.base_dist, transforms)
@property
def mean(self):
mu = self.loc
for tr in self.transforms:
mu = tr(mu)
return mu
|
import copy
import math
import pickle as pkl
import numpy as np
import torch
import os
import sys
import torch.nn as nn
from typing import Tuple
import torch.nn.functional as F
from gym import spaces
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import REINFORCEMENT_LEARNING
from .drqutils import FrameStack, set_seed_everywhere, eval_mode
from .drq import DRQAgent
from .config import DRQConfig
from .replay_buffer import ReplayBuffer
class MockEnv:
def __init__(self, obs):
self._norm_action_space = spaces.Box(
low=-1.0,
high=1.0,
shape=[1],
dtype=np.float32)
self._observation_space = spaces.Box(
low=0,
high=255,
shape=[9, 84, 84],
dtype=np.uint8
)
self.obs = obs
self._max_episode_steps = 250
self.metadata = {'render.modes': []}
self.reward_range = (-float('inf'), float('inf'))
def step(self, action):
reward = 0.0
done = False
info_state = [0.016243, 3.1355, -0.0052817, -0.01073]
info = dict()
info["internal_state"] = info_state
info["discount"] = 1.0
return (self.obs, reward, done, info)
def seed(self, seed=None):
self._norm_action_space.seed(seed)
self._observation_space.seed(seed)
def reset(self):
return self.obs
@property
def observation_space(self):
return self._observation_space
@property
def action_space(self):
return self._norm_action_space
def make_env(cfg):
if cfg.env == 'ball_in_cup_catch':
domain_name = 'ball_in_cup'
task_name = 'catch'
elif cfg.env == 'point_mass_easy':
domain_name = 'point_mass'
task_name = 'easy'
else:
domain_name = cfg.env.split('_')[0]
task_name = '_'.join(cfg.env.split('_')[1:])
# per dreamer: https://github.com/danijar/dreamer/blob/02f0210f5991c7710826ca7881f19c64a012290c/wrappers.py#L26
camera_id = 2 if domain_name == 'quadruped' else 0
current_dir = os.path.dirname(os.path.realpath(__file__))
mockobs = pkl.load(open(os.path.join(current_dir, cfg.obs_path), "rb"))
low = np.amin(mockobs)
high = np.amax(mockobs)
mockobs = np.random.randint(low=11, high=228, size=mockobs.shape, dtype=np.uint8)
env = MockEnv(mockobs)
env = FrameStack(env, k=cfg.frame_stack)
env.seed(cfg.seed)
assert env.action_space.low.min() >= -1
assert env.action_space.high.max() <= 1
return env
class Model(BenchmarkModel):
task = REINFORCEMENT_LEARNING.OTHER_RL
# Batch size is not adjustable in this model
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
ALLOW_CUSTOMIZE_BSIZE = False
CANNOT_SET_CUSTOM_OPTIMIZER = True
# this model will cause infinite loop if deep-copied
DEEPCOPY = False
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.cfg = DRQConfig()
set_seed_everywhere(self.cfg.seed)
self.env = make_env(self.cfg)
obs_shape = self.env.observation_space.shape
action_shape = self.env.action_space.shape
action_range = [
float(self.env.action_space.low.min()),
float(self.env.action_space.high.max())
]
self.agent = DRQAgent(self.cfg, self.device, obs_shape, action_shape, action_range)
self.replay_buffer = ReplayBuffer(self.env.observation_space.shape,
self.env.action_space.shape,
self.cfg.replay_buffer_capacity,
self.cfg.image_pad, self.device)
self.step = 0
def get_module(self):
obs = self.env.reset()
obs = torch.FloatTensor(obs).to(self.device)
obs = obs.unsqueeze(0)
return self.agent.actor, (obs, )
def set_module(self, new_model):
self.agent.actor = new_model
def train(self):
episode, episode_reward, episode_step, done = 0, 0, 1, True
if True:
obs = self.env.reset()
done = False
episode_reward = 0
episode_step = 0
episode += 1
if self.step < self.cfg.num_seed_steps:
action = self.env.action_space.sample()
else:
with eval_mode(self.agent):
action = self.agent.act(obs, sample=True)
# run training update
if self.step >= self.cfg.num_seed_steps:
for _ in range(self.cfg.num_train_iters):
self.agent.update(self.replay_buffer, None,
self.step)
next_obs, reward, done, info = self.env.step(action)
# allow infinite bootstrap
done = float(done)
done_no_max = 0 if episode_step + 1 == self.env._max_episode_steps else done
episode_reward += reward
self.replay_buffer.add(obs, action, reward, next_obs, done,
done_no_max)
obs = next_obs
episode_step += 1
self.step += 1
def eval(self) -> Tuple[torch.Tensor]:
average_episode_reward = 0
steps = 0
if True:
obs = self.env.reset()
episode_reward = 0
episode_step = 0
with eval_mode(self.agent):
action = self.agent.act(obs, sample=False)
obs, reward, done, info = self.env.step(action)
episode_reward += reward
episode_step += 1
average_episode_reward += episode_reward
steps += 1
average_episode_reward /= float(steps)
return (torch.Tensor(action), )
|
import numpy as np
import kornia
import torch
import torch.nn as nn
import torch.nn.functional as F
class ReplayBuffer:
"""Buffer to store environment transitions."""
def __init__(self, obs_shape, action_shape, capacity, image_pad, device):
self.capacity = capacity
self.device = device
self.aug_trans = nn.Sequential(
nn.ReplicationPad2d(image_pad),
kornia.augmentation.RandomCrop((obs_shape[-1], obs_shape[-1])))
self.obses = np.empty((capacity, *obs_shape), dtype=np.uint8)
self.next_obses = np.empty((capacity, *obs_shape), dtype=np.uint8)
self.actions = np.empty((capacity, *action_shape), dtype=np.float32)
self.rewards = np.empty((capacity, 1), dtype=np.float32)
self.not_dones = np.empty((capacity, 1), dtype=np.float32)
self.not_dones_no_max = np.empty((capacity, 1), dtype=np.float32)
self.idx = 0
self.full = False
def __len__(self):
return self.capacity if self.full else self.idx
def add(self, obs, action, reward, next_obs, done, done_no_max):
np.copyto(self.obses[self.idx], obs)
np.copyto(self.actions[self.idx], action)
np.copyto(self.rewards[self.idx], reward)
np.copyto(self.next_obses[self.idx], next_obs)
np.copyto(self.not_dones[self.idx], not done)
np.copyto(self.not_dones_no_max[self.idx], not done_no_max)
self.idx = (self.idx + 1) % self.capacity
self.full = self.full or self.idx == 0
def sample(self, batch_size):
max_idx = self.capacity if self.full else self.idx
idxs = np.random.randint(0, max_idx, size=batch_size)
obses = self.obses[idxs]
next_obses = self.next_obses[idxs]
obses_aug = obses.copy()
next_obses_aug = next_obses.copy()
obses = torch.as_tensor(obses, device=self.device).float()
next_obses = torch.as_tensor(next_obses, device=self.device).float()
obses_aug = torch.as_tensor(obses_aug, device=self.device).float()
next_obses_aug = torch.as_tensor(next_obses_aug,
device=self.device).float()
actions = torch.as_tensor(self.actions[idxs], device=self.device)
rewards = torch.as_tensor(self.rewards[idxs], device=self.device)
not_dones_no_max = torch.as_tensor(self.not_dones_no_max[idxs],
device=self.device)
obses = self.aug_trans(obses)
next_obses = self.aug_trans(next_obses)
obses_aug = self.aug_trans(obses_aug)
next_obses_aug = self.aug_trans(next_obses_aug)
return obses, actions, rewards, next_obses, not_dones_no_max, obses_aug, next_obses_aug
|
import subprocess
import sys
from utils import s3_utils
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
s3_utils.checkout_s3_data("MODEL_PKLS", "drq/obs.pkl", decompress=False)
|
from torchbenchmark.util.framework.timm.model_factory import TimmModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(TimmModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, model_name='regnety_120', device=device,
batch_size=batch_size, extra_args=extra_args)
|
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel
from torchbenchmark.tasks import COMPUTER_VISION
import torchvision.models as models
class Model(TorchVisionModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(model_name="resnet50", test=test, device=device,
batch_size=batch_size, weights=models.ResNet50_Weights.IMAGENET1K_V1,
extra_args=extra_args)
|
# Generated by gen_torchvision_benchmark.py
import torch
import torch.optim as optim
import torchvision.models as models
from torch.quantization import quantize_fx
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
from typing import Tuple
class Model(BenchmarkModel):
task = COMPUTER_VISION.CLASSIFICATION
# Train batch size: 32
# Source: https://openreview.net/pdf?id=B1Yy1BxCZ
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, batch_size=None, extra_args=[]):
if test == "eval" and device != "cpu":
raise NotImplementedError("The eval test only supports CPU.")
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.model = models.resnet50().to(self.device)
self.example_inputs = (torch.randn((self.batch_size, 3, 224, 224)).to(self.device),)
self.prep_qat_train()
if self.test == "eval":
self.prep_qat_eval()
self.optimizer = None
def prep_qat_train(self):
qconfig_dict = {"": torch.quantization.get_default_qat_qconfig('fbgemm')}
self.model.train()
self.model = quantize_fx.prepare_qat_fx(self.model, qconfig_dict, self.example_inputs)
def get_module(self):
return self.model, self.example_inputs
def prep_qat_eval(self):
self.model = quantize_fx.convert_fx(self.model)
self.model.eval()
def train(self):
if self.get_optimizer() is None:
self.set_optimizer(optim.Adam(self.model.parameters()))
loss = torch.nn.CrossEntropyLoss()
self.optimizer.zero_grad()
pred = self.model(*self.example_inputs)
y = torch.empty(pred.shape[0], dtype=torch.long, device=self.device).random_(pred.shape[1])
loss(pred, y).backward()
self.optimizer.step()
def eval(self) -> Tuple[torch.Tensor]:
model = self.model
example_inputs = self.example_inputs
example_inputs = example_inputs[0][0].unsqueeze(0)
out = model(example_inputs)
return (out, )
|
import os
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
class Model(Detectron2Model):
task = COMPUTER_VISION.DETECTION
model_file = os.path.join(MODEL_DIR, ".data", f"{MODEL_NAME}.pkl")
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(variant="COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml", test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
|
import os
from torchbenchmark.util.framework.detectron2 import install_detectron2
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
if __name__ == '__main__':
install_detectron2(MODEL_NAME, MODEL_DIR)
|
"""
https://github.com/phlippe/uvadlc_notebooks_benchmarking/blob/main/PyTorch/Tutorial5_Inception_ResNet_DenseNet.py
"""
from types import SimpleNamespace
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.model import BenchmarkModel
import torch.nn as nn
import torch
import torch.optim as optim
import torch.utils.data as data
class ResNetBlock(nn.Module):
def __init__(self, c_in, act_fn, subsample=False, c_out=-1):
"""
Inputs:
c_in - Number of input features
act_fn - Activation class constructor (e.g. nn.ReLU)
subsample - If True, we want to apply a stride inside the block and reduce the output shape by 2 in height and width
c_out - Number of output features. Note that this is only relevant if subsample is True, as otherwise, c_out = c_in
"""
super().__init__()
if not subsample:
c_out = c_in
# Network representing F
self.net = nn.Sequential(
# No bias needed as the Batch Norm handles it
nn.Conv2d(c_in, c_out, kernel_size=3, padding=1,
stride=1 if not subsample else 2, bias=False),
nn.BatchNorm2d(c_out),
act_fn(),
nn.Conv2d(c_out, c_out, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(c_out)
)
# A 1x1 convolution with stride 2 means we take the upper-left value and transform it to the new output size
self.downsample = nn.Conv2d(
c_in, c_out, kernel_size=1, stride=2) if subsample else None
self.act_fn = act_fn()
def forward(self, x):
z = self.net(x)
if self.downsample is not None:
x = self.downsample(x)
out = z + x
out = self.act_fn(out)
return out
class ResNetModel(nn.Module):
def __init__(self, num_classes=10, num_blocks=[3, 3, 3], c_hidden=[16, 32, 64], act_fn_name="relu", **kwargs):
"""
Inputs:
num_classes - Number of classification outputs (10 for CIFAR10)
num_blocks - List with the number of ResNet blocks to use. The first block of each group uses downsampling, except for the very first group.
c_hidden - List with the hidden dimensionalities in the different blocks. Usually multiplied by 2 the deeper we go.
act_fn_name - Name of the activation function to use, looked up in "act_fn_by_name"
block_name - Name of the ResNet block, looked up in "resnet_blocks_by_name"
"""
super().__init__()
act_fn_by_name = {
"tanh": nn.Tanh,
"relu": nn.ReLU,
"leakyrelu": nn.LeakyReLU,
"gelu": nn.GELU
}
self.hparams = SimpleNamespace(num_classes=num_classes,
c_hidden=c_hidden,
num_blocks=num_blocks,
act_fn_name=act_fn_name,
act_fn=act_fn_by_name[act_fn_name],
block_class=ResNetBlock)
self._create_network()
self._init_params()
def _create_network(self):
c_hidden = self.hparams.c_hidden
self.input_net = nn.Sequential(
nn.Conv2d(3, c_hidden[0], kernel_size=3,
padding=1, bias=False),
nn.BatchNorm2d(c_hidden[0]),
self.hparams.act_fn()
)
# Creating the ResNet blocks
blocks = []
for block_idx, block_count in enumerate(self.hparams.num_blocks):
for bc in range(block_count):
# Subsample the first block of each group, except the very first one.
subsample = (bc == 0 and block_idx > 0)
blocks.append(
self.hparams.block_class(c_in=c_hidden[block_idx if not subsample else (block_idx - 1)],
act_fn=self.hparams.act_fn,
subsample=subsample,
c_out=c_hidden[block_idx])
)
self.blocks = nn.Sequential(*blocks)
# Mapping to classification output
self.output_net = nn.Sequential(
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(),
nn.Linear(c_hidden[-1], self.hparams.num_classes)
)
def _init_params(self):
# Based on our discussion in Tutorial 4, we should initialize the convolutions according to the activation function
# Fan-out focuses on the gradient distribution, and is commonly used in ResNets
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity=self.hparams.act_fn_name)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.input_net(x)
x = self.blocks(x)
x = self.output_net(x)
return x
class Model(BenchmarkModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 128
DEFAULT_EVAL_BSIZE = 128
def __init__(self, test, device, batch_size=DEFAULT_TRAIN_BSIZE, extra_args=[]):
super().__init__(test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
self.model = ResNetModel()
self.model.to(device)
self.example_inputs = (
torch.randn((self.batch_size, 3, 32, 32), device=self.device),
)
self.example_target = torch.randint(0, 10, (self.batch_size,), device=self.device)
dataset = data.TensorDataset(self.example_inputs[0], self.example_target)
self.optimizer = optim.SGD(self.model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
self.criterion = nn.CrossEntropyLoss()
(self.images, ) = self.example_inputs
def get_module(self):
return self.model, self.example_inputs
def train(self):
self.model.train()
targets = self.example_target
output = self.model(self.images)
loss = self.criterion(output, targets)
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
def eval(self):
self.model.eval()
with torch.no_grad():
out = self.model(self.images)
return (out,)
|
import torch
import os
import itertools
import random
import itertools
from pathlib import Path
from typing import Tuple
from detectron2.checkpoint import DetectionCheckpointer
# TorchBench imports
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
# setup environment variable
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
DATA_DIR = os.path.join(CURRENT_DIR.parent.parent, "data", ".data", "coco2017-minimal")
assert os.path.exists(DATA_DIR), "Couldn't find coco2017 minimal data dir, please run install.py again."
if not 'DETECTRON2_DATASETS' in os.environ:
os.environ['DETECTRON2_DATASETS'] = DATA_DIR
from detectron2.config import instantiate
from detectron2 import model_zoo
from detectron2.utils.events import EventStorage
from torch.utils._pytree import tree_map
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
def prefetch(dataloader, device, precision="fp32"):
r = []
dtype = torch.float16 if precision == "fp16" else torch.float32
for batch in dataloader:
r.append(tree_map(lambda x: x.to(device, dtype=dtype) if isinstance(x, torch.Tensor) else x, batch))
return r
class Model(BenchmarkModel):
task = COMPUTER_VISION.DETECTION
model_file = os.path.join(MODEL_DIR, ".data", f"{MODEL_NAME}.pkl")
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
# Skip correctness check, because the output tensor can't be verified using
# cosine similarity or torch.allclose()
SKIP_CORRECTNESS_CHECK = True
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
model_cfg = model_zoo.get_config("common/models/mask_rcnn_fpn.py").model
data_cfg = model_zoo.get_config("common/data/coco.py").dataloader
if test == "train":
# use a mini dataset
data_cfg.train.dataset.names = "coco_2017_val_100"
data_cfg.train.total_batch_size = self.batch_size
self.model = instantiate(model_cfg).to(self.device)
train_loader = instantiate(data_cfg.train)
self.example_inputs = prefetch(itertools.islice(train_loader, 100), self.device)
self.optimizer = torch.optim.SGD(self.model.parameters(), 0.)
elif test == "eval":
data_cfg.test.dataset.names = "coco_2017_val_100"
data_cfg.test.batch_size = self.batch_size
self.model = instantiate(model_cfg).to(self.device)
# load model from checkpoint
DetectionCheckpointer(self.model).load(self.model_file)
self.model.eval()
test_loader = instantiate(data_cfg.test)
self.example_inputs = prefetch(itertools.islice(test_loader, 100), self.device)
self.NUM_BATCHES = len(self.example_inputs)
def get_module(self):
return self.model, (self.example_inputs[0], )
def train(self):
self.model.train()
with EventStorage():
for idx in range(self.NUM_BATCHES):
losses = self.model(self.example_inputs[idx])
loss = sum(losses.values())
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
def eval(self) -> Tuple[torch.Tensor]:
self.model.eval()
with torch.no_grad():
for idx in range(self.NUM_BATCHES):
out = self.model(self.example_inputs[idx])
# retrieve output tensors
outputs = []
for item in out:
fields = list(map(lambda x: list(x.get_fields().values()), item.values()))
for boxes in fields:
tensor_box = list(filter(lambda x: isinstance(x, torch.Tensor), boxes))
outputs.extend(tensor_box)
return tuple(outputs)
|
import os
from torchbenchmark.util.framework.detectron2 import install_detectron2
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
if __name__ == '__main__':
install_detectron2(MODEL_NAME, MODEL_DIR)
|
"""General-purpose training script for image-to-image translation.
This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and
different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization).
You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model').
It first creates the model, dataset, and visualizer given the options.
It then does standard network training. During training, it also visualizes/saves the images, prints/saves the loss plot, and saves models.
The script supports continue/resume training. Use '--continue_train' to resume your previous training.
Example:
Train a CycleGAN model:
python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Train a pix2pix model:
python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/train_options.py for more training options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import time
from .options.train_options import TrainOptions
from .data import create_dataset
from .models import create_model
import torch
from torch.utils._pytree import tree_map
from .util.visualizer import Visualizer
def prefetch_device(example_inputs, device):
if isinstance(example_inputs, torch.Tensor):
return example_inputs.to(device=device)
elif isinstance(example_inputs, (tuple, list, dict)):
return tree_map(lambda x: prefetch_device(x, device), example_inputs)
elif isinstance(example_inputs, (str, int, float)):
return example_inputs
assert False, f"Unsupported data type: {type(example_inputs)}"
def prepare_training_loop(args):
new_dataset = []
opt = TrainOptions().parse(args) # get training options
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
# prefetch the dataset to the device
for data in dataset:
new_dataset.append(prefetch_device(data, opt.tb_device))
dataset = new_dataset
dataset_size = len(dataset) # get the number of images in the dataset.
model = create_model(opt) # create a model given opt.model and other options
model.setup(opt) # regular setup: load and print networks; create schedulers
visualizer = Visualizer(opt) # create a visualizer that display/save images and plots
def training_loop(niteration):
total_iters = 0 # the total number of training iterations
if niteration is None:
niteration = opt.n_epochs + opt.n_epochs_decay + 1
for epoch in range(opt.epoch_count, niteration): # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
epoch_start_time = time.time() # timer for entire epoch
iter_data_time = time.time() # timer for data loading per iteration
epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch
visualizer.reset() # reset the visualizer: make sure it saves the results to HTML at least once every epoch
for i, data in enumerate(dataset): # inner loop within one epoch
iter_start_time = time.time() # timer for computation per iteration
if total_iters % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
total_iters += opt.batch_size
epoch_iter += opt.batch_size
model.set_input(data) # unpack data from dataset and apply preprocessing
model.optimize_parameters() # calculate loss functions, get gradients, update network weights
if total_iters % opt.display_freq == 0: # display images on visdom and save images to a HTML file
save_result = total_iters % opt.update_html_freq == 0
model.compute_visuals()
visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
if total_iters % opt.print_freq == 0: # print training losses and save logging information to the disk
losses = model.get_current_losses()
t_comp = (time.time() - iter_start_time) / opt.batch_size
visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
if opt.display_id > 0:
visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
if total_iters % opt.save_latest_freq == 0: # cache our latest model every <save_latest_freq> iterations
save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
model.save_networks(save_suffix)
# only run 1 batch in torchbench
break
iter_data_time = time.time()
# only run 1 iter in torchbench
break
model.update_learning_rate() # update learning rates at the end of every epoch.
return training_loop
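# Example usage (a sketch; the argument string mirrors the one assembled by the
# benchmark harness in __init__.py, and '--tb_device' is the benchmark-only flag
# added in TrainOptions):
#   training_loop = prepare_training_loop(
#       ("--tb_device cuda --dataroot ./datasets/horse2zebra --name horse2zebra "
#        "--model cycle_gan --display_id 0 --n_epochs 3 --n_epochs_decay 3").split())
#   training_loop(None)  # iterates the epoch loop, running a single batch per epoch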
|
#!/usr/bin/env python
import torch
import os
from pathlib import Path
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
from typing import Tuple
from torchbenchmark import DATA_PATH
from .train_cyclegan import prepare_training_loop
from .test_cyclegan import get_model
def _create_data_dir(suffix):
data_dir = Path(__file__).parent.joinpath(".data", suffix)
data_dir.mkdir(parents=True, exist_ok=True)
return data_dir
class Model(BenchmarkModel):
task = COMPUTER_VISION.GENERATION
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
ALLOW_CUSTOMIZE_BSIZE = False
# TODO: Customizing the optimizer is nontrivial, perhaps a next step.
CANNOT_SET_CUSTOM_OPTIMIZER = True
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
checkpoints_dir = _create_data_dir("checkpoints")
results_dir = _create_data_dir("results")
checkpoints_arg = f"--checkpoints_dir {checkpoints_dir}"
results_arg = f"--results_dir {results_dir}"
data_root = os.path.join(DATA_PATH, "pytorch_CycleGAN_and_pix2pix_inputs")
device_arg = ""
if self.device == "cpu":
device_arg = "--gpu_ids -1"
elif self.device == "cuda":
device_arg = "--gpu_ids 0"
if self.test == "train":
train_args = f"--tb_device {self.device} --dataroot {data_root}/datasets/horse2zebra --name horse2zebra --model cycle_gan --display_id 0 --n_epochs 3 " + \
f"--n_epochs_decay 3 {device_arg} {checkpoints_arg}"
self.training_loop = prepare_training_loop(train_args.split(' '))
args = f"--dataroot {data_root}/datasets/horse2zebra/testA --name horse2zebra_pretrained --model test " + \
f"--no_dropout {device_arg} {checkpoints_arg} {results_arg}"
self.model, self.input = get_model(args, self.device)
def get_module(self):
return self.model, self.input
def set_train(self):
# another model instance is used for training
# and the train mode is on by default
pass
def train(self):
# the training process is not patched to use scripted models
# training_loop has its own count logic inside. It actually runs 7 epochs
# (with each 'epoch' being limited to a small set of data)
        # it would be more consistent with the rest of torchbenchmark if it ran just one step
        # rather than 7 epochs, but changing it now would potentially cause
        # discontinuity with existing/historical measurements
self.training_loop(None)
def eval(self) -> Tuple[torch.Tensor]:
model, example_inputs = self.get_module()
out = model(*example_inputs)
return (out, )
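# Example (a sketch of how this benchmark model might be driven by the harness;
# the 'test' and 'device' values are illustrative):
#   m = Model(test="eval", device="cuda")
#   module, example_inputs = m.get_module()
#   (out,) = m.eval()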
|
"""General-purpose test script for image-to-image translation.
Once you have trained your model with train.py, you can use this script to test the model.
It will load a saved model from '--checkpoints_dir' and save the results to '--results_dir'.
It first creates the model and dataset given the options. It will hard-code some parameters.
It then runs inference for '--num_test' images and saves the results to an HTML file.
Example (You need to train models first or download pre-trained models from our website):
Test a CycleGAN model (both sides):
python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Test a CycleGAN model (one side only):
python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout
The option '--model test' is used for generating CycleGAN results only for one side.
This option will automatically set '--dataset_mode single', which only loads the images from one set.
In contrast, using '--model cycle_gan' requires loading and generating results in both directions,
which is sometimes unnecessary. The results will be saved at ./results/.
Use '--results_dir <directory_path_to_save_result>' to specify the results directory.
Test a pix2pix model:
python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/test_options.py for more test options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import os
from .options.test_options import TestOptions
from .data import create_dataset
from .models import create_model
from .util.visualizer import save_images
from .util import html
import torch
from pathlib import Path
def get_model(args, device):
opt = TestOptions().parse(args.split(' '))
    opt.num_threads = 0  # test code only supports num_threads = 0
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file.
model = create_model(opt) # create a model given opt.model and other options
if len(opt.gpu_ids) > 0:
        # When len(opt.gpu_ids) > 0, netG is wrapped in torch.nn.DataParallel
model = model.netG.module
else:
model = model.netG
root = str(Path(__file__).parent)
data = torch.load(f'{root}/example_input.pt')
input = data['A'].to(device)
return model, (input,)
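# Example (a sketch; the argument string mirrors the one assembled by the benchmark
# harness, and the dataroot/name values are illustrative):
#   model, example_inputs = get_model(
#       "--dataroot ./datasets/horse2zebra/testA --name horse2zebra_pretrained "
#       "--model test --no_dropout", device="cuda")
#   fake = model(*example_inputs)  # netG applied to the saved example input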
if __name__ == '__main__':
opt = TestOptions().parse() # get test options
# hard-code some parameters for test
    opt.num_threads = 0  # test code only supports num_threads = 0
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file.
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
model = create_model(opt) # create a model given opt.model and other options
# model.setup(opt) # regular setup: load and print networks; create schedulers
# create a website
web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(opt.phase, opt.epoch)) # define the website directory
if opt.load_iter > 0: # load_iter is 0 by default
web_dir = '{:s}_iter{:d}'.format(web_dir, opt.load_iter)
print('creating web directory', web_dir)
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
# test with eval mode. This only affects layers like batchnorm and dropout.
# For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment it with and without eval() mode.
# For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
if opt.eval:
model.eval()
for i, data in enumerate(dataset):
if i >= opt.num_test: # only apply our model to opt.num_test images.
break
model.set_input(data) # unpack data from data loader
torch.save(data, 'example_input.pt')
model.test() # run inference
visuals = model.get_current_visuals() # get image results
img_path = model.get_image_paths() # get image paths
if i % 5 == 0: # save images to an HTML file
print('processing (%04d)-th image... %s' % (i, img_path))
save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)
webpage.save() # save the HTML
|
import subprocess
import sys
from utils import s3_utils
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
s3_utils.checkout_s3_data("INPUT_TARBALLS", "pytorch_CycleGAN_and_pix2pix_inputs.tar.gz", decompress=True)
pip_install_requirements()
|
from .base_options import BaseOptions
class TestOptions(BaseOptions):
"""This class includes test options.
It also includes shared options defined in BaseOptions.
"""
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser) # define shared options
parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        # Dropout and Batchnorm have different behavior during training and test.
parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')
        # rewrite default values
parser.set_defaults(model='test')
# To avoid cropping, the load_size should be the same as crop_size
parser.set_defaults(load_size=parser.get_default('crop_size'))
self.isTrain = False
return parser
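# Example (a sketch; the dataroot path is illustrative):
#   opt = TestOptions().parse("--dataroot ./datasets/horse2zebra/testA --model test --no_dropout".split())
#   # opt.isTrain is False, and '--model test' switches the default dataset_mode to 'single'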
|
from .base_options import BaseOptions
class TrainOptions(BaseOptions):
"""This class includes training options.
It also includes shared options defined in BaseOptions.
"""
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser)
# add torchbench options
parser.add_argument('--tb_device', type=str, required=True, help="TorchBench device")
# visdom and HTML visualization parameters
parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
# network saving and loading parameters
parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration')
parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
# training parameters
parser.add_argument('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')
parser.add_argument('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero')
parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
self.isTrain = True
return parser
|
"""This package options includes option modules: training options, test options, and basic options (used in both training and test)."""
|
import argparse
import os
from ..util import util
import torch
from ..models import get_option_setter
from ..data import get_option_setter as get_option_setter_data
class BaseOptions():
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
"""
def __init__(self):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
# basic parameters
parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=0, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
self.initialized = True
return parser
def gather_options(self, args=None):
"""Initialize our parser with basic options(only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
"""
if not self.initialized: # check if it has been initialized
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args(args)
# modify model-related parser options
model_name = opt.model
model_option_setter = get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args(args) # parse again with new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = get_option_setter_data(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
# save and return the parser
self.parser = parser
return parser.parse_args(args)
def print_options(self, opt):
"""Print and save options
        It will print both current options and default values (if different).
        It will save options into a text file: [checkpoints_dir] / opt.txt
"""
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self, args=None):
"""Parse our options, create checkpoints directory suffix, and set up gpu device."""
opt = self.gather_options(args)
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
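# Note on the gpu_ids handling in parse() above (restating the code, not new behavior):
# parsing '--gpu_ids -1' yields opt.gpu_ids == [] (CPU), while '--gpu_ids 0,1'
# yields opt.gpu_ids == [0, 1] and additionally calls torch.cuda.set_device(0).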
|
import random
import torch
class ImagePool():
"""This class implements an image buffer that stores previously generated images.
This buffer enables us to update discriminators using a history of generated images
rather than the ones produced by the latest generators.
"""
def __init__(self, pool_size):
"""Initialize the ImagePool class
Parameters:
pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created
"""
self.pool_size = pool_size
if self.pool_size > 0: # create an empty pool
self.num_imgs = 0
self.images = []
def query(self, images):
"""Return an image from the pool.
Parameters:
images: the latest generated images from the generator
Returns images from the buffer.
        With probability 0.5, the buffer will return the input images.
        With probability 0.5, the buffer will return images previously stored in the buffer,
        and insert the current images into the buffer.
"""
if self.pool_size == 0: # if the buffer size is 0, do nothing
return images
return_images = []
for image in images:
image = torch.unsqueeze(image.data, 0)
if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer
self.num_imgs = self.num_imgs + 1
self.images.append(image)
return_images.append(image)
else:
p = random.uniform(0, 1)
if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer
random_id = random.randint(0, self.pool_size - 1) # randint is inclusive
tmp = self.images[random_id].clone()
self.images[random_id] = image
return_images.append(tmp)
else: # by another 50% chance, the buffer will return the current image
return_images.append(image)
return_images = torch.cat(return_images, 0) # collect all the images and return
return return_images
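# Example (a sketch; the tensor shape is illustrative):
#   pool = ImagePool(pool_size=50)
#   fake = torch.randn(1, 3, 256, 256)
#   mixed = pool.query(fake)  # same shape as the input; may contain older generated images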
|
"""This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
def tensor2im(input_image, imtype=np.uint8):
""""Converts a Tensor array into a numpy image array.
Parameters:
input_image (tensor) -- the input image tensor array
imtype (type) -- the desired type of the converted numpy array
"""
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array
if image_numpy.shape[0] == 1: # grayscale to RGB
image_numpy = np.tile(image_numpy, (3, 1, 1))
        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
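# Example (a sketch): a generator output in [-1, 1] of shape (1, 3, 256, 256)
# becomes an H x W x 3 uint8 array in [0, 255]:
#   img = tensor2im(torch.tanh(torch.randn(1, 3, 256, 256)))  # img.shape == (256, 256, 3)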
def diagnose_network(net, name='network'):
"""Calculate and print the mean of average absolute(gradients)
Parameters:
net (torch network) -- Torch network
name (str) -- the name of the network
"""
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path, aspect_ratio=1.0):
"""Save a numpy image to the disk
Parameters:
image_numpy (numpy array) -- input numpy array
image_path (str) -- the path of the image
"""
image_pil = Image.fromarray(image_numpy)
h, w, _ = image_numpy.shape
if aspect_ratio > 1.0:
image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
if aspect_ratio < 1.0:
image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
"""Print the mean, min, max, median, std, and size of a numpy array
Parameters:
val (bool) -- if print the values of the numpy array
shp (bool) -- if print the shape of the numpy array
"""
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
"""create empty directories if they don't exist
Parameters:
paths (str list) -- a list of directory paths
"""
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
"""create a single empty directory if it didn't exist
Parameters:
path (str) -- a single directory path
"""
if not os.path.exists(path):
os.makedirs(path)
|
import dominate
from dominate.tags import meta, h3, table, tr, td, p, a, img, br
import os
class HTML:
"""This HTML class allows us to save images and write texts into a single HTML file.
It consists of functions such as <add_header> (add a text header to the HTML file),
<add_images> (add a row of images to the HTML file), and <save> (save the HTML to the disk).
    It is based on 'dominate', a Python library for creating and manipulating HTML documents using a DOM API.
"""
def __init__(self, web_dir, title, refresh=0):
"""Initialize the HTML classes
Parameters:
            web_dir (str) -- a directory that stores the webpage. HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir>/images/
title (str) -- the webpage name
            refresh (int) -- how often the website refreshes itself; if 0, no refreshing
"""
self.title = title
self.web_dir = web_dir
self.img_dir = os.path.join(self.web_dir, 'images')
if not os.path.exists(self.web_dir):
os.makedirs(self.web_dir)
if not os.path.exists(self.img_dir):
os.makedirs(self.img_dir)
self.doc = dominate.document(title=title)
if refresh > 0:
with self.doc.head:
meta(http_equiv="refresh", content=str(refresh))
def get_image_dir(self):
"""Return the directory that stores images"""
return self.img_dir
def add_header(self, text):
"""Insert a header to the HTML file
Parameters:
text (str) -- the header text
"""
with self.doc:
h3(text)
def add_images(self, ims, txts, links, width=400):
"""add images to the HTML file
Parameters:
ims (str list) -- a list of image paths
txts (str list) -- a list of image names shown on the website
links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page
"""
self.t = table(border=1, style="table-layout: fixed;") # Insert a table
self.doc.add(self.t)
with self.t:
with tr():
for im, txt, link in zip(ims, txts, links):
with td(style="word-wrap: break-word;", halign="center", valign="top"):
with p():
with a(href=os.path.join('images', link)):
img(style="width:%dpx" % width, src=os.path.join('images', im))
br()
p(txt)
def save(self):
"""save the current content to the HMTL file"""
html_file = '%s/index.html' % self.web_dir
f = open(html_file, 'wt')
f.write(self.doc.render())
f.close()
if __name__ == '__main__': # we show an example usage here.
html = HTML('web/', 'test_html')
html.add_header('hello world')
ims, txts, links = [], [], []
for n in range(4):
ims.append('image_%d.png' % n)
txts.append('text_%d' % n)
links.append('image_%d.png' % n)
html.add_images(ims, txts, links)
html.save()
|
"""This package includes a miscellaneous collection of useful helper functions."""
|
from __future__ import print_function
import os
import tarfile
import requests
from warnings import warn
from zipfile import ZipFile
from bs4 import BeautifulSoup
from os.path import abspath, isdir, join, basename
class GetData:
"""A Python script for downloading CycleGAN or pix2pix datasets.
Parameters:
technique (str) -- One of: 'cyclegan' or 'pix2pix'.
verbose (bool) -- If True, print additional information.
Examples:
>>> from util.get_data import GetData
>>> gd = GetData(technique='cyclegan')
>>> new_data_path = gd.get(save_path='./datasets') # options will be displayed.
    Alternatively, you can use the bash scripts: 'scripts/download_pix2pix_model.sh'
and 'scripts/download_cyclegan_model.sh'.
"""
def __init__(self, technique='cyclegan', verbose=True):
url_dict = {
'pix2pix': 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/',
'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets'
}
self.url = url_dict.get(technique.lower())
self._verbose = verbose
def _print(self, text):
if self._verbose:
print(text)
@staticmethod
def _get_options(r):
soup = BeautifulSoup(r.text, 'lxml')
options = [h.text for h in soup.find_all('a', href=True)
if h.text.endswith(('.zip', 'tar.gz'))]
return options
def _present_options(self):
r = requests.get(self.url)
options = self._get_options(r)
print('Options:\n')
for i, o in enumerate(options):
print("{0}: {1}".format(i, o))
choice = input("\nPlease enter the number of the "
"dataset above you wish to download:")
return options[int(choice)]
def _download_data(self, dataset_url, save_path):
if not isdir(save_path):
os.makedirs(save_path)
base = basename(dataset_url)
temp_save_path = join(save_path, base)
with open(temp_save_path, "wb") as f:
r = requests.get(dataset_url)
f.write(r.content)
if base.endswith('.tar.gz'):
obj = tarfile.open(temp_save_path)
elif base.endswith('.zip'):
obj = ZipFile(temp_save_path, 'r')
else:
raise ValueError("Unknown File Type: {0}.".format(base))
self._print("Unpacking Data...")
obj.extractall(save_path)
obj.close()
os.remove(temp_save_path)
def get(self, save_path, dataset=None):
"""
Download a dataset.
Parameters:
save_path (str) -- A directory to save the data to.
dataset (str) -- (optional). A specific dataset to download.
Note: this must include the file extension.
If None, options will be presented for you
to choose from.
Returns:
save_path_full (str) -- the absolute path to the downloaded data.
"""
if dataset is None:
selected_dataset = self._present_options()
else:
selected_dataset = dataset
save_path_full = join(save_path, selected_dataset.split('.')[0])
if isdir(save_path_full):
warn("\n'{0}' already exists. Voiding Download.".format(
save_path_full))
else:
self._print('Downloading Data...')
url = "{0}/{1}".format(self.url, selected_dataset)
self._download_data(url, save_path=save_path)
return abspath(save_path_full)
|
import numpy as np
import os
import sys
import ntpath
import time
from . import util, html
from subprocess import Popen, PIPE
if sys.version_info[0] == 2:
VisdomExceptionBase = Exception
else:
VisdomExceptionBase = ConnectionError
def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
"""Save images to the disk.
Parameters:
        webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
image_path (str) -- the string is used to create image paths
aspect_ratio (float) -- the aspect ratio of saved images
width (int) -- the images will be resized to width x width
This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
"""
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
webpage.add_header(name)
ims, txts, links = [], [], []
for label, im_data in visuals.items():
im = util.tensor2im(im_data)
image_name = '%s_%s.png' % (name, label)
save_path = os.path.join(image_dir, image_name)
util.save_image(im, save_path, aspect_ratio=aspect_ratio)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=width)
class Visualizer():
"""This class includes several functions that can display/save images and print/save logging information.
It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.
"""
def __init__(self, opt):
"""Initialize the Visualizer class
Parameters:
opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
Step 1: Cache the training/test options
Step 2: connect to a visdom server
        Step 3: create an HTML object for saving HTML files
Step 4: create a logging file to store training losses
"""
self.opt = opt # cache the option
self.display_id = opt.display_id
self.use_html = opt.isTrain and not opt.no_html
self.win_size = opt.display_winsize
self.name = opt.name
self.port = opt.display_port
self.saved = False
if self.display_id > 0: # connect to a visdom server given <display_port> and <display_server>
import visdom
self.ncols = opt.display_ncols
self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)
if not self.vis.check_connection():
self.create_visdom_connections()
if self.use_html: # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/
self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
self.img_dir = os.path.join(self.web_dir, 'images')
util.mkdirs([self.web_dir, self.img_dir])
# create a logging file to store training losses
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
def reset(self):
"""Reset the self.saved status"""
self.saved = False
def create_visdom_connections(self):
"""If the program could not connect to Visdom server, this function will start a new server at port < self.port > """
cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port
print('\n\nCould not connect to Visdom server. \n Trying to start a server....')
print('Command: %s' % cmd)
Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
def display_current_results(self, visuals, epoch, save_result):
"""Display current results on visdom; save current results to an HTML file.
Parameters:
visuals (OrderedDict) - - dictionary of images to display or save
epoch (int) - - the current epoch
            save_result (bool) - - whether to save the current results to an HTML file
"""
if self.display_id > 0: # show images in the browser using visdom
ncols = self.ncols
if ncols > 0: # show all the images in one visdom panel
ncols = min(ncols, len(visuals))
h, w = next(iter(visuals.values())).shape[:2]
table_css = """<style>
table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}
table td {width: % dpx; height: % dpx; padding: 4px; outline: 4px solid black}
</style>""" % (w, h) # create a table css
# create a table of images.
title = self.name
label_html = ''
label_html_row = ''
images = []
idx = 0
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
label_html_row += '<td>%s</td>' % label
images.append(image_numpy.transpose([2, 0, 1]))
idx += 1
if idx % ncols == 0:
label_html += '<tr>%s</tr>' % label_html_row
label_html_row = ''
white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
while idx % ncols != 0:
images.append(white_image)
label_html_row += '<td></td>'
idx += 1
if label_html_row != '':
label_html += '<tr>%s</tr>' % label_html_row
try:
self.vis.images(images, nrow=ncols, win=self.display_id + 1,
padding=2, opts=dict(title=title + ' images'))
label_html = '<table>%s</table>' % label_html
self.vis.text(table_css + label_html, win=self.display_id + 2,
opts=dict(title=title + ' labels'))
except VisdomExceptionBase:
self.create_visdom_connections()
else: # show each image in a separate visdom panel;
idx = 1
try:
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),
win=self.display_id + idx)
idx += 1
except VisdomExceptionBase:
self.create_visdom_connections()
if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.
self.saved = True
# save images to the disk
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
util.save_image(image_numpy, img_path)
# update website
webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1)
for n in range(epoch, 0, -1):
webpage.add_header('epoch [%d]' % n)
ims, txts, links = [], [], []
for label, image_numpy in visuals.items():
                    image_numpy = util.tensor2im(image_numpy)
img_path = 'epoch%.3d_%s.png' % (n, label)
ims.append(img_path)
txts.append(label)
links.append(img_path)
webpage.add_images(ims, txts, links, width=self.win_size)
webpage.save()
def plot_current_losses(self, epoch, counter_ratio, losses):
"""display the current losses on visdom display: dictionary of error labels and values
Parameters:
epoch (int) -- current epoch
counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
"""
if not hasattr(self, 'plot_data'):
self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}
self.plot_data['X'].append(epoch + counter_ratio)
self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])
try:
self.vis.line(
X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
Y=np.array(self.plot_data['Y']),
opts={
'title': self.name + ' loss over time',
'legend': self.plot_data['legend'],
'xlabel': 'epoch',
'ylabel': 'loss'},
win=self.display_id)
except VisdomExceptionBase:
self.create_visdom_connections()
# losses: same format as |losses| of plot_current_losses
def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
"""print current losses on console; also save the losses to the disk
Parameters:
epoch (int) -- current epoch
iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
t_comp (float) -- computational time per data point (normalized by batch_size)
t_data (float) -- data loading time per data point (normalized by batch_size)
"""
message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
for k, v in losses.items():
message += '%s: %.3f ' % (k, v)
print(message) # print the message
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message) # save the message
|
from .base_model import BaseModel
from . import networks
class TestModel(BaseModel):
""" This TesteModel can be used to generate CycleGAN results for only one direction.
This model will automatically set '--dataset_mode single', which only loads the images from one collection.
See the test instruction for more details.
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
The model can only be used during test time. It requires '--dataset_mode single'.
You need to specify the network using the option '--model_suffix'.
"""
assert not is_train, 'TestModel cannot be used during training time'
parser.set_defaults(dataset_mode='single')
parser.add_argument('--model_suffix', type=str, default='', help='In checkpoints_dir, [epoch]_net_G[model_suffix].pth will be loaded as the generator.')
return parser
def __init__(self, opt):
"""Initialize the pix2pix class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
assert(not opt.isTrain)
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = []
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
self.visual_names = ['real', 'fake']
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
self.model_names = ['G' + opt.model_suffix] # only generator is needed.
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
# assigns the model to self.netG_[suffix] so that it can be loaded
# please see <BaseModel.load_networks>
setattr(self, 'netG' + opt.model_suffix, self.netG) # store netG in self.
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input: a dictionary that contains the data itself and its metadata information.
        We need to use 'single_dataset' dataset mode. It only loads images from one domain.
"""
self.real = input['A'].to(self.device)
self.image_paths = input['A_paths']
def forward(self):
"""Run forward pass."""
self.fake = self.netG(self.real) # G(real)
def optimize_parameters(self):
"""No optimization for test model."""
pass
|
"""Model class template
This module provides a template for users to implement custom models.
You can specify '--model template' to use this model.
The class name should be consistent with both the filename and its model option.
The filename should be <model>_model.py
The class name should be <Model>Model
It implements a simple image-to-image translation baseline based on regression loss.
Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss:
min_<netG> ||netG(data_A) - data_B||_1
You need to implement the following functions:
<modify_commandline_options>: Add model-specific options and rewrite default values for existing options.
<__init__>: Initialize this model class.
<set_input>: Unpack input data and perform data pre-processing.
<forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>.
<optimize_parameters>: Update network weights; it will be called in every training iteration.
"""
import torch
from .base_model import BaseModel
from . import networks
class TemplateModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new model-specific options and rewrite default values for existing options.
Parameters:
parser -- the option parser
is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
parser.set_defaults(dataset_mode='aligned') # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset.
if is_train:
parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss') # You can define new arguments for this model.
return parser
def __init__(self, opt):
"""Initialize this model class.
Parameters:
opt -- training/test options
A few things can be done here.
- (required) call the initialization function of BaseModel
- define loss function, visualization images, model names, and optimizers
"""
BaseModel.__init__(self, opt) # call the initialization method of BaseModel
# specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
self.loss_names = ['loss_G']
# specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
self.visual_names = ['data_A', 'data_B', 'output']
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
# you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
self.model_names = ['G']
# define networks; you can use opt.isTrain to specify different behaviors for training and test.
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids)
if self.isTrain: # only defined during training time
# define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
# We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device)
self.criterionLoss = torch.nn.L1Loss()
# define and initialize optimizers. You can define one optimizer for each network.
# If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers = [self.optimizer]
# Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input: a dictionary that contains the data itself and its metadata information.
"""
AtoB = self.opt.direction == 'AtoB' # use <direction> to swap data_A and data_B
self.data_A = input['A' if AtoB else 'B'].to(self.device) # get image data A
self.data_B = input['B' if AtoB else 'A'].to(self.device) # get image data B
self.image_paths = input['A_paths' if AtoB else 'B_paths'] # get image paths
def forward(self):
"""Run forward pass. This will be called by both functions <optimize_parameters> and <test>."""
self.output = self.netG(self.data_A) # generate output image given the input data_A
def backward(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
        # calculate the intermediate results if necessary; here self.output has been computed during function <forward>
# calculate loss given the input and intermediate results
self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression
self.loss_G.backward() # calculate gradients of network G w.r.t. loss_G
def optimize_parameters(self):
"""Update network weights; it will be called in every training iteration."""
self.forward() # first call forward to calculate intermediate results
self.optimizer.zero_grad() # clear network G's existing gradients
self.backward() # calculate gradients for network G
        self.optimizer.step()  # update network G's weights
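# Example (a sketch): once a model file following this template is placed under models/,
# it can be selected on the command line as described in the module docstring, e.g.
#   python train.py --dataroot ./datasets/facades --name facades_template --model template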
|
import torch
import itertools
from ..util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
class CycleGANModel(BaseModel):
"""
This class implements the CycleGAN model, for learning image-to-image translation without paired data.
The model training requires '--dataset_mode unaligned' dataset.
By default, it uses a '--netG resnet_9blocks' ResNet generator,
a '--netD basic' discriminator (PatchGAN introduced by pix2pix),
and a least-square GANs objective ('--gan_mode lsgan').
CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.
A (source domain), B (target domain).
Generators: G_A: A -> B; G_B: B -> A.
Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.
Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)
Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)
Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper)
Dropout is not used in the original CycleGAN paper.
"""
parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout
if is_train:
parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
return parser
def __init__(self, opt):
"""Initialize the CycleGAN class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
visual_names_A = ['real_A', 'fake_B', 'rec_A']
visual_names_B = ['real_B', 'fake_A', 'rec_B']
        if self.isTrain and self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_A=G_A(B) and idt_B=G_B(A)
visual_names_A.append('idt_B')
visual_names_B.append('idt_A')
self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
if self.isTrain:
self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
else: # during test time, only load Gs
self.model_names = ['G_A', 'G_B']
# define networks (both Generators and discriminators)
# The naming is different from those used in the paper.
# Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain: # define discriminators
self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels
assert(opt.input_nc == opt.output_nc)
self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss.
self.criterionCycle = torch.nn.L1Loss()
self.criterionIdt = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.fake_B = self.netG_A(self.real_A) # G_A(A)
self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A))
self.fake_A = self.netG_B(self.real_B) # G_B(B)
self.rec_B = self.netG_A(self.fake_A) # G_A(G_B(B))
def backward_D_basic(self, netD, real, fake):
"""Calculate GAN loss for the discriminator
Parameters:
netD (network) -- the discriminator D
real (tensor array) -- real images
fake (tensor array) -- images generated by a generator
Return the discriminator loss.
We also call loss_D.backward() to calculate the gradients.
"""
# Real
pred_real = netD(real)
loss_D_real = self.criterionGAN(pred_real, True)
# Fake
pred_fake = netD(fake.detach())
loss_D_fake = self.criterionGAN(pred_fake, False)
# Combined loss and calculate gradients
loss_D = (loss_D_real + loss_D_fake) * 0.5
loss_D.backward()
return loss_D
def backward_D_A(self):
"""Calculate GAN loss for discriminator D_A"""
fake_B = self.fake_B_pool.query(self.fake_B)
self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
def backward_D_B(self):
"""Calculate GAN loss for discriminator D_B"""
fake_A = self.fake_A_pool.query(self.fake_A)
self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
def backward_G(self):
"""Calculate the loss for generators G_A and G_B"""
lambda_idt = self.opt.lambda_identity
lambda_A = self.opt.lambda_A
lambda_B = self.opt.lambda_B
# Identity loss
if lambda_idt > 0:
# G_A should be identity if real_B is fed: ||G_A(B) - B||
self.idt_A = self.netG_A(self.real_B)
self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
# G_B should be identity if real_A is fed: ||G_B(A) - A||
self.idt_B = self.netG_B(self.real_A)
self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
else:
self.loss_idt_A = 0
self.loss_idt_B = 0
# GAN loss D_A(G_A(A))
self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
# GAN loss D_B(G_B(B))
self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
# Forward cycle loss || G_B(G_A(A)) - A||
self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
# Backward cycle loss || G_A(G_B(B)) - B||
self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
# combined loss and calculate gradients
self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
self.loss_G.backward()
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
# forward
self.forward() # compute fake images and reconstruction images.
# G_A and G_B
self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs
self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero
self.backward_G() # calculate gradients for G_A and G_B
self.optimizer_G.step() # update G_A and G_B's weights
# D_A and D_B
self.set_requires_grad([self.netD_A, self.netD_B], True)
self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero
self.backward_D_A() # calculate gradients for D_A
        self.backward_D_B()  # calculate gradients for D_B
self.optimizer_D.step() # update D_A and D_B's weights
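# Summary of the generator objective assembled in backward_G above (restating the
# code, with ||.||_1 denoting the L1 criteria used there):
#   loss_G = GAN(D_A(G_A(A))) + GAN(D_B(G_B(B)))
#            + lambda_A * ||G_B(G_A(A)) - A||_1 + lambda_B * ||G_A(G_B(B)) - B||_1
#            + lambda_identity * (lambda_B * ||G_A(B) - B||_1 + lambda_A * ||G_B(A) - A||_1)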
|
import torch
from .base_model import BaseModel
from . import networks
class Pix2PixModel(BaseModel):
""" This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.
The model training requires '--dataset_mode aligned' dataset.
    By default, it uses a '--netG unet_256' U-Net generator,
    a '--netD basic' discriminator (PatchGAN),
    and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the original GAN paper).
pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For pix2pix, we do not use image buffer
The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
"""
# changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')
if is_train:
parser.set_defaults(pool_size=0, gan_mode='vanilla')
parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
return parser
def __init__(self, opt):
"""Initialize the pix2pix class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
self.visual_names = ['real_A', 'fake_B', 'real_B']
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
if self.isTrain:
self.model_names = ['G', 'D']
else: # during test time, only load G
self.model_names = ['G']
# define networks (both generator and discriminator)
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc
self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
self.criterionL1 = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap images in domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.fake_B = self.netG(self.real_A) # G(A)
def backward_D(self):
"""Calculate GAN loss for the discriminator"""
# Fake; stop backprop to the generator by detaching fake_B
fake_AB = torch.cat((self.real_A, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator
pred_fake = self.netD(fake_AB.detach())
self.loss_D_fake = self.criterionGAN(pred_fake, False)
# Real
real_AB = torch.cat((self.real_A, self.real_B), 1)
pred_real = self.netD(real_AB)
self.loss_D_real = self.criterionGAN(pred_real, True)
# combine loss and calculate gradients
self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
self.loss_D.backward()
def backward_G(self):
"""Calculate GAN and L1 loss for the generator"""
# First, G(A) should fake the discriminator
fake_AB = torch.cat((self.real_A, self.fake_B), 1)
pred_fake = self.netD(fake_AB)
self.loss_G_GAN = self.criterionGAN(pred_fake, True)
# Second, G(A) = B
self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
# combine loss and calculate gradients
self.loss_G = self.loss_G_GAN + self.loss_G_L1
self.loss_G.backward()
def optimize_parameters(self):
self.forward() # compute fake images: G(A)
# update D
self.set_requires_grad(self.netD, True) # enable backprop for D
self.optimizer_D.zero_grad() # set D's gradients to zero
self.backward_D() # calculate gradients for D
self.optimizer_D.step() # update D's weights
# update G
self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G
self.optimizer_G.zero_grad() # set G's gradients to zero
self.backward_G()                   # calculate gradients for G
self.optimizer_G.step()             # update G's weights
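# --- Illustrative sketch (not part of the original repository) ---
# backward_D() detaches fake_B before feeding it to the discriminator so the
# D update does not backpropagate into the generator. The toy tensors below
# (made-up shapes) show the two facts this relies on: the conditional D input
# is the channel-wise concatenation of input and output, and detach() cuts the
# graph back to the generator.
if __name__ == '__main__':
    import torch
    real_A = torch.randn(1, 3, 8, 8)
    fake_B = torch.randn(1, 3, 8, 8, requires_grad=True)   # pretend generator output
    fake_AB = torch.cat((real_A, fake_B), 1)                # conditional D input: 3 + 3 channels
    assert fake_AB.shape[1] == 6
    assert fake_AB.requires_grad and not fake_AB.detach().requires_grad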
|
"""This package contains modules related to objective functions, optimizations, and network architectures.
To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
You need to implement the following five functions:
-- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-- <set_input>: unpack data from dataset and apply preprocessing.
-- <forward>: produce intermediate results.
-- <optimize_parameters>: calculate loss, gradients, and update network weights.
-- <modify_commandline_options>: (optionally) add model-specific options and set default options.
In the function <__init__>, you need to define four lists:
-- self.loss_names (str list): specify the training losses that you want to plot and save.
-- self.model_names (str list): define networks used in our training.
-- self.visual_names (str list): specify the images that you want to display and save.
-- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
Now you can use the model class by specifying flag '--model dummy'.
See our template model class 'template_model.py' for more details.
"""
import importlib
from .base_model import BaseModel
def find_model_using_name(model_name):
"""Import the module "models/[model_name]_model.py".
In the file, the class called [model_name]Model will
be instantiated. It has to be a subclass of BaseModel,
and the name matching is case-insensitive.
"""
model_filename = f'{__package__}.{model_name}_model'
modellib = importlib.import_module(model_filename)
model = None
target_model_name = model_name.replace('_', '') + 'model'
for name, cls in modellib.__dict__.items():
if name.lower() == target_model_name.lower() \
and issubclass(cls, BaseModel):
model = cls
if model is None:
print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
exit(0)
return model
def get_option_setter(model_name):
"""Return the static method <modify_commandline_options> of the model class."""
model_class = find_model_using_name(model_name)
return model_class.modify_commandline_options
def create_model(opt):
"""Create a model given the option.
This function instantiates the model class found by <find_model_using_name>.
This is the main interface between this package and 'train.py'/'test.py'
Example:
>>> from models import create_model
>>> model = create_model(opt)
"""
model = find_model_using_name(opt.model)
instance = model(opt)
return instance
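# --- Illustrative sketch (not part of the original repository) ---
# find_model_using_name() relies purely on a naming convention: the flag
# '--model cycle_gan' maps to the module 'cycle_gan_model.py' and, after
# dropping underscores and appending 'model', matches the class CycleGANModel
# case-insensitively. A quick check of that string convention:
if __name__ == '__main__':
    _name = 'cycle_gan'
    _target = _name.replace('_', '') + 'model'          # 'cycleganmodel'
    assert 'CycleGANModel'.lower() == _target.lower()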
|
from .pix2pix_model import Pix2PixModel
import torch
from skimage import color # used for lab2rgb
import numpy as np
class ColorizationModel(Pix2PixModel):
"""This is a subclass of Pix2PixModel for image colorization (black & white image -> colorful images).
The model training requires the '--dataset_mode colorization' dataset.
It trains a pix2pix model, mapping from L channel to ab channels in Lab color space.
By default, the colorization dataset will automatically set '--input_nc 1' and '--output_nc 2'.
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
By default, we use 'colorization' dataset for this model.
See the original pix2pix paper (https://arxiv.org/pdf/1611.07004.pdf) and colorization results (Figure 9 in the paper)
"""
Pix2PixModel.modify_commandline_options(parser, is_train)
parser.set_defaults(dataset_mode='colorization')
return parser
def __init__(self, opt):
"""Initialize the class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
For visualization, we set 'visual_names' as 'real_A' (input real image),
'real_B_rgb' (ground truth RGB image), and 'fake_B_rgb' (predicted RGB image).
We convert the Lab image 'real_B' (inherited from Pix2PixModel) to an RGB image 'real_B_rgb'.
Similarly, we convert the Lab image 'fake_B' (inherited from Pix2PixModel) to an RGB image 'fake_B_rgb'.
"""
# reuse the pix2pix model
Pix2PixModel.__init__(self, opt)
# specify the images to be visualized.
self.visual_names = ['real_A', 'real_B_rgb', 'fake_B_rgb']
def lab2rgb(self, L, AB):
"""Convert an Lab tensor image to a RGB numpy output
Parameters:
L (1-channel tensor array): L channel images (range: [-1, 1], torch tensor array)
AB (2-channel tensor array): ab channel images (range: [-1, 1], torch tensor array)
Returns:
rgb (RGB numpy image): rgb output images (range: [0, 255], numpy array)
"""
AB2 = AB * 110.0
L2 = (L + 1.0) * 50.0
Lab = torch.cat([L2, AB2], dim=1)
Lab = Lab[0].data.cpu().float().numpy()
Lab = np.transpose(Lab.astype(np.float64), (1, 2, 0))
rgb = color.lab2rgb(Lab) * 255
return rgb
def compute_visuals(self):
"""Calculate additional output images for visdom and HTML visualization"""
self.real_B_rgb = self.lab2rgb(self.real_A, self.real_B)
self.fake_B_rgb = self.lab2rgb(self.real_A, self.fake_B)
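# --- Illustrative sketch (not part of the original repository) ---
# lab2rgb() above is the inverse of the normalisation done in
# colorization_dataset.py: the network sees L/50 - 1 and ab/110, so the
# visualisation multiplies back by 110 and rescales L to [0, 100]. The toy
# values below (made up) just check the round trip of that scaling.
if __name__ == '__main__':
    import torch
    L_lab = torch.tensor([[[[37.5]]]])                 # L channel value in [0, 100]
    ab_lab = torch.tensor([[[[20.0]], [[-40.0]]]])     # ab values, roughly [-110, 110]
    A = L_lab / 50.0 - 1.0                             # dataset-side normalisation
    B = ab_lab / 110.0
    assert torch.allclose((A + 1.0) * 50.0, L_lab)     # what lab2rgb undoes
    assert torch.allclose(B * 110.0, ab_lab)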
|
import os
import torch
from collections import OrderedDict
from abc import ABC, abstractmethod
from . import networks
class BaseModel(ABC):
"""This class is an abstract base class (ABC) for models.
To create a subclass, you need to implement the following five functions:
-- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-- <set_input>: unpack data from dataset and apply preprocessing.
-- <forward>: produce intermediate results.
-- <optimize_parameters>: calculate losses, gradients, and update network weights.
-- <modify_commandline_options>: (optionally) add model-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the BaseModel class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
When creating your custom class, you need to implement your own initialization.
In this function, you should first call <BaseModel.__init__(self, opt)>
Then, you need to define four lists:
-- self.loss_names (str list): specify the training losses that you want to plot and save.
-- self.model_names (str list): define networks used in our training.
-- self.visual_names (str list): specify the images that you want to display and save.
-- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
"""
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.optimizers = []
self.image_paths = []
self.metric = 0 # used for learning rate policy 'plateau'
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new model-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): includes the data itself and its metadata information.
"""
pass
@abstractmethod
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
pass
@abstractmethod
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
pass
def setup(self, opt):
"""Load and print networks; create schedulers
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
if self.isTrain:
self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
if not self.isTrain or opt.continue_train:
load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
self.load_networks(load_suffix)
def eval(self):
"""Make models eval mode during test time"""
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
net.eval()
def test(self):
"""Forward function used in test time.
This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
It also calls <compute_visuals> to produce additional visualization results
"""
with torch.no_grad():
self.forward()
self.compute_visuals()
def compute_visuals(self):
"""Calculate additional output images for visdom and HTML visualization"""
pass
def get_image_paths(self):
""" Return image paths that are used to load current data"""
return self.image_paths
def update_learning_rate(self):
"""Update learning rates for all the networks; called at the end of every epoch"""
for scheduler in self.schedulers:
if self.opt.lr_policy == 'plateau':
scheduler.step(self.metric)
else:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
def get_current_visuals(self):
"""Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
visual_ret[name] = getattr(self, name)
return visual_ret
def get_current_losses(self):
"""Return traning losses / errors. train.py will print out these errors on console, and save them to a file"""
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number
return errors_ret
def save_networks(self, epoch):
"""Save all the networks to the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net' + name)
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
torch.save(net.cpu().state_dict(), save_path)
def __patch_instance_norm_state_dict(self, state_dict, net):
"""Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
modules = dict(net.named_modules())
for key in list(state_dict.keys()):
path, field = key.rsplit('.', 1)
if modules[path].__class__.__name__.startswith('InstanceNorm'):
if field in ['running_mean', 'running_var']:
if getattr(modules[path], field) is None:
state_dict.pop(key)
if field == 'num_batches_tracked':
state_dict.pop(key)
def load_networks(self, epoch):
"""Load all the networks from the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_net_%s.pth' % (epoch, name)
load_path = os.path.join(self.save_dir, load_filename)
net = getattr(self, 'net' + name)
if isinstance(net, torch.nn.DataParallel):
net = net.module
# if you are using PyTorch newer than 0.4 (e.g., built from
# GitHub source), you can remove str() on self.device
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
self.__patch_instance_norm_state_dict(state_dict, net)
net.load_state_dict(state_dict)
def print_networks(self, verbose):
"""Print the total number of parameters in the network and (if verbose) network architecture
Parameters:
verbose (bool) -- if verbose: print the network architecture
"""
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
def set_requires_grad(self, nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
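# --- Illustrative sketch (not part of the original repository) ---
# The smallest subclass BaseModel will accept only needs the three abstract
# methods. The Namespace below carries just the attributes BaseModel.__init__
# reads; every field value is made up for illustration.
if __name__ == '__main__':
    from argparse import Namespace
    import torch

    class IdentityModel(BaseModel):
        def set_input(self, input):
            self.real = input['A'].to(self.device)
        def forward(self):
            self.fake = self.real
        def optimize_parameters(self):
            self.forward()

    opt = Namespace(gpu_ids=[], isTrain=False, checkpoints_dir='./checkpoints',
                    name='identity_demo', preprocess='resize_and_crop')
    model = IdentityModel(opt)
    model.set_input({'A': torch.zeros(1, 3, 8, 8)})
    model.test()       # forward() under no_grad, then compute_visuals()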
|
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x): return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
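# --- Illustrative sketch (not part of the original repository) ---
# get_norm_layer() returns a constructor rather than a layer instance; callers
# such as ResnetGenerator later apply it to a channel count, e.g. norm_layer(ngf).
if __name__ == '__main__':
    _demo_norm = get_norm_layer('instance')
    assert isinstance(_demo_norm(64), nn.InstanceNorm2d)   # built on demand with a channel count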
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
return scheduler
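# --- Illustrative sketch (not part of the original repository) ---
# For the 'linear' policy the multiplier stays at 1.0 for the first n_epochs
# and then decays linearly towards zero over n_epochs_decay. The numbers below
# are made up and simply evaluate the same formula used in lambda_rule.
if __name__ == '__main__':
    _n_epochs, _n_epochs_decay, _epoch_count = 100, 100, 1

    def _demo_rule(epoch):
        return 1.0 - max(0, epoch + _epoch_count - _n_epochs) / float(_n_epochs_decay + 1)

    assert _demo_rule(50) == 1.0                               # constant phase
    assert abs(_demo_rule(150) - (1.0 - 51 / 101.0)) < 1e-12   # decay phase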
def init_weights(net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
init_weights(net, init_type, init_gain=init_gain)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
[n_layers]: With this mode, you can specify the number of conv layers in the discriminator
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids)
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (float) - - label for a real image
target_fake_label (float) - - label for a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
prediction (tensor) - - tpyically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
return loss
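# --- Illustrative sketch (not part of the original repository) ---
# GANLoss builds the target tensor internally, so callers only pass the
# prediction map and a boolean, e.g. criterionGAN(netD(fake_AB), True).
# The shapes and values below are made up; with 'lsgan' the loss is a plain
# MSE against an all-ones (real) or all-zeros (fake) target.
if __name__ == '__main__':
    _crit = GANLoss('lsgan')
    _pred = torch.zeros(2, 1, 30, 30)           # e.g. a PatchGAN prediction map
    assert _crit(_pred, True).item() == 1.0     # MSE against ones
    assert _crit(_pred, False).item() == 0.0    # MSE against zeros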
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
constant (float) -- the constant used in formula ( ||gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
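# --- Illustrative sketch (not part of the original repository) ---
# cal_gradient_penalty() is only used with the wgangp objective. The toy
# critic and tensor sizes below are made up; the point is that the returned
# penalty is an ordinary differentiable loss term added to the D loss.
if __name__ == '__main__':
    _critic = nn.Sequential(nn.Flatten(), nn.Linear(3 * 4 * 4, 1))
    _real = torch.randn(2, 3, 4, 4)
    _fake = torch.randn(2, 3, 4, 4)
    _gp, _ = cal_gradient_penalty(_critic, _real, _fake, torch.device('cpu'), type='mixed')
    _gp.backward()                               # accumulates into the critic's gradients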
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and ideas from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
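# --- Illustrative sketch (not part of the original repository) ---
# Building the default pix2pix pair on CPU with made-up channel counts: the
# generator maps 3->3 channels and the conditional PatchGAN sees the 6-channel
# concatenation of input and output. Nothing here is trained.
if __name__ == '__main__':
    _G = define_G(input_nc=3, output_nc=3, ngf=64, netG='unet_256', norm='batch', gpu_ids=[])
    _D = define_D(input_nc=3 + 3, ndf=64, netD='basic', gpu_ids=[])
    _x = torch.randn(1, 3, 256, 256)
    _y = _G(_x)
    _score = _D(torch.cat([_x, _y], 1))
    print(_y.shape, _score.shape)    # torch.Size([1, 3, 256, 256]) torch.Size([1, 1, 30, 30])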
|
# Simple script to make sure basic usage
# such as training, testing, saving and loading
# runs without errors.
import os
def run(command):
print(command)
exit_status = os.system(command)
if exit_status > 0:
exit(1)
if __name__ == '__main__':
# download mini datasets
if not os.path.exists('./datasets/mini'):
run('bash ./datasets/download_cyclegan_dataset.sh mini')
if not os.path.exists('./datasets/mini_pix2pix'):
run('bash ./datasets/download_cyclegan_dataset.sh mini_pix2pix')
# pretrained cyclegan model
if not os.path.exists('./checkpoints/horse2zebra_pretrained/latest_net_G.pth'):
run('bash ./scripts/download_cyclegan_model.sh horse2zebra')
run('python test.py --model test --dataroot ./datasets/mini --name horse2zebra_pretrained --no_dropout --num_test 1')
# pretrained pix2pix model
if not os.path.exists('./checkpoints/facades_label2photo_pretrained/latest_net_G.pth'):
run('bash ./scripts/download_pix2pix_model.sh facades_label2photo')
if not os.path.exists('./datasets/facades'):
run('bash ./datasets/download_pix2pix_dataset.sh facades')
run('python test.py --dataroot ./datasets/facades/ --direction BtoA --model pix2pix --name facades_label2photo_pretrained --num_test 1')
# cyclegan train/test
run('python train.py --model cycle_gan --name temp_cyclegan --dataroot ./datasets/mini --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 10 --print_freq 1 --display_id -1')
run('python test.py --model test --name temp_cyclegan --dataroot ./datasets/mini --num_test 1 --model_suffix "_A" --no_dropout')
# pix2pix train/test
run('python train.py --model pix2pix --name temp_pix2pix --dataroot ./datasets/mini_pix2pix --n_epochs 1 --n_epochs_decay 5 --save_latest_freq 10 --display_id -1')
run('python test.py --model pix2pix --name temp_pix2pix --dataroot ./datasets/mini_pix2pix --num_test 1')
# template train/test
run('python train.py --model template --name temp2 --dataroot ./datasets/mini_pix2pix --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 10 --display_id -1')
run('python test.py --model template --name temp2 --dataroot ./datasets/mini_pix2pix --num_test 1')
# colorization train/test (optional)
if not os.path.exists('./datasets/mini_colorization'):
run('bash ./datasets/download_cyclegan_dataset.sh mini_colorization')
run('python train.py --model colorization --name temp_color --dataroot ./datasets/mini_colorization --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 5 --display_id -1')
run('python test.py --model colorization --name temp_color --dataroot ./datasets/mini_colorization --num_test 1')
|
# The following code is modified from https://github.com/shelhamer/clockwork-fcn
import sys
import os
import glob
import numpy as np
from PIL import Image
class cityscapes:
def __init__(self, data_path):
# data_path something like /data2/cityscapes
self.dir = data_path
self.classes = ['road', 'sidewalk', 'building', 'wall', 'fence',
'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain',
'sky', 'person', 'rider', 'car', 'truck',
'bus', 'train', 'motorcycle', 'bicycle']
self.mean = np.array((72.78044, 83.21195, 73.45286), dtype=np.float32)
# import cityscapes label helper and set up label mappings
sys.path.insert(0, '{}/scripts/helpers/'.format(self.dir))
labels = __import__('labels')
self.id2trainId = {label.id: label.trainId for label in labels.labels} # dictionary mapping from raw IDs to train IDs
self.trainId2color = {label.trainId: label.color for label in labels.labels} # dictionary mapping train IDs to colors as 3-tuples
def get_dset(self, split):
'''
List images as (city, id) for the specified split
TODO(shelhamer) generate splits from cityscapes itself, instead of
relying on these separately made text files.
'''
if split == 'train':
dataset = open('{}/ImageSets/segFine/train.txt'.format(self.dir)).read().splitlines()
else:
dataset = open('{}/ImageSets/segFine/val.txt'.format(self.dir)).read().splitlines()
return [(item.split('/')[0], item.split('/')[1]) for item in dataset]
def load_image(self, split, city, idx):
im = Image.open('{}/leftImg8bit_sequence/{}/{}/{}_leftImg8bit.png'.format(self.dir, split, city, idx))
return im
def assign_trainIds(self, label):
"""
Map the given label IDs to the train IDs appropriate for training
Use the label mapping provided in labels.py from the cityscapes scripts
"""
label = np.array(label, dtype=np.float32)
if sys.version_info[0] < 3:
for k, v in self.id2trainId.iteritems():
label[label == k] = v
else:
for k, v in self.id2trainId.items():
label[label == k] = v
return label
def load_label(self, split, city, idx):
"""
Load label image as 1 x height x width integer array of label indices.
The leading singleton dimension is required by the loss.
"""
label = Image.open('{}/gtFine/{}/{}/{}_gtFine_labelIds.png'.format(self.dir, split, city, idx))
label = self.assign_trainIds(label) # get proper labels for eval
label = np.array(label, dtype=np.uint8)
label = label[np.newaxis, ...]
return label
def preprocess(self, im):
"""
Preprocess loaded image (by load_image) for Caffe:
- cast to float
- switch channels RGB -> BGR
- subtract mean
- transpose to channel x height x width order
"""
in_ = np.array(im, dtype=np.float32)
in_ = in_[:, :, ::-1]
in_ -= self.mean
in_ = in_.transpose((2, 0, 1))
return in_
def palette(self, label):
'''
Map trainIds to colors as specified in labels.py
'''
if label.ndim == 3:
label = label[0]
color = np.empty((label.shape[0], label.shape[1], 3))
if sys.version_info[0] < 3:
for k, v in self.trainId2color.iteritems():
color[label == k, :] = v
else:
for k, v in self.trainId2color.items():
color[label == k, :] = v
return color
def make_boundaries(label, thickness=None):
"""
Input is an image label, output is a numpy array mask encoding the boundaries of the objects
Extract pixels at the true boundary by dilation - erosion of label.
Don't just pick the void label as it is not exclusive to the boundaries.
"""
assert(thickness is not None)
import skimage.morphology as skm
void = 255
mask = np.logical_and(label > 0, label != void)[0]
selem = skm.disk(thickness)
boundaries = np.logical_xor(skm.dilation(mask, selem),
skm.erosion(mask, selem))
return boundaries
def list_label_frames(self, split):
"""
Select labeled frames from a split for evaluation
collected as (city, shot, idx) tuples
"""
def file2idx(f):
"""Helper to convert file path into frame ID"""
city, shot, frame = (os.path.basename(f).split('_')[:3])
return "_".join([city, shot, frame])
frames = []
cities = [os.path.basename(f) for f in glob.glob('{}/gtFine/{}/*'.format(self.dir, split))]
for c in cities:
files = sorted(glob.glob('{}/gtFine/{}/{}/*labelIds.png'.format(self.dir, split, c)))
frames.extend([file2idx(f) for f in files])
return frames
def collect_frame_sequence(self, split, idx, length):
"""
Collect sequence of frames preceding (and including) a labeled frame
as a list of Images.
Note: 19 preceding frames are provided for each labeled frame.
"""
SEQ_LEN = length
city, shot, frame = idx.split('_')
frame = int(frame)
frame_seq = []
for i in range(frame - SEQ_LEN, frame + 1):
frame_path = '{0}/leftImg8bit_sequence/val/{1}/{1}_{2}_{3:0>6d}_leftImg8bit.png'.format(
self.dir, city, shot, i)
frame_seq.append(Image.open(frame_path))
return frame_seq
|
# The following code is modified from https://github.com/shelhamer/clockwork-fcn
import numpy as np
def get_out_scoremap(net):
return net.blobs['score'].data[0].argmax(axis=0).astype(np.uint8)
def feed_net(net, in_):
"""
Load prepared input into net.
"""
net.blobs['data'].reshape(1, *in_.shape)
net.blobs['data'].data[...] = in_
def segrun(net, in_):
feed_net(net, in_)
net.forward()
return get_out_scoremap(net)
def fast_hist(a, b, n):
k = np.where((a >= 0) & (a < n))[0]
bc = np.bincount(n * a[k].astype(int) + b[k], minlength=n**2)
if len(bc) != n**2:
# ignore this example if dimension mismatch
return 0
return bc.reshape(n, n)
def get_scores(hist):
# Mean pixel accuracy
acc = np.diag(hist).sum() / (hist.sum() + 1e-12)
# Per class accuracy
cl_acc = np.diag(hist) / (hist.sum(1) + 1e-12)
# Per class IoU
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist) + 1e-12)
return acc, np.nanmean(cl_acc), np.nanmean(iu), cl_acc, iu
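# --- Illustrative sketch (not part of the original repository) ---
# A 2-class toy example (made-up labels) of what fast_hist/get_scores compute:
# rows of the histogram are ground-truth classes, columns are predictions.
if __name__ == '__main__':
    _gt = np.array([0, 0, 1, 1])
    _pred = np.array([0, 1, 1, 1])
    _hist = fast_hist(_gt, _pred, 2)                   # [[1, 1], [0, 2]]
    _acc, _mean_cl_acc, _mean_iu, _, _ = get_scores(_hist)
    # 3 of 4 pixels correct -> ~0.75 pixel accuracy; per-class IoU is [1/2, 2/3]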
|
import os
import caffe
import argparse
import numpy as np
import scipy.misc
from PIL import Image
from util import segrun, fast_hist, get_scores
from cityscapes import cityscapes
parser = argparse.ArgumentParser()
parser.add_argument("--cityscapes_dir", type=str, required=True, help="Path to the original cityscapes dataset")
parser.add_argument("--result_dir", type=str, required=True, help="Path to the generated images to be evaluated")
parser.add_argument("--output_dir", type=str, required=True, help="Where to save the evaluation results")
parser.add_argument("--caffemodel_dir", type=str, default='./scripts/eval_cityscapes/caffemodel/', help="Where the FCN-8s caffemodel stored")
parser.add_argument("--gpu_id", type=int, default=0, help="Which gpu id to use")
parser.add_argument("--split", type=str, default='val', help="Data split to be evaluated")
parser.add_argument("--save_output_images", type=int, default=0, help="Whether to save the FCN output images")
args = parser.parse_args()
def main():
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
if args.save_output_images > 0:
output_image_dir = args.output_dir + 'image_outputs/'
if not os.path.isdir(output_image_dir):
os.makedirs(output_image_dir)
CS = cityscapes(args.cityscapes_dir)
n_cl = len(CS.classes)
label_frames = CS.list_label_frames(args.split)
caffe.set_device(args.gpu_id)
caffe.set_mode_gpu()
net = caffe.Net(args.caffemodel_dir + '/deploy.prototxt',
args.caffemodel_dir + 'fcn-8s-cityscapes.caffemodel',
caffe.TEST)
hist_perframe = np.zeros((n_cl, n_cl))
for i, idx in enumerate(label_frames):
if i % 10 == 0:
print('Evaluating: %d/%d' % (i, len(label_frames)))
city = idx.split('_')[0]
# idx is city_shot_frame
label = CS.load_label(args.split, city, idx)
im_file = args.result_dir + '/' + idx + '_leftImg8bit.png'
im = np.array(Image.open(im_file))
im = scipy.misc.imresize(im, (label.shape[1], label.shape[2]))
out = segrun(net, CS.preprocess(im))
hist_perframe += fast_hist(label.flatten(), out.flatten(), n_cl)
if args.save_output_images > 0:
label_im = CS.palette(label)
pred_im = CS.palette(out)
scipy.misc.imsave(output_image_dir + '/' + str(i) + '_pred.jpg', pred_im)
scipy.misc.imsave(output_image_dir + '/' + str(i) + '_gt.jpg', label_im)
scipy.misc.imsave(output_image_dir + '/' + str(i) + '_input.jpg', im)
mean_pixel_acc, mean_class_acc, mean_class_iou, per_class_acc, per_class_iou = get_scores(hist_perframe)
with open(args.output_dir + '/evaluation_results.txt', 'w') as f:
f.write('Mean pixel accuracy: %f\n' % mean_pixel_acc)
f.write('Mean class accuracy: %f\n' % mean_class_acc)
f.write('Mean class IoU: %f\n' % mean_class_iou)
f.write('************ Per class numbers below ************\n')
for i, cl in enumerate(CS.classes):
while len(cl) < 15:
cl = cl + ' '
f.write('%s: acc = %f, iou = %f\n' % (cl, per_class_acc[i], per_class_iou[i]))
main()
|
# HED batch processing script; modified from https://github.com/s9xie/hed/blob/master/examples/hed/HED-tutorial.ipynb
# Step 1: download the hed repo: https://github.com/s9xie/hed
# Step 2: download the models and protoxt, and put them under {caffe_root}/examples/hed/
# Step 3: put this script under {caffe_root}/examples/hed/
# Step 4: run the following script:
# python batch_hed.py --images_dir=/data/to/path/photos/ --hed_mat_dir=/data/to/path/hed_mat_files/
# The code sometimes crashes after the computation is done, with an error like "Check failed: ... driver shutting down"; you can simply kill the job at that point.
# Large images may run out of GPU memory, so it is better to resize the images before running this script.
# Step 5: run the MATLAB post-processing script "PostprocessHED.m"
import caffe
import numpy as np
from PIL import Image
import os
import argparse
import sys
import scipy.io as sio
def parse_args():
parser = argparse.ArgumentParser(description='batch processing: photos->edges')
parser.add_argument('--caffe_root', dest='caffe_root', help='caffe root', default='../../', type=str)
parser.add_argument('--caffemodel', dest='caffemodel', help='caffemodel', default='./hed_pretrained_bsds.caffemodel', type=str)
parser.add_argument('--prototxt', dest='prototxt', help='caffe prototxt file', default='./deploy.prototxt', type=str)
parser.add_argument('--images_dir', dest='images_dir', help='directory to store input photos', type=str)
parser.add_argument('--hed_mat_dir', dest='hed_mat_dir', help='directory to store output hed edges in mat file', type=str)
parser.add_argument('--border', dest='border', help='padding border', type=int, default=128)
parser.add_argument('--gpu_id', dest='gpu_id', help='gpu id', type=int, default=1)
args = parser.parse_args()
return args
args = parse_args()
for arg in vars(args):
print('[%s] =' % arg, getattr(args, arg))
# Make sure that caffe is on the python path:
caffe_root = args.caffe_root # this file is expected to be in {caffe_root}/examples/hed/
sys.path.insert(0, caffe_root + 'python')
if not os.path.exists(args.hed_mat_dir):
print('create output directory %s' % args.hed_mat_dir)
os.makedirs(args.hed_mat_dir)
imgList = os.listdir(args.images_dir)
nImgs = len(imgList)
print('#images = %d' % nImgs)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
# load net
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
# pad border
border = args.border
for i in range(nImgs):
if i % 500 == 0:
print('processing image %d/%d' % (i, nImgs))
im = Image.open(os.path.join(args.images_dir, imgList[i]))
in_ = np.array(im, dtype=np.float32)
in_ = np.pad(in_, ((border, border), (border, border), (0, 0)), 'reflect')
in_ = in_[:, :, 0:3]
in_ = in_[:, :, ::-1]
in_ -= np.array((104.00698793, 116.66876762, 122.67891434))
in_ = in_.transpose((2, 0, 1))
# shape the input data blob (N x C x H x W) and set the data
net.blobs['data'].reshape(1, *in_.shape)
net.blobs['data'].data[...] = in_
# run net and take argmax for prediction
net.forward()
fuse = net.blobs['sigmoid-fuse'].data[0][0, :, :]
# get rid of the border
fuse = fuse[(border+35):(-border+35), (border+35):(-border+35)]
# save hed file to the disk
name, ext = os.path.splitext(imgList[i])
sio.savemat(os.path.join(args.hed_mat_dir, name + '.mat'), {'edge_predict': fuse})
|
import os.path
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from skimage import color # require skimage
from PIL import Image
import numpy as np
import torchvision.transforms as transforms
class ColorizationDataset(BaseDataset):
"""This dataset class can load a set of natural images in RGB, and convert RGB format into (L, ab) pairs in Lab color space.
This dataset is required by pix2pix-based colorization model ('--model colorization')
"""
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
By default, the number of channels for input image is 1 (L) and
the number of channels for output image is 2 (ab). The direction is from A to B
"""
parser.set_defaults(input_nc=1, output_nc=2, direction='AtoB')
return parser
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir = os.path.join(opt.dataroot, opt.phase)
self.AB_paths = sorted(make_dataset(self.dir, opt.max_dataset_size))
assert(opt.input_nc == 1 and opt.output_nc == 2 and opt.direction == 'AtoB')
self.transform = get_transform(self.opt, convert=False)
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) - - the L channel of an image
B (tensor) - - the ab channels of the same image
A_paths (str) - - image paths
B_paths (str) - - image paths (same as A_paths)
"""
path = self.AB_paths[index]
im = Image.open(path).convert('RGB')
im = self.transform(im)
im = np.array(im)
lab = color.rgb2lab(im).astype(np.float32)
lab_t = transforms.ToTensor()(lab)
A = lab_t[[0], ...] / 50.0 - 1.0
B = lab_t[[1, 2], ...] / 110.0
return {'A': A, 'B': B, 'A_paths': path, 'B_paths': path}
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.AB_paths)
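# Illustrative sketch (not part of the dataset class): invert the normalization used in
# __getitem__ above to recover an RGB image from an (A, B) pair. Shapes and values are
# placeholders.
if __name__ == '__main__':
    import torch
    A = torch.zeros(1, 64, 64)                              # normalized L channel in [-1, 1]
    B = torch.zeros(2, 64, 64)                              # normalized ab channels in [-1, 1]
    lab = torch.cat([(A + 1.0) * 50.0, B * 110.0], dim=0)   # undo the scaling above
    rgb = color.lab2rgb(lab.permute(1, 2, 0).numpy())       # back to RGB in [0, 1]
    print(rgb.shape)                                        # (64, 64, 3)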
|
"""This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
"""
import random
import numpy as np
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from abc import ABC, abstractmethod
class BaseDataset(data.Dataset, ABC):
"""This class is an abstract base class (ABC) for datasets.
To create a subclass, you need to implement the following four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the class; save the options in the class
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
self.opt = opt
self.root = opt.dataroot
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def __len__(self):
"""Return the total number of images in the dataset."""
return 0
@abstractmethod
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns:
            a dictionary of data with their names. It usually contains the data itself and its metadata information.
"""
pass
def get_params(opt, size):
    """Pick a random crop position and flip decision for the given preprocess option and image size."""
w, h = size
new_h = h
new_w = w
if opt.preprocess == 'resize_and_crop':
new_h = new_w = opt.load_size
elif opt.preprocess == 'scale_width_and_crop':
new_w = opt.load_size
new_h = opt.load_size * h // w
x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
flip = random.random() > 0.5
return {'crop_pos': (x, y), 'flip': flip}
def get_transform(opt, params=None, grayscale=False, method=transforms.InterpolationMode.BICUBIC, convert=True):
transform_list = []
if grayscale:
transform_list.append(transforms.Grayscale(1))
if 'resize' in opt.preprocess:
osize = [opt.load_size, opt.load_size]
transform_list.append(transforms.Resize(osize, method))
elif 'scale_width' in opt.preprocess:
transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))
if 'crop' in opt.preprocess:
if params is None:
transform_list.append(transforms.RandomCrop(opt.crop_size))
else:
transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))
if opt.preprocess == 'none':
transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))
if not opt.no_flip:
if params is None:
transform_list.append(transforms.RandomHorizontalFlip())
elif params['flip']:
transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
if convert:
transform_list += [transforms.ToTensor()]
if grayscale:
transform_list += [transforms.Normalize((0.5,), (0.5,))]
else:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
def __transforms2pil_resize(method):
    # torchvision's transforms pass an InterpolationMode enum, while PIL's Image.resize
    # expects a PIL resampling filter; map between the two so both call sites work.
    mapper = {
        transforms.InterpolationMode.BILINEAR: Image.Resampling.BILINEAR,
        transforms.InterpolationMode.BICUBIC: Image.Resampling.BICUBIC,
        transforms.InterpolationMode.NEAREST: Image.Resampling.NEAREST,
        transforms.InterpolationMode.LANCZOS: Image.Resampling.LANCZOS,
    }
    return mapper.get(method, method)
def __make_power_2(img, base, method=transforms.InterpolationMode.BICUBIC):
    method = __transforms2pil_resize(method)
    ow, oh = img.size
    h = int(round(oh / base) * base)
    w = int(round(ow / base) * base)
    if h == oh and w == ow:
        return img
    __print_size_warning(ow, oh, w, h)
    return img.resize((w, h), method)
def __scale_width(img, target_size, crop_size, method=transforms.InterpolationMode.BICUBIC):
    method = __transforms2pil_resize(method)
    ow, oh = img.size
    if ow == target_size and oh >= crop_size:
        return img
    w = target_size
    h = int(max(target_size * oh / ow, crop_size))
    return img.resize((w, h), method)
def __crop(img, pos, size):
ow, oh = img.size
x1, y1 = pos
tw = th = size
if (ow > tw or oh > th):
return img.crop((x1, y1, x1 + tw, y1 + th))
return img
def __flip(img, flip):
if flip:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img
def __print_size_warning(ow, oh, w, h):
"""Print warning information about image size(only print once)"""
if not hasattr(__print_size_warning, 'has_printed'):
print("The image size needs to be a multiple of 4. "
"The loaded image size was (%d, %d), so it was adjusted to "
"(%d, %d). This adjustment will be done to all images "
"whose sizes are not multiples of 4" % (ow, oh, w, h))
__print_size_warning.has_printed = True
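# Minimal usage sketch (illustration only; option values are placeholders): get_params
# draws one crop position / flip decision, and passing those params to get_transform makes
# the resulting transform deterministic, which is how paired datasets keep A and B aligned.
if __name__ == '__main__':
    from argparse import Namespace
    opt = Namespace(preprocess='resize_and_crop', load_size=286, crop_size=256, no_flip=False)
    img = Image.new('RGB', (512, 384))
    params = get_params(opt, img.size)
    transform = get_transform(opt, params)
    print(transform(img).shape)                # torch.Size([3, 256, 256])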
|
"""Dataset class template
This module provides a template for users to implement custom datasets.
You can specify '--dataset_mode template' to use this dataset.
The class name should be consistent with both the filename and its dataset_mode option.
The filename should be <dataset_mode>_dataset.py
The class name should be <Dataset_mode>Dataset
You need to implement the following functions:
-- <modify_commandline_options>: Add dataset-specific options and rewrite default values for existing options.
-- <__init__>: Initialize this dataset class.
-- <__getitem__>: Return a data point and its metadata information.
-- <__len__>: Return the number of images.
"""
from data.base_dataset import BaseDataset, get_transform
# from data.image_folder import make_dataset
# from PIL import Image
class TemplateDataset(BaseDataset):
"""A template dataset class for you to implement custom datasets."""
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option')
parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0) # specify dataset-specific default values
return parser
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
A few things can be done here.
- save the options (have been done in BaseDataset)
- get image paths and meta information of the dataset.
- define the image transformation.
"""
# save the option and dataset root
BaseDataset.__init__(self, opt)
# get the image paths of your dataset;
self.image_paths = [] # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root
# define the default transform function. You can use <base_dataset.get_transform>; You can also define your custom transform function
self.transform = get_transform(opt)
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index -- a random integer for data indexing
Returns:
a dictionary of data with their names. It usually contains the data itself and its metadata information.
Step 1: get a random image path: e.g., path = self.image_paths[index]
Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB').
        Step 3: convert your data to a PyTorch tensor. You can use helper functions such as self.transform. e.g., data = self.transform(image)
Step 4: return a data point as a dictionary.
"""
path = 'temp' # needs to be a string
data_A = None # needs to be a tensor
data_B = None # needs to be a tensor
return {'data_A': data_A, 'data_B': data_B, 'path': path}
def __len__(self):
"""Return the total number of images."""
return len(self.image_paths)
|
"""This package includes all the modules related to data loading and preprocessing
To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
You need to implement four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point from data loader.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
See our template dataset class 'template_dataset.py' for more details.
"""
import importlib
import torch.utils.data
from .base_dataset import BaseDataset
def find_dataset_using_name(dataset_name):
"""Import the module "data/[dataset_name]_dataset.py".
In the file, the class called DatasetNameDataset() will
be instantiated. It has to be a subclass of BaseDataset,
and it is case-insensitive.
"""
dataset_filename = f'{__package__}.{dataset_name}_dataset'
datasetlib = importlib.import_module(dataset_filename)
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in datasetlib.__dict__.items():
if name.lower() == target_dataset_name.lower() \
and issubclass(cls, BaseDataset):
dataset = cls
if dataset is None:
raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
return dataset
def get_option_setter(dataset_name):
"""Return the static method <modify_commandline_options> of the dataset class."""
dataset_class = find_dataset_using_name(dataset_name)
return dataset_class.modify_commandline_options
def create_dataset(opt):
"""Create a dataset given the option.
This function wraps the class CustomDatasetDataLoader.
This is the main interface between this package and 'train.py'/'test.py'
Example:
>>> from data import create_dataset
>>> dataset = create_dataset(opt)
"""
data_loader = CustomDatasetDataLoader(opt)
dataset = data_loader.load_data()
return dataset
class CustomDatasetDataLoader():
"""Wrapper class of Dataset class that performs multi-threaded data loading"""
def __init__(self, opt):
"""Initialize this class
Step 1: create a dataset instance given the name [dataset_mode]
Step 2: create a multi-threaded data loader.
"""
self.opt = opt
dataset_class = find_dataset_using_name(opt.dataset_mode)
self.dataset = dataset_class(opt)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batch_size,
shuffle=not opt.serial_batches,
num_workers=int(opt.num_threads)
)
def load_data(self):
return self
def __len__(self):
"""Return the number of data in the dataset"""
return min(len(self.dataset), self.opt.max_dataset_size)
def __iter__(self):
"""Return a batch of data"""
for i, data in enumerate(self.dataloader):
if i * self.opt.batch_size >= self.opt.max_dataset_size:
break
yield data
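# Usage sketch from outside this package (illustration only; the option values are
# placeholders). create_dataset resolves '<dataset_mode>_dataset.py' through the naming
# convention in find_dataset_using_name, and CustomDatasetDataLoader caps iteration at
# opt.max_dataset_size samples, matching __len__ above.
#
#   from argparse import Namespace
#   from data import create_dataset
#   opt = Namespace(dataset_mode='single', dataroot='./imgs', max_dataset_size=float('inf'),
#                   input_nc=3, output_nc=3, direction='AtoB', preprocess='resize_and_crop',
#                   load_size=286, crop_size=256, no_flip=True,
#                   batch_size=1, serial_batches=True, num_threads=0)
#   dataset = create_dataset(opt)
#   for batch in dataset:
#       pass  # batch is a dict, e.g. {'A': ..., 'A_paths': ...} for SingleDataset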
|
"""A modified image folder class
We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
so that this class can load images from both current directory and its subdirectories.
"""
import torch.utils.data as data
from PIL import Image
import os
import os.path
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
'.tif', '.TIF', '.tiff', '.TIFF',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(dir, max_dataset_size=float("inf")):
images = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
return images[:min(max_dataset_size, len(images))]
def default_loader(path):
return Image.open(path).convert('RGB')
class ImageFolder(data.Dataset):
def __init__(self, root, transform=None, return_paths=False,
loader=default_loader):
imgs = make_dataset(root)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " +
",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.transform = transform
self.return_paths = return_paths
self.loader = loader
def __getitem__(self, index):
path = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.return_paths:
return img, path
else:
return img
def __len__(self):
return len(self.imgs)
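# Usage sketch (illustration only; the directory path is a placeholder): ImageFolder walks
# the root recursively via make_dataset, so images in nested sub-directories are picked up
# as well.
if __name__ == '__main__':
    dataset = ImageFolder('./imgs', transform=None, return_paths=True)
    print(len(dataset))
    img, path = dataset[0]                     # a PIL image and its file path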
|
from .base_dataset import BaseDataset, get_transform
from .image_folder import make_dataset
from PIL import Image
class SingleDataset(BaseDataset):
"""This dataset class can load a set of images specified by the path --dataroot /path/to/data.
    It can be used for generating CycleGAN results only for one side with the model option '--model test'.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
self.transform = get_transform(opt, grayscale=(input_nc == 1))
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary that contains A and A_paths
A(tensor) - - an image in one domain
A_paths(str) - - the path of the image
"""
A_path = self.A_paths[index]
A_img = Image.open(A_path).convert('RGB')
A = self.transform(A_img)
return {'A': A, 'A_paths': A_path}
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.A_paths)
|
import os.path
from data.base_dataset import BaseDataset, get_params, get_transform
from data.image_folder import make_dataset
from PIL import Image
class AlignedDataset(BaseDataset):
"""A dataset class for paired image dataset.
It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
During test time, you need to prepare a directory '/path/to/data/test'.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_AB = os.path.join(opt.dataroot, opt.phase) # get the image directory
self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size)) # get image paths
assert(self.opt.load_size >= self.opt.crop_size) # crop_size should be smaller than the size of loaded image
self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) - - an image in the input domain
B (tensor) - - its corresponding image in the target domain
A_paths (str) - - image paths
B_paths (str) - - image paths (same as A_paths)
"""
        # read an image given a random integer index
AB_path = self.AB_paths[index]
AB = Image.open(AB_path).convert('RGB')
# split AB image into A and B
w, h = AB.size
w2 = int(w / 2)
A = AB.crop((0, 0, w2, h))
B = AB.crop((w2, 0, w, h))
# apply the same transform to both A and B
transform_params = get_params(self.opt, A.size)
A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
A = A_transform(A)
B = B_transform(B)
return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.AB_paths)
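# Data-layout sketch (illustration only): AlignedDataset expects each file to be a single
# image with A and B concatenated side by side; the snippet below builds one such {A,B}
# image the way __getitem__ expects to split it. Sizes and colors are placeholders.
if __name__ == '__main__':
    A_img = Image.new('RGB', (256, 256), 'white')
    B_img = Image.new('RGB', (256, 256), 'black')
    AB = Image.new('RGB', (512, 256))
    AB.paste(A_img, (0, 0))                    # left half  -> domain A
    AB.paste(B_img, (256, 0))                  # right half -> domain B
    print(AB.size)                             # (512, 256), split at w2 = 256 in __getitem__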
|
import os.path
from .base_dataset import BaseDataset, get_transform
from .image_folder import make_dataset
from PIL import Image
import random
class UnalignedDataset(BaseDataset):
"""
This dataset class can load unaligned/unpaired datasets.
It requires two directories to host training images from domain A '/path/to/data/trainA'
and from domain B '/path/to/data/trainB' respectively.
You can train the model with the dataset flag '--dataroot /path/to/data'.
Similarly, you need to prepare two directories:
'/path/to/data/testA' and '/path/to/data/testB' during test time.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA'
self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B') # create a path '/path/to/data/trainB'
self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
self.A_size = len(self.A_paths) # get the size of dataset A
self.B_size = len(self.B_paths) # get the size of dataset B
btoA = self.opt.direction == 'BtoA'
input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image
output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image
self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) -- an image in the input domain
B (tensor) -- its corresponding image in the target domain
A_paths (str) -- image paths
B_paths (str) -- image paths
"""
        A_path = self.A_paths[index % self.A_size]  # make sure the index is within the range of dataset A
        if self.opt.serial_batches:   # use a fixed, deterministic pairing of A and B
index_B = index % self.B_size
else: # randomize the index for domain B to avoid fixed pairs.
index_B = random.randint(0, self.B_size - 1)
B_path = self.B_paths[index_B]
A_img = Image.open(A_path).convert('RGB')
B_img = Image.open(B_path).convert('RGB')
# apply image transformation
A = self.transform_A(A_img)
B = self.transform_B(B_img)
return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
def __len__(self):
"""Return the total number of images in the dataset.
        As we have two datasets with potentially different numbers of images,
        we take the maximum of the two.
"""
return max(self.A_size, self.B_size)
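# Index-pairing sketch (illustration only): because __len__ is max(A_size, B_size), the A
# index wraps with a modulo, while B is either wrapped the same way (serial_batches) or
# drawn at random so that no fixed A/B pairs are formed.
if __name__ == '__main__':
    A_size, B_size = 5, 3
    for index in range(max(A_size, B_size)):
        print(index % A_size, index % B_size)  # serial pairing: (0,0) (1,1) (2,2) (3,0) (4,1)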
|
from torchbenchmark.util.framework.timm.model_factory import TimmModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(TimmModel):
task = COMPUTER_VISION.GENERATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, model_name='vit_small_patch16_224', device=device,
batch_size=batch_size, extra_args=extra_args)
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
# Original train batch size per device: 8
# Source: https://github.com/huggingface/transformers/blob/master/examples/flax/language-modeling/run_t5_mlm_flax.py#L83
DEFAULT_TRAIN_BSIZE = 8
# Original eval batch size per device: 8
    # Downscale to 1 to fit on the Nvidia T4 GPUs used by the infra
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="hf_T5_base", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|
from torchbenchmark.util.framework.timm.model_factory import TimmModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(TimmModel):
task = COMPUTER_VISION.CLASSIFICATION
# Original train batch size 128, hardware Nvidia rtx 3090
# Source: https://gist.github.com/rwightman/bb59f9e245162cee0e38bd66bd8cd77f#file-bench_by_train-csv-L147
# Eval batch size 256, hardware Nvidia rtx 3090
# Source: https://github.com/rwightman/pytorch-image-models/blob/f7d210d759beb00a3d0834a3ce2d93f6e17f3d38/results/model_benchmark_amp_nchw_rtx3090.csv
# Downscale to 128 to fit T4
DEFAULT_TRAIN_BSIZE = 128
DEFAULT_EVAL_BSIZE = 128
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, model_name='dm_nfnet_f0', device=device,
batch_size=batch_size, extra_args=extra_args)
|
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from functorch import make_functional_with_buffers, vmap, grad
import functools
from pathlib import Path
from typing import Tuple
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import OTHER
def loss_for_task(net, n_inner_iter, x_spt, y_spt, x_qry, y_qry):
params, buffers, fnet = net
querysz = x_qry.size(0)
def compute_loss(new_params, buffers, x, y):
logits = fnet(new_params, buffers, x)
loss = F.cross_entropy(logits, y)
return loss
new_params = params
for _ in range(n_inner_iter):
grads = grad(compute_loss)(new_params, buffers, x_spt, y_spt)
        new_params = [p - g * 1e-1 for p, g in zip(new_params, grads)]
# The final set of adapted parameters will induce some
# final loss and accuracy on the query dataset.
# These will be used to update the model's meta-parameters.
qry_logits = fnet(new_params, buffers, x_qry)
qry_loss = F.cross_entropy(qry_logits, y_qry)
    qry_acc = (qry_logits.argmax(dim=1) == y_qry).sum() / querysz
return qry_loss, qry_acc
class Model(BenchmarkModel):
task = OTHER.OTHER_TASKS
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
ALLOW_CUSTOMIZE_BSIZE = False
# TODO: There _should_ be a way to plug in an optim here, but this
# can be a next step. For now, the optim is not customizable.
CANNOT_SET_CUSTOM_OPTIMIZER = True
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
n_way = 5
inplace_relu = True
net = nn.Sequential(
nn.Conv2d(1, 64, 3),
nn.BatchNorm2d(64, affine=True, track_running_stats=False),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, affine=True, track_running_stats=False),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, affine=True, track_running_stats=False),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(64, n_way)).to(device)
self.model = net
root = str(Path(__file__).parent.parent)
self.meta_inputs = torch.load(f'{root}/maml_omniglot/batch.pt')
self.meta_inputs = tuple([torch.from_numpy(i).to(self.device) for i in self.meta_inputs])
self.example_inputs = (self.meta_inputs[0][0],)
def get_module(self):
return self.model, self.example_inputs
def train(self):
model = self.model
model.train()
fnet, params, buffers = make_functional_with_buffers(self.model)
net = (params, buffers, fnet)
meta_opt = optim.Adam(params, lr=1e-3)
# Sample a batch of support and query images and labels.
x_spt, y_spt, x_qry, y_qry = self.meta_inputs
task_num, setsz, c_, h, w = x_spt.size()
n_inner_iter = 5
meta_opt.zero_grad()
# In parallel, trains one model per task. There is a support (x, y)
# for each task and a query (x, y) for each task.
compute_loss_for_task = functools.partial(loss_for_task, net, n_inner_iter)
qry_losses, qry_accs = vmap(compute_loss_for_task)(x_spt, y_spt, x_qry, y_qry)
# Compute the maml loss by summing together the returned losses.
qry_losses.sum().backward()
meta_opt.step()
def eval(self) -> Tuple[torch.Tensor]:
model, (example_input,) = self.get_module()
model.eval()
with torch.no_grad():
out = model(example_input)
return (out, )
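# Single-task sketch (illustration only, not used by the benchmark): the same functional
# inner-loop step performed in loss_for_task, written out for one tiny model so the
# make_functional / grad pattern is easier to follow. The model and data are placeholders.
if __name__ == '__main__':
    from functorch import make_functional
    lin = nn.Linear(4, 2)
    fnet, params = make_functional(lin)
    x, y = torch.randn(8, 4), torch.randint(0, 2, (8,))
    grads = grad(lambda p: F.cross_entropy(fnet(p, x), y))(params)
    adapted = [p - 1e-1 * g for p, g in zip(params, grads)]   # one manual SGD step
    print(F.cross_entropy(fnet(adapted, x), y).item())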
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
import torch
from dalle2_pytorch import DALLE2, Unet, Decoder, DiffusionPriorNetwork, DiffusionPrior, OpenAIClipAdapter
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(BenchmarkModel):
task = COMPUTER_VISION.GENERATION
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
CANNOT_SET_CUSTOM_OPTIMIZER = True
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
if self.device == "cpu":
raise NotImplementedError("DALL-E 2 Not Supported on CPU")
self.clip = OpenAIClipAdapter().to(self.device)
self.sample_text = self.example_input = torch.randint(0, 49408, (self.batch_size, 256)).to(self.device)
self.sample_images = torch.randn(self.batch_size, 3, 256, 256).to(self.device)
prior_network = DiffusionPriorNetwork(
dim = 512,
depth = 6,
dim_head = 64,
heads = 8
).to(self.device)
diffusion_prior = DiffusionPrior(
net = prior_network,
clip = self.clip,
timesteps = 1,
cond_drop_prob = 0.2
).to(self.device)
unet1 = Unet(
dim = 128,
image_embed_dim = 512,
cond_dim = 128,
channels = 3,
dim_mults=(1, 2, 4, 8),
text_embed_dim = 512,
cond_on_text_encodings = True # set to True for any unets that need to be conditioned on text encodings (ex. first unet in cascade)
).to(self.device)
unet2 = Unet(
dim = 16,
image_embed_dim = 512,
cond_dim = 128,
channels = 3,
dim_mults = (1, 2, 4, 8, 16)
).to(self.device)
decoder = Decoder(
unet = (unet1, unet2),
image_sizes = (128, 256),
clip = self.clip,
timesteps = 1,
sample_timesteps = (1, 1),
image_cond_drop_prob = 0.1,
text_cond_drop_prob = 0.5
).to(self.device)
self.model = DALLE2(prior=diffusion_prior, decoder=decoder).to(self.device)
if test == "train":
self.model.prior.train()
self.model.decoder.train()
elif test == "eval":
self.model.prior.eval()
self.model.decoder.eval()
def get_module(self):
return self.model, (self.example_input,)
def set_module(self, new_model):
self.model = new_model
def eval(self):
model, inputs = self.get_module()
with torch.no_grad():
images = model(*inputs)
return (images,)
def train(self):
# openai pretrained clip - defaults to ViT-B/32
clip = self.clip
# prior networks (with transformer)
diffusion_prior = self.model.prior
loss = diffusion_prior(self.sample_text, self.sample_images)
loss.backward()
# decoder (with unet)
decoder = self.model.decoder
loss = decoder(self.sample_images, self.sample_text, unet_number=1)
loss.backward()
loss = decoder(self.sample_images, self.sample_text, unet_number=2)
loss.backward()
|
import os
import patch
import subprocess
import sys
def patch_dalle2():
import dalle2_pytorch
current_dir = os.path.dirname(os.path.abspath(__file__))
dalle2_dir = os.path.dirname(dalle2_pytorch.__file__)
dalle2_patch = patch.fromfile(os.path.join(current_dir, "dalle2_pytorch.patch"))
if not dalle2_patch.apply(strip=1, root=dalle2_dir):
print("Failed to patch dalle2_pytorch/dalle2_pytorch.py. Exit.")
exit(1)
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
# DALLE2_pytorch requires embedding-reader
# https://github.com/lucidrains/DALLE2-pytorch/blob/00e07b7d61e21447d55e6d06d5c928cf8b67601d/setup.py#L34
# embedding-reader requires an old version of pandas and pyarrow
# https://github.com/rom1504/embedding-reader/blob/a4fd55830a502685600ed8ef07947cd1cb92b083/requirements.txt#L5
# So we need to reinstall a newer version of pandas and pyarrow, to be compatible with other models
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-U', 'pandas', 'pyarrow'])
if __name__ == '__main__':
pip_install_requirements()
    patch_dalle2()
|
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel
from torchbenchmark.tasks import COMPUTER_VISION
import torchvision.models as models
class Model(TorchVisionModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(model_name="mobilenet_v3_large", test=test, device=device,
batch_size=batch_size, weights=models.MobileNet_V3_Large_Weights.IMAGENET1K_V1, extra_args=extra_args)
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 8
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="hf_Albert", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|
from torchbenchmark.util.framework.timm.model_factory import TimmModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(TimmModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, model_name='efficientnet_b0', device=device,
batch_size=batch_size, extra_args=extra_args)
|
"""
Maskrcnn model from torchvision
"""
import torch
import os
import itertools
import random
import numpy as np
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
from pathlib import Path
from typing import Tuple
# Model specific imports
import torchvision
from .coco_utils import ConvertCocoPolysToMask
from torchvision.datasets.coco import CocoDetection
# silence some spam
from pycocotools import coco
coco.print = lambda *args: None
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
DATA_DIR = os.path.join(CURRENT_DIR.parent.parent, "data", ".data", "coco2017-minimal")
assert os.path.exists(DATA_DIR), "Couldn't find coco2017 minimal data dir, please run install.py again."
COCO_DATA_KEY = "coco_2017_val_100"
COCO_DATA = {
"coco_2017_val_100": ("coco/val2017", "coco/annotations/instances_val2017_100.json")
}
def _collate_fn(batch):
return tuple(zip(*batch))
def _prefetch(loader, device):
items = []
for images, targets in loader:
images = list(image.to(device) for image in images)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
items.append((images, targets))
return items
class Model(BenchmarkModel):
task = COMPUTER_VISION.DETECTION
# MaskRCNN doesn't actually take the inputs in batches; it takes a list
# of tensors which individually are CHW
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
NUM_OF_BATCHES = 1
ALLOW_CUSTOMIZE_BSIZE = False
def __init__(self, test, device, batch_size=None, extra_args=[], model_kwargs={}):
# reduce the eval batch size when running on CPU
# see: https://github.com/pytorch/benchmark/issues/895
if device == "cpu":
self.DEFAULT_EVAL_BSIZE = 1
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.model = torchvision.models.detection.maskrcnn_resnet50_fpn(
weights=torchvision.models.detection.MaskRCNN_ResNet50_FPN_Weights.COCO_V1, **model_kwargs
).to(self.device)
# setup optimizer
# optimizer parameters copied from
# https://github.com/pytorch/vision/blob/30f4d108319b0cd28ae5662947e300aad98c32e9/references/detection/train.py#L77
lr = 0.02
momentum = 0.9
weight_decay = 1e-4
params = [p for p in self.model.parameters() if p.requires_grad]
self.optimizer = torch.optim.SGD(params, lr=lr, momentum=momentum, weight_decay=weight_decay)
transforms = ConvertCocoPolysToMask()
dataset = CocoDetection(root=os.path.join(DATA_DIR, COCO_DATA[COCO_DATA_KEY][0]),
annFile=os.path.join(DATA_DIR, COCO_DATA[COCO_DATA_KEY][1]),
transforms=transforms)
sampler = torch.utils.data.SequentialSampler(dataset)
self.data_loader = _prefetch(torch.utils.data.DataLoader(dataset, batch_size=self.batch_size,
sampler=sampler,
collate_fn=_collate_fn), self.device)
def get_module(self):
self.model.eval()
for (example_inputs, _example_targets) in self.data_loader:
return self.model, (example_inputs, )
def train(self):
self.model.train()
for _batch_id, (images, targets) in zip(range(self.NUM_OF_BATCHES), self.data_loader):
# images = list(image.to(self.device) for image in images)
# targets = [{k: v.to(self.device) for k, v in t.items()} for t in targets]
loss_dict = self.model(images, targets)
losses = sum(loss for loss in loss_dict.values())
self.optimizer.zero_grad()
losses.backward()
self.optimizer.step()
def eval(self) -> Tuple[torch.Tensor]:
self.model.eval()
with torch.no_grad():
for _batch_id, (images, _targets) in zip(range(self.NUM_OF_BATCHES), self.data_loader):
out = self.model(images)
out = list(map(lambda x: x.values(), out))
return tuple(itertools.chain(*out))
|
import sys
import subprocess
from utils import s3_utils
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
s3_utils.checkout_s3_data("INPUT_TARBALLS", "coco2017-minimal.tar.gz", decompress=True)
pip_install_requirements()
|
import torch
from pycocotools import mask as coco_mask
from torchvision.transforms import functional as F
def convert_coco_poly_to_mask(segmentations, height, width):
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
if len(mask.shape) < 3:
mask = mask[..., None]
mask = torch.as_tensor(mask, dtype=torch.uint8)
mask = mask.any(dim=2)
masks.append(mask)
if masks:
masks = torch.stack(masks, dim=0)
else:
masks = torch.zeros((0, height, width), dtype=torch.uint8)
return masks
class ConvertCocoPolysToMask:
def __call__(self, image, target):
w, h = image.size
image_id = target[0]["image_id"] if target else []
image_id = torch.tensor([image_id])
anno = target
anno = [obj for obj in anno if obj["iscrowd"] == 0]
boxes = [obj["bbox"] for obj in anno]
# guard against no boxes via resizing
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2]
boxes[:, 0::2].clamp_(min=0, max=w)
boxes[:, 1::2].clamp_(min=0, max=h)
classes = [obj["category_id"] for obj in anno]
classes = torch.tensor(classes, dtype=torch.int64)
segmentations = [obj["segmentation"] for obj in anno]
masks = convert_coco_poly_to_mask(segmentations, h, w)
keypoints = None
if anno and "keypoints" in anno[0]:
keypoints = [obj["keypoints"] for obj in anno]
keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
num_keypoints = keypoints.shape[0]
if num_keypoints:
keypoints = keypoints.view(num_keypoints, -1, 3)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
classes = classes[keep]
masks = masks[keep]
if keypoints is not None:
keypoints = keypoints[keep]
target = {}
target["boxes"] = boxes
target["labels"] = classes
target["masks"] = masks
target["image_id"] = image_id
if keypoints is not None:
target["keypoints"] = keypoints
# for conversion to coco api
area = torch.tensor([obj["area"] for obj in anno])
iscrowd = torch.tensor([obj["iscrowd"] for obj in anno])
target["area"] = area
target["iscrowd"] = iscrowd
# Convert image from PIL to tensor
image = F.pil_to_tensor(image)
image = F.convert_image_dtype(image)
return image, target
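# Worked example (illustration only): COCO stores boxes as [x, y, width, height]; the
# in-place ops in __call__ above convert them to [x1, y1, x2, y2] and clamp to the image.
if __name__ == '__main__':
    boxes = torch.as_tensor([[10.0, 20.0, 30.0, 40.0]])   # xywh
    boxes[:, 2:] += boxes[:, :2]                           # -> [[10, 20, 40, 60]] xyxy
    boxes[:, 0::2].clamp_(min=0, max=50)                   # clamp x coords to width 50
    boxes[:, 1::2].clamp_(min=0, max=50)                   # clamp y coords to height 50
    print(boxes)                                           # tensor([[10., 20., 40., 50.]])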
|
import os
import logging
import torch
from pathlib import Path
from contextlib import suppress
# TorchBench imports
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
# effdet imports
from effdet import create_model, create_loader
from effdet.data import resolve_input_config
# timm imports
from timm.models.layers import set_layer_config
from timm.optim import create_optimizer
from timm.utils import ModelEmaV2, NativeScaler
from timm.scheduler import create_scheduler
# local imports
from .args import get_args
from .train import train_epoch, validate
from .loader import create_datasets_and_loaders
from torch.utils._pytree import tree_map
from typing import Tuple
# setup coco2017 input path
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
DATA_DIR = os.path.join(CURRENT_DIR.parent.parent, "data", ".data", "coco2017-minimal", "coco")
def prefetch(loader, device, num_of_batches):
prefetched_loader = []
for _bid, (input, target) in zip(range(num_of_batches), loader):
prefetched_loader.append((tree_map(lambda x: x.to(device, dtype=torch.float32) if isinstance(x, torch.Tensor) else x, input),
tree_map(lambda x: x.to(device, dtype=torch.float32) if isinstance(x, torch.Tensor) else x, target)))
return prefetched_loader
class Model(BenchmarkModel):
task = COMPUTER_VISION.DETECTION
# Original Train batch size 32 on 2x RTX 3090 (24 GB cards)
# Downscale to batch size 16 on single GPU
DEFAULT_TRAIN_BSIZE = 16
DEFAULT_EVAL_BSIZE = 128
# prefetch only 1 batch
NUM_OF_BATCHES = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
if not device == "cuda":
# Only implemented on CUDA because the original model code explicitly calls the `Tensor.cuda()` API
# https://github.com/rwightman/efficientdet-pytorch/blob/9cb43186711d28bd41f82f132818c65663b33c1f/effdet/data/loader.py#L114
raise NotImplementedError("The original model code forces the use of CUDA.")
# generate arguments
args = get_args()
# setup train and eval batch size
args.batch_size = self.batch_size
# Disable distributed
args.distributed = False
args.device = self.device
args.torchscript = False
args.world_size = 1
args.rank = 0
args.pretrained_backbone = not args.no_pretrained_backbone
args.prefetcher = not args.no_prefetcher
args.root = DATA_DIR
with set_layer_config(scriptable=args.torchscript):
timm_extra_args = {}
if args.img_size is not None:
timm_extra_args = dict(image_size=(args.img_size, args.img_size))
if test == "train":
model = create_model(
model_name=args.model,
bench_task='train',
num_classes=args.num_classes,
pretrained=args.pretrained,
pretrained_backbone=args.pretrained_backbone,
redundant_bias=args.redundant_bias,
label_smoothing=args.smoothing,
legacy_focal=args.legacy_focal,
jit_loss=args.jit_loss,
soft_nms=args.soft_nms,
bench_labeler=args.bench_labeler,
checkpoint_path=args.initial_checkpoint,
)
elif test == "eval":
model = create_model(
model_name=args.model,
bench_task='predict',
num_classes=args.num_classes,
pretrained=args.pretrained,
redundant_bias=args.redundant_bias,
soft_nms=args.soft_nms,
checkpoint_path=args.checkpoint,
checkpoint_ema=args.use_ema,
**timm_extra_args,
)
model_config = model.config # grab before we obscure with DP/DDP wrappers
self.model = model.to(device)
if args.channels_last:
self.model = self.model.to(memory_format=torch.channels_last)
self.loader_train, self.loader_eval, self.evaluator, _, dataset_eval = create_datasets_and_loaders(args, model_config)
self.amp_autocast = suppress
if test == "train":
self.optimizer = create_optimizer(args, model)
self.loss_scaler = None
self.model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
self.model_ema = ModelEmaV2(model, decay=args.model_ema_decay)
self.lr_scheduler, self.num_epochs = create_scheduler(args, self.optimizer)
if model_config.num_classes < self.loader_train.dataset.parser.max_label:
logging.error(
f'Model {model_config.num_classes} has fewer classes than dataset {self.loader_train.dataset.parser.max_label}.')
exit(1)
if model_config.num_classes > self.loader_train.dataset.parser.max_label:
logging.warning(
f'Model {model_config.num_classes} has more classes than dataset {self.loader_train.dataset.parser.max_label}.')
self.loader_train = prefetch(self.loader_train, self.device, self.NUM_OF_BATCHES)
self.loader_eval = prefetch(self.loader_eval, self.device, self.NUM_OF_BATCHES)
self.loader = self.loader_train
elif test == "eval":
# Create eval loader
input_config = resolve_input_config(args, model_config)
self.loader = create_loader(
dataset_eval,
input_size=input_config['input_size'],
batch_size=args.batch_size,
use_prefetcher=args.prefetcher,
interpolation=args.eval_interpolation,
fill_color=input_config['fill_color'],
mean=input_config['mean'],
std=input_config['std'],
num_workers=args.workers,
pin_mem=args.pin_mem)
self.loader = prefetch(self.loader, self.device, self.NUM_OF_BATCHES)
self.args = args
# Only run 1 epoch
self.num_epochs = 1
def get_module(self):
for _, (input, target) in zip(range(self.NUM_OF_BATCHES), self.loader):
return self.model, (input, target)
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
        self.lr_scheduler, self.num_epochs = create_scheduler(self.args, self.optimizer)
def enable_amp(self):
if self.device == "cuda":
self.amp_autocast = torch.cuda.amp.autocast
elif self.device == "cpu":
self.amp_autocast = torch.cpu.amp.autocast
self.loss_scaler = NativeScaler()
def train(self):
eval_metric = self.args.eval_metric
for epoch in range(self.num_epochs):
train_metrics = train_epoch(
epoch, self.model, self.loader_train,
self.optimizer, self.args,
lr_scheduler=self.lr_scheduler, amp_autocast = self.amp_autocast,
loss_scaler=self.loss_scaler, model_ema=self.model_ema,
num_batch=self.NUM_OF_BATCHES,
)
# TorchBench: skip validation step in train
# the overhead of evaluating with coco style datasets is fairly high, so just ema or non, not both
# if self.model_ema is not None:
# eval_metrics = validate(self.model_ema.module, self.loader_eval, self.args, self.evaluator, log_suffix=' (EMA)', num_batch=self.NUM_OF_BATCHES)
# else:
# eval_metrics = validate(self.model, self.loader_eval, self.args, self.evaluator, num_batch=self.NUM_OF_BATCHES)
# if self.lr_scheduler is not None:
# # step LR for next epoch
# self.lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])
def eval(self) -> Tuple[torch.Tensor]:
with torch.no_grad():
for input, target in self.loader:
with self.amp_autocast():
output = self.model(input, img_info=target)
self.evaluator.add_predictions(output, target)
return (output, )
|
from effdet.data import resolve_input_config, SkipSubset
from effdet import create_loader, create_dataset, create_evaluator
from effdet.anchors import Anchors, AnchorLabeler
from effdet.data.dataset_config import CocoCfg
from dataclasses import dataclass, field
from typing import Dict
@dataclass
class Coco2017MinimalCfg(CocoCfg):
variant: str = '2017-minimal'
splits: Dict[str, dict] = field(default_factory=lambda: dict(
train=dict(ann_filename='annotations/instances_val2017_100.json', img_dir='val2017', has_labels=True),
val=dict(ann_filename='annotations/instances_val2017_100.json', img_dir='val2017', has_labels=True),
))
def create_datasets_and_loaders(
args,
model_config,
transform_train_fn=None,
transform_eval_fn=None,
collate_fn=None,
):
""" Setup datasets, transforms, loaders, evaluator.
Args:
args: Command line args / config for training
model_config: Model specific configuration dict / struct
transform_train_fn: Override default image + annotation transforms (see note in loaders.py)
transform_eval_fn: Override default image + annotation transforms (see note in loaders.py)
collate_fn: Override default fast collate function
    Returns:
        Train loader, validation loader, evaluator, train dataset, validation dataset
"""
input_config = resolve_input_config(args, model_config=model_config)
dataset_train, dataset_eval = create_dataset(args.dataset, args.root, custom_dataset_cfg=Coco2017MinimalCfg())
# setup labeler in loader/collate_fn if not enabled in the model bench
labeler = None
if not args.bench_labeler:
labeler = AnchorLabeler(
Anchors.from_config(model_config), model_config.num_classes, match_threshold=0.5)
loader_train = create_loader(
dataset_train,
input_size=input_config['input_size'],
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
# color_jitter=args.color_jitter,
# auto_augment=args.aa,
interpolation=args.train_interpolation or input_config['interpolation'],
fill_color=input_config['fill_color'],
mean=input_config['mean'],
std=input_config['std'],
num_workers=args.workers,
distributed=args.distributed,
pin_mem=args.pin_mem,
anchor_labeler=labeler,
transform_fn=transform_train_fn,
collate_fn=collate_fn,
)
if args.val_skip > 1:
dataset_eval = SkipSubset(dataset_eval, args.val_skip)
loader_eval = create_loader(
dataset_eval,
input_size=input_config['input_size'],
batch_size=args.batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=input_config['interpolation'],
fill_color=input_config['fill_color'],
mean=input_config['mean'],
std=input_config['std'],
num_workers=args.workers,
distributed=args.distributed,
pin_mem=args.pin_mem,
anchor_labeler=labeler,
transform_fn=transform_eval_fn,
collate_fn=collate_fn,
)
evaluator = create_evaluator(args.dataset, loader_eval.dataset, distributed=args.distributed, pred_yxyx=False)
    return loader_train, loader_eval, evaluator, dataset_train, dataset_eval
|
import torch
from collections import OrderedDict
from contextlib import suppress
from timm.utils import AverageMeter, reduce_tensor
def train_epoch(
epoch, model, loader, optimizer, args,
lr_scheduler=None, saver=None, output_dir='', amp_autocast=suppress, loss_scaler=None, model_ema=None,
num_batch=1):
# batch_time_m = AverageMeter()
# data_time_m = AverageMeter()
losses_m = AverageMeter()
model.train()
# end = time.time()
last_idx = len(loader) - 1
num_updates = epoch * len(loader)
for batch_idx, (input, target) in zip(range(num_batch), loader):
last_batch = batch_idx == last_idx
# data_time_m.update(time.time() - end)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input, target)
loss = output['loss']
if not args.distributed:
losses_m.update(loss.item(), input.size(0))
optimizer.zero_grad()
if loss_scaler is not None:
loss_scaler(loss, optimizer, clip_grad=args.clip_grad, parameters=model.parameters())
else:
loss.backward()
if args.clip_grad:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
optimizer.step()
torch.cuda.synchronize()
if model_ema is not None:
model_ema.update(model)
num_updates += 1
# batch_time_m.update(time.time() - end)
if last_batch or batch_idx % args.log_interval == 0:
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
# if args.distributed:
# reduced_loss = reduce_tensor(loss.data, args.world_size)
# losses_m.update(reduced_loss.item(), input.size(0))
#
# if args.local_rank == 0:
# logging.info(
# 'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
# 'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) '
# 'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
# '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
# 'LR: {lr:.3e} '
# 'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
# epoch,
# batch_idx, len(loader),
# 100. * batch_idx / last_idx,
# loss=losses_m,
# batch_time=batch_time_m,
# rate=input.size(0) * args.world_size / batch_time_m.val,
# rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
# lr=lr,
# data_time=data_time_m))
# if args.save_images and output_dir:
# torchvision.utils.save_image(
# input,
# os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),
# padding=0,
# normalize=True)
# if saver is not None and args.recovery_interval and (
# last_batch or (batch_idx + 1) % args.recovery_interval == 0):
# saver.save_recovery(epoch, batch_idx=batch_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
# end = time.time()
# end for
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
return OrderedDict([('loss', losses_m.avg)])
def validate(model, loader, args, evaluator=None, log_suffix='',
num_batch=1):
# batch_time_m = AverageMeter()
losses_m = AverageMeter()
model.eval()
# end = time.time()
# last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in zip(range(num_batch), loader):
# last_batch = batch_idx == last_idx
output = model(input, target)
loss = output['loss']
if evaluator is not None:
evaluator.add_predictions(output['detections'], target)
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
else:
reduced_loss = loss.data
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
# batch_time_m.update(time.time() - end)
# end = time.time()
# if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
# log_name = 'Test' + log_suffix
# logging.info(
# '{0}: [{1:>4d}/{2}] '
# 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
# 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '.format(
# log_name, batch_idx, last_idx, batch_time=batch_time_m, loss=losses_m))
metrics = OrderedDict([('loss', losses_m.avg)])
if evaluator is not None:
metrics['map'] = evaluator.evaluate()
    return metrics
|
import yaml
import argparse
from timm.utils import add_bool_arg
def get_args(config_file=None):
def _parse_args():
if config_file:
with open(config_file, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# There may be remaining unrecognized options
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args, _ = parser.parse_known_args()
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
    # The --config parser below is kept from the original effdet training script; in this
    # adaptation the YAML path is passed in through the `config_file` argument instead, and
    # its key/values override the defaults of the main parser below.
parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset / Model parameters
# parser.add_argument('root', metavar='DIR',
# help='path to dataset')
    parser.add_argument('--dataset', default='coco', type=str, metavar='DATASET',
                        help='Name of dataset to train (default: "coco")')
    parser.add_argument('--model', default='tf_efficientdet_d1', type=str, metavar='MODEL',
                        help='Name of model to train (default: "tf_efficientdet_d1")')
add_bool_arg(parser, 'redundant-bias', default=None, help='override model config for redundant bias')
add_bool_arg(parser, 'soft-nms', default=None, help='override model config for soft-nms')
parser.add_argument('--val-skip', type=int, default=0, metavar='N',
help='Skip every N validation samples.')
parser.add_argument('--num-classes', type=int, default=None, metavar='N',
help='Override num_classes in model config if set. For fine-tuning from pretrained.')
parser.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--no-pretrained-backbone', action='store_true', default=False,
help='Do not start with pretrained backbone weights, fully random.')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='Resume full model and optimizer state from checkpoint (default: none)')
parser.add_argument('--no-resume-opt', action='store_true', default=False,
help='prevent resume of optimizer state when resuming model')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                        help='Override std deviation of the dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--fill-color', default=None, type=str, metavar='NAME',
help='Image augmentation fill (background) color ("mean" or int)')
parser.add_argument('--batch-size', type=int, default=32, metavar='N',
help='input batch size for training (default: 32)')
parser.add_argument('--clip-grad', type=float, default=10.0, metavar='NORM',
help='Clip gradient norm (default: 10.0)')
# Optimizer parameters
parser.add_argument('--opt', default='momentum', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "momentum")')
parser.add_argument('--opt-eps', default=1e-3, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-3)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=4e-5,
help='weight decay (default: 0.00004)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
help='learning rate cycle len multiplier (default: 1.0)')
parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
help='learning rate cycle limit')
parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR',
help='warmup learning rate (default: 0.0001)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--epochs', type=int, default=300, metavar='N',
                        help='number of epochs to train (default: 300)')
parser.add_argument('--start-epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                        help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default=None, metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original". (default: None)')
parser.add_argument('--reprob', type=float, default=0., metavar='PCT',
help='Random erase prob (default: 0.)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--train-interpolation', type=str, default='random',
help='Training interpolation (random, bilinear, bicubic default: "random")')
# loss
parser.add_argument('--smoothing', type=float, default=None, help='override model config label smoothing')
add_bool_arg(parser, 'jit-loss', default=None, help='override model config for torchscript jit loss fn')
add_bool_arg(parser, 'legacy-focal', default=None, help='override model config to use legacy focal loss')
# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=False,
help='Enable tracking moving average of model weights')
parser.add_argument('--model-ema-decay', type=float, default=0.9998,
help='decay factor for model weights moving average (default: 0.9998)')
# Misc
parser.add_argument('--sync-bn', action='store_true',
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--dist-bn', type=str, default='',
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--recovery-interval', type=int, default=0, metavar='N',
help='how many batches to wait before writing recovery checkpoint')
parser.add_argument('-j', '--workers', type=int, default=0, metavar='N',
help='how many training processes to use (default: 0)')
parser.add_argument('--save-images', action='store_true', default=False,
help='save images of input batches every log interval for debugging')
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--apex-amp', action='store_true', default=False,
help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
help='Use Native Torch AMP mixed precision')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
help='convert model to torchscript for inference')
add_bool_arg(parser, 'bench-labeler', default=False,
help='label targets in model bench, increases GPU load at expense of loader processes')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='path to output folder (default: none, current dir)')
parser.add_argument('--eval-metric', default='map', type=str, metavar='EVAL_METRIC',
help='Best metric (default: "map")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument("--local_rank", default=0, type=int)
# Evaluation parameters
parser.add_argument('--eval-interpolation', default='bilinear', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--use-ema', dest='use_ema', action='store_true',
help='use ema version of weights if present')
args, _ = _parse_args()
return args
|
import os
import sys
import patch
from pathlib import Path
import subprocess
from utils import s3_utils
def patch_effdet():
import effdet
current_dir = os.path.dirname(os.path.abspath(__file__))
patch_file = os.path.join(current_dir, "effdet.patch")
target_dir = os.path.dirname(effdet.__file__)
p = patch.fromfile(patch_file)
if not p.apply(strip=1, root=target_dir):
print("Failed to patch effdet. Exit.")
exit(1)
def patch_pycocotools():
import pycocotools
current_dir = os.path.dirname(os.path.abspath(__file__))
patch_file = os.path.join(current_dir, "pycocotools.patch")
target_dir = os.path.dirname(os.path.abspath(pycocotools.__file__))
p = patch.fromfile(patch_file)
if not p.apply(strip=1, root=target_dir):
print("Failed to patch pycocotools. Exit.")
exit(1)
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
s3_utils.checkout_s3_data("INPUT_TARBALLS", "coco2017-minimal.tar.gz", decompress=True)
pip_install_requirements()
patch_effdet()
patch_pycocotools()
|
import os
import torch
from torch.distributed._tensor import DeviceMesh
from torch.distributed.tensor.parallel import parallelize_module
from torch.distributed.tensor.parallel.style import ColwiseParallel, RowwiseParallel
from torchbenchmark.tasks import NLP
from ...util.model import BenchmarkModel
from .model import LLaMA
class Model(BenchmarkModel):
task = NLP.GENERATION
DEFAULT_EVAL_BSIZE = 1
def validate_environment(self):
if not torch.cuda.is_available() or "cuda" not in self.device:
return NotImplementedError("Model requires CUDA")
if not torch.cuda.is_bf16_supported():
return NotImplementedError("Model requires BF16")
if not hasattr(self, "_world_size"):
return NotImplementedError("Model needs to be run via dynamo torchbench and be provided distributed parameters")
if self._world_size != torch.cuda.device_count():
return NotImplementedError(
f"DTensor requires all local GPUs to be within the device mesh; "
f"found {torch.cuda.device_count()} local GPUs but world size is only {self._world_size}"
)
return None
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(
test=test,
device=device,
batch_size=batch_size,
extra_args=extra_args,
)
error = self.validate_environment()
if error:
raise error
self.model = LLaMA.from_name("7B", self._world_size).to(device=device, dtype=torch.bfloat16)
# Tensor parallelism using DTensor
mesh = DeviceMesh("cuda", list(range(self._world_size)))
for block in self.model.transformer.h:
# prepare attention weights to be parallelized
block.attn.prepare_qkv_for_dtensor_tp()
parallelize_module(
module=block,
device_mesh=mesh,
parallelize_plan={
"attn.c_attn_q": ColwiseParallel(),
"attn.c_attn_k": ColwiseParallel(),
"attn.c_attn_v": ColwiseParallel(),
"attn.c_proj": RowwiseParallel(),
"mlp.c_fc1": ColwiseParallel(),
"mlp.c_fc2": ColwiseParallel(),
"mlp.c_proj": RowwiseParallel(),
},
tp_mesh_dim=0,
)
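# Note on the plan above (an interpretation of the ColwiseParallel/RowwiseParallel layout, not
# part of the original comments): each rank keeps a column shard of the q/k/v and mlp.c_fc*
# weights and a row shard of the c_proj weights, so activations stay sharded inside a block and
# are reduced only at the block's output projections.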
max_batch_size = self.DEFAULT_EVAL_BSIZE
self.model.setup_caches(
max_batch_size=max_batch_size, max_seq_length=self.model.config.block_size
)
prompt_size = 10
idx = torch.randint(
self.model.config.vocab_size,
(max_batch_size, prompt_size),
dtype=torch.int32,
device=device,
)
input_pos = torch.arange(prompt_size, device=device)
self.example_inputs = [idx, input_pos]
def get_module(self):
return self.model, self.example_inputs
def train(self):
raise NotImplementedError("Training not supported for this model")
def eval(self):
raise NotImplementedError("Model needs to be run via dynamo torchbench and be provided distributed parameters")
|
"""Full definition of a LLaMA Language Model, all of it in this single file.
Based on the nanoGPT implementation: https://github.com/karpathy/nanoGPT.
"""
# mypy: ignore-errors
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from typing_extensions import Self
import torch
import torch.nn as nn
from torch.nn import functional as F
MaskCache = torch.Tensor
RoPECache = torch.Tensor
KVCache = Tuple[torch.Tensor, torch.Tensor]
def find_multiple(n: int, k: int) -> int:
if n % k == 0:
return n
return n + k - (n % k)
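# Example (illustrative): find_multiple(100, 64) == 128 and find_multiple(128, 64) == 128,
# i.e. n is rounded up to the next multiple of k.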
class LinearInt8(torch.nn.Module):
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: torch.Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = True,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.register_buffer("weight", torch.empty((out_features, in_features), dtype=torch.int8))
# if bias:
# self.register_buffer("bias", torch.empty(out_features, **factory_kwargs, dtype=torch.int8))
# else:
# self.bias('bias', None)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.linear(input, self.weight.to(dtype=input.dtype))
# nn.Linear = LinearInt8
@dataclass
class LLaMAConfig:
block_size: int = 2048
vocab_size: int = 32000
padded_vocab_size: Optional[int] = None
n_layer: int = 32
n_head: int = 32
n_embd: int = 4096
def __post_init__(self):
if self.padded_vocab_size is None:
self.padded_vocab_size = find_multiple(self.vocab_size, 64)
@classmethod
def from_name(cls, name: str) -> Self:
return cls(**llama_configs[name])
llama_configs = {
"7B": dict(n_layer=32, n_head=32, n_embd=4096),
"13B": dict(n_layer=40, n_head=40, n_embd=5120),
"30B": dict(n_layer=60, n_head=52, n_embd=6656),
"65B": dict(n_layer=80, n_head=64, n_embd=8192),
}
class KVCache(nn.Module):  # note: this class shadows the `KVCache` tuple alias defined above
@torch.no_grad()
def __init__(self, max_batch_size, max_seq_length, n_heads, head_size, device='cuda', dtype=torch.bfloat16):
super().__init__()
cache_shape = (max_batch_size, n_heads, max_seq_length, head_size)
self.k_cache = torch.nn.Parameter(torch.zeros(cache_shape, device=device, dtype=dtype))
self.v_cache = torch.nn.Parameter(torch.zeros(cache_shape, device=device, dtype=dtype))
@torch.no_grad()
def update(self, input_pos, k_val, v_val):
# input_pos: [S], k_val: [B, H, S, D]
assert input_pos.shape[0] == k_val.shape[2]
self.k_cache[:, :, input_pos] = k_val
self.v_cache[:, :, input_pos] = v_val
return self.k_cache, self.v_cache
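# Usage sketch (illustrative, not part of the benchmark): for a cache built with
# KVCache(max_batch_size=1, max_seq_length=2048, n_heads=32, head_size=128), a single decode
# step with input_pos = torch.tensor([7]) and k_val/v_val of shape [1, 32, 1, 128] writes
# position 7 in place and returns the full [1, 32, 2048, 128] key/value caches.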
class KVCacheAggregator(nn.Module):
def __init__(self):
super().__init__()
self.kv_caches = nn.ModuleList([])
def initialize(self, layers, max_batch_size, max_seq_length, n_heads, head_size, device='cuda', dtype=torch.bfloat16):
self.kv_caches = nn.ModuleList([KVCache(max_batch_size, max_seq_length, n_heads, head_size, device=device, dtype=dtype) for _ in range(layers)])
def __getitem__(self, idx):
return self.kv_caches[idx]
def clear(self):
self.kv_caches = nn.ParameterList([])
class LLaMA(nn.Module):
def __init__(self, config: LLaMAConfig, world_size: int) -> None:
super().__init__()
self.world_size = world_size
assert config.padded_vocab_size is not None
self.config = config
self.lm_head = nn.Linear(config.n_embd, config.padded_vocab_size, bias=False)
self.transformer = nn.ModuleDict(
dict(
wte=nn.Embedding(config.padded_vocab_size, config.n_embd),
h=nn.ModuleList(Block(config, self.world_size) for _ in range(config.n_layer)),
ln_f=RMSNorm(config.n_embd),
)
)
self.rope_cache: Optional[RoPECache] = None
self.mask_cache: Optional[MaskCache] = None
self.kv_caches = KVCacheAggregator()
self.max_batch_size = None
self.max_seq_length = None
def setup_caches(self, max_batch_size, max_seq_length, device='cuda', dtype=torch.bfloat16):
n_embd = self.config.n_embd // self.world_size
n_head = self.config.n_head // self.world_size
head_size = n_embd // n_head
self.max_seq_length = max_seq_length
self.max_batch_size = max_batch_size
self.kv_caches.initialize(layers=self.config.n_layer, max_batch_size=max_batch_size, max_seq_length=max_seq_length, n_heads=n_head, head_size=head_size)
self.rope_cache = build_rope_cache(
seq_len=self.config.block_size,
n_elem=head_size,
dtype=dtype,
device=device,
)
ones = torch.ones((self.config.block_size, self.config.block_size), device=device, dtype=torch.bool)
self.mask_cache = torch.tril(ones).unsqueeze(0).unsqueeze(0)
def _init_weights(self, module: nn.Module) -> None:
if isinstance(module, nn.Linear):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02 / math.sqrt(2 * self.config.n_layer))
elif isinstance(module, nn.Embedding):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02 / math.sqrt(2 * self.config.n_layer))
def forward(
self, idx: torch.Tensor, input_pos: Optional[torch.Tensor] = None
) -> Union[torch.Tensor, Tuple[torch.Tensor, List[KVCache]]]:
B, T = idx.size()
assert self.rope_cache is not None, "Caches must be initialized first"
block_size = self.config.block_size
max_seq_length = self.max_seq_length
if max_seq_length is None:
max_seq_length = block_size
assert T <= max_seq_length, f"Cannot forward sequence of length {T}, max seq length is only {max_seq_length}"
assert max_seq_length <= block_size, f"Cannot attend to {max_seq_length}, block size is only {block_size}"
assert T <= block_size, f"Cannot forward sequence of length {T}, block size is only {block_size}"
rope = self.rope_cache.index_select(0, input_pos)
mask = self.mask_cache.index_select(2, input_pos)
mask = mask[:, :, :, :max_seq_length]
# forward the model itself
x = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
for i, block in enumerate(self.transformer.h):
x, new_kv_cache = block(x, rope, mask, max_seq_length, input_pos, self.kv_caches[i])
x = self.transformer.ln_f(x)
logits = self.lm_head(x) # (b, t, vocab_size)
return logits
@classmethod
def from_name(cls, name: str, world_size: int) -> Self:
return cls(LLaMAConfig.from_name(name), world_size)
def reset_cache(self) -> None:
self.kv_caches.clear()
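# Minimal single-process usage sketch (assumes world_size=1 and a CUDA device with bf16 support;
# the benchmark itself drives this through torchbench with DTensor tensor parallelism):
#   model = LLaMA.from_name("7B", world_size=1).to(device="cuda", dtype=torch.bfloat16)
#   model.setup_caches(max_batch_size=1, max_seq_length=model.config.block_size)
#   idx = torch.randint(model.config.vocab_size, (1, 10), dtype=torch.int32, device="cuda")
#   logits = model(idx, input_pos=torch.arange(10, device="cuda"))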
class Block(nn.Module):
def __init__(self, config: LLaMAConfig, world_size: int) -> None:
super().__init__()
self.rms_1 = RMSNorm(config.n_embd)
self.attn = CausalSelfAttention(config, world_size)
self.rms_2 = RMSNorm(config.n_embd)
self.mlp = MLP(config)
def forward(
self,
x: torch.Tensor,
rope: RoPECache,
mask: MaskCache,
max_seq_length: int,
input_pos: Optional[torch.Tensor] = None,
kv_cache: Optional[KVCache] = None,
) -> Tuple[torch.Tensor, Optional[KVCache]]:
h, new_kv_cache = self.attn(self.rms_1(x), rope, mask, max_seq_length, input_pos, kv_cache)
x = x + h
x = x + self.mlp(self.rms_2(x))
return x, new_kv_cache
class CausalSelfAttention(nn.Module):
def __init__(self, config: LLaMAConfig, world_size: int) -> None:
super().__init__()
self.world_size = world_size
assert config.n_embd % config.n_head == 0
self.config = config
# key, query, value projections for all heads, but in a batch
self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False)
# output projection
self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=False)
self.n_head = config.n_head
self.n_embd = config.n_embd
self.block_size = config.block_size
def forward(
self,
x: torch.Tensor,
rope: RoPECache,
mask: MaskCache,
max_seq_length: int,
input_pos: Optional[torch.Tensor] = None,
kv_cache: Optional[KVCache] = None,
) -> Tuple[torch.Tensor, Optional[KVCache]]:
B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
_C = C // self.world_size
# calculate query, key, values for all heads in batch and move head forward to be the batch dim
q = self.c_attn_q(x)
k = self.c_attn_k(x)
v = self.c_attn_v(x)
n_head = self.n_head // self.world_size
head_size = _C // n_head
k = k.view(B, T, n_head, head_size)
q = q.view(B, T, n_head, head_size)
v = v.view(B, T, n_head, head_size)
q = apply_rope(q, rope)
k = apply_rope(k, rope)
k = k.transpose(1, 2) # (B, nh, T, hs)
q = q.transpose(1, 2) # (B, nh, T, hs)
v = v.transpose(1, 2) # (B, nh, T, hs)
if kv_cache is not None:
k, v = kv_cache.update(input_pos, k, v)
# efficient attention using Flash Attention CUDA kernels
# y = F.scaled_dot_product_attention(q, k, v)
y = F.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0)
y = y.transpose(1, 2).contiguous().view(B, T, _C) # re-assemble all head outputs side by side
# output projection
y = self.c_proj(y)
return y, kv_cache
def prepare_qkv_for_dtensor_tp(self):
attn = self.c_attn
assert attn.in_features % self.world_size == 0 # q, k, v must be shardeable
attn.out_features = attn.out_features // self.world_size
# Shard on dim 0 since attn.weight is transposed
# Shard q, k, v separately
q, k, v = attn.weight.split(self.config.n_embd, dim=0) # (C, C)
self.c_attn_q = nn.Linear(self.config.n_embd, self.config.n_embd, bias=False)
self.c_attn_q.weight = nn.Parameter(q)
self.c_attn_k = nn.Linear(self.config.n_embd, self.config.n_embd, bias=False)
self.c_attn_k.weight = nn.Parameter(k)
self.c_attn_v = nn.Linear(self.config.n_embd, self.config.n_embd, bias=False)
self.c_attn_v.weight = nn.Parameter(v)
del self.c_attn
class MLP(nn.Module):
def __init__(self, config: LLaMAConfig) -> None:
super().__init__()
hidden_dim = 4 * config.n_embd
n_hidden = int(2 * hidden_dim / 3)
n_hidden = find_multiple(n_hidden, 256)
self.c_fc1 = nn.Linear(config.n_embd, n_hidden, bias=False)
self.c_fc2 = nn.Linear(config.n_embd, n_hidden, bias=False)
self.c_proj = nn.Linear(n_hidden, config.n_embd, bias=False)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = F.silu(self.c_fc1(x)) * self.c_fc2(x)
x = self.c_proj(x)
return x
class RMSNorm(nn.Module):
"""Root Mean Square Layer Normalization.
Derived from https://github.com/bzhangGo/rmsnorm/blob/master/rmsnorm_torch.py. BSD 3-Clause License:
https://github.com/bzhangGo/rmsnorm/blob/master/LICENSE.
"""
def __init__(self, size: int, dim: int = -1, eps: float = 1e-5) -> None:
super().__init__()
self.scale = nn.Parameter(torch.ones(size))
self.eps = eps
self.dim = dim
def forward(self, x: torch.Tensor) -> torch.Tensor:
# NOTE: the original RMSNorm paper implementation is not equivalent
# norm_x = x.norm(2, dim=self.dim, keepdim=True)
# rms_x = norm_x * d_x ** (-1. / 2)
# x_normed = x / (rms_x + self.eps)
norm_x = torch.mean(x * x, dim=self.dim, keepdim=True)
x_normed = x * torch.rsqrt(norm_x + self.eps)
return self.scale * x_normed
def build_rope_cache(
seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000
) -> RoPECache:
"""Enhanced Transformer with Rotary Position Embedding.
Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/
transformers/rope/__init__.py. MIT License:
https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license.
"""
# $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=dtype, device=device) / n_elem))
# Create position indexes `[0, 1, ..., seq_len - 1]`
seq_idx = torch.arange(seq_len, dtype=dtype, device=device)
# Calculate the product of position index and $\theta_i$
idx_theta = torch.outer(seq_idx, theta).float()
cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)
# this is to mimic the behaviour of complex32, else we will get different results
if dtype in (torch.float16, torch.bfloat16, torch.int8):
cache = cache.half()
return cache
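# The returned cache has shape (seq_len, n_elem // 2, 2): a (cos, sin) pair per position and per
# rotary frequency, later gathered by position in LLaMA.forward via index_select.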
def apply_rope(x: torch.Tensor, rope_cache: RoPECache) -> torch.Tensor:
# truncate to support variable sizes
T = x.size(1)
rope_cache = rope_cache[:T]
# cast because the reference does
xshaped = x.float().reshape(*x.shape[:-1], -1, 2)
rope_cache = rope_cache.view(1, xshaped.size(1), 1, xshaped.size(3), 2)
x_out2 = torch.stack(
[
xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1],
xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1],
],
-1,
)
x_out2 = x_out2.flatten(3)
return x_out2.type_as(x)
|
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import torch
from .model import SequenceGenerator, create_model
import torch
class Model(BenchmarkModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
embed_dim = 1536
beam_size = 1
# This is quite a bit smaller than, e.g., T5, because this model is
# quite a bit slower to run
generate_size = 64
self.model = SequenceGenerator(
create_model(embed_dim),
beam_size,
generate_size,
).eval().to(self.device)
prompt_size = 64
vocab_size = 128 # cribbed from original script
self.example_inputs = (
torch.randint(1, vocab_size, (self.batch_size, prompt_size)).to(self.device),
)
def get_module(self):
return self.model, self.example_inputs
# The code included here is specialized for eval
def train(self):
raise NotImplementedError("training script not published")
def eval(self):
with torch.no_grad():
out = self.model(*self.example_inputs)
return (out,)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Portions of this code are derived from https://github.com/facebookresearch/metaseq
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.benchmark as benchmark
from torch import Tensor
from typing import Optional, Dict, Any
from tqdm import tqdm
# torch.set_float32_matmul_precision("high")
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float("-inf")).type_as(t)
def make_positions(tensor, padding_idx: int):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
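# Worked example (illustrative): with padding_idx = 1,
#   make_positions(torch.tensor([[5, 6, 1, 1]]), 1) -> tensor([[2, 3, 1, 1]])
# real tokens receive positions starting at padding_idx + 1 while pads keep padding_idx.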
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
super().__init__(num_embeddings, embedding_dim, padding_idx)
if self.padding_idx is not None:
self.max_positions = self.num_embeddings - self.padding_idx - 1
else:
self.max_positions = self.num_embeddings
def forward(
self,
input: Tensor,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
positions: Optional[Tensor] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
assert (positions is None) or (
self.padding_idx is None
), "If positions is pre-computed then padding_idx should not be set."
# we cannot use incremental state here because we must be aware of
# padding.
if positions is None and self.padding_idx is not None:
positions = make_positions(input, self.padding_idx)
assert positions is not None
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
def PositionalEmbedding(
num_embeddings: int,
embedding_dim: int,
padding_idx: int,
learned: bool = False,
learned_sinusoidal: bool = False,
full_megatron_init=False,
pos_init_scalar=1.0,
megatron_init_sigma=None,
truncate_init=False,
):
def _init_emb(tensor, sigma):
if sigma <= 1e-8: # effectively 0
return nn.init.zeros_(tensor)
if truncate_init:
return nn.init.trunc_normal_(
tensor, mean=0.0, std=sigma, a=-3 * sigma, b=3 * sigma
)
else:
return nn.init.normal_(tensor, mean=0.0, std=sigma)
if learned:
# if padding_idx is specified then offset the embedding ids by
# this index and adjust num_embeddings appropriately
# TODO: The right place for this offset would be inside
# LearnedPositionalEmbedding. Move this there for a cleaner implementation.
if padding_idx is not None:
num_embeddings = num_embeddings + padding_idx + 1
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
if full_megatron_init:
_init_emb(m.weight, megatron_init_sigma * pos_init_scalar)
else:
_init_emb(m.weight, embedding_dim**-0.5 * pos_init_scalar)
if padding_idx is not None:
nn.init.constant_(m.weight[padding_idx], 0)
elif learned_sinusoidal:
if padding_idx is not None:
num_embeddings = num_embeddings + padding_idx + 1
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
with torch.no_grad():
m.weight.copy_(
SinusoidalPositionalEmbedding.get_embedding(
num_embeddings,
embedding_dim,
padding_idx,
)
)
else:
m = SinusoidalPositionalEmbedding(
embedding_dim,
padding_idx,
init_size=num_embeddings + padding_idx + 1,
)
return m
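# Sizing note (a reading of the offset logic above, not from the original comments): with
# learned=True, num_embeddings=2048 and padding_idx=1, the table is allocated with
# 2048 + 1 + 1 = 2050 rows so positions, which start at padding_idx + 1, never run off the end,
# and the row at padding_idx is zeroed.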
from typing import Tuple
from torch.nn import Parameter, init
import math
import uuid
def softmax(x, dim: int):
return F.softmax(x, dim=dim, dtype=torch.float32)
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
class Linear(nn.Module):
"""
Exact same as pytorch nn.Linear but with option to initialize weight and bias directly on GPU
"""
__constants__ = ["in_features", "out_features"]
in_features: int
out_features: int
weight: Tensor
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
initialize_params_on_gpu: bool = False,
dtype: torch.dtype = None,
) -> None:
super(Linear, self).__init__()
self.in_features = in_features
self.out_features = out_features
device = torch.cuda.current_device() if initialize_params_on_gpu else None
if dtype is None:
dtype = torch.float
self.weight = Parameter(
torch.empty(out_features, in_features, device=device, dtype=dtype)
)
if bias:
self.bias = Parameter(torch.empty(out_features, device=device, dtype=dtype))
else:
self.register_parameter("bias", None)
def forward(self, input: Tensor) -> Tensor:
return F.linear(input, self.weight, self.bias)
def extra_repr(self) -> str:
return "in_features={}, out_features={}, bias={}".format(
self.in_features, self.out_features, self.bias is not None
)
class Dropout(nn.Module):
def __init__(self, p, module_name=None):
super().__init__()
self.p = p
self.module_name = module_name
self.apply_during_inference = False
def extra_repr(self) -> str:
return "p={}".format(self.p)
def forward(self, x, inplace: bool = False):
if self.p > 0 and (self.training or self.apply_during_inference):
return F.dropout(x, p=self.p, training=True, inplace=inplace)
else:
return x
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def init_incremental_state(self):
self._incremental_state_id = "5"  # fixed value (instead of str(uuid.uuid4())), presumably to keep runs deterministic
def _get_full_incremental_state_key(self, key: str) -> str:
return "{}.{}".format(self._incremental_state_id, key)
def get_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
full_key = self._get_full_incremental_state_key(key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = self._get_full_incremental_state_key(key)
incremental_state[full_key] = value
return incremental_state
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
initialize_params_on_gpu=False,
dtype: Optional[torch.dtype] = None,
):
self.init_incremental_state()
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout_module = Dropout(dropout, module_name=self.__class__.__name__)
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim**-0.5
self.self_attention = self_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
random_state = torch.get_rng_state()
# random_state_cuda = torch.cuda.get_rng_state()
self.k_proj = Linear(
self.kdim,
embed_dim,
bias=bias,
initialize_params_on_gpu=initialize_params_on_gpu,
dtype=dtype,
)
self.v_proj = Linear(
self.vdim,
embed_dim,
bias=bias,
initialize_params_on_gpu=initialize_params_on_gpu,
dtype=dtype,
)
self.q_proj = Linear(
embed_dim,
embed_dim,
bias=bias,
initialize_params_on_gpu=initialize_params_on_gpu,
dtype=dtype,
)
self.out_proj = Linear(
embed_dim,
embed_dim,
bias=bias,
initialize_params_on_gpu=initialize_params_on_gpu,
dtype=dtype,
)
torch.set_rng_state(random_state)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
attn_mask: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
"""
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if key is not None:
src_len, key_bsz, _ = key.size()
if not torch.jit.is_scripting():
assert key_bsz == bsz
assert value is not None
assert (src_len, bsz) == value.shape[:2]
if (
incremental_state is None
# A workaround for quantization to work. Otherwise JIT compilation
# treats bias in linear module as method.
and not torch.jit.is_scripting()
):
assert key is not None and value is not None
return F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
torch.empty([0]),
torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout_module.p,
self.out_proj.weight,
self.out_proj.bias,
self.training or self.dropout_module.apply_during_inference,
key_padding_mask,
False,
attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
)
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
assert k is not None
k = torch.cat([prev_key, k], dim=1)
src_len = k.size(1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
assert v is not None
v = torch.cat([prev_value, v], dim=1)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
assert k.size(1) == src_len
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
torch.zeros(key_padding_mask.size(0), 1).type_as(
key_padding_mask
),
],
dim=1,
)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
# Replace any non-finite values with finite equivalents, since otherwise
# we may get NaN when adding attn_mask or computing softmax.
attn_weights = torch.nan_to_num(attn_weights)
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights_float = softmax(attn_weights, dim=-1)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
return attn, None # To match return type of F.multi_head_attention_forward
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
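# Incremental decoding sketch (illustrative): callers thread a single dict through every forward
# call; each MultiheadAttention stores its running keys/values under its own
# "<_incremental_state_id>.attn_state" entry, so step t only projects the newest token and
# concatenates it onto the cached keys/values from earlier steps.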
from typing import Callable, List
class ActivationFn(nn.Module):
def __init__(self, name, embed_dim, ffn_dim):
super().__init__()
self.fn = self.__get_fn(name)
def forward(self, fc1_in, fc1_out, model_parallel: bool):
return self.fn(fc1_out)
def __get_fn(self, name: str) -> Callable:
"""Returns the activation function corresponding to the arg passed in the run"""
if name == "relu":
return F.relu
elif name == "relu_squared":
return relu_squared
elif name == "gelu":
return gelu
elif name == "tanh":
return torch.tanh
elif name == "linear":
return lambda x: x
else:
raise RuntimeError("--activation-fn {} not supported".format(name))
class TransformerDecoderLayer(nn.Module):
"""Pre-norm Decoder layer block.
Note that we have found model training to require pre-norm to remain stable.
Args:
embed_dim (int): dimension of the model embedding
decoder_embed_dim (int): dimension of the decoder embedding
dropout (float): dropout probability
decoder_attention_heads (int): number of decoder attention heads
attention_dropout (float): dropout probability for attention weights
decoder_ffn_embed_dim (int): dimension of the decoder feedforward network embedding
activation_fn (str): activation function name
add_bias_kv (bool): whether to add bias to the key and value projections
add_zero_attn (bool): whether to add a zero attention vector for padding tokens
disable_affine_ln (bool): whether to disable affine layer normalization
disable_bias (bool): whether to disable bias in linear layers
tensor_parallel_init_model_on_gpu (bool): whether to initialize model on GPU for tensor parallelism
full_megatron_init (bool): whether to use full Megatron initialization
megatron_init_sigma (float): sigma value for Megatron initialization
truncate_init (bool): whether to truncate the initialization values
"""
def __init__(
self,
embed_dim,
decoder_embed_dim,
dropout=0.1,
decoder_attention_heads=8,
attention_dropout=0.1,
decoder_ffn_embed_dim=2048,
activation_fn="relu",
add_bias_kv=False,
add_zero_attn=False,
disable_affine_ln=False,
disable_bias=False,
tensor_parallel_init_model_on_gpu=False,
full_megatron_init=False,
megatron_init_sigma=0.006,
truncate_init=False,
):
super().__init__()
self.embed_dim = embed_dim
self.dropout_module = Dropout(dropout, module_name=self.__class__.__name__)
self.self_attn = self.build_self_attention(
decoder_embed_dim,
decoder_attention_heads,
attention_dropout,
add_bias_kv,
add_zero_attn,
tensor_parallel_init_model_on_gpu,
disable_bias,
megatron_init_sigma,
truncate_init,
)
self.nh = decoder_attention_heads
self.head_dim = int(decoder_embed_dim / self.nh)
affine_ln = not disable_affine_ln
self.self_attn_layer_norm = LayerNorm(
decoder_embed_dim, elementwise_affine=affine_ln
)
self.fc1 = self.build_fc1(
decoder_embed_dim,
decoder_ffn_embed_dim,
tensor_parallel_init_model_on_gpu,
full_megatron_init,
megatron_init_sigma,
truncate_init,
disable_bias,
)
self.activation_fn = ActivationFn(
activation_fn,
decoder_embed_dim,
decoder_ffn_embed_dim,
)
self.fc2 = self.build_fc2(
decoder_ffn_embed_dim,
decoder_embed_dim,
tensor_parallel_init_model_on_gpu,
full_megatron_init,
megatron_init_sigma,
truncate_init,
disable_bias,
)
self.final_layer_norm = LayerNorm(
decoder_embed_dim, elementwise_affine=affine_ln
)
def build_fc1(
self,
input_dim,
output_dim,
initialize_params_on_gpu=False,
full_megatron_init=False,
megatron_init_sigma=0.006,
truncate_init=False,
disable_bias=False,
):
return Linear(
input_dim,
output_dim,
initialize_params_on_gpu=initialize_params_on_gpu,
bias=not disable_bias,
)
def build_fc2(
self,
input_dim,
output_dim,
initialize_params_on_gpu=False,
full_megatron_init=False,
megatron_init_sigma=0.006,
truncate_init=False,
disable_bias=False,
):
return Linear(
input_dim,
output_dim,
initialize_params_on_gpu=initialize_params_on_gpu,
bias=not disable_bias,
)
def build_self_attention(
self,
embed_dim,
decoder_attention_heads,
attention_dropout,
add_bias_kv,
add_zero_attn,
tensor_parallel_init_model_on_gpu,
disable_bias,
megatron_init_sigma,
truncate_init,
):
return MultiheadAttention(
embed_dim,
decoder_attention_heads,
dropout=attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=True,
initialize_params_on_gpu=tensor_parallel_init_model_on_gpu,
bias=not disable_bias,
)
def forward_attention(
self,
query,
key,
value,
residual,
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
attn_mask: Optional[Tensor] = None,
):
x, _ = self.self_attn(
query=query,
key=key,
value=value,
key_padding_mask=key_padding_mask,
incremental_state=incremental_state,
attn_mask=attn_mask,
)
x = self.dropout_module(x)
x = residual + x
return x
def forward(
self,
x,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
self_attn_mask: Optional[Tensor] = None,
self_attn_padding_mask: Optional[Tensor] = None,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
residual = x
x = self.self_attn_layer_norm(x)
x = self.forward_attention(
query=x,
key=x,
value=x,
residual=residual,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
attn_mask=self_attn_mask,
)
residual = x
x = self.final_layer_norm(x)
x = self.activation_fn(x, self.fc1(x), model_parallel=False)
x = self.fc2(x)
x = self.dropout_module(x)
x = residual + x
return x
class TransformerDecoder(nn.Module):
def __init__(
self,
embed_tokens,
decoder_attention_heads,
decoder_ffn_embed_dim,
activation_fn="relu",
dropout=0.1,
attention_dropout=0.1,
no_emb_dropout=False,
share_decoder_input_output_embed=False,
embed_dim=512,
max_target_positions=1024,
no_scale_embedding=False,
decoder_learned_pos=False,
decoder_learned_sinusoidal=False,
full_megatron_init=False,
pos_init_scalar=1.0,
megatron_init_sigma=0.006,
truncate_init=False,
decoder_layers=6,
self_attn_doc_sep=-1,
initialize_params_on_gpu=False,
dtype=torch.float32,
add_bias_kv=False,
add_zero_attn=False,
disable_affine_ln=False,
disable_bias=False,
tensor_parallel_init_model_on_gpu=False,
):
super().__init__()
self.register_buffer("version", torch.Tensor([3]))
self._future_mask = torch.empty(0)
self.tensor_parallel_init_model_on_gpu = tensor_parallel_init_model_on_gpu
self.megatron_init_sigma = megatron_init_sigma
self.full_megatron_init = full_megatron_init
self.activation_fn = activation_fn
self.attention_dropout = attention_dropout
self.dropout_module = Dropout(dropout, module_name=self.__class__.__name__)
self.dropout = dropout
self.truncate_init = truncate_init
if no_emb_dropout:
self.dropout_module = None
self.add_bias_kv = add_bias_kv
self.add_zero_attn = add_zero_attn
self.disable_affine_ln = disable_affine_ln
self.disable_bias = disable_bias
self.decoder_attention_heads = decoder_attention_heads
self.share_input_output_embed = share_decoder_input_output_embed
self.embed_dim = embed_dim
self.padding_idx: int = embed_tokens.padding_idx
assert self.padding_idx is not None
self.max_target_positions = max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if no_scale_embedding else math.sqrt(self.embed_dim)
self.decoder_ffn_embed_dim = decoder_ffn_embed_dim
# default value
device = torch.cuda.current_device() if initialize_params_on_gpu else None
# default value
self.self_attn_doc_sep = self_attn_doc_sep
self.embed_positions = (
PositionalEmbedding(
self.max_target_positions,
self.embed_dim,
self.padding_idx,
learned=decoder_learned_pos,
learned_sinusoidal=decoder_learned_sinusoidal,
full_megatron_init=full_megatron_init,
pos_init_scalar=pos_init_scalar,
megatron_init_sigma=megatron_init_sigma,
truncate_init=truncate_init,
)
if decoder_learned_pos
else None
)
if self.embed_positions is not None: self.embed_positions.to(device).to(dtype)
layers = [self.build_decoder_layer() for _ in range(decoder_layers)]
self.layers = nn.ModuleList(layers)
self.num_layers = len(self.layers)
self.layer_norm = LayerNorm(
self.embed_dim,
elementwise_affine=not disable_affine_ln,
)
self.layer_norm.to(device).to(dtype)
self.output_projection = None
if self.share_input_output_embed:
self.output_projection = Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
initialize_params_on_gpu=initialize_params_on_gpu,
dtype=dtype,
)
self.output_projection.weight = self.embed_tokens.weight
else:
self.output_projection = Linear(
self.embed_dim,
self.embed_tokens.num_embeddings,  # project to the vocabulary size (`dictionary` is not defined in this stripped-down port)
bias=False,
initialize_params_on_gpu=initialize_params_on_gpu,
dtype=dtype,
)
nn.init.normal_(
self.output_projection.weight, mean=0, std=self.embed_dim**-0.5
)
def build_base_decoder_layer(self):
return TransformerDecoderLayer(
self.embed_dim,
self.embed_dim,
self.dropout,
self.decoder_attention_heads,
self.attention_dropout,
self.decoder_ffn_embed_dim,
self.activation_fn,
self.add_bias_kv,
self.add_zero_attn,
self.disable_affine_ln,
self.disable_bias,
self.tensor_parallel_init_model_on_gpu,
self.full_megatron_init,
self.megatron_init_sigma,
self.truncate_init,
)
def build_decoder_layer(self):
layer = self.build_base_decoder_layer()
return layer
def forward_embedding(
self,
tokens,
token_embedding: Optional[torch.Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
):
# embed tokens and positions
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
tokens, incremental_state=incremental_state, positions=positions
)
# see BaseDecoder for important information about
# incremental state
if incremental_state is not None:
tokens = tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
if token_embedding is None:
token_embedding = self.embed_tokens(tokens)
x = embed = self.embed_scale * token_embedding
if positions is not None:
x += positions
if self.dropout_module is not None:
x = self.dropout_module(x)
# Returning in T x B x C format as that makes integrating sequence parallelism easier.
x = x.transpose(0, 1).contiguous()
return x, embed, positions
# forward for TransformerDecoder
def forward(
self,
prev_output_tokens,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
src_lengths: Optional[Any] = None,
token_embeddings: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[Tensor] = None,
):
"""
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
self_attn_padding_mask (torch.Tensor, optional): precomputed padding
mask for self-attention (default None will recompute mask)
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
# see BaseDecoder for important information about
# incremental state
x = self.extract_features(
prev_output_tokens,
incremental_state=incremental_state,
token_embeddings=token_embeddings,
self_attn_padding_mask=self_attn_padding_mask,
)
if not features_only:
x = self.output_layer(x)
# Transposing back to B x T x C, so that the interface stays the same.
x = x.transpose(0, 1).contiguous()
return x
def extract_features(
self,
prev_output_tokens: torch.Tensor,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
token_embeddings: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[Tensor] = None,
) -> torch.Tensor:
# compute self-attention padding mask (involves device-to-host transfer,
# so put it at the top of the forward)
assert prev_output_tokens is not None
assert self.padding_idx is not None
if (
self_attn_padding_mask is None
and prev_output_tokens.eq(self.padding_idx).any()
):
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# assert self_attn_padding_mask is not None
# embed tokens and positions
# x is T x B x C
x, tok, pos = self.forward_embedding(
prev_output_tokens, token_embeddings, incremental_state
)
# see BaseDecoder for important information about
# incremental state. Note that it may be an empty dictionary.
if incremental_state is not None:
self_attn_mask = self.buffered_future_mask(x, prev_output_tokens)
else:
self_attn_mask = None
# decoder layers
# store other representations for instrumentation in VocabParallelCrossEntCrit
# Note: we are only storing the embeddings output and output of final transformer block
# instead of all inner representations, as thats the only thing being logged and storing
# all intermediate representation causes OOM for large models during validation.
for idx, layer in enumerate(self.layers):
x = layer(
x,
incremental_state=incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
)
if self.layer_norm is not None:
x = self.layer_norm(x)
# Returned x is T x B x C here, as sequence_parallel requires T to be first dim
return x
def output_layer(self, features):
"""Project features to the vocabulary size."""
return self.output_projection(features)
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor, input_tokens=None) -> torch.Tensor:
cur_seq_len, batch_size = tensor.size(0), tensor.size(1)
max_seq_len = self.max_positions()
need_to_make_new_mask = (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(1) < max_seq_len
or (
self._future_mask.size(0) != (batch_size * self.decoder_attention_heads)
)
)
# self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
if need_to_make_new_mask:
self._future_mask = torch.triu(
fill_with_neg_inf(
torch.zeros([max_seq_len, max_seq_len], device=tensor.device)
),
1,
)
self._future_mask = self._future_mask.to(tensor)
if self.self_attn_doc_sep != -1:
return self._future_mask
else:
return self._future_mask[:cur_seq_len, :cur_seq_len]
def _sample_topp(temperature: float, sampling_topp: float, lprobs: torch.Tensor):
if temperature == 0.0 or sampling_topp == 0.0:
# greedy search
return tuple(lprobs.max(dim=-1))
probs = lprobs.exp()
sprobs, sinds = probs.sort(dim=-1, descending=True)
mask = (sprobs.cumsum(dim=-1) - sprobs) >= sampling_topp
trunc_sprobs = sprobs.detach().clone()
trunc_sprobs[mask] = 0
trunc_sprobs.div_(trunc_sprobs.sum(dim=-1).unsqueeze(-1))
choices = torch.multinomial(trunc_sprobs, 1)[:, 0]
hyp_ids = torch.arange(lprobs.size(0)).to(lprobs.device)
tok_ids = sinds[hyp_ids, choices]
scores = sprobs[hyp_ids, choices].log()
return scores, tok_ids
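# Behaviour sketch (illustrative): with sampling_topp = 0.9 the sorted probabilities are cut off
# once the cumulative mass excluding the current entry reaches 0.9, renormalised, and one token
# is drawn per hypothesis; temperature == 0.0 or sampling_topp == 0.0 degenerates to greedy argmax.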
class SequenceGenerator(nn.Module):
def __init__(
self, model, beam_size: int, generate_size: int, use_incremental: bool = True
) -> None:
super().__init__()
self.model = model
self.beam_size = beam_size
self.generate_size = generate_size
self.use_incremental = use_incremental
def forward(self, src_tokens):
with torch.no_grad():
incremental_states = torch.jit.annotate(
Dict[str, Dict[str, Optional[Tensor]]], {}
)
bsz, src_len = src_tokens.size()[:2]
beam_size = self.beam_size
max_len = src_len + self.generate_size
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
tokens = (
torch.zeros(bsz * beam_size, max_len).to(src_tokens).long().fill_(0)
)
start_step = src_tokens.shape[1]
tokens[:, :start_step] = src_tokens.repeat_interleave(beam_size, 0)
model_out = self.model(
tokens[:, :start_step],
incremental_state=incremental_states if self.use_incremental else None,
)
model_predictions = F.log_softmax(model_out.float()[:, -1, :], dim=-1)
for step in range(start_step, max_len):
tokens[:, step] = model_predictions.max(-1)[1]
# forward through the next pass
model_out = self.model(
tokens[:, : step + 1],
incremental_state=incremental_states
if self.use_incremental
else None,
)
# see above for why this must remain float
model_predictions = F.log_softmax(model_out.float()[:, -1, :], dim=-1)
return tokens
class SequenceGeneratorFixedSize(nn.Module):
def __init__(self, model, beam_size: int, generate_size: int) -> None:
super().__init__()
self.model = model
self.beam_size = beam_size
self.generate_size = generate_size
def forward(self, src_tokens):
with torch.no_grad():
bsz, src_len = src_tokens.size()[:2]
beam_size = self.beam_size
max_len = src_len + self.generate_size
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
start_step = src_tokens.shape[1]
tokens = (
torch.zeros(bsz * beam_size, max_len).to(src_tokens).long().fill_(0)
)
tokens[:, :start_step] = src_tokens.repeat_interleave(beam_size, 0)
model_out = self.model(tokens)
model_predictions = F.log_softmax(model_out.float()[:, start_step, :], dim=-1)
for step in range(start_step, max_len):
tokens[:, step] = model_predictions.max(-1)[1]
model_out = self.model(
tokens,
)
# see above for why this must remain float
model_predictions = F.log_softmax(model_out.float()[:, step, :], dim=-1)
return tokens
def create_model(embed_dim=1536):
embed_tokens = torch.nn.Embedding(2048, embed_dim, padding_idx=-1)
return (
TransformerDecoder(
embed_tokens,
decoder_layers=24,
decoder_attention_heads=16,
max_target_positions=2048,
embed_dim=embed_dim,
decoder_ffn_embed_dim=embed_dim * 4,
no_scale_embedding=True,
share_decoder_input_output_embed=True,
decoder_learned_pos=True,
dropout=0.1,
)
)
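# Usage sketch (illustrative, mirrors the benchmark Model wrapper): greedy generation around the
# decoder built above, assuming a vocabulary of 128 ids and a 64-token prompt:
#   generator = SequenceGenerator(create_model(embed_dim=1536), beam_size=1, generate_size=64).eval()
#   tokens = generator(torch.randint(1, 128, (1, 64)))  # -> (1, 128) token ids: prompt + 64 generated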
|
from torchbenchmark.util.framework.timm.model_factory import TimmModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(TimmModel):
task = COMPUTER_VISION.GENERATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, model_name='vit_giant_patch14_224', device=device,
batch_size=batch_size, extra_args=extra_args)
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="hf_Bart", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|
import numpy as np
import random
import time
import torch
from argparse import Namespace
from .meta import Meta
from pathlib import Path
from typing import Tuple
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import OTHER
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
class Model(BenchmarkModel):
task = OTHER.OTHER_TASKS
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
ALLOW_CUSTOMIZE_BSIZE = False
CANNOT_SET_CUSTOM_OPTIMIZER = True
# Skip correctness check, because maml runs backward and optimizer in eval()
# Which will return non-deterministic results
SKIP_CORRECTNESS_CHECK = True
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
# load from disk or synthesize data
use_data_file = False
debug_print = False
root = str(Path(__file__).parent)
args = Namespace(**{
'n_way': 5,
'k_spt': 1,
'k_qry': 15,
'imgsz': 28,
'imgc': 1,
'task_num': 32,
'meta_lr': 1e-3,
'update_lr': 0.4,
'update_step': 5,
'update_step_test': 10
})
config = [
('conv2d', [64, args.imgc, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 2, 2, 1, 0]),
('relu', [True]),
('bn', [64]),
('flatten', []),
('linear', [args.n_way, 64])
]
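# Layer spec format (inferred from how Learner consumes this config, stated here as an
# assumption): conv2d entries are [out_channels, in_channels, kernel_h, kernel_w, stride, padding]
# and the final linear entry is [out_features, in_features].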
self.module = Meta(args, config).to(device)
if use_data_file:
self.example_inputs = torch.load(f'{root}/batch.pt')
self.example_inputs = tuple([torch.from_numpy(i).to(self.device) for i in self.example_inputs])
else:
# synthesize data parameterized by arg values
self.example_inputs = (
torch.randn(args.task_num, args.n_way, args.imgc, args.imgsz, args.imgsz).to(device),
torch.randint(0, args.n_way, [args.task_num, args.n_way], dtype=torch.long).to(device),
torch.randn(args.task_num, args.n_way * args.k_qry, args.imgc, args.imgsz, args.imgsz).to(device),
torch.randint(0, args.n_way, [args.task_num, args.n_way * args.k_qry], dtype=torch.long).to(device))
# print input shapes
if debug_print:
for i in range(len(self.example_inputs)):
print(self.example_inputs[i].shape)
def get_module(self):
return self.module, self.example_inputs
def eval(self) -> Tuple[torch.Tensor]:
out = self.module(*self.example_inputs)
return (out, )
def train(self):
raise NotImplementedError("MAML model doesn't support train.")
def eval_in_nograd(self):
return False
|
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
from torch import optim
import numpy as np
from .learner import Learner
from copy import deepcopy
class Meta(nn.Module):
"""
Meta Learner
"""
def __init__(self, args, config):
"""
:param args:
"""
super(Meta, self).__init__()
self.update_lr = args.update_lr
self.meta_lr = args.meta_lr
self.n_way = args.n_way
self.k_spt = args.k_spt
self.k_qry = args.k_qry
self.task_num = args.task_num
self.update_step = args.update_step
self.update_step_test = args.update_step_test
self.net = Learner(config, args.imgc, args.imgsz)
self.meta_optim = optim.Adam(self.net.parameters(), lr=self.meta_lr)
def clip_grad_by_norm_(self, grad, max_norm):
"""
in-place gradient clipping.
:param grad: list of gradients
:param max_norm: maximum norm allowable
:return:
"""
total_norm = 0
counter = 0
for g in grad:
param_norm = g.data.norm(2)
total_norm += param_norm.item() ** 2
counter += 1
total_norm = total_norm ** (1. / 2)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for g in grad:
g.data.mul_(clip_coef)
return total_norm/counter
def forward(self, x_spt, y_spt, x_qry, y_qry):
if self.training:
return self.forward_train(x_spt, y_spt, x_qry, y_qry)
else:
return self.finetunning(x_spt[0], y_spt[0], x_qry[0], y_qry[0])
def forward_train(self, x_spt, y_spt, x_qry, y_qry):
"""
:param x_spt: [b, setsz, c_, h, w]
:param y_spt: [b, setsz]
:param x_qry: [b, querysz, c_, h, w]
:param y_qry: [b, querysz]
        :return: per-update-step accuracy on the query set, shape [update_step + 1]
"""
task_num, setsz, c_, h, w = x_spt.size()
querysz = x_qry.size(1)
losses_q = [0 for _ in range(self.update_step + 1)] # losses_q[i] is the loss on step i
corrects = [0 for _ in range(self.update_step + 1)]
for i in range(task_num):
# 1. run the i-th task and compute loss for k=0
logits = self.net(x_spt[i], vars=None, bn_training=True)
loss = F.cross_entropy(logits, y_spt[i])
grad = torch.autograd.grad(loss, self.net.parameters())
            fast_weights = [p[1] - self.update_lr * p[0] for p in zip(grad, self.net.parameters())]
# this is the loss and accuracy before first update
with torch.no_grad():
# [setsz, nway]
logits_q = self.net(x_qry[i], self.net.parameters(), bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[0] += loss_q
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i]).sum().item()
corrects[0] = corrects[0] + correct
# this is the loss and accuracy after the first update
with torch.no_grad():
# [setsz, nway]
logits_q = self.net(x_qry[i], fast_weights, bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[1] += loss_q
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i]).sum().item()
corrects[1] = corrects[1] + correct
for k in range(1, self.update_step):
# 1. run the i-th task and compute loss for k=1~K-1
logits = self.net(x_spt[i], fast_weights, bn_training=True)
loss = F.cross_entropy(logits, y_spt[i])
# 2. compute grad on theta_pi
grad = torch.autograd.grad(loss, fast_weights)
# 3. theta_pi = theta_pi - train_lr * grad
fast_weights = [p[1] - self.update_lr * p[0] for p in zip(grad, fast_weights)]
logits_q = self.net(x_qry[i], fast_weights, bn_training=True)
                # loss_q is overwritten each step; only the value from the last update step is kept
loss_q = F.cross_entropy(logits_q, y_qry[i])
losses_q[k + 1] += loss_q
with torch.no_grad():
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
                    correct = torch.eq(pred_q, y_qry[i]).sum().item()  # .item() converts to a Python number
corrects[k + 1] = corrects[k + 1] + correct
# end of all tasks
        # average the query-set loss from the last update step over all tasks
loss_q = losses_q[-1] / task_num
# optimize theta parameters
self.meta_optim.zero_grad()
loss_q.backward()
# print('meta update')
# for p in self.net.parameters()[:5]:
# print(torch.norm(p).item())
self.meta_optim.step()
accs = torch.tensor(corrects) / (querysz * task_num)
return accs
def finetunning(self, x_spt, y_spt, x_qry, y_qry):
"""
:param x_spt: [setsz, c_, h, w]
:param y_spt: [setsz]
:param x_qry: [querysz, c_, h, w]
:param y_qry: [querysz]
        :return: per-update-step accuracy on the query set, shape [update_step_test + 1]
"""
querysz = x_qry.size(0)
corrects = [0 for _ in range(self.update_step_test + 1)]
        # To avoid corrupting the BN running_mean/running_var statistics and bn weight/bias
        # of self.net, we fine-tune on a deep copy of the network instead
net = deepcopy(self.net)
        # 1. run the task and compute the loss for k=0
logits = net(x_spt)
loss = F.cross_entropy(logits, y_spt)
grad = torch.autograd.grad(loss, net.parameters())
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))
# this is the loss and accuracy before first update
with torch.no_grad():
# [setsz, nway]
logits_q = net(x_qry, net.parameters(), bn_training=True)
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
# scalar
correct = torch.eq(pred_q, y_qry).sum().item()
corrects[0] = corrects[0] + correct
# this is the loss and accuracy after the first update
with torch.no_grad():
# [setsz, nway]
logits_q = net(x_qry, fast_weights, bn_training=True)
# [setsz]
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
# scalar
correct = torch.eq(pred_q, y_qry).sum().item()
corrects[1] = corrects[1] + correct
for k in range(1, self.update_step_test):
            # 1. run the task and compute the loss for k=1~K-1
logits = net(x_spt, fast_weights, bn_training=True)
loss = F.cross_entropy(logits, y_spt)
# 2. compute grad on theta_pi
grad = torch.autograd.grad(loss, fast_weights)
# 3. theta_pi = theta_pi - train_lr * grad
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
logits_q = net(x_qry, fast_weights, bn_training=True)
            # loss_q is overwritten each step; only the value from the last update step is kept
loss_q = F.cross_entropy(logits_q, y_qry)
with torch.no_grad():
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
                correct = torch.eq(pred_q, y_qry).sum().item()  # .item() converts to a Python number
corrects[k + 1] = corrects[k + 1] + correct
del net
accs = torch.tensor(corrects) / querysz
return accs
def main():
pass
if __name__ == '__main__':
main()
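# Illustrative sketch (not part of the upstream file): a self-contained demo of the
# MAML inner loop implemented above. It builds a tiny Learner/Meta pair and runs one
# meta-training forward pass on random data; every size below is arbitrary and only
# chosen to keep the example fast on CPU. Call _demo_meta_training_step() to try it.
def _demo_meta_training_step():
    from types import SimpleNamespace
    args = SimpleNamespace(
        n_way=5, k_spt=1, k_qry=2, imgsz=28, imgc=1, task_num=2,
        meta_lr=1e-3, update_lr=0.4, update_step=2, update_step_test=3,
    )
    # Layer specs in the format consumed by Learner: ('conv2d', [ch_out, ch_in, k, k, stride, pad])
    config = [
        ('conv2d', [64, args.imgc, 3, 3, 2, 0]), ('relu', [True]), ('bn', [64]),
        ('conv2d', [64, 64, 3, 3, 2, 0]), ('relu', [True]), ('bn', [64]),
        ('conv2d', [64, 64, 3, 3, 2, 0]), ('relu', [True]), ('bn', [64]),
        ('conv2d', [64, 64, 2, 2, 1, 0]), ('relu', [True]), ('bn', [64]),
        ('flatten', []), ('linear', [args.n_way, 64]),
    ]
    meta = Meta(args, config)  # modules default to training mode, so forward_train is used
    setsz, querysz = args.n_way * args.k_spt, args.n_way * args.k_qry
    x_spt = torch.randn(args.task_num, setsz, args.imgc, args.imgsz, args.imgsz)
    y_spt = torch.randint(0, args.n_way, (args.task_num, setsz))
    x_qry = torch.randn(args.task_num, querysz, args.imgc, args.imgsz, args.imgsz)
    y_qry = torch.randint(0, args.n_way, (args.task_num, querysz))
    # Returns per-update-step query accuracies, shape [update_step + 1].
    return meta(x_spt, y_spt, x_qry, y_qry)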
|
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
from typing import List
class Learner(nn.Module):
"""
"""
def __init__(self, config, imgc, imgsz):
"""
:param config: network config file, type:list of (string, list)
:param imgc: 1 or 3
:param imgsz: 28 or 84
"""
super(Learner, self).__init__()
self.config = config
# this dict contains all tensors needed to be optimized
self.vars = nn.ParameterList()
# running_mean and running_var
self.vars_bn = nn.ParameterList()
for i, (name, param) in enumerate(self.config):
if name == 'conv2d':
# [ch_out, ch_in, kernelsz, kernelsz]
w = nn.Parameter(torch.ones(*param[:4]))
                # gain=1 according to cbfinn's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
elif name == 'convt2d':
# [ch_in, ch_out, kernelsz, kernelsz, stride, padding]
w = nn.Parameter(torch.ones(*param[:4]))
                # gain=1 according to cbfinn's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_in, ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[1])))
elif name == 'linear':
# [ch_out, ch_in]
w = nn.Parameter(torch.ones(*param))
# gain=1 according to cbfinn's implementation
torch.nn.init.kaiming_normal_(w)
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
elif name == 'bn':
# [ch_out]
w = nn.Parameter(torch.ones(param[0]))
self.vars.append(w)
# [ch_out]
self.vars.append(nn.Parameter(torch.zeros(param[0])))
# must set requires_grad=False
running_mean = nn.Parameter(torch.zeros(param[0]), requires_grad=False)
running_var = nn.Parameter(torch.ones(param[0]), requires_grad=False)
self.vars_bn.extend([running_mean, running_var])
elif name in ['tanh', 'relu', 'upsample', 'avg_pool2d', 'max_pool2d',
'flatten', 'reshape', 'leakyrelu', 'sigmoid']:
continue
else:
raise NotImplementedError
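    # Note (added for clarity): self.vars stores the learnable tensors as flat
    # [weight, bias] pairs in config order, while self.vars_bn stores the
    # (running_mean, running_var) pair for each 'bn' layer. forward() walks both
    # lists with the idx / bn_idx counters, which is what lets Meta pass in
    # "fast weights" as `vars` without touching the module's own parameters.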
def extra_repr(self):
info = ''
for name, param in self.config:
if name == 'conv2d':
tmp = 'conv2d:(ch_in:%d, ch_out:%d, k:%dx%d, stride:%d, padding:%d)'\
%(param[1], param[0], param[2], param[3], param[4], param[5],)
info += tmp + '\n'
elif name == 'convt2d':
tmp = 'convTranspose2d:(ch_in:%d, ch_out:%d, k:%dx%d, stride:%d, padding:%d)'\
%(param[0], param[1], param[2], param[3], param[4], param[5],)
info += tmp + '\n'
elif name == 'linear':
tmp = 'linear:(in:%d, out:%d)'%(param[1], param[0])
info += tmp + '\n'
elif name == 'leakyrelu':
tmp = 'leakyrelu:(slope:%f)'%(param[0])
info += tmp + '\n'
elif name == 'avg_pool2d':
tmp = 'avg_pool2d:(k:%d, stride:%d, padding:%d)'%(param[0], param[1], param[2])
info += tmp + '\n'
elif name == 'max_pool2d':
tmp = 'max_pool2d:(k:%d, stride:%d, padding:%d)'%(param[0], param[1], param[2])
info += tmp + '\n'
elif name in ['flatten', 'tanh', 'relu', 'upsample', 'reshape', 'sigmoid', 'use_logits', 'bn']:
tmp = name + ':' + str(tuple(param))
info += tmp + '\n'
else:
raise NotImplementedError
return info
def forward(self, x, vars=None, bn_training=True):
"""
This function can be called by finetunning, however, in finetunning, we dont wish to update
running_mean/running_var. Thought weights/bias of bn == updated, it has been separated by fast_weights.
Indeed, to not update running_mean/running_var, we need set update_bn_statistics=False
but weight/bias will be updated and not dirty initial theta parameters via fast_weiths.
:param x: [b, 1, 28, 28]
:param vars:
:param bn_training: set False to not update
:return: x, loss, likelihood, kld
"""
        if vars is None:
vars = self.vars
idx = 0
bn_idx = 0
for name, param in self.config:
if name == 'conv2d':
w, b = vars[idx], vars[idx + 1]
                # remember to keep forward_encoder and forward_decoder synchronized!
x = F.conv2d(x, w, b, stride=param[4], padding=param[5])
idx += 2
# print(name, param, '\tout:', x.shape)
elif name == 'convt2d':
w, b = vars[idx], vars[idx + 1]
                # remember to keep forward_encoder and forward_decoder synchronized!
x = F.conv_transpose2d(x, w, b, stride=param[4], padding=param[5])
idx += 2
# print(name, param, '\tout:', x.shape)
elif name == 'linear':
w, b = vars[idx], vars[idx + 1]
x = F.linear(x, w, b)
idx += 2
# print('forward:', idx, x.norm().item())
elif name == 'bn':
w, b = vars[idx], vars[idx + 1]
running_mean, running_var = self.vars_bn[bn_idx], self.vars_bn[bn_idx+1]
x = F.batch_norm(x, running_mean, running_var, weight=w, bias=b, training=bn_training)
idx += 2
bn_idx += 2
elif name == 'flatten':
# print(x.shape)
x = x.view(x.size(0), -1)
elif name == 'reshape':
# [b, 8] => [b, 2, 2, 2]
x = x.view(x.size(0), *param)
elif name == 'relu':
x = F.relu(x, inplace=param[0])
elif name == 'leakyrelu':
x = F.leaky_relu(x, negative_slope=param[0], inplace=param[1])
elif name == 'tanh':
x = F.tanh(x)
elif name == 'sigmoid':
x = torch.sigmoid(x)
elif name == 'upsample':
x = F.upsample_nearest(x, scale_factor=param[0])
elif name == 'max_pool2d':
x = F.max_pool2d(x, param[0], param[1], param[2])
elif name == 'avg_pool2d':
x = F.avg_pool2d(x, param[0], param[1], param[2])
else:
raise NotImplementedError
        # make sure all variables were used
assert idx == len(vars)
assert bn_idx == len(self.vars_bn)
return x
def zero_grad(self, vars=None):
"""
:param vars:
:return:
"""
with torch.no_grad():
            if vars is None:
for p in self.vars:
                    if p.grad is not None:
p.grad.zero_()
else:
for p in vars:
                    if p.grad is not None:
p.grad.zero_()
def parameters(self):
"""
        Override nn.Module.parameters(), which would otherwise return a generator over
        the registered parameters; here we return the flat ParameterList directly.
:return:
"""
return self.vars |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from typing import Optional, Tuple
from .sam import Sam
from .transforms import ResizeLongestSide
class SamPredictor:
def __init__(
self,
sam_model: Sam,
) -> None:
"""
Uses SAM to calculate the image embedding for an image, and then
        allows repeated, efficient mask prediction given prompts.
Arguments:
sam_model (Sam): The model to use for mask prediction.
"""
super().__init__()
self.model = sam_model
self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)
self.reset_image()
def set_image(
self,
image: np.ndarray,
image_format: str = "RGB",
) -> None:
"""
Calculates the image embeddings for the provided image, allowing
masks to be predicted with the 'predict' method.
Arguments:
image (np.ndarray): The image for calculating masks. Expects an
image in HWC uint8 format, with pixel values in [0, 255].
image_format (str): The color format of the image, in ['RGB', 'BGR'].
"""
assert image_format in [
"RGB",
"BGR",
], f"image_format must be in ['RGB', 'BGR'], is {image_format}."
if image_format != self.model.image_format:
image = image[..., ::-1]
# Transform the image to the form expected by the model
input_image = self.transform.apply_image(image)
input_image_torch = torch.as_tensor(input_image, device=self.device)
input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]
self.set_torch_image(input_image_torch, image.shape[:2])
@torch.no_grad()
def set_torch_image(
self,
transformed_image: torch.Tensor,
original_image_size: Tuple[int, ...],
) -> None:
"""
Calculates the image embeddings for the provided image, allowing
masks to be predicted with the 'predict' method. Expects the input
image to be already transformed to the format expected by the model.
Arguments:
transformed_image (torch.Tensor): The input image, with shape
1x3xHxW, which has been transformed with ResizeLongestSide.
original_image_size (tuple(int, int)): The size of the image
before transformation, in (H, W) format.
"""
assert (
len(transformed_image.shape) == 4
and transformed_image.shape[1] == 3
and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size
), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}."
self.reset_image()
self.original_size = original_image_size
self.input_size = tuple(transformed_image.shape[-2:])
input_image = self.model.preprocess(transformed_image)
self.features = self.model.image_encoder(input_image)
self.is_image_set = True
def predict(
self,
point_coords: Optional[np.ndarray] = None,
point_labels: Optional[np.ndarray] = None,
box: Optional[np.ndarray] = None,
mask_input: Optional[np.ndarray] = None,
multimask_output: bool = True,
return_logits: bool = False,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Predict masks for the given input prompts, using the currently set image.
Arguments:
point_coords (np.ndarray or None): A Nx2 array of point prompts to the
model. Each point is in (X,Y) in pixels.
point_labels (np.ndarray or None): A length N array of labels for the
point prompts. 1 indicates a foreground point and 0 indicates a
background point.
          box (np.ndarray or None): A length 4 array giving a box prompt to the
model, in XYXY format.
mask_input (np.ndarray): A low resolution mask input to the model, typically
coming from a previous prediction iteration. Has form 1xHxW, where
for SAM, H=W=256.
multimask_output (bool): If true, the model will return three masks.
For ambiguous input prompts (such as a single click), this will often
produce better masks than a single prediction. If only a single
mask is needed, the model's predicted quality score can be used
to select the best mask. For non-ambiguous prompts, such as multiple
input prompts, multimask_output=False can give better results.
          return_logits (bool): If true, returns un-thresholded mask logits
instead of a binary mask.
Returns:
(np.ndarray): The output masks in CxHxW format, where C is the
number of masks, and (H, W) is the original image size.
(np.ndarray): An array of length C containing the model's
predictions for the quality of each mask.
(np.ndarray): An array of shape CxHxW, where C is the number
of masks and H=W=256. These low resolution logits can be passed to
a subsequent iteration as mask input.
"""
if not self.is_image_set:
raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
# Transform input prompts
coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None
if point_coords is not None:
assert (
point_labels is not None
), "point_labels must be supplied if point_coords is supplied."
point_coords = self.transform.apply_coords(point_coords, self.original_size)
coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)
labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)
coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]
if box is not None:
box = self.transform.apply_boxes(box, self.original_size)
box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)
box_torch = box_torch[None, :]
if mask_input is not None:
mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)
mask_input_torch = mask_input_torch[None, :, :, :]
masks, iou_predictions, low_res_masks = self.predict_torch(
coords_torch,
labels_torch,
box_torch,
mask_input_torch,
multimask_output,
return_logits=return_logits,
)
masks_np = masks[0].detach().cpu().numpy()
iou_predictions_np = iou_predictions[0].to(torch.float32).detach().cpu().numpy()
low_res_masks_np = low_res_masks[0].to(torch.float32).detach().cpu().numpy()
return masks_np, iou_predictions_np, low_res_masks_np
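    # Illustrative usage (not part of the upstream file; names are hypothetical):
    #   predictor = SamPredictor(sam)
    #   predictor.set_image(image)                    # HWC uint8 numpy array
    #   masks, scores, logits = predictor.predict(
    #       point_coords=np.array([[x, y]]),          # one click, (X, Y) in pixels
    #       point_labels=np.array([1]),               # 1 = foreground point
    #       multimask_output=True,
    #   )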
@torch.no_grad()
def predict_torch(
self,
point_coords: Optional[torch.Tensor],
point_labels: Optional[torch.Tensor],
boxes: Optional[torch.Tensor] = None,
mask_input: Optional[torch.Tensor] = None,
multimask_output: bool = True,
return_logits: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Predict masks for the given input prompts, using the currently set image.
Input prompts are batched torch tensors and are expected to already be
transformed to the input frame using ResizeLongestSide.
Arguments:
point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
model. Each point is in (X,Y) in pixels.
point_labels (torch.Tensor or None): A BxN array of labels for the
point prompts. 1 indicates a foreground point and 0 indicates a
background point.
          boxes (torch.Tensor or None): A Bx4 array giving a box prompt to the
model, in XYXY format.
          mask_input (torch.Tensor): A low resolution mask input to the model, typically
coming from a previous prediction iteration. Has form Bx1xHxW, where
for SAM, H=W=256. Masks returned by a previous iteration of the
predict method do not need further transformation.
multimask_output (bool): If true, the model will return three masks.
For ambiguous input prompts (such as a single click), this will often
produce better masks than a single prediction. If only a single
mask is needed, the model's predicted quality score can be used
to select the best mask. For non-ambiguous prompts, such as multiple
input prompts, multimask_output=False can give better results.
          return_logits (bool): If true, returns un-thresholded mask logits
instead of a binary mask.
Returns:
(torch.Tensor): The output masks in BxCxHxW format, where C is the
number of masks, and (H, W) is the original image size.
(torch.Tensor): An array of shape BxC containing the model's
predictions for the quality of each mask.
(torch.Tensor): An array of shape BxCxHxW, where C is the number
of masks and H=W=256. These low res logits can be passed to
a subsequent iteration as mask input.
"""
# if not self.is_image_set:
# raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
if point_coords is not None:
points = (point_coords, point_labels)
else:
points = None
# Embed prompts
sparse_embeddings, dense_embeddings = self.model.prompt_encoder(
points=points,
boxes=boxes,
masks=mask_input,
)
# Predict masks
low_res_masks, iou_predictions = self.model.mask_decoder(
image_embeddings=self.features,
image_pe=self.model.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
)
# Upscale the masks to the original image resolution
masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)
if not return_logits:
masks = masks > self.model.mask_threshold
return masks, iou_predictions, low_res_masks
def get_image_embedding(self) -> torch.Tensor:
"""
Returns the image embeddings for the currently set image, with
shape 1xCxHxW, where C is the embedding dimension and (H,W) are
the embedding spatial dimension of SAM (typically C=256, H=W=64).
"""
# if not self.is_image_set:
# raise RuntimeError(
# "An image must be set with .set_image(...) to generate an embedding."
# )
assert self.features is not None, "Features must exist if an image has been set."
return self.features
@property
def device(self) -> torch.device:
return self.model.device
def reset_image(self) -> None:
"""Resets the currently set image."""
self.is_image_set = False
self.features = None
self.orig_h = None
self.orig_w = None
self.input_h = None
self.input_w = None |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from functools import partial
from .image_encoder import ImageEncoderViT
from .mask_decoder import MaskDecoder
from .prompt_encoder import PromptEncoder
from .transformer import TwoWayTransformer
from .sam import Sam
def build_sam_vit_h(checkpoint=None):
return _build_sam(
encoder_embed_dim=1280,
encoder_depth=32,
encoder_num_heads=16,
encoder_global_attn_indexes=[7, 15, 23, 31],
checkpoint=checkpoint,
)
build_sam = build_sam_vit_h
def build_sam_vit_l(checkpoint=None):
return _build_sam(
encoder_embed_dim=1024,
encoder_depth=24,
encoder_num_heads=16,
encoder_global_attn_indexes=[5, 11, 17, 23],
checkpoint=checkpoint,
)
def build_sam_vit_b(checkpoint=None):
return _build_sam(
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
encoder_global_attn_indexes=[2, 5, 8, 11],
checkpoint=checkpoint,
)
sam_model_registry = {
"default": build_sam_vit_h,
"vit_h": build_sam_vit_h,
"vit_l": build_sam_vit_l,
"vit_b": build_sam_vit_b,
}
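# Illustrative usage sketch (not part of the upstream file). It shows how the
# registry above is typically consumed; the `from .predictor import SamPredictor`
# import assumes the usual segment_anything package layout, and checkpoint=None
# yields a randomly initialized model, which is enough for a smoke test only.
def _example_registry_usage(model_type="vit_b", checkpoint=None):
    import numpy as np
    from .predictor import SamPredictor  # assumption: predictor.py sits next to this file
    sam = sam_model_registry[model_type](checkpoint=checkpoint)
    predictor = SamPredictor(sam)
    # A dummy HWC uint8 image; set_image resizes it and computes the image embedding.
    image = np.zeros((480, 640, 3), dtype=np.uint8)
    predictor.set_image(image)
    # A single foreground point prompt at pixel (x=100, y=200).
    masks, scores, low_res_logits = predictor.predict(
        point_coords=np.array([[100.0, 200.0]]),
        point_labels=np.array([1]),
        multimask_output=True,
    )
    return masks, scores, low_res_logits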
def _build_sam(
encoder_embed_dim,
encoder_depth,
encoder_num_heads,
encoder_global_attn_indexes,
checkpoint=None,
):
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size
sam = Sam(
image_encoder=ImageEncoderViT(
depth=encoder_depth,
embed_dim=encoder_embed_dim,
img_size=image_size,
mlp_ratio=4,
norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
num_heads=encoder_num_heads,
patch_size=vit_patch_size,
qkv_bias=True,
use_rel_pos=True,
global_attn_indexes=encoder_global_attn_indexes,
window_size=14,
out_chans=prompt_embed_dim,
),
prompt_encoder=PromptEncoder(
embed_dim=prompt_embed_dim,
image_embedding_size=(image_embedding_size, image_embedding_size),
input_image_size=(image_size, image_size),
mask_in_chans=16,
),
mask_decoder=MaskDecoder(
num_multimask_outputs=3,
transformer=TwoWayTransformer(
depth=2,
embedding_dim=prompt_embed_dim,
mlp_dim=2048,
num_heads=8,
),
transformer_dim=prompt_embed_dim,
iou_head_depth=3,
iou_head_hidden_dim=256,
),
pixel_mean=[123.675, 116.28, 103.53],
pixel_std=[58.395, 57.12, 57.375],
)
sam.eval()
if checkpoint is not None:
with open(checkpoint, "rb") as f:
state_dict = torch.load(f)
sam.load_state_dict(state_dict)
return sam |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from torch.nn import functional as F
from torchvision.transforms.functional import resize, to_pil_image # type: ignore
from copy import deepcopy
from typing import Tuple
class ResizeLongestSide:
"""
Resizes images to the longest side 'target_length', as well as provides
methods for resizing coordinates and boxes. Provides methods for
transforming both numpy array and batched torch tensors.
"""
def __init__(self, target_length: int) -> None:
self.target_length = target_length
def apply_image(self, image: np.ndarray) -> np.ndarray:
"""
Expects a numpy array with shape HxWxC in uint8 format.
"""
target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length)
return np.array(resize(to_pil_image(image), target_size))
def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
"""
Expects a numpy array of length 2 in the final dimension. Requires the
original image size in (H, W) format.
"""
old_h, old_w = original_size
new_h, new_w = self.get_preprocess_shape(
original_size[0], original_size[1], self.target_length
)
coords = deepcopy(coords).astype(float)
coords[..., 0] = coords[..., 0] * (new_w / old_w)
coords[..., 1] = coords[..., 1] * (new_h / old_h)
return coords
def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
"""
Expects a numpy array shape Bx4. Requires the original image size
in (H, W) format.
"""
boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size)
return boxes.reshape(-1, 4)
def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor:
"""
Expects batched images with shape BxCxHxW and float format. This
transformation may not exactly match apply_image. apply_image is
the transformation expected by the model.
"""
# Expects an image in BCHW format. May not exactly match apply_image.
target_size = self.get_preprocess_shape(image.shape[2], image.shape[3], self.target_length)
return F.interpolate(
image, target_size, mode="bilinear", align_corners=False, antialias=True
)
def apply_coords_torch(
self, coords: torch.Tensor, original_size: Tuple[int, ...]
) -> torch.Tensor:
"""
Expects a torch tensor with length 2 in the last dimension. Requires the
original image size in (H, W) format.
"""
old_h, old_w = original_size
new_h, new_w = self.get_preprocess_shape(
original_size[0], original_size[1], self.target_length
)
coords = deepcopy(coords).to(torch.float)
coords[..., 0] = coords[..., 0] * (new_w / old_w)
coords[..., 1] = coords[..., 1] * (new_h / old_h)
return coords
def apply_boxes_torch(
self, boxes: torch.Tensor, original_size: Tuple[int, ...]
) -> torch.Tensor:
"""
Expects a torch tensor with shape Bx4. Requires the original image
size in (H, W) format.
"""
boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size)
return boxes.reshape(-1, 4)
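    # Worked example for get_preprocess_shape below (added for clarity): with
    # target_length=1024, a 600x800 (HxW) image gives scale = 1024 / 800 = 1.28,
    # so the output size is (round(600 * 1.28), round(800 * 1.28)) = (768, 1024).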
@staticmethod
def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[int, int]:
"""
Compute the output size given input size and target long side length.
"""
scale = long_side_length * 1.0 / max(oldh, oldw)
newh, neww = oldh * scale, oldw * scale
neww = int(neww + 0.5)
newh = int(newh + 0.5)
return (newh, neww) |