#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
from typing import Any, Dict
from classy_vision.generic.util import log_class_usage
class ClassyTask(ABC):
"""
An abstract base class for a training task.
A ClassyTask encapsulates all the components and steps needed
to train using a :class:`classy_vision.trainer.ClassyTrainer`.
"""
def __init__(self) -> None:
"""
Constructs a ClassyTask.
"""
self.hooks = []
log_class_usage("Task", self.__class__)
@classmethod
@abstractmethod
def from_config(cls, config: Dict[str, Any]) -> "ClassyTask":
"""Instantiates a ClassyTask from a configuration.
Args:
config: A configuration for a ClassyTask.
Returns:
A ClassyTask instance.
"""
raise NotImplementedError()
@property
@abstractmethod
def where(self) -> float:
"""
Tells how far along (where) we are during training.
Returns:
A float in [0, 1) which tells the training progress.
"""
pass
@abstractmethod
def done_training(self) -> bool:
"""
Tells if we are done training.
Returns:
A boolean telling if training is over.
"""
pass
@abstractmethod
def get_classy_state(self, deep_copy: bool = False) -> Dict[str, Any]:
"""Get the state of the ClassyTask.
The returned state is used for checkpointing.
Args:
deep_copy: If True, creates a deep copy of the state dict. Otherwise, the
returned dict's state will be tied to the object's.
Returns:
A state dictionary containing the state of the task.
"""
pass
@abstractmethod
def set_classy_state(self, state):
"""Set the state of the ClassyTask.
Args:
state: The state dictionary. Must be the output of a call to
:func:`get_classy_state`.
This is used to load the state of the task from a checkpoint.
"""
pass
@abstractmethod
def prepare(self, num_dataloader_workers=0, dataloader_mp_context=None) -> None:
"""
Prepares the task for training.
Will be called by the :class:`classy_vision.trainer.ClassyTrainer` to
prepare the task, before on_start is called.
Args:
num_dataloader_workers: Number of workers to create for the dataloaders
dataloader_mp_context: Multiprocessing context used by the dataloaders
("spawn", "fork", "forkserver", or None for the default context)
"""
pass
@abstractmethod
def train_step(self) -> None:
"""
Run a train step.
This corresponds to training over one batch of data from the dataloaders.
"""
pass
@abstractmethod
def on_start(self):
"""
Start training.
Called by :class:`classy_vision.trainer.ClassyTrainer` before training starts.
"""
pass
@abstractmethod
def on_phase_start(self):
"""
Epoch start.
Called by :class:`classy_vision.trainer.ClassyTrainer` before each epoch starts.
"""
pass
@abstractmethod
def on_phase_end(self):
"""
Epoch end.
Called by :class:`classy_vision.trainer.ClassyTrainer` after each epoch ends.
"""
pass
@abstractmethod
def on_end(self):
"""
Training end.
Called by :class:`classy_vision.trainer.ClassyTrainer` after training ends.
"""
pass
@abstractmethod
def eval_step(self) -> None:
"""
Run an evaluation step.
This corresponds to evaluating the model over one batch of data.
"""
pass
def step(self) -> None:
if self.train:
self.train_step()
else:
self.eval_step()
for hook in self.hooks:
hook.on_step(self)
def run_hooks(self, local_variables: Dict[str, Any], hook_function: str) -> None:
"""
Helper function that runs a hook function for all the
:class:`classy_vision.hooks.ClassyHook`.
Args:
local_variables: Local variables created in :func:`train_step`
hook_function: One of the hook functions in the
:class:`classy_vision.hooks.ClassyHookFunctions`
enum.
"""
for hook in self.hooks:
getattr(hook, hook_function)(self, local_variables)
| ClassyVision-main | classy_vision/tasks/classy_task.py |
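The abstract class above defines the full lifecycle that a ClassyTrainer drives: from_config, prepare, then on_start / on_phase_start / train_step or eval_step / on_phase_end / on_end, with where and done_training reporting progress. Below is a minimal, hedged sketch of a concrete subclass (the "noop_task" name and NoOpTask class are hypothetical, not part of Classy Vision); it only illustrates which methods a subclass must override, assuming Classy Vision is installed.

# Hypothetical example, not part of the repository.
from typing import Any, Dict

from classy_vision.tasks import ClassyTask, register_task


@register_task("noop_task")
class NoOpTask(ClassyTask):
    """Toy task that just counts steps; every abstract method is overridden."""

    def __init__(self, num_steps: int = 10) -> None:
        super().__init__()
        self.num_steps = num_steps
        self.steps_done = 0
        self.train = True  # step() dispatches on this flag

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "NoOpTask":
        return cls(num_steps=config.get("num_steps", 10))

    @property
    def where(self) -> float:
        return min(self.steps_done / self.num_steps, 0.999)

    def done_training(self) -> bool:
        return self.steps_done >= self.num_steps

    def get_classy_state(self, deep_copy: bool = False) -> Dict[str, Any]:
        return {"steps_done": self.steps_done}

    def set_classy_state(self, state) -> None:
        self.steps_done = state["steps_done"]

    def prepare(self, num_dataloader_workers=0, dataloader_mp_context=None) -> None:
        pass  # nothing to set up for this toy task

    def train_step(self) -> None:
        self.steps_done += 1

    def eval_step(self) -> None:
        pass

    def on_start(self):
        pass

    def on_phase_start(self):
        pass

    def on_phase_end(self):
        pass

    def on_end(self):
        pass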
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import traceback
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
from .classy_task import ClassyTask
FILE_ROOT = Path(__file__).parent
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
TASK_REGISTRY_TB = {}
TASK_CLASS_NAMES_TB = {}
def build_task(config):
"""Builds a ClassyTask from a config.
This assumes a 'name' key in the config which is used to determine what
task class to instantiate. For instance, a config `{"name": "my_task",
"foo": "bar"}` will find a class that was registered as "my_task"
(see :func:`register_task`) and call .from_config on it."""
task = TASK_REGISTRY[config["name"]].from_config(config)
return task
def register_task(name):
"""Registers a ClassyTask subclass.
This decorator allows Classy Vision to instantiate a subclass of ClassyTask
from a configuration file, even if the class itself is not part of the
Classy Vision framework. To use it, apply this decorator to a ClassyTask
subclass, like this:
.. code-block:: python
@register_task('my_task')
class MyTask(ClassyTask):
...
To instantiate a task from a configuration file, see :func:`build_task`."""
def register_task_cls(cls):
if name in TASK_REGISTRY:
msg = "Cannot register duplicate task ({}). Already registered at \n{}\n"
raise ValueError(msg.format(name, TASK_REGISTRY_TB[name]))
if not issubclass(cls, ClassyTask):
raise ValueError(
"Task ({}: {}) must extend ClassyTask".format(name, cls.__name__)
)
if cls.__name__ in TASK_CLASS_NAMES:
msg = (
"Cannot register task with duplicate class name({})."
+ "Previously registered at \n{}\n"
)
raise ValueError(
msg.format(cls.__name__, TASK_CLASS_NAMES_TB[cls.__name__])
)
tb = "".join(traceback.format_stack())
TASK_REGISTRY[name] = cls
TASK_CLASS_NAMES.add(cls.__name__)
TASK_REGISTRY_TB[name] = tb
TASK_CLASS_NAMES_TB[cls.__name__] = tb
return cls
return register_task_cls
from .classification_task import ClassificationTask # isort:skip
from .fine_tuning_task import FineTuningTask # isort:skip
__all__ = [
"ClassyTask",
"FineTuningTask",
"build_task",
"register_task",
"ClassificationTask",
]
# automatically import any Python files in the tasks/ directory
import_all_modules(FILE_ROOT, "classy_vision.tasks")
| ClassyVision-main | classy_vision/tasks/__init__.py |
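build_task above resolves the "name" key through TASK_REGISTRY and then delegates to the registered class's from_config. A hedged round-trip sketch, assuming the hypothetical NoOpTask / "noop_task" registration from the previous sketch has been imported so that it is present in the registry:

# Hypothetical example, not part of the repository.
from classy_vision.tasks import build_task

config = {"name": "noop_task", "num_steps": 5}  # "name" selects the registered class
task = build_task(config)                       # calls NoOpTask.from_config(config)
print(type(task).__name__)                      # -> "NoOpTask"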
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import copy
import enum
import json
import logging
import math
import multiprocessing as mp
import time
from typing import Any, Dict, List, NamedTuple, Optional, Union
import torch
import torch.nn as nn
from classy_vision.dataset import build_dataset, ClassyDataset
from classy_vision.dataset.transforms.mixup import MixupTransform
from classy_vision.generic.distributed_util import (
all_reduce_mean,
barrier,
init_distributed_data_parallel_model,
is_distributed_training_run,
)
from classy_vision.generic.util import (
copy_model_to_gpu,
get_torch_version,
load_and_broadcast_checkpoint,
master_params,
recursive_copy_to_gpu,
split_batchnorm_params,
Timer,
update_classy_state,
)
from classy_vision.hooks import build_hooks, CheckpointHook, ClassyHook
from classy_vision.losses import build_loss, ClassyLoss
from classy_vision.meters import build_meters, ClassyMeter
from classy_vision.models import build_model, ClassyModel
from classy_vision.optim import (
build_optimizer,
build_optimizer_schedulers,
ClassyOptimizer,
)
from classy_vision.optim.zero import ZeRO
from torch.distributed import broadcast
from . import register_task
from .classy_task import ClassyTask
try:
import apex
apex_available = True
except ImportError:
apex_available = False
try:
from torch.cuda.amp import GradScaler as TorchGradScaler
except ImportError:
pass
try:
from fairscale.optim.grad_scaler import ShardedGradScaler
fairscale_available = True
except ImportError:
fairscale_available = False
class AmpType(enum.Enum):
# Automatic Mixed Precision supported types
APEX = enum.auto()
PYTORCH = enum.auto()
class BroadcastBuffersMode(enum.Enum):
DISABLED = enum.auto()
# Enable DistributedDataParallel's broadcast_buffers option, synchronizing
# model buffers every forward pass.
FORWARD_PASS = enum.auto()
# Similar to FORWARD_PASS, but only synchronizes model buffers once
# per epoch, between train and test phases. If your motivation for
# synchronizing buffers is for buffers to be consistent during eval, use
# this instead of FORWARD_PASS to reduce training overhead.
BEFORE_EVAL = enum.auto()
class BatchNormSyncMode(enum.Enum):
DISABLED = enum.auto() # No Synchronized Batch Normalization
PYTORCH = enum.auto() # Use torch.nn.SyncBatchNorm
APEX = enum.auto() # Use apex.parallel.SyncBatchNorm, needs apex to be installed
class LastBatchInfo(NamedTuple):
loss: torch.Tensor
output: torch.Tensor
target: torch.Tensor
sample: Dict[str, Any]
step_data: Dict[str, Any]
@register_task("classification_task")
class ClassificationTask(ClassyTask):
"""Basic classification training task.
This task encapsulates all of the components and steps needed to
train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`.
Assumes a train / test phase per each epoch and that the datasets
have the same API as the map-style Dataset class in
`torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html
#torch.utils.data.Dataset>`_ (in particular, this task makes use of
the len). If you are using an `IterableDataset <https://pytorch.org/docs/
stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task
may be appropriate.
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used
for computing the loss in each forward pass
:var datasets: Mapping from a ``phase_type`` in ["train", "test"]
to dataset used for training (or testing)
:var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`)
to calculate during training
:var num_epochs: Number of epochs (passes over dataset) to train
:var test_only: Used to only run the test phase
:var base_model: Model to be trained, unwrapped in DDP or DP wrappers
:var optimizer: Optimizer used in train step
:var optimizer_schedulers: Dictionary. Key is the name of the optimizer
option (e.g. lr), value is a ClassyParamScheduler
:var checkpoint: Serializable dict which represents state in training
:var phases: List of phase specific information, e.g. if phase is
train / test.
:var hooks: List of hooks to apply during training
:var train: Phase type, if true it means we are training,
false means testing
:var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel)
:var phase_idx: Current phase id, first phase is 0, if task has not started
training then returns -1
:var train_phase_idx: Only counts train phases
:var num_updates: Number of total parameter updates applied to model
by the optimizer
:var data_iterator: Iterator which can be used to obtain batches
:var losses: Loss curve
:var perf_log: list of training speed measurements, to be logged
:var clip_grad_norm: maximum gradient norm (default None)
:var simulated_global_batchsize: batch size simulated via gradient accumulation
:var optimizer_period: apply optimizer after this many steps; derived from
simulated_global_batchsize, default 1.
"""
def __init__(self):
"""Constructs a ClassificationTask"""
super().__init__()
self.base_loss = None
self.datasets = {}
self.meters = []
self.num_epochs = 1
self.test_phase_period = 1
self.train_phases_per_epoch = 0
self.test_only = False
self.base_model = None
self.optimizer = None
self.optimizer_schedulers = {}
self.checkpoint_dict = None
self.checkpoint_path = None
self.checkpoint_load_strict = True
self.phases = []
self.hooks = []
self.train = True
self.distributed_model = None
self.distributed_loss = None
self.phase_idx = -1
self.train_phase_idx = -1
self.num_updates = 0
self.dataloader = None
self.data_iterator = None
self.losses = []
self.broadcast_buffers_mode: BroadcastBuffersMode = (
BroadcastBuffersMode.BEFORE_EVAL
)
self.amp_args = None
self.amp_type = None
self.amp_grad_scaler = None
self.mixup_transform = None
self.perf_log = []
self.last_batch = None
self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED
self.find_unused_parameters = False
self.use_gpu = torch.cuda.is_available()
self.dataloader_mp_context = "spawn"
self.bn_weight_decay = False
self._train_only = True
self.clip_grad_norm = None
self.simulated_global_batchsize = None
self.optimizer_period = 1
self.ddp_bucket_cap_mb = 25
self.use_sharded_ddp = False
self.fp16_grad_compress = False
def set_use_sharded_ddp(self, use_sharded_ddp: bool):
self.use_sharded_ddp = use_sharded_ddp
if self.use_sharded_ddp:
logging.info("Using Sharded DDP")
return self
def set_use_gpu(self, use_gpu: bool):
self.use_gpu = use_gpu
assert (
not self.use_gpu or torch.cuda.is_available()
), "CUDA required to train on GPUs"
return self
def set_clip_grad_norm(self, clip_grad_norm: Optional[float]):
"""Sets maximum gradient norm.
None means gradient clipping is disabled. Defaults to None."""
self.clip_grad_norm = clip_grad_norm
if clip_grad_norm is None:
logging.info("Disabled gradient norm clipping.")
else:
logging.info(
f"Enabled gradient norm clipping with threshold: {clip_grad_norm}"
)
return self
def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]):
"""Sets a simulated batch size by gradient accumulation.
Gradient accumulation adds up gradients from multiple minibatches and
steps the optimizer every N train_steps, where N is optimizer_period.
When enabled, the very last train_steps might end up not updating the
model, depending on the number of total steps. None means gradient
accumulation is disabled. Defaults to None."""
self.simulated_global_batchsize = simulated_global_batchsize
return self
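# Illustrative arithmetic for the setting above (editorial sketch, values are
# hypothetical): with a global batch size of 256 and
# simulated_global_batchsize=1024, prepare() derives
# optimizer_period = 1024 // 256 = 4, so gradients from 4 consecutive
# train_steps are accumulated (and rescaled by 1/4 in run_optimizer) before
# each optimizer step.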
def set_checkpoint(self, checkpoint_path: str):
"""Sets checkpoint on task.
Args:
checkpoint_path: The path to load the checkpoint from. Can be a file or a
directory. See :func:`load_checkpoint` for more information.
"""
self.checkpoint_path = checkpoint_path
return self
def set_checkpoint_load_strict(self, checkpoint_load_strict: bool):
"""Sets checkpoint on task.
Args:
checkpoint_load_strict: Whether to use load_strict when copying model weights
"""
self.checkpoint_load_strict = checkpoint_load_strict
return self
def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]):
"""Sets the checkpoint dict in the task. Only used for testing.
Args:
checkpoint_dict: A serializable dict representing current task state
"""
self.checkpoint_dict = checkpoint_dict
return self
def set_num_epochs(self, num_epochs: Union[int, float]):
"""Set number of epochs to be run.
Args:
num_epochs: Number of epochs to run task
"""
self.num_epochs = num_epochs
return self
def set_test_phase_period(self, test_phase_period: int):
"""Set the period of test phase.
Args:
test_phase_period: The period of test phase
"""
self.test_phase_period = test_phase_period
return self
def set_dataset(self, dataset: ClassyDataset, phase_type: str):
"""Set dataset for phase type on task
Args:
dataset: ClassyDataset for returning samples.
phase_type: str must be one of "train" or "test"
"""
assert phase_type in [
"train",
"test",
], "phase_type must be in ['train', 'test']"
self.datasets[phase_type] = dataset
if phase_type == "train":
self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1)
else:
self._train_only = False
return self
def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]):
"""Set the multiprocessing context used by the dataloader.
The context can be either 'spawn', 'fork', 'forkserver' or None (uses the
default context). See
https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context
for more details."""
self.dataloader_mp_context = dataloader_mp_context
return self
def set_optimizer(self, optimizer: ClassyOptimizer):
"""Set optimizer for task
Args:
optimizer: optimizer for task
"""
self.optimizer = optimizer
return self
def set_loss(self, loss: ClassyLoss):
"""Set loss function for task
Args:
loss: loss for task
"""
self.base_loss = loss
return self
def set_meters(self, meters: List["ClassyMeter"]):
"""Set meters for task
Args:
meters: list of meters to compute during training
"""
self.meters = meters
return self
def set_distributed_options(
self,
broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL,
batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED,
batch_norm_sync_group_size: int = 0,
find_unused_parameters: bool = False,
bucket_cap_mb: int = 25,
fp16_grad_compress: bool = False,
):
"""Set distributed options.
Args:
broadcast_buffers_mode: Broadcast buffers mode. See
:class:`BroadcastBuffersMode` for options.
batch_norm_sync_mode: Batch normalization synchronization mode. See
:class:`BatchNormSyncMode` for options.
batch_norm_sync_group_size: Group size to use for synchronized batch norm.
0 means that the stats are synchronized across all replicas. For
efficient synchronization, set it to the number of GPUs in a node (
usually 8).
find_unused_parameters: See
:class:`torch.nn.parallel.DistributedDataParallel` for information.
bucket_cap_mb: See
:class:`torch.nn.parallel.DistributedDataParallel` for information.
Raises:
RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex
is not installed.
"""
self.broadcast_buffers_mode = broadcast_buffers_mode
if batch_norm_sync_group_size > 0:
if not batch_norm_sync_mode == BatchNormSyncMode.APEX:
# this should ideally work with PyTorch Sync BN as well, but it
# fails while initializing DDP for some reason.
raise ValueError(
"batch_norm_sync_group_size can be > 0 only when "
"Apex Synchronized Batch Normalization is being used."
)
self.batch_norm_sync_group_size = batch_norm_sync_group_size
if batch_norm_sync_mode == BatchNormSyncMode.DISABLED:
logging.info("Synchronized Batch Normalization is disabled")
else:
if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available:
raise RuntimeError("apex is not installed")
msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}"
if self.batch_norm_sync_group_size > 0:
msg += f" and group size {batch_norm_sync_group_size}"
logging.info(msg)
self.batch_norm_sync_mode = batch_norm_sync_mode
if find_unused_parameters:
logging.info("Enabling find_unused_parameters in DDP")
self.find_unused_parameters = find_unused_parameters
self.ddp_bucket_cap_mb = bucket_cap_mb
if fp16_grad_compress:
if get_torch_version() < [1, 8]:
raise RuntimeError(
"FP16 grad compression is only supported since PyTorch 1.8"
)
logging.info("Enabling FP16 grad compression")
self.fp16_grad_compress = fp16_grad_compress
return self
def set_hooks(self, hooks: List["ClassyHook"]):
"""Set hooks for task
Args:
hooks: List of hooks to apply during training
"""
from classy_vision.hooks import ClassyHook
assert isinstance(hooks, list)
assert all(isinstance(hook, ClassyHook) for hook in hooks)
assert len({hook.name() for hook in hooks}) == len(
hooks
), "Cannot have repeated hooks of the same class"
# TODO (zyan3): we move checkpoint hook to the end of the list because some hooks
# may change the state of the model, and we want to save changed state in the checkpoint.
# This is a temporary fix.
non_checkpoint_hooks = [
hook for hook in hooks if not isinstance(hook, CheckpointHook)
]
checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)]
hooks = non_checkpoint_hooks + checkpoint_hooks
self.hooks = hooks
return self
def set_model(self, model: ClassyModel):
"""Set model for task
Args:
model: Model to be trained
"""
self.base_model = model
return self
def set_test_only(self, test_only: bool):
"""Set test only flag
Args:
test_only: If true, only test phases will be run
"""
self.test_only = test_only
return self
def set_bn_weight_decay(self, bn_weight_decay: bool):
assert type(bn_weight_decay) == bool
self.bn_weight_decay = bn_weight_decay
return self
def set_amp_args(self, amp_args: Optional[Dict[str, Any]]):
"""Disable / enable apex.amp and set the automatic mixed precision parameters.
apex.amp can be utilized for mixed / half precision training.
Args:
amp_args: Dictionary containing arguments to be passed to
amp.initialize. Set to None to disable amp. To enable mixed
precision training, pass amp_args={"opt_level": "O1"} here.
See https://nvidia.github.io/apex/amp.html for more info.
Raises:
RuntimeError: If opt_level is not None and apex is not installed.
Warning: apex needs to be installed to utilize this feature.
"""
self.amp_args = amp_args
if amp_args is None:
logging.info("AMP disabled")
else:
# Check that the requested AMP type is known
try:
self.amp_type = AmpType[self.amp_args["amp_type"].upper()]
except KeyError:
logging.info("AMP type not specified, defaulting to Apex")
self.amp_type = AmpType.APEX
# Check for CUDA availability, required for both Apex and Pytorch AMP
if not torch.cuda.is_available():
raise RuntimeError(
"AMP is required but CUDA is not supported, cannot enable AMP"
)
# Check for Apex availability
if self.amp_type == AmpType.APEX and not apex_available:
raise RuntimeError(
"Apex AMP is required but Apex is not installed, cannot enable AMP"
)
if self.use_sharded_ddp:
if self.amp_type == AmpType.APEX:
raise RuntimeError(
"ShardedDDP has been requested, which is incompatible with Apex AMP"
)
if not fairscale_available:
raise RuntimeError(
"ShardedDDP has been requested, but fairscale is not installed in the current environment"
)
# Set Torch AMP grad scaler, used to prevent gradient underflow
elif self.amp_type == AmpType.PYTORCH:
if self.use_sharded_ddp:
logging.info("Using ShardedGradScaler to manage Pytorch AMP")
self.amp_grad_scaler = ShardedGradScaler()
else:
self.amp_grad_scaler = TorchGradScaler()
logging.info(f"AMP enabled with args {amp_args}")
return self
def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]):
"""Disable / enable mixup transform for data augmentation
Args:
mixup_transform: a callable object which performs mixup data augmentation
"""
self.mixup_transform = mixup_transform
if mixup_transform is None:
logging.info("mixup disabled")
else:
logging.info("mixup enabled")
return self
def set_optimizer_schedulers(self, schedulers):
self.optimizer_schedulers = schedulers
return self
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask":
"""Instantiates a ClassificationTask from a configuration.
Args:
config: A configuration for a ClassificationTask.
See :func:`__init__` for parameters expected in the config.
Returns:
A ClassificationTask instance.
"""
test_only = config.get("test_only", False)
if not test_only:
# TODO Make distinction between epochs and phases in optimizer clear
train_phases_per_epoch = config["dataset"]["train"].get(
"phases_per_epoch", 1
)
optimizer_config = config["optimizer"]
optimizer_config["num_epochs"] = (
config["num_epochs"] * train_phases_per_epoch
)
optimizer = build_optimizer(optimizer_config)
param_schedulers = build_optimizer_schedulers(optimizer_config)
datasets = {}
phase_types = ["train", "test"]
for phase_type in phase_types:
if phase_type in config["dataset"]:
datasets[phase_type] = build_dataset(config["dataset"][phase_type])
loss = build_loss(config["loss"])
amp_args = config.get("amp_args")
meters = build_meters(config.get("meters", {}))
model = build_model(config["model"])
mixup_transform = None
if config.get("mixup") is not None:
assert "alpha" in config["mixup"], "key alpha is missing in mixup dict"
mixup_transform = MixupTransform(
config["mixup"]["alpha"],
num_classes=config["mixup"].get("num_classes"),
cutmix_alpha=config["mixup"].get("cutmix_alpha", 0),
cutmix_minmax=config["mixup"].get("cutmix_minmax"),
mix_prob=config["mixup"].get("mix_prob", 1.0),
switch_prob=config["mixup"].get("switch_prob", 0.5),
mode=config["mixup"].get("mode", "batch"),
label_smoothing=config["mixup"].get("label_smoothing", 0.0),
)
# hooks config is optional
hooks_config = config.get("hooks")
hooks = []
if hooks_config is not None:
hooks = build_hooks(hooks_config)
distributed_config = config.get("distributed", {})
distributed_options = {
"broadcast_buffers_mode": BroadcastBuffersMode[
distributed_config.get("broadcast_buffers", "before_eval").upper()
],
"batch_norm_sync_mode": BatchNormSyncMode[
distributed_config.get("batch_norm_sync_mode", "disabled").upper()
],
"batch_norm_sync_group_size": distributed_config.get(
"batch_norm_sync_group_size", 0
),
"find_unused_parameters": distributed_config.get(
"find_unused_parameters", False
),
"bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25),
"fp16_grad_compress": distributed_config.get("fp16_grad_compress", False),
}
task = (
cls()
.set_num_epochs(config["num_epochs"])
.set_test_phase_period(config.get("test_phase_period", 1))
.set_loss(loss)
.set_test_only(test_only)
.set_model(model)
.set_meters(meters)
.set_amp_args(amp_args)
.set_mixup_transform(mixup_transform)
.set_distributed_options(**distributed_options)
.set_hooks(hooks)
.set_bn_weight_decay(config.get("bn_weight_decay", False))
.set_clip_grad_norm(config.get("clip_grad_norm"))
.set_simulated_global_batchsize(config.get("simulated_global_batchsize"))
.set_use_sharded_ddp(config.get("use_sharded_ddp", False))
)
if not test_only:
task.set_optimizer(optimizer)
task.set_optimizer_schedulers(param_schedulers)
use_gpu = config.get("use_gpu")
if use_gpu is not None:
task.set_use_gpu(use_gpu)
for phase_type in datasets:
task.set_dataset(datasets[phase_type], phase_type)
# NOTE: this is a private member and only meant to be used for
# logging/debugging purposes. See __repr__ implementation
task._config = config
return task
@property
def num_batches_per_phase(self):
"""Returns number of batches in current phase iterator"""
return len(self.data_iterator)
@property
def model(self):
"""Returns model used in training (can be wrapped with DDP)"""
return (
self.distributed_model if is_distributed_training_run() else self.base_model
)
@property
def loss(self):
"""Returns loss used in training (can be wrapped with DDP)"""
return self.distributed_loss if self.distributed_loss else self.base_loss
@property
def phase_type(self):
"""Returns current phase type. String with value "train" or "test" """
return "train" if self.train else "test"
@property
def eval_phase_idx(self):
"""Returns current evaluation phase"""
return self.phase_idx - self.train_phase_idx - 1
def get_total_training_phases(self):
"""
Returns the total number of "train" phases in the task
"""
num_training_phases = 0
for phase in self.phases:
if phase["train"] is True:
num_training_phases += 1
return num_training_phases
def get_total_test_phases(self):
"""
Returns the total number of "test" phases in the task
"""
num_test_phases = 0
for phase in self.phases:
if phase["train"] is False:
num_test_phases += 1
return num_test_phases
def _build_phases(self):
"""Returns list of phases from config.
These phases will look like:
{
train: is this a train or test phase?
optimizer: optimizer settings
}
- If this is a test only run, then only test phases will be
generated
- If this is a training run with both train and test datasets, then x phases =
x train phases + x test phases, interleaved. If test_phase_period > 1, test
phases are only added after test_phase_period train phases. The last phase is
always a test phase.
- If this is a training run with only a train dataset, then x phases = x train
phases.
"""
if not self.test_only:
phases = [
{"train": True}
for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs))
]
if self._train_only:
return phases
final_phases = []
for i, phase in enumerate(phases):
final_phases.append(phase)
if (i + 1) % self.test_phase_period == 0:
final_phases.append({"train": False})
if final_phases[-1]["train"]:
final_phases.append({"train": False})
return final_phases
return [{"train": False} for _ in range(self.num_epochs)]
def build_dataloader_from_dataset(self, dataset, **kwargs):
"""Builds a dataloader from the provided dataset
Args:
dataset: A ClassyDataset
kwargs: Additional kwargs to pass during dataloader construction for
derived classes
"""
return dataset.iterator(
phase_type=self.phase_type,
current_phase_id=self.train_phase_idx if self.train else 0,
pin_memory=self.use_gpu and torch.cuda.device_count() > 1,
multiprocessing_context=mp.get_context(self.dataloader_mp_context),
**kwargs,
)
def build_dataloaders_for_current_phase(self):
"""Builds dataloader(s) for the current phase.
Deriving classes can override this method to support custom behavior, like
supporting multiple dataloaders in parallel.
"""
self.dataloader = self.build_dataloader_from_dataset(
self.datasets[self.phase_type]
)
def prepare_optimizer(self, optimizer, model, loss=None):
bn_params, other_params = split_batchnorm_params(model)
if loss is not None:
bn_params_loss, params_loss = split_batchnorm_params(loss)
bn_params = bn_params + bn_params_loss
other_params = other_params + params_loss
bn_schedulers = self.optimizer_schedulers.copy()
if not self.bn_weight_decay:
bn_schedulers["weight_decay"] = 0
param_groups = [{"params": other_params, **self.optimizer_schedulers}]
if len(bn_params) > 0:
param_groups.append({"params": bn_params, **bn_schedulers})
self.optimizer.set_param_groups(param_groups)
def prepare(self):
"""Prepares task for training, populates all derived attributes"""
self.phases = self._build_phases()
self.train = False if self.test_only else self.train
if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH:
self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model)
elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX:
sync_bn_process_group = apex.parallel.create_syncbn_process_group(
self.batch_norm_sync_group_size
)
self.base_model = apex.parallel.convert_syncbn_model(
self.base_model, process_group=sync_bn_process_group
)
# move the model and loss to the right device
if self.use_gpu:
self.base_model, self.base_loss = copy_model_to_gpu(
self.base_model, self.base_loss
)
else:
self.base_loss.cpu()
self.base_model.cpu()
if self.optimizer is not None:
self.prepare_optimizer(
optimizer=self.optimizer, model=self.base_model, loss=self.base_loss
)
if self.amp_args is not None:
if self.amp_type == AmpType.APEX:
# Initialize apex.amp. This updates the model and the PyTorch optimizer (
# if training, which is wrapped by the ClassyOptimizer in self.optimizer).
# Please note this must happen before loading the checkpoint, because
# there's amp state to be restored.
if self.optimizer is None:
self.base_model = apex.amp.initialize(
self.base_model, optimizers=None, **self.amp_args
)
else:
self.base_model, self.optimizer.optimizer = apex.amp.initialize(
self.base_model, self.optimizer.optimizer, **self.amp_args
)
if self.simulated_global_batchsize is not None:
if self.simulated_global_batchsize % self.get_global_batchsize() != 0:
raise ValueError(
f"Global batch size ({self.get_global_batchsize()}) must divide "
f"simulated_global_batchsize ({self.simulated_global_batchsize})"
)
else:
self.simulated_global_batchsize = self.get_global_batchsize()
self.optimizer_period = (
self.simulated_global_batchsize // self.get_global_batchsize()
)
if self.optimizer_period > 1:
logging.info(
f"Using gradient accumulation with a period of {self.optimizer_period}"
)
if self.checkpoint_path:
self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path)
classy_state_dict = (
None
if self.checkpoint_dict is None
else self.checkpoint_dict["classy_state_dict"]
)
if classy_state_dict is not None:
state_load_success = update_classy_state(self, classy_state_dict)
assert (
state_load_success
), "Update classy state from checkpoint was unsuccessful."
self.init_distributed_data_parallel_model()
def init_distributed_data_parallel_model(self):
"""
Initialize
`torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/
docs/stable/nn.html#distributeddataparallel>`_.
Needed for distributed training. This is where a model should be wrapped by DDP.
"""
if not is_distributed_training_run():
return
assert (
self.distributed_model is None
), "init_ddp_non_elastic must only be called once"
broadcast_buffers = (
self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS
)
if self.use_sharded_ddp:
if not isinstance(self.optimizer, ZeRO):
raise ValueError(
"ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer"
)
from fairscale.nn.data_parallel import ShardedDataParallel
# Replace the original DDP wrap by the shard-aware ShardedDDP
self.distributed_model = ShardedDataParallel(
module=self.base_model,
sharded_optimizer=self.optimizer.optimizer,
broadcast_buffers=broadcast_buffers,
)
else:
self.distributed_model = init_distributed_data_parallel_model(
self.base_model,
broadcast_buffers=broadcast_buffers,
find_unused_parameters=self.find_unused_parameters,
bucket_cap_mb=self.ddp_bucket_cap_mb,
)
if self.fp16_grad_compress:
from torch.distributed.algorithms import ddp_comm_hooks
# FP16 hook is stateless and only takes a process group as the state.
# We use the default process group so we set the state to None.
process_group = None
self.distributed_model.register_comm_hook(
process_group, ddp_comm_hooks.default_hooks.fp16_compress_hook
)
if (
isinstance(self.base_loss, ClassyLoss)
and self.base_loss.has_learned_parameters()
):
logging.info("Initializing distributed loss")
self.distributed_loss = init_distributed_data_parallel_model(
self.base_loss,
broadcast_buffers=broadcast_buffers,
find_unused_parameters=self.find_unused_parameters,
bucket_cap_mb=self.ddp_bucket_cap_mb,
)
@property
def where(self):
"""Returns the proportion of training that has completed. If in test
only mode, returns the proportion of testing completed.
Returned value is a float in the range [0, 1)
"""
current_step = self.num_updates / self.get_global_batchsize()
num_phases = (
self.get_total_test_phases()
if self.test_only
else self.get_total_training_phases()
)
if self.num_batches_per_phase <= 0:
raise RuntimeError("No batches to read. Is the dataset empty?")
num_steps = num_phases * self.num_batches_per_phase
where = current_step / num_steps
return where
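# Illustrative arithmetic for the property above (hypothetical values): with a
# global batch size of 32, num_updates=6400 gives current_step = 6400 / 32 = 200;
# with 10 train phases of 100 batches each, num_steps = 10 * 100 = 1000 and
# where = 200 / 1000 = 0.2, i.e. 20% of training completed.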
def get_classy_state(self, deep_copy: bool = False):
"""Returns serialiable state of task
Args:
deep_copy: If true, does a deep copy of state before returning.
"""
optimizer_state = {}
if self.optimizer is not None:
optimizer_state = self.optimizer.get_classy_state()
classy_state_dict = {
"train": self.train,
"base_model": self.base_model.get_classy_state(),
"meters": [meter.get_classy_state() for meter in self.meters],
"optimizer": optimizer_state,
"phase_idx": self.phase_idx,
"train_phase_idx": self.train_phase_idx,
"num_updates": self.num_updates,
"losses": self.losses,
"hooks": {hook.name(): hook.get_classy_state() for hook in self.hooks},
"loss": {},
}
if "train" in self.datasets and self._is_checkpointable_dataset(
self.datasets["train"]
):
classy_state_dict["train_dataset_iterator"] = self.datasets[
"train"
].get_classy_state()
if isinstance(self.base_loss, ClassyLoss):
classy_state_dict["loss"] = self.base_loss.get_classy_state()
if self.amp_args is not None:
if self.amp_type == AmpType.APEX:
classy_state_dict["amp"] = apex.amp.state_dict()
elif self.amp_grad_scaler is not None:
classy_state_dict["amp"] = self.amp_grad_scaler.state_dict()
if deep_copy:
classy_state_dict = copy.deepcopy(classy_state_dict)
return classy_state_dict
def set_classy_state(self, state):
"""Set task state
Args:
state: Dict containing state of a task
"""
self.train = False if self.test_only else state["train"]
self.base_model.set_classy_state(state["base_model"])
if self.test_only:
# if we're only testing, just need the state of the model to be updated
return
self.phase_idx = state["phase_idx"]
self.num_updates = state["num_updates"]
self.train_phase_idx = state["train_phase_idx"]
self.losses = state["losses"]
for meter, meter_state in zip(self.meters, state["meters"]):
meter.set_classy_state(meter_state)
if self.optimizer is not None:
self.optimizer.set_classy_state(state["optimizer"])
if state.get("loss") and isinstance(self.base_loss, ClassyLoss):
self.base_loss.set_classy_state(state["loss"])
if "amp" in state:
if self.amp_type == AmpType.APEX:
apex.amp.load_state_dict(state["amp"])
else:
self.amp_grad_scaler.load_state_dict(state["amp"])
for hook in self.hooks:
# we still want to be able to run when new hooks are added or old
# hooks are removed
if hook.name() in state["hooks"]:
hook.set_classy_state(state["hooks"][hook.name()])
else:
logging.warning(f"No state found for hook: {hook.name()}")
if "train" in self.datasets and self._is_checkpointable_dataset(
self.datasets["train"]
):
self.datasets["train"].set_classy_state(state.get("train_dataset_iterator"))
@staticmethod
def _is_checkpointable_dataset(dataset):
return hasattr(dataset, "get_classy_state") and hasattr(
dataset, "set_classy_state"
)
def eval_step(self):
self.last_batch = None
# Process next sample
with Timer() as timer:
sample = next(self.data_iterator)
assert isinstance(sample, dict) and "input" in sample and "target" in sample, (
f"Returned sample [{sample}] is not a map with 'input' and"
+ "'target' keys"
)
target = sample["target"]
if self.use_gpu:
sample = recursive_copy_to_gpu(sample, non_blocking=True)
# Optional Pytorch AMP context
torch_amp_context = (
torch.cuda.amp.autocast()
if self.amp_type == AmpType.PYTORCH
else contextlib.suppress()
)
with torch.no_grad(), torch_amp_context:
output = self.model(sample["input"])
local_loss = self.compute_loss(output, sample)
loss = local_loss.detach().clone()
self.losses.append(loss.data.cpu().item())
self.update_meters(output, sample)
# Move some data to the task so hooks get a chance to access it
self.last_batch = LastBatchInfo(
loss=loss,
output=output,
target=target,
sample=sample,
step_data={"sample_fetch_time": timer.elapsed_time},
)
def check_inf_nan(self, loss):
if loss == float("inf") or loss == float("-inf") or loss != loss:
raise FloatingPointError(f"Loss is infinity or NaN: {loss}")
def _should_do_step(self):
"""Tells if we will be performing an optimizer step.
Returns True always if there is no gradient accumulation. With gradient
accumulation returns True only when the gradients will be synchronized and we
will be performing an optimizer step.
"""
update_idx = self.num_updates // self.get_global_batchsize()
return (update_idx % self.optimizer_period) == self.optimizer_period - 1
def train_step(self):
"""Train step to be executed in train loop."""
self.last_batch = None
# Process next sample
with Timer() as timer:
sample = next(self.data_iterator)
assert isinstance(sample, dict) and "input" in sample and "target" in sample, (
f"Returned sample [{sample}] is not a map with 'input' and"
+ "'target' keys"
)
# Copy sample to GPU
target = sample["target"]
if self.use_gpu:
sample = recursive_copy_to_gpu(sample, non_blocking=True)
if self.mixup_transform is not None:
sample = self.mixup_transform(sample)
# Optional Pytorch AMP context
torch_amp_context = (
torch.cuda.amp.autocast()
if self.amp_type == AmpType.PYTORCH
else contextlib.suppress()
)
# only sync with DDP when we need to perform an optimizer step
# an optimizer step can be skipped if gradient accumulation is enabled
do_step = self._should_do_step()
ctx_mgr_model = (
self.distributed_model.no_sync()
if self.distributed_model is not None and not do_step
else contextlib.suppress()
)
ctx_mgr_loss = (
self.distributed_loss.no_sync()
if self.distributed_loss is not None and not do_step
else contextlib.suppress()
)
with ctx_mgr_model, ctx_mgr_loss:
# Forward pass
with torch.enable_grad(), torch_amp_context:
output = self.compute_model(sample)
local_loss = self.compute_loss(output, sample)
loss = local_loss.detach().clone()
self.losses.append(loss.data.cpu().item())
self.update_meters(output, sample)
# Backwards pass + optimizer step
self.run_optimizer(local_loss)
self.num_updates += self.get_global_batchsize()
# Move some data to the task so hooks get a chance to access it
self.last_batch = LastBatchInfo(
loss=loss,
output=output,
target=target,
sample=sample,
step_data={"sample_fetch_time": timer.elapsed_time},
)
def compute_model(self, sample):
return self.model(sample["input"])
def compute_loss(self, model_output, sample):
return self.loss(model_output, sample["target"])
def run_optimizer(self, loss):
"""Runs backwards pass and update the optimizer"""
self.check_inf_nan(loss)
# Gradient accumulation logic. We always set optimizer_period, even
# if gradient accumulation is disabled. Assumes all batches have the
# same size
update_idx = self.num_updates // self.get_global_batchsize()
do_zero_grad = (update_idx % self.optimizer_period) == 0
do_step = self._should_do_step()
if do_zero_grad:
self.optimizer.zero_grad()
if self.amp_type == AmpType.APEX:
with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.amp_type == AmpType.PYTORCH:
self.amp_grad_scaler.scale(loss).backward()
else:
loss.backward()
if do_step:
# Handle gradient accumulation related gradient rescaling
if self.optimizer_period != 1:
self._rescale_gradients(1 / self.optimizer_period)
# Clipping must happen after grad accumulation
if self.clip_grad_norm is not None:
self._clip_gradients(self.clip_grad_norm)
if self.amp_type == AmpType.PYTORCH:
# If using mixed precision, handle underflow-related scaling
# See https://pytorch.org/docs/stable/amp.html#gradient-scaling
# for context
self.amp_grad_scaler.step(self.optimizer, where=self.where)
self.amp_grad_scaler.update()
else:
self.optimizer.step(where=self.where)
def _rescale_gradients(self, scale):
for param in master_params(self.optimizer):
if param.grad is not None:
param.grad.data.mul_(scale)
def _clip_gradients(self, max_norm):
nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm)
def update_meters(self, model_output, sample):
target = sample["target"].detach().cpu()
model_output = model_output.detach().cpu()
# Update meters
for meter in self.meters:
meter.update(model_output, target, is_train=self.train)
def synchronize_losses(self):
"""Average the losses across the different replicas"""
# Average losses across nodes
losses_tensor = torch.tensor(self.losses)
synchronized_losses_tensor = all_reduce_mean(losses_tensor)
self.losses = synchronized_losses_tensor.tolist()
def advance_phase(self):
"""Performs bookkeeping / task updates between phases
Increments phase idx, resets meters, resets loss history,
resets counters, shuffles dataset, rebuilds iterators, and
sets the train / test state for phase.
"""
logging.debug("Advancing phase")
# Reset meters for next phase / epoch
for meter in self.meters:
meter.reset()
# Reset loss history for next epoch
self.losses = []
# Setup new phase
self.phase_idx += 1
phase = self.phases[self.phase_idx]
self.train = True if phase["train"] else False
if self.train:
self.train_phase_idx += 1
# Re-build dataloader & re-create iterator anytime membership changes.
self.build_dataloaders_for_current_phase()
self.create_data_iterators()
# Set up pytorch module in train vs eval mode, update optimizer.
self._set_model_train_mode()
def done_training(self):
"""Stop condition for training"""
return self.phase_idx + 1 >= len(self.phases)
def create_data_iterators(self):
"""Creates data iterator(s) for the current phase."""
# Delete iterator explicitly so that all dataloader processes
# are cleaned up.
del self.data_iterator
self.data_iterator = iter(self.dataloader)
def _set_model_train_mode(self):
"""Set train mode for model"""
phase = self.phases[self.phase_idx]
self.base_model.train(phase["train"])
self.base_loss.train(phase["train"])
if (
self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL
and not self.train
):
self._broadcast_buffers()
def _broadcast_buffers(self):
"""Explicitly synchronize buffers across all devices."""
if self.distributed_model is None:
return
buffers = list(self.base_model.buffers())
if len(buffers) > 0:
logging.info("Synchronizing buffers before evaluation.")
for buffer in buffers:
broadcast(buffer, 0, group=self.distributed_model.process_group)
# TODO: Functions below should be better abstracted into the dataloader
# abstraction
def get_batchsize_per_replica(self):
"""Return local replica's batchsize for dataset (e.g. batchsize per GPU)"""
return self.datasets[self.phase_type].get_batchsize_per_replica()
def get_global_batchsize(self):
"""Return global batchsize across all trainers"""
return self.datasets[self.phase_type].get_global_batchsize()
def on_start(self):
for hook in self.hooks:
hook.on_start(self)
def on_phase_start(self):
self.phase_start_time_total = time.perf_counter()
self.advance_phase()
for hook in self.hooks:
hook.on_phase_start(self)
self.phase_start_time_train = time.perf_counter()
def on_phase_end(self):
self.log_phase_end(self.phase_type)
if self.train:
self.optimizer.on_epoch(where=self.where)
logging.debug("Syncing losses on phase end...")
self.synchronize_losses()
logging.debug("...losses synced")
logging.debug("Syncing meters on phase end...")
for meter in self.meters:
meter.sync_state()
logging.debug("...meters synced")
barrier()
for hook in self.hooks:
hook.on_phase_end(self)
self.perf_log = []
self.log_phase_end(f"{self.phase_type}_total")
if hasattr(self.datasets[self.phase_type], "on_phase_end"):
self.datasets[self.phase_type].on_phase_end()
def on_end(self):
for hook in self.hooks:
hook.on_end(self)
def log_phase_end(self, tag):
start_time = (
self.phase_start_time_train
if tag == self.phase_type
else self.phase_start_time_total
)
phase_duration = time.perf_counter() - start_time
im_per_sec = (
self.get_global_batchsize() * self.num_batches_per_phase
) / phase_duration
self.perf_log.append(
{"tag": tag, "phase_idx": self.train_phase_idx, "im_per_sec": im_per_sec}
)
def __repr__(self):
if hasattr(self, "_config"):
config = json.dumps(self._config, indent=4)
return f"{super().__repr__()} initialized with config:\n{config}"
return super().__repr__()
| ClassyVision-main | classy_vision/tasks/classification_task.py |
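ClassificationTask can be built from a config dict (from_config) or through its fluent setters. A small hedged sketch of the setter style, grounded only in the methods defined above; note that a trainable task additionally needs set_model, set_loss, set_optimizer and set_dataset before prepare() is called.

# Illustrative only: this builds a partially configured task object.
from classy_vision.tasks import ClassificationTask

task = (
    ClassificationTask()
    .set_num_epochs(2)
    .set_test_phase_period(1)
    .set_use_gpu(False)        # skip CUDA for this sketch
    .set_clip_grad_norm(1.0)   # enable gradient norm clipping at 1.0
)
print(task.phase_type)  # -> "train"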
#!/usr/bin/env python3 -u
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import socket
import subprocess
import sys
import time
from argparse import ArgumentParser, REMAINDER
from contextlib import closing
import ray
class NodeLaunchActor:
"""Ray actor. The code here will run in each node allocated by Ray."""
def run(self, master_addr, master_port, node_rank, dist_world_size, args):
processes = []
# set PyTorch distributed related environmental variables
current_env = os.environ.copy()
current_env["MASTER_ADDR"] = master_addr
current_env["MASTER_PORT"] = str(master_port)
current_env["WORLD_SIZE"] = str(dist_world_size)
if "OMP_NUM_THREADS" not in os.environ and args.nproc_per_node > 1:
current_env["OMP_NUM_THREADS"] = str(1)
print(
"*****************************************\n"
"Setting OMP_NUM_THREADS environment variable for each process "
"to be {} in default, to avoid your system being overloaded, "
"please further tune the variable for optimal performance in "
"your application as needed. \n"
"*****************************************".format(
current_env["OMP_NUM_THREADS"]
)
)
# Set the init_method and rank of the process for distributed training.
for local_rank in range(0, args.nproc_per_node):
# each process's rank
dist_rank = args.nproc_per_node * node_rank + local_rank
current_env["RANK"] = str(dist_rank)
current_env["LOCAL_RANK"] = str(local_rank)
# spawn the processes
with_python = not args.no_python
cmd = []
if with_python:
cmd = [sys.executable, "-u"]
if args.module:
cmd.append("-m")
else:
if not args.use_env:
raise ValueError(
"When using the '--no_python' flag, "
"you must also set the '--use_env' flag."
)
if args.module:
raise ValueError(
"Don't use both the '--no_python' flag"
"and the '--module' flag at the same time."
)
cmd.append(args.training_script)
if not args.use_env:
cmd.append("--local_rank={}".format(local_rank))
cmd.extend(args.training_script_args)
process = subprocess.Popen(cmd, env=current_env)
processes.append(process)
for process in processes:
process.wait()
if process.returncode != 0:
raise subprocess.CalledProcessError(
returncode=process.returncode, cmd=cmd
)
def get_node_ip(self):
return ray.services.get_node_ip_address()
def find_free_port(self):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
def wait_for_gpus(world_size, timeout_secs=3600):
n_gpus = int(ray.cluster_resources().get("GPU", 0))
elapsed_time = 0
while n_gpus < world_size:
logging.warning(
f"Not enough GPUs available ({n_gpus} available,"
f"need {world_size}), waiting 10 seconds"
)
time.sleep(10)
elapsed_time += 10
if elapsed_time > timeout_secs:
raise RuntimeError("Timeout: could not find enough GPUs")
n_gpus = int(ray.cluster_resources().get("GPU", 0))
def parse_args():
"""Helper function parsing the command line options.
@retval ArgumentParser
"""
parser = ArgumentParser(
description="Classy Vision distributed training launch "
"helper utility that will spawn up multiple nodes using Ray"
)
# Optional arguments for the launch helper
parser.add_argument(
"--nnodes",
type=int,
default=1,
help="The number of nodes to use for distributed training",
)
parser.add_argument(
"--nproc_per_node",
type=int,
default=1,
help="The number of processes to launch on each node, "
"for GPU training, this is recommended to be set "
"to the number of GPUs in your system so that "
"each process can be bound to a single GPU.",
)
parser.add_argument(
"--use_env",
default=False,
action="store_true",
help="Use environment variable to pass "
"'local rank'."
"If set to True, the script will not pass "
"--local_rank as argument, and will instead set LOCAL_RANK.",
)
parser.add_argument(
"-m",
"--module",
default=False,
action="store_true",
help="Changes each process to interpret the launch script "
"as a python module, executing with the same behavior as"
"'python -m'.",
)
parser.add_argument(
"--no_python",
default=False,
action="store_true",
help='Do not prepend the training script with "python" - just exec '
"it directly. Useful when the script is not a Python script.",
)
# Ray-related arguments
group = parser.add_argument_group("Ray related arguments")
group.add_argument("--ray-address", default="auto", type=str)
# positional
parser.add_argument(
"training_script",
type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script",
)
# rest from the training program
parser.add_argument("training_script_args", nargs=REMAINDER)
return parser.parse_args()
def main():
args = parse_args()
ray.init(address=args.ray_address)
# world size in terms of number of processes
dist_world_size = args.nproc_per_node * args.nnodes
wait_for_gpus(dist_world_size)
# Set up Ray distributed actors.
actor = ray.remote(num_cpus=1, num_gpus=args.nproc_per_node)(NodeLaunchActor)
workers = [actor.remote() for i in range(args.nnodes)]
# Set worker 0 as the master
master_addr = ray.get(workers[0].get_node_ip.remote())
master_port = ray.get(workers[0].find_free_port.remote())
unfinished = [
worker.run.remote(
master_addr=master_addr,
master_port=master_port,
node_rank=i,
dist_world_size=dist_world_size,
args=args,
)
for i, worker in enumerate(workers)
]
try:
while len(unfinished) > 0:
finished, unfinished = ray.wait(unfinished)
finished = ray.get(finished)
except Exception:
logging.exception("An error occurred:")
ray.shutdown()
if __name__ == "__main__":
main()
| ClassyVision-main | classy_vision/distributed/launch_ray.py |
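launch_ray.py mirrors the classic torch.distributed.launch flow, but allocates one NodeLaunchActor per node through Ray, picks worker 0 as the rendezvous master, and then spawns nproc_per_node training processes per node with MASTER_ADDR, MASTER_PORT, RANK and LOCAL_RANK set. A hedged invocation sketch (the training script and config names are placeholders; the Ray cluster address defaults to "auto"):

# Hypothetical command line, run on a machine connected to a Ray cluster:
#   python -m classy_vision.distributed.launch_ray \
#       --nnodes 2 --nproc_per_node 8 --use_env \
#       my_training_script.py --config my_config.json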
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| ClassyVision-main | classy_vision/distributed/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, Callable, Dict, Optional
import torch
from torchvision.datasets.hmdb51 import HMDB51
from . import register_dataset
from .classy_video_dataset import ClassyVideoDataset
from .transforms.util_video import build_video_field_transform_default
@register_dataset("hmdb51")
class HMDB51Dataset(ClassyVideoDataset):
"""`HMDB51 <http://serre-lab.clps.brown.edu/resource/
hmdb-a-large-human-motion-database/>`_ is an action recognition video dataset,
and it has 51 classes.
It is built on top of `HMDB51 <https://github.com/pytorch/vision/blob/
master/torchvision/datasets/hmdb51.py#L10/>`_ dataset class in TorchVision.
"""
def __init__(
self,
split: str,
batchsize_per_replica: int,
shuffle: bool,
transform: Callable,
num_samples: Optional[int],
frames_per_clip: int,
video_width: int,
video_height: int,
video_min_dimension: int,
audio_samples: int,
step_between_clips: int,
frame_rate: Optional[int],
clips_per_video: int,
video_dir: str,
splits_dir: str,
fold: int,
metadata_filepath: str,
):
"""The constructor of HMDB51Dataset.
Args:
split: dataset split which can be either "train" or "test"
batchsize_per_replica: batch size per model replica
shuffle: If true, shuffle the dataset
transform: a dict where transforms video and audio data
num_samples: if not None, it will subsample dataset
frames_per_clip: the number of frames in a video clip
video_width: rescaled video width. If 0, keep original width
video_height: rescaled video height. If 0, keep original height
video_min_dimension: rescale video so that min(height, width) =
``video_min_dimension``. If 0, keep original video resolution.
Note only one of (``video_width``, ``video_height``) and
(``video_min_dimension``) can be set
audio_samples: desired audio sample rate. If 0, keep original
audio sample rate.
step_between_clips: Number of frames between each clip.
frame_rate: desired video frame rate. If None, keep
original video frame rate.
clips_per_video: Number of clips to sample from each video
video_dir: path to video folder
splits_dir: path to dataset splitting file folder
fold: HMDB51 dataset has 3 folds. Valid values are 1, 2 and 3.
metadata_filepath: path to the dataset meta data
"""
# dataset metadata includes the path of video file, the pts of frames in
# the video and other meta info such as video fps, duration, audio sample rate.
# Users do not need to know the details of metadata. The computing, loading
# and saving logic of metadata are all handled inside the dataset.
# Given the "metadata_file" path, if such a file exists, we load it as metadata.
# Otherwise, we compute the metadata and save it at the "metadata_file" path.
metadata = None
if os.path.exists(metadata_filepath):
metadata = HMDB51Dataset.load_metadata(
metadata_filepath, video_dir=video_dir, update_file_path=True
)
dataset = HMDB51(
video_dir,
splits_dir,
frames_per_clip,
step_between_clips=step_between_clips,
frame_rate=frame_rate,
_precomputed_metadata=metadata,
fold=fold,
train=(split == "train"),
num_workers=torch.get_num_threads() // 2, # heuristically use half threads
_video_width=video_width,
_video_height=video_height,
_video_min_dimension=video_min_dimension,
_audio_samples=audio_samples,
)
metadata = dataset.metadata
if metadata and not os.path.exists(metadata_filepath):
HMDB51Dataset.save_metadata(metadata, metadata_filepath)
super().__init__(
dataset,
split,
batchsize_per_replica,
shuffle,
transform,
num_samples,
clips_per_video,
)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "HMDB51Dataset":
"""Instantiates a HMDB51Dataset from a configuration.
Args:
config: A configuration for a HMDB51Dataset.
See :func:`__init__` for parameters expected in the config.
Returns:
A HMDB51Dataset instance.
"""
required_args = ["split", "metadata_file", "video_dir", "splits_dir"]
assert all(
arg in config for arg in required_args
), f"The arguments {required_args} are all required."
split = config["split"]
(
transform_config,
batchsize_per_replica,
shuffle,
num_samples,
frames_per_clip,
video_width,
video_height,
video_min_dimension,
audio_samples,
step_between_clips,
frame_rate,
clips_per_video,
) = cls.parse_config(config)
transform = build_video_field_transform_default(transform_config, split)
return cls(
split,
batchsize_per_replica,
shuffle,
transform,
num_samples,
frames_per_clip,
video_width,
video_height,
video_min_dimension,
audio_samples,
step_between_clips,
frame_rate,
clips_per_video,
config["video_dir"],
config["splits_dir"],
config["fold"]
if "fold" in config
else 1, # HMDB51 has 3 folds. Use fold 1 by default
config["metadata_file"],
)
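# Illustrative sketch (not part of the library): a minimal configuration that could be
# passed to HMDB51Dataset.from_config. All paths below are hypothetical placeholders
# for a real HMDB51 download; no "transforms" key is given, so the default video
# transform would be used.
if __name__ == "__main__":
    hmdb51_config = {
        "split": "train",
        "batchsize_per_replica": 8,
        "use_shuffle": True,
        "num_samples": None,
        "frames_per_clip": 8,
        "clips_per_video": 1,
        "video_dir": "/data/hmdb51/videos",  # hypothetical path
        "splits_dir": "/data/hmdb51/splits",  # hypothetical path
        "metadata_file": "/data/hmdb51/metadata.pt",  # hypothetical path
    }
    # dataset = HMDB51Dataset.from_config(hmdb51_config)  # requires the actual videos on disk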
| ClassyVision-main | classy_vision/dataset/classy_hmdb51.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
from typing import Any, Iterable, Iterator
class DataloaderWrapper(ABC):
"""
Abstract class representing dataloader which wraps another dataloader.
Attribute accesses are passed to the wrapped dataloader.
"""
def __init__(self, dataloader: Iterable) -> None:
# we use self.__dict__ to set the attributes since the __setattr__ method
# is overridden
attributes = {"dataloader": dataloader, "_iter": None}
self.__dict__.update(attributes)
@abstractmethod
def __iter__(self) -> Iterator[Any]:
pass
@abstractmethod
def __next__(self) -> Any:
pass
def __getattr__(self, attr) -> Any:
"""
Pass the getattr call to the wrapped dataloader
"""
if attr in self.__dict__:
return self.__dict__[attr]
return getattr(self.dataloader, attr)
def __setattr__(self, attr, value) -> None:
"""
Pass the setattr call to the wrapped dataloader
"""
if attr in self.__dict__:
self.__dict__[attr] = value
else:
setattr(self.dataloader, attr, value)
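# Illustrative sketch (not part of the library): a minimal concrete subclass of
# DataloaderWrapper that simply passes batches through unchanged. The class name
# is hypothetical.
if __name__ == "__main__":

    class IdentityDataloaderWrapper(DataloaderWrapper):
        def __iter__(self) -> Iterator[Any]:
            self._iter = iter(self.dataloader)
            return self

        def __next__(self) -> Any:
            return next(self._iter)

    wrapped = IdentityDataloaderWrapper([1, 2, 3])  # any iterable works
    assert list(wrapped) == [1, 2, 3]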
| ClassyVision-main | classy_vision/dataset/dataloader_wrapper.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Dict, Optional, Union
from classy_vision.dataset import ClassyDataset, register_dataset
from classy_vision.dataset.transforms import build_transforms, ClassyTransform
from torchvision.datasets.cifar import CIFAR10, CIFAR100
class CIFARDataset(ClassyDataset):
_CIFAR_TYPE = None
def __init__(
self,
split: Optional[str],
batchsize_per_replica: int,
shuffle: bool,
transform: Optional[Union[ClassyTransform, Callable]],
num_samples: Optional[int],
root: str,
download: Optional[bool] = None,
):
assert self._CIFAR_TYPE in [
"cifar10",
"cifar100",
], "CIFARDataset must be subclassed and a valid _CIFAR_TYPE provided"
if self._CIFAR_TYPE == "cifar10":
dataset = CIFAR10(root=root, train=(split == "train"), download=download)
if self._CIFAR_TYPE == "cifar100":
dataset = CIFAR100(root=root, train=(split == "train"), download=download)
super().__init__(
dataset, batchsize_per_replica, shuffle, transform, num_samples
)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "CIFARDataset":
"""Instantiates a CIFARDataset from a configuration.
Args:
config: A configuration for a CIFARDataset.
See :func:`__init__` for parameters expected in the config.
Returns:
A CIFARDataset instance.
"""
(
transform_config,
batchsize_per_replica,
shuffle,
num_samples,
) = cls.parse_config(config)
split = config.get("split")
root = config.get("root")
download = config.get("download")
transform = build_transforms(transform_config)
return cls(
split=split,
batchsize_per_replica=batchsize_per_replica,
shuffle=shuffle,
transform=transform,
num_samples=num_samples,
root=root,
download=download,
)
@register_dataset("classy_cifar10")
class CIFAR10Dataset(CIFARDataset):
_CIFAR_TYPE = "cifar10"
@register_dataset("classy_cifar100")
class CIFAR100Dataset(CIFARDataset):
_CIFAR_TYPE = "cifar100"
| ClassyVision-main | classy_vision/dataset/classy_cifar.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os.path
from typing import Any, Callable, Dict, List, Optional, Union
from torchvision import datasets, transforms
from . import ClassyDataset, register_dataset
from .core import ListDataset
from .transforms import build_transforms, ClassyTransform, TupleToMapTransform
def _is_torchvision_imagefolder(image_folder):
with os.scandir(image_folder) as folder_iter:
try:
dir_entry = next(folder_iter)
return dir_entry.is_dir()
except StopIteration:
raise OSError(f"Image folder {image_folder} is empty")
def _get_image_paths(image_folder):
return [f"{image_folder}/{file}" for file in os.listdir(image_folder)]
def _load_dataset(image_folder, image_files):
if image_folder is not None:
if _is_torchvision_imagefolder(image_folder):
return (
datasets.ImageFolder(image_folder),
TupleToMapTransform(list_of_map_keys=["input", "target"]),
)
else:
image_files = _get_image_paths(image_folder)
return ListDataset(image_files, metadata=None), None
@register_dataset("image_path")
class ImagePathDataset(ClassyDataset):
"""Dataset which reads images from a local filesystem. Implements ClassyDataset."""
def __init__(
self,
batchsize_per_replica: int,
shuffle: bool,
transform: Optional[Union[ClassyTransform, Callable]] = None,
num_samples: Optional[int] = None,
image_folder: Optional[str] = None,
image_files: Optional[List[str]] = None,
):
"""Constructor for ImagePathDataset.
Only one of image_folder or image_files should be passed to specify the images.
Args:
batchsize_per_replica: Positive integer indicating batch size for each
replica
shuffle: Whether we should shuffle between epochs
transform: Transform to be applied to each sample
num_samples: When set, this restricts the number of samples provided by
the dataset
image_folder: A directory with one of the following structures -
- A directory containing sub-directories with images for each target,
which is the format expected by
:class:`torchvision.datasets.ImageFolder` -
dog/xxx.png
dog/xxy.png
cat/123.png
cat/nsdf3.png
In this case, the targets are inferred from the sub-directories.
- A directory containing images -
123.png
xyz.png
In this case, the targets are not returned (useful for inference).
image_files: A list of image files -
[
"123.png",
"dog/xyz.png",
"/home/cat/aaa.png"
]
In this case, the targets are not returned (useful for inference).
"""
if (image_folder is None) == (image_files is None):
raise ValueError("One of image_folder and image_files should be provided")
dataset, preproc_transform = _load_dataset(image_folder, image_files)
super().__init__(
dataset, batchsize_per_replica, shuffle, transform, num_samples
)
# Some of the base datasets from _load_dataset have different
# sample formats, the preproc_transform should map them all to
# the dict {"input": img, "target": label} format
if preproc_transform is not None:
self.transform = transforms.Compose([preproc_transform, self.transform])
@classmethod
def from_config(cls, config: Dict[str, Any]):
"""Instantiates ImagePathDataset from a config.
Args:
config: A configuration for ImagePathDataset.
See :func:`__init__` for parameters expected in the config.
Returns:
An ImagePathDataset instance.
"""
(
transform_config,
batchsize_per_replica,
shuffle,
num_samples,
) = cls.parse_config(config)
transform = build_transforms(transform_config)
return cls(
batchsize_per_replica,
shuffle,
transform,
num_samples,
image_folder=config.get("image_folder"),
image_files=config.get("image_files"),
)
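# Illustrative sketch (not part of the library): building an inference-style dataset
# from an explicit list of image paths. The file names are hypothetical placeholders;
# images are only read lazily when a sample is indexed.
if __name__ == "__main__":
    dataset = ImagePathDataset(
        batchsize_per_replica=2,
        shuffle=False,
        transform=None,
        num_samples=None,
        image_files=["/tmp/images/a.jpg", "/tmp/images/b.jpg"],  # hypothetical paths
    )
    print(len(dataset))  # 2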
| ClassyVision-main | classy_vision/dataset/image_path_dataset.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, Callable, Dict, List, Optional
import torch
from torchvision.datasets.kinetics import Kinetics
from . import register_dataset
from .classy_video_dataset import ClassyVideoDataset
from .transforms.util_video import build_video_field_transform_default
@register_dataset("kinetics400")
class Kinetics400Dataset(ClassyVideoDataset):
"""`Kinetics-400 <https://deepmind.com/research/open-source/
open-source-datasets/kinetics/>`_ is an action recognition video dataset,
and it has 400 classes.
`Original publication <https://arxiv.org/pdf/1705.06950.pdf>`_
We assume videos are already trimmed to 10-second clip, and are stored in a
folder.
It is built on top of `Kinetics <https://github.com/pytorch/vision/blob/
master/torchvision/datasets/kinetics.py#L7/>`_ dataset class in TorchVision.
"""
def __init__(
self,
split: str,
batchsize_per_replica: int,
shuffle: bool,
transform: Callable,
num_samples: Optional[int],
frames_per_clip: int,
video_width: int,
video_height: int,
video_min_dimension: int,
audio_samples: int,
audio_channels: int,
step_between_clips: int,
frame_rate: Optional[int],
clips_per_video: int,
video_dir: str,
extensions: List[str],
metadata_filepath: str,
):
"""The constructor of Kinetics400Dataset.
Args:
split: dataset split which can be either "train" or "test"
batchsize_per_replica: batch size per model replica
shuffle: If true, shuffle the dataset
transform: callable that transforms the video and audio data in each sample
num_samples: if provided, subsample the dataset to at most this many samples
frames_per_clip: the number of frames in a video clip
video_width: rescaled video width. If 0, keep original width
video_height: rescaled video height. If 0, keep original height
video_min_dimension: rescale video so that min(height, width) =
video_min_dimension. If 0, keep original video resolution. Note
only one of (video_width, video_height) and (video_min_dimension)
can be set
audio_samples: desired audio sample rate. If 0, keep original
audio sample rate
audio_channels: desired number of audio channels. If 0, keep original
audio channels
step_between_clips: Number of frames between each clip.
frame_rate: desired video frame rate. If None, keep
original video frame rate.
clips_per_video: Number of clips to sample from each video
video_dir: path to video folder
extensions: A list of file extensions, such as "avi" and "mp4". Only
videos matching these file extensions are added to the dataset
metadata_filepath: path to the dataset meta data
"""
# dataset metadata includes the path of video file, the pts of frames in
# the video and other meta info such as video fps, duration, audio sample rate.
# Users do not need to know the details of metadata. The computing, loading
# and saving logic of metadata are all handled inside the dataset.
# Given the "metadata_file" path, if such file exists, we load it as meta data.
# Otherwise, we compute the meta data, and save it at "metadata_file" path.
metadata = None
if os.path.exists(metadata_filepath):
metadata = Kinetics400Dataset.load_metadata(
metadata_filepath, video_dir=video_dir, update_file_path=True
)
dataset = Kinetics(
video_dir,
frames_per_clip,
num_classes="400",
step_between_clips=step_between_clips,
frame_rate=frame_rate,
_precomputed_metadata=metadata,
extensions=extensions,
num_workers=torch.get_num_threads() // 2, # heuristically use half threads
_video_width=video_width,
_video_height=video_height,
_video_min_dimension=video_min_dimension,
_audio_samples=audio_samples,
_audio_channels=audio_channels,
_legacy=True,
)
metadata = dataset.metadata
if metadata and not os.path.exists(metadata_filepath):
Kinetics400Dataset.save_metadata(metadata, metadata_filepath)
super().__init__(
dataset,
split,
batchsize_per_replica,
shuffle,
transform,
num_samples,
clips_per_video,
)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "Kinetics400Dataset":
"""Instantiates a Kinetics400Dataset from a configuration.
Args:
config: A configuration for a Kinetics400Dataset.
See :func:`__init__` for parameters expected in the config.
Returns:
A Kinetics400Dataset instance.
"""
required_args = ["split", "metadata_file", "video_dir"]
assert all(
arg in config for arg in required_args
), f"The arguments {required_args} are all required."
split = config["split"]
audio_channels = config.get("audio_channels", 0)
(
transform_config,
batchsize_per_replica,
shuffle,
num_samples,
frames_per_clip,
video_width,
video_height,
video_min_dimension,
audio_samples,
step_between_clips,
frame_rate,
clips_per_video,
) = cls.parse_config(config)
extensions = config.get("extensions", ("mp4",))  # default is a one-element tuple, not a bare string
transform = build_video_field_transform_default(transform_config, split)
return cls(
split,
batchsize_per_replica,
shuffle,
transform,
num_samples,
frames_per_clip,
video_width,
video_height,
video_min_dimension,
audio_samples,
audio_channels,
step_between_clips,
frame_rate,
clips_per_video,
config["video_dir"],
extensions,
config["metadata_file"],
)
| ClassyVision-main | classy_vision/dataset/classy_kinetics400.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Dict
from classy_vision.generic.distributed_util import get_rank, get_world_size
from torch.utils.data.distributed import DistributedSampler
from . import register_dataset
from .classy_video_dataset import ClassyVideoDataset
from .core import RandomVideoDataset
from .transforms.util_video import build_video_field_transform_default
@register_dataset("synthetic_video")
class SyntheticVideoDataset(ClassyVideoDataset):
"""Classy Dataset which produces random synthetic video clips.
Useful for testing since the dataset is much faster to initialize and fetch samples
from, compared to real world datasets.
Note: Unlike :class:`SyntheticImageDataset`, this dataset generates targets
randomly, independent of the video clips.
"""
def __init__(
self,
num_classes: int,
split: str,
batchsize_per_replica: int,
shuffle: bool,
transform: Callable,
num_samples: int,
frames_per_clip: int,
video_width: int,
video_height: int,
audio_samples: int,
clips_per_video: int,
):
"""The constructor of SyntheticVideoDataset.
Args:
num_classes: Number of classes in the generated targets.
split: Split of dataset to use
batchsize_per_replica: batch size per model replica
shuffle: Whether we should shuffle between epochs
transform: Transform to be applied to each sample
num_samples: Number of samples to return
frames_per_clip: Number of frames in a video clip
video_width: Width of the video clip
video_height: Height of the video clip
audio_samples: Audio sample rate
clips_per_video: Number of clips per video
"""
dataset = RandomVideoDataset(
num_classes,
split,
num_samples,
frames_per_clip,
video_width,
video_height,
audio_samples,
clips_per_video,
)
super().__init__(
dataset,
split,
batchsize_per_replica,
shuffle,
transform,
num_samples,
clips_per_video,
)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "SyntheticVideoDataset":
"""Instantiates a SyntheticVideoDataset from a configuration.
Args:
config: A configuration for a SyntheticVideoDataset.
See :func:`__init__` for parameters expected in the config.
Returns:
A SyntheticVideoDataset instance.
"""
split = config["split"]
num_classes = config["num_classes"]
(
transform_config,
batchsize_per_replica,
shuffle,
num_samples,
frames_per_clip,
video_width,
video_height,
video_min_dimension,
audio_samples,
step_between_clips,
frame_rate,
clips_per_video,
) = cls.parse_config(config)
transform = build_video_field_transform_default(transform_config, split)
return cls(
num_classes,
split,
batchsize_per_replica,
shuffle,
transform,
num_samples,
frames_per_clip,
video_width,
video_height,
audio_samples,
clips_per_video,
)
@property
def video_clips(self):
raise NotImplementedError()
def _get_sampler(self, epoch):
world_size = get_world_size()
rank = get_rank()
sampler = DistributedSampler(
self, num_replicas=world_size, rank=rank, shuffle=self.shuffle
)
sampler.set_epoch(epoch)
return sampler
| ClassyVision-main | classy_vision/dataset/classy_synthetic_video.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import traceback
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
from .classy_dataset import ClassyDataset
FILE_ROOT = Path(__file__).parent
DATASET_REGISTRY = {}
DATASET_REGISTRY_TB = {}
DATASET_CLASS_NAMES = set()
DATASET_CLASS_NAMES_TB = {}
def build_dataset(config, *args, **kwargs):
"""Builds a :class:`ClassyDataset` from a config.
This assumes a 'name' key in the config which is used to determine what
dataset class to instantiate. For instance, a config `{"name": "my_dataset",
"folder": "/data"}` will find a class that was registered as "my_dataset"
(see :func:`register_dataset`) and call .from_config on it."""
dataset = DATASET_REGISTRY[config["name"]].from_config(config, *args, **kwargs)
num_workers = config.get("num_workers")
if num_workers is not None:
dataset.set_num_workers(num_workers)
return dataset
def register_dataset(name, bypass_checks=False):
"""Registers a :class:`ClassyDataset` subclass.
This decorator allows Classy Vision to instantiate a subclass of
ClassyDataset from a configuration file, even if the class itself is not
part of the Classy Vision framework. To use it, apply this decorator to a
ClassyDataset subclass like this:
.. code-block:: python
@register_dataset("my_dataset")
class MyDataset(ClassyDataset):
...
To instantiate a dataset from a configuration file, see
:func:`build_dataset`."""
def register_dataset_cls(cls):
if not bypass_checks:
if name in DATASET_REGISTRY:
msg = "Cannot register duplicate dataset ({}). Already registered at \n{}\n"
raise ValueError(msg.format(name, DATASET_REGISTRY_TB[name]))
if not issubclass(cls, ClassyDataset):
raise ValueError(
"Dataset ({}: {}) must extend ClassyDataset".format(
name, cls.__name__
)
)
if cls.__name__ in DATASET_CLASS_NAMES:
msg = (
"Cannot register dataset with duplicate class name({})."
+ "Previously registered at \n{}\n"
)
raise ValueError(
msg.format(cls.__name__, DATASET_CLASS_NAMES_TB[cls.__name__])
)
tb = "".join(traceback.format_stack())
DATASET_REGISTRY[name] = cls
DATASET_CLASS_NAMES.add(cls.__name__)
DATASET_REGISTRY_TB[name] = tb
DATASET_CLASS_NAMES_TB[cls.__name__] = tb
return cls
return register_dataset_cls
# automatically import any Python files in the dataset/ directory
import_all_modules(FILE_ROOT, "classy_vision.dataset")
from .classy_cifar import CIFARDataset # isort:skip
from .classy_hmdb51 import HMDB51Dataset # isort:skip
from .classy_kinetics400 import Kinetics400Dataset # isort:skip
from .classy_synthetic_image import SyntheticImageDataset # isort:skip
from .classy_synthetic_image_streaming import ( # isort:skip
SyntheticImageStreamingDataset, # isort:skip
) # isort:skip
from .classy_synthetic_video import SyntheticVideoDataset # isort:skip
from .classy_ucf101 import UCF101Dataset # isort:skip
from .classy_video_dataset import ClassyVideoDataset # isort:skip
from .dataloader_async_gpu_wrapper import DataloaderAsyncGPUWrapper # isort:skip
from .dataloader_limit_wrapper import DataloaderLimitWrapper # isort:skip
from .dataloader_skip_none_wrapper import DataloaderSkipNoneWrapper # isort:skip
from .dataloader_wrapper import DataloaderWrapper # isort:skip
from .image_path_dataset import ImagePathDataset # isort:skip
__all__ = [
"CIFARDataset",
"ClassyDataset",
"ClassyVideoDataset",
"DataloaderLimitWrapper",
"DataloaderSkipNoneWrapper",
"DataloaderWrapper",
"DataloaderAsyncGPUWrapper",
"HMDB51Dataset",
"ImagePathDataset",
"Kinetics400Dataset",
"SyntheticImageDataset",
"SyntheticImageStreamingDataset",
"SyntheticVideoDataset",
"UCF101Dataset",
"build_dataset",
"register_dataset",
]
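# Illustrative sketch (not part of the library): registering a custom dataset under a
# hypothetical name and building it from a config, as it would be done in user code.
if __name__ == "__main__":
    from classy_vision.dataset import build_dataset, ClassyDataset, register_dataset
    from classy_vision.dataset.core import RandomImageDataset

    @register_dataset("my_random_images")  # hypothetical name
    class MyRandomImageDataset(ClassyDataset):
        @classmethod
        def from_config(cls, config):
            _, batchsize_per_replica, shuffle, num_samples = cls.parse_config(config)
            dataset = RandomImageDataset(
                crop_size=config["crop_size"],
                num_channels=3,
                num_classes=config["num_classes"],
                num_samples=num_samples,
                seed=0,
            )
            return cls(dataset, batchsize_per_replica, shuffle, None, num_samples)

    my_dataset = build_dataset(
        {
            "name": "my_random_images",
            "batchsize_per_replica": 8,
            "use_shuffle": True,
            "num_samples": 16,
            "crop_size": 32,
            "num_classes": 2,
        }
    )
    print(len(my_dataset))  # 16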
| ClassyVision-main | classy_vision/dataset/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torchvision.transforms as transforms
from classy_vision.dataset import register_dataset
from classy_vision.dataset.classy_dataset import ClassyDataset
from classy_vision.dataset.core import RandomImageBinaryClassDataset
from classy_vision.dataset.dataloader_async_gpu_wrapper import DataloaderAsyncGPUWrapper
from classy_vision.dataset.dataloader_limit_wrapper import DataloaderLimitWrapper
from classy_vision.dataset.transforms.util import (
build_field_transform_default_imagenet,
ImagenetConstants,
)
@register_dataset("synthetic_image_streaming")
class SyntheticImageStreamingDataset(ClassyDataset):
"""
Synthetic image dataset that behaves like a streaming dataset.
Requires a "num_samples" argument which decides the number of samples in the
phase. Also takes an optional "length" input which sets the length of the
dataset.
"""
def __init__(
self,
batchsize_per_replica,
shuffle,
transform,
num_samples,
crop_size,
class_ratio,
seed,
length=None,
async_gpu_copy: bool = False,
):
if length is None:
# If length not provided, set to be same as num_samples
length = num_samples
dataset = RandomImageBinaryClassDataset(crop_size, class_ratio, length, seed)
super().__init__(
dataset, batchsize_per_replica, shuffle, transform, num_samples
)
self.async_gpu_copy = async_gpu_copy
@classmethod
def from_config(cls, config):
# Parse the config
assert all(key in config for key in ["crop_size", "class_ratio", "seed"])
length = config.get("length")
crop_size = config["crop_size"]
class_ratio = config["class_ratio"]
seed = config["seed"]
(
transform_config,
batchsize_per_replica,
shuffle,
num_samples,
) = cls.parse_config(config)
async_gpu_copy = config.get("async_gpu_copy", False)
# Build the transforms
default_transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(
mean=ImagenetConstants.MEAN, std=ImagenetConstants.STD
),
]
)
transform = build_field_transform_default_imagenet(
transform_config, default_transform=default_transform
)
return cls(
batchsize_per_replica,
shuffle,
transform,
num_samples,
crop_size,
class_ratio,
seed,
length=length,
async_gpu_copy=async_gpu_copy,
)
def iterator(self, *args, **kwargs):
dataloader = DataloaderLimitWrapper(
super().iterator(*args, **kwargs),
self.num_samples // self.get_global_batchsize(),
)
if self.async_gpu_copy:
dataloader = DataloaderAsyncGPUWrapper(dataloader)
return dataloader
| ClassyVision-main | classy_vision/dataset/classy_synthetic_image_streaming.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Callable, Dict, List, Optional
import torchvision.transforms as transforms
from . import register_dataset
from .classy_dataset import ClassyDataset
from .core import RandomImageBinaryClassDataset
from .transforms import build_transforms
from .transforms.util import build_field_transform_default_imagenet, ImagenetConstants
@register_dataset("synthetic_image")
class SyntheticImageDataset(ClassyDataset):
"""Classy Dataset which produces random synthetic images with binary targets.
The underlying dataset sets targets based on the channels in the image, so users can
validate their setup by checking if they can get 100% accuracy on this dataset.
Useful for testing since the dataset is much faster to initialize and fetch samples
from, compared to real world datasets.
"""
def __init__(
self,
batchsize_per_replica: int,
shuffle: bool,
transform: Optional[Callable],
num_samples: int,
crop_size: int,
class_ratio: float,
seed: int,
) -> None:
"""
Args:
batchsize_per_replica: Positive integer indicating batch size for each
replica
shuffle: Whether we should shuffle between epochs
transform: When specified, transform to be applied to each sample
num_samples: Number of samples to return
crop_size: Image size, used for both height and width
class_ratio: Ratio of the distribution of target classes
seed: Seed used for image generation. Use the same seed to generate the same
set of samples.
"""
dataset = RandomImageBinaryClassDataset(
crop_size, class_ratio, num_samples, seed
)
super().__init__(
dataset, batchsize_per_replica, shuffle, transform, num_samples
)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "SyntheticImageDataset":
"""Instantiates a SyntheticImageDataset from a configuration.
Args:
config: A configuration for a SyntheticImageDataset.
See :func:`__init__` for parameters expected in the config.
Returns:
A SyntheticImageDataset instance.
"""
assert all(key in config for key in ["crop_size", "class_ratio", "seed"])
crop_size = config["crop_size"]
class_ratio = config["class_ratio"]
seed = config["seed"]
(
transform_config,
batchsize_per_replica,
shuffle,
num_samples,
) = cls.parse_config(config)
try:
transform = build_transforms(transform_config)
except Exception:
logging.error(
"We recently changed transform behavior"
" do you need to update your config?"
" See resnet50_synthetic_image_classy_config.json"
" as an example."
)
raise
return cls(
batchsize_per_replica,
shuffle,
transform,
num_samples,
crop_size,
class_ratio,
seed,
)
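# Illustrative sketch (not part of the library): constructing the synthetic dataset
# directly. The target of every sample is encoded in the image channels, so a model
# that reaches 100% accuracy here has a correctly wired training setup.
if __name__ == "__main__":
    dataset = SyntheticImageDataset(
        batchsize_per_replica=2,
        shuffle=True,
        transform=None,
        num_samples=4,
        crop_size=16,
        class_ratio=0.5,
        seed=0,
    )
    sample = dataset[0]
    print(len(dataset), sample["target"])  # 4 and a binary target (0 or 1)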
| ClassyVision-main | classy_vision/dataset/classy_synthetic_image.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, Callable, Dict, Optional
import torch
from torchvision.datasets.ucf101 import UCF101
from . import register_dataset
from .classy_video_dataset import ClassyVideoDataset
from .transforms.util_video import build_video_field_transform_default
@register_dataset("ucf101")
class UCF101Dataset(ClassyVideoDataset):
"""`UCF101 <https://www.crcv.ucf.edu/data/UCF101.php/>`_ is an action
recognition video dataset, and it has 101 classes.
It is built on top of `UCF101 <https://github.com/pytorch/vision/blob/master
/torchvision/datasets/ucf101.py#L10>`_ dataset class in TorchVision.
"""
def __init__(
self,
split: str,
batchsize_per_replica: int,
shuffle: bool,
transform: Callable,
num_samples: Optional[int],
frames_per_clip: int,
video_width: int,
video_height: int,
video_min_dimension: int,
audio_samples: int,
step_between_clips: int,
frame_rate: Optional[int],
clips_per_video: int,
video_dir: str,
splits_dir: str,
fold: int,
metadata_filepath: str,
):
"""The constructor of UCF101Dataset.
Args:
split: dataset split which can be either "train" or "test"
batchsize_per_replica: batch size per model replica
shuffle: If true, shuffle the dataset
transform: callable that transforms the video and audio data in each sample
num_samples: if not None, subsample the dataset to at most this many samples
frames_per_clip: the number of frames in a video clip
video_width: rescaled video width. If 0, keep original width
video_height: rescaled video height. If 0, keep original height
video_min_dimension: rescale video so that min(height, width) =
``video_min_dimension``. If 0, keep original video resolution.
Note only one of (``video_width``, ``video_height``)
and (``video_min_dimension``) can be set
audio_samples: desired audio sample rate. If 0, keep original
audio sample rate.
step_between_clips: Number of frames between each clip.
frame_rate: desired video frame rate. If None, keep original video
frame rate.
clips_per_video: Number of clips to sample from each video
video_dir: path to video folder
splits_dir: path to dataset splitting file folder
fold: UCF101 dataset has 3 folds. Valid values are 1, 2 and 3.
metadata_filepath: path to the dataset meta data
"""
# dataset metadata includes the path of video file, the pts of frames in
# the video and other meta info such as video fps, duration, audio sample rate.
# Users do not need to know the details of metadata. The computing, loading
# and saving logic of metadata are all handled inside the dataset.
# Given the "metadata_file" path, if such file exists, we load it as meta data.
# Otherwise, we compute the meta data, and save it at "metadata_file" path.
metadata = None
if os.path.exists(metadata_filepath):
metadata = UCF101Dataset.load_metadata(
metadata_filepath, video_dir=video_dir, update_file_path=True
)
dataset = UCF101(
video_dir,
splits_dir,
frames_per_clip,
step_between_clips=step_between_clips,
frame_rate=frame_rate,
_precomputed_metadata=metadata,
fold=fold,
train=(split == "train"),
num_workers=torch.get_num_threads() // 2, # heuristically use half threads
_video_width=video_width,
_video_height=video_height,
_video_min_dimension=video_min_dimension,
_audio_samples=audio_samples,
)
metadata = dataset.metadata
if metadata and not os.path.exists(metadata_filepath):
UCF101Dataset.save_metadata(metadata, metadata_filepath)
super().__init__(
dataset,
split,
batchsize_per_replica,
shuffle,
transform,
num_samples,
clips_per_video,
)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "UCF101Dataset":
"""Instantiates a UCF101Dataset from a configuration.
Args:
config: A configuration for a UCF101Dataset.
See :func:`__init__` for parameters expected in the config.
Returns:
A UCF101Dataset instance.
"""
required_args = ["split", "metadata_file", "video_dir", "splits_dir"]
assert all(
arg in config for arg in required_args
), f"The arguments {required_args} are all required."
split = config["split"]
(
transform_config,
batchsize_per_replica,
shuffle,
num_samples,
frames_per_clip,
video_width,
video_height,
video_min_dimension,
audio_samples,
step_between_clips,
frame_rate,
clips_per_video,
) = cls.parse_config(config)
transform = build_video_field_transform_default(transform_config, split)
return cls(
split,
batchsize_per_replica,
shuffle,
transform,
num_samples,
frames_per_clip,
video_width,
video_height,
video_min_dimension,
audio_samples,
step_between_clips,
frame_rate,
clips_per_video,
config["video_dir"],
config["splits_dir"],
config["fold"]
if "fold" in config
else 1, # UCF101 has 3 folds. Use fold 1 by default
config["metadata_file"],
)
| ClassyVision-main | classy_vision/dataset/classy_ucf101.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Iterable, Iterator
from .dataloader_wrapper import DataloaderWrapper
class DataloaderLimitWrapper(DataloaderWrapper):
"""
Dataloader which wraps another dataloader and only returns a limited
number of items.
This is useful for iterable-style datasets where the length of the dataset isn't known in advance.
Such datasets can wrap their returned iterators with this class. See
:func:`SyntheticImageStreamingDataset.iterator` for an example.
Attribute accesses are passed to the wrapped dataloader.
"""
def __init__(
self, dataloader: Iterable, limit: int, wrap_around: bool = True
) -> None:
"""Constructor for DataloaderLimitWrapper.
Args:
dataloader: The dataloader to wrap around
limit: Specify the number of calls to the underlying dataloader. The wrapper
will raise a `StopIteration` after `limit` calls.
wrap_around: Whether to wrap around the original dataloader if the
dataloader is exhausted before `limit` calls.
Raises:
RuntimeError: If `wrap_around` is set to `False` and the underlying
dataloader is exhausted before `limit` calls.
"""
super().__init__(dataloader)
# we use self.__dict__ to set the attributes since the __setattr__ method
# is overridden
attributes = {"limit": limit, "wrap_around": wrap_around, "_count": None}
self.__dict__.update(attributes)
def __iter__(self) -> Iterator[Any]:
self._iter = iter(self.dataloader)
self._count = 0
return self
def __next__(self) -> Any:
if self._count >= self.limit:
raise StopIteration
self._count += 1
try:
return next(self._iter)
except StopIteration:
if self.wrap_around:
# create a new iterator to load data from the beginning
logging.info(
f"Wrapping around after {self._count - 1} calls. Limit: {self.limit}"
)
try:
self._iter = iter(self.dataloader)
return next(self._iter)
except StopIteration:
raise RuntimeError(
"Looks like the dataset is empty, "
"have you configured it properly?"
)
else:
raise RuntimeError(
f"StopIteration raised before {self.limit} items were returned"
)
def __len__(self) -> int:
return self.limit
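# Illustrative sketch (not part of the library): limiting a short iterable to a fixed
# number of items; with wrap_around=True the wrapper restarts the underlying
# dataloader once it is exhausted.
if __name__ == "__main__":
    limited = DataloaderLimitWrapper([1, 2, 3], limit=5, wrap_around=True)
    print(list(limited))  # [1, 2, 3, 1, 2]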
| ClassyVision-main | classy_vision/dataset/dataloader_limit_wrapper.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Dict, Optional, Sequence, Union
from classy_vision.dataset.transforms import ClassyTransform
from classy_vision.generic.distributed_util import get_rank, get_world_size
from classy_vision.generic.util import is_pos_int, log_class_usage
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
def _return_true(_sample):
return True
DEFAULT_NUM_WORKERS = 4
class ClassyDataset:
"""
Class representing a dataset abstraction.
This class wraps a :class:`torch.utils.data.Dataset` via the `dataset` attribute
and configures the dataloaders needed to access the datasets. By default,
this class will use `DEFAULT_NUM_WORKERS` processes to load the data
(num_workers in :class:`torch.utils.data.DataLoader`).
Transforms which need to be applied to the data should be specified in this class.
ClassyDataset can be instantiated from a configuration file as well.
"""
def __init__(
self,
dataset: Sequence,
batchsize_per_replica: int,
shuffle: bool,
transform: Optional[Union[ClassyTransform, Callable]],
num_samples: Optional[int],
) -> None:
"""
Constructor for a ClassyDataset.
Args:
batchsize_per_replica: Positive integer indicating batch size for each
replica
shuffle: Whether to shuffle between epochs
transform: When set, transform to be applied to each sample
num_samples: When set, this restricts the number of samples provided by
the dataset
"""
# Asserts:
assert is_pos_int(
batchsize_per_replica
), "batchsize_per_replica must be a positive int"
assert isinstance(shuffle, bool), "shuffle must be a boolean"
assert num_samples is None or is_pos_int(
num_samples
), "num_samples must be a positive int or None"
# Assignments:
self.batchsize_per_replica = batchsize_per_replica
self.shuffle = shuffle
self.transform = transform
self.num_samples = num_samples
self.dataset = dataset
self.num_workers = DEFAULT_NUM_WORKERS
log_class_usage("Dataset", self.__class__)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ClassyDataset":
"""Instantiates a ClassyDataset from a configuration.
Args:
config: A configuration for the ClassyDataset.
Returns:
A ClassyDataset instance.
"""
raise NotImplementedError
@classmethod
def parse_config(cls, config: Dict[str, Any]):
"""
This function parses out common config options.
Args:
config: A dict with the following string keys -
| *batchsize_per_replica* (int): Must be a positive int, batch size
| for each replica
| *use_shuffle* (bool): Whether to enable shuffling for the dataset
| *num_samples* (int, optional): When set, restricts the number of
samples in a dataset
| *transforms*: list of transform configurations to be applied in order
Returns:
A tuple containing the following variables -
| *transform_config*: Config for the dataset transform. Can be passed to
| :func:`transforms.build_transform`
| *batchsize_per_replica*: Batch size per replica
| *shuffle*: Whether we should shuffle between epochs
| *num_samples*: When set, restricts the number of samples in a dataset
"""
batchsize_per_replica = config.get("batchsize_per_replica")
shuffle = config.get("use_shuffle")
num_samples = config.get("num_samples")
transform_config = config.get("transforms")
return transform_config, batchsize_per_replica, shuffle, num_samples
def set_num_workers(self, num_workers):
self.num_workers = num_workers
def __getitem__(self, idx: int):
assert idx >= 0 and idx < len(
self.dataset
), "Provided idx ({}) is outside of dataset range".format(idx)
sample = self.dataset[idx]
if self.transform is None:
return sample
return self.transform(sample)
def __len__(self):
if self.dataset is None:
assert self.num_samples
return self.num_samples
assert self.num_samples is None or self.num_samples <= len(
self.dataset
), "Num samples mus be less than length of base dataset"
return len(self.dataset) if self.num_samples is None else self.num_samples
def _get_sampler(self, epoch: int):
"""
Return a :class:`torch.utils.data.sampler.Sampler` to sample the data.
This is used to distribute the data across the replicas. If shuffling
is enabled, every epoch will have a different shuffle.
Args:
epoch: The epoch being fetched.
Returns:
A sampler which tells the data loader which sample to load next.
"""
world_size = get_world_size()
rank = get_rank()
sampler = DistributedSampler(
self, num_replicas=world_size, rank=rank, shuffle=self.shuffle
)
sampler.set_epoch(epoch)
return sampler
def iterator(self, *args, **kwargs):
"""
Returns an iterable which can be used to iterate over the data.
Args:
shuffle_seed (int, optional): Seed for the shuffle
current_phase_id (int, optional): The epoch being fetched. Needed so that
each epoch has a different shuffle order
Returns:
An iterable over the data
"""
# TODO: Fix naming to be consistent (i.e. everyone uses epoch)
shuffle_seed = kwargs.get("shuffle_seed", 0)
assert isinstance(shuffle_seed, int), "Shuffle seed must be an int"
epoch = kwargs.get("current_phase_id", 0)
assert isinstance(epoch, int), "Epoch must be an int"
num_workers_override = kwargs.get("num_workers", self.num_workers)
if num_workers_override == 0:
# set the mp context to None to placate the PyTorch dataloader
kwargs["multiprocessing_context"] = None
offset_epoch = shuffle_seed + epoch
return DataLoader(
self,
batch_size=self.batchsize_per_replica,
num_workers=num_workers_override,
pin_memory=kwargs.get("pin_memory", False),
worker_init_fn=kwargs.get("worker_init_fn", None),
multiprocessing_context=kwargs.get("multiprocessing_context", None),
sampler=self._get_sampler(epoch=offset_epoch),
)
def get_batchsize_per_replica(self):
"""
Get the batch size per replica.
Returns:
The batch size for each replica.
"""
return self.batchsize_per_replica
def get_global_batchsize(self):
"""
Get the global batch size, combined over all the replicas.
Returns:
The overall batch size of the dataset.
"""
return self.get_batchsize_per_replica() * get_world_size()
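# Illustrative sketch (not part of the library): wrapping a small in-memory dataset
# with ClassyDataset and iterating over batches on a single (non-distributed) replica.
if __name__ == "__main__":
    import torch

    samples = [{"input": torch.full((3,), float(i)), "target": i % 2} for i in range(8)]
    dataset = ClassyDataset(
        dataset=samples,
        batchsize_per_replica=4,
        shuffle=False,
        transform=None,
        num_samples=None,
    )
    for batch in dataset.iterator(num_workers=0):
        print(batch["input"].shape, batch["target"])  # torch.Size([4, 3]) tensor([0, 1, 0, 1])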
| ClassyVision-main | classy_vision/dataset/classy_dataset.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Iterable, Iterator
from .dataloader_wrapper import DataloaderWrapper
class DataloaderSkipNoneWrapper(DataloaderWrapper):
"""
Dataloader which wraps another dataloader and skip `None` batch data.
Attribute accesses are passed to the wrapped dataloader.
"""
def __init__(self, dataloader: Iterable) -> None:
super().__init__(dataloader)
def __iter__(self) -> Iterator[Any]:
self._iter = iter(self.dataloader)
return self
def __next__(self) -> Any:
# we may get `None` batch data when all the images/videos in the batch
# are corrupted. In that case, we keep fetching the next batch until
# we get a good one.
next_batch = None
while next_batch is None:
next_batch = next(self._iter)
return next_batch
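# Illustrative sketch (not part of the library): skipping corrupted (None) batches.
if __name__ == "__main__":
    batches = [None, {"input": 1}, None, {"input": 2}]
    loader = DataloaderSkipNoneWrapper(batches)
    print([batch["input"] for batch in loader])  # [1, 2]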
| ClassyVision-main | classy_vision/dataset/dataloader_skip_none_wrapper.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from typing import Any, Callable, Dict, Optional
import torch
from classy_vision.generic.distributed_util import get_rank, get_world_size
from torch.utils.data import Sampler
from torchvision import get_video_backend
from torchvision.datasets.samplers.clip_sampler import (
DistributedSampler,
RandomClipSampler,
UniformClipSampler,
)
from .classy_dataset import ClassyDataset
class MaxLengthClipSampler(Sampler):
"""MaxLengthClipSampler is a thin wrapper on top of clip samplers in TorchVision.
It takes as input a TorchVision clip sampler, and an optional argument
`num_samples` to limit the number of samples.
"""
def __init__(self, clip_sampler, num_samples=None):
"""The constructor method of MaxLengthClipSampler.
Args:
clip_sampler: clip sampler without a limit on the total number of clips
it can sample, such as RandomClipSampler and UniformClipSampler.
num_samples: if provided, it denotes the maximal number of clips the sampler
will return
"""
self.clip_sampler = clip_sampler
self.num_samples = num_samples
def __iter__(self):
num_samples = len(self)
n = 0
for clip in self.clip_sampler:
if n < num_samples:
yield clip
n += 1
else:
break
def __len__(self):
full_size = len(self.clip_sampler)
if self.num_samples is None:
return full_size
return min(full_size, self.num_samples)
class ClassyVideoDataset(ClassyDataset):
"""Interface specifying what a ClassyVision video dataset is expected to provide.
This dataset considers every video as a collection of video clips of fixed size,
specified by ``frames_per_clip``, where the step in frames between each clip
is given by ``step_between_clips``. It uses a clip sampler to sample
a specified number of clips (``clips_per_video``) from each video.
For the training set, a random clip sampler is used to
sample a small number of clips (e.g. 1) from each video.
For the testing set, a uniform clip sampler is used to evenly sample a large
number of clips (e.g. 10) from the video.
To give an example, for 2 videos with 10 and 15 frames respectively,
if ``frames_per_clip=5`` and ``step_between_clips=5``, the dataset size
will be (2 + 3) = 5, where the first two elements will come from video 1,
and the next three elements from video 2. Note that we drop clips which do
not have exactly ``frames_per_clip`` elements, so not all frames in a video
may be present.
"""
def __init__(
self,
dataset: Any,
split: str,
batchsize_per_replica: int,
shuffle: bool,
transform: Callable,
num_samples: Optional[int],
clips_per_video: int,
):
"""The constructor method of ClassyVideoDataset.
Args:
dataset: the underlying video dataset from either TorchVision or other
source. It should have an attribute *video_clips* of type
`torchvision.datasets.video_utils.VideoClips <https://github.com/
pytorch/vision/blob/master/torchvision/datasets/
video_utils.py#L46/>`_
split: dataset split. Must be either "train" or "test"
batchsize_per_replica: batch size per model replica
shuffle: If true, shuffle video clips.
transform: callable function to transform video clip sample from
ClassyVideoDataset
num_samples: If provided, return at most `num_samples` video clips
clips_per_video: The number of clips sampled from each video
"""
super(ClassyVideoDataset, self).__init__(
dataset, batchsize_per_replica, shuffle, transform, num_samples
)
# Assignments:
self.clips_per_video = clips_per_video
self.split = split
self.video_backend = get_video_backend()
@classmethod
def parse_config(cls, config: Dict[str, Any]):
"""Parse config to prepare arguments needed by the class constructor."""
assert "frames_per_clip" in config, "frames_per_clip must be set"
video_width = config.get("video_width", 0)
video_height = config.get("video_height", 0)
video_min_dimension = config.get("video_min_dimension", 0)
audio_samples = config.get("audio_samples", 0)
step_between_clips = config.get("step_between_clips", 1)
frame_rate = config.get("frame_rate", None)
clips_per_video = config.get("clips_per_video", 1)
(
transform_config,
batchsize_per_replica,
shuffle,
num_samples,
) = super().parse_config(config)
if not config["split"] == "train":
# At testing time, we do not crop frames but conduct an FCN-style evaluation.
# Video spatial resolution can vary from video to video, so we test one
# video at a time, and the number of clips in a minibatch should equal
# the number of clips sampled from a video
if not batchsize_per_replica == clips_per_video:
logging.warning(
f"For testing, batchsize per replica ({batchsize_per_replica})"
+ f"should be equal to clips_per_video ({clips_per_video})"
)
return (
transform_config,
batchsize_per_replica,
shuffle,
num_samples,
config["frames_per_clip"],
video_width,
video_height,
video_min_dimension,
audio_samples,
step_between_clips,
frame_rate,
clips_per_video,
)
@classmethod
def load_metadata(
cls,
filepath: str,
video_dir: Optional[str] = None,
update_file_path: bool = False,
) -> Dict[str, Any]:
"""Load pre-computed video dataset meta data.
Video dataset meta data computation takes minutes on a small dataset and hours
on a large dataset, and thus is time-consuming. However, it only needs to be
computed once, and can be saved into a file via :func:`save_metadata`.
The format of meta data is defined in `TorchVision <https://github.com/
pytorch/vision/blob/master/torchvision/datasets/video_utils.py#L131/>`_.
For each video, meta data contains the video file path, presentation
timestamps of all video frames, and video fps.
Args:
filepath: file path of pre-computed meta data
video_dir: If provided, the folder where video files are stored.
update_file_path: If true, replace the directory part of video file path
in meta data with the actual video directory provided in `video_dir`.
This is necessary for successfully reusing pre-computed meta data
when the video directory has been moved and is no longer consistent
with the full video file path saved in the meta data.
"""
metadata = torch.load(filepath)
if video_dir is not None and update_file_path:
# video path in meta data can be computed in a different root video folder
# If we use a different root video folder, we need to update the video paths
assert os.path.exists(video_dir), "folder does not exist: %s" % video_dir
for idx, video_path in enumerate(metadata["video_paths"]):
# video path template is $VIDEO_DIR/$CLASS_NAME/$VIDEO_FILE
dirname, filename = os.path.split(video_path)
_, class_name = os.path.split(dirname)
metadata["video_paths"][idx] = os.path.join(
video_dir, class_name, filename
)
return metadata
@classmethod
def save_metadata(cls, metadata: Dict[str, Any], filepath: str):
"""Save dataset meta data into a file.
Args:
metadata: dataset meta data, which contains video meta information, such
as video file path, video fps, and video frame timestamps in each video.
For the format of dataset meta data, check the `TorchVision
documentation <https://github.com/pytorch/vision/blob/master/
torchvision/datasets/video_utils.py#L132-L137/>`_.
filepath: file path where the meta data will be saved
"""
filedir = os.path.dirname(filepath)
if len(filedir) > 0:
# When filepath includes an absolute/relative directory, we create the
# directory if it does not exist yet
try:
os.makedirs(filedir, exist_ok=True)
logging.info(f"Save metadata to file: {filedir}")
except Exception as err:
logging.warning(f"Fail to create folder: {filedir}")
raise err
try:
torch.save(metadata, filepath)
except ValueError:
logging.warning(f"Fail to save metadata to file: {filepath}")
@property
def video_clips(self):
"""Attribute video_clips.
It is used in ``_get_sampler`` method. Its data type should be
`torchvision.datasets.video_utils.VideoClips <https://github.com/
pytorch/vision/blob/master/torchvision/datasets/video_utils.py#L46/>`_.
"""
return self.dataset.video_clips
def _get_sampler(self, epoch) -> "DistributedSampler":
if self.split == "train":
# For video model training, we don't necessarily want to use all possible
# clips in the video in one training epoch. More often, we randomly
# sample at most N clips per training video. In practice, N is often 1
clip_sampler = RandomClipSampler(self.video_clips, self.clips_per_video)
else:
# For video model testing, we sample N evenly spaced clips per test
# video. We will simply average predictions over them
clip_sampler = UniformClipSampler(self.video_clips, self.clips_per_video)
clip_sampler = MaxLengthClipSampler(clip_sampler, num_samples=self.num_samples)
world_size = get_world_size()
rank = get_rank()
sampler = DistributedSampler(
clip_sampler,
num_replicas=world_size,
rank=rank,
shuffle=self.shuffle,
group_size=self.clips_per_video,
)
sampler.set_epoch(epoch)
return sampler
def _worker_init_fn(self, worker_id):
# we need to set video backend in the worker process explicitly
# because the global variable `_video_backend` in TorchVision will
# always start with the default value `pyav` when multiprocessing
# context other than `fork` is used, and it won't inherit the value of
# `_video_backend` in the main process
from torchvision import set_video_backend
set_video_backend(self.video_backend)
def iterator(self, *args, **kwargs):
kwargs["worker_init_fn"] = self._worker_init_fn
return super(ClassyVideoDataset, self).iterator(*args, **kwargs)
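# Illustrative sketch (not part of the library): caching pre-computed video metadata
# so it only needs to be computed once. The contents and paths below are hypothetical
# placeholders for real metadata produced by a TorchVision video dataset.
if __name__ == "__main__":
    metadata = {
        "video_paths": ["/data/videos/brush_hair/clip_0.avi"],  # hypothetical
        "video_pts": [torch.zeros(1)],
        "video_fps": [30.0],
    }
    ClassyVideoDataset.save_metadata(metadata, "/tmp/video_metadata.pt")
    reloaded = ClassyVideoDataset.load_metadata("/tmp/video_metadata.pt")
    print(reloaded["video_fps"])  # [30.0]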
| ClassyVision-main | classy_vision/dataset/classy_video_dataset.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Iterable, Iterator
import torch
from classy_vision.generic.util import recursive_copy_to_gpu
from .dataloader_wrapper import DataloaderWrapper
# See Nvidia's data_prefetcher for reference
# https://github.com/NVIDIA/apex/blob/2ca894da7be755711cbbdf56c74bb7904bfd8417/examples/imagenet/main_amp.py#L264
class DataloaderAsyncGPUWrapper(DataloaderWrapper):
"""
Dataloader which wraps another dataloader, and moves the data to GPU asynchronously.
At most one batch is pre-emptively copied (per worker).
credits: @vini, nvidia Apex
"""
def __init__(self, dataloader: Iterable) -> None:
assert torch.cuda.is_available(), "This Dataloader wrapper needs a CUDA setup"
super().__init__(dataloader)
self.cache = None
self.cache_next = None
self.stream = torch.cuda.Stream()
self._iter = None
def __iter__(self) -> Iterator[Any]:
# The wrapped dataloader may have been changed in place
# rebuild a new iterator and prefetch
self._iter = iter(self.dataloader)
self.preload()
return self
def preload(self):
# Get data from the iterator
try:
self.cache_next = next(self._iter)
# Copy to the device, in a parallel CUDA stream
with torch.cuda.stream(self.stream):
self.cache = recursive_copy_to_gpu(self.cache_next, non_blocking=True)
except StopIteration:
self.cache = None
return
def __next__(self) -> Any:
# Make sure that future work in the main stream (training loop for instance)
# waits for the dependent self.stream to be done
torch.cuda.current_stream().wait_stream(self.stream)
result = self.cache
if self.cache is None:
raise StopIteration
# Pre-load the next sample
self.preload()
return result
def __len__(self) -> int:
return len(self.dataloader)
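# Illustrative sketch (not part of the library): overlapping host-to-GPU copies with
# compute. The wrapper only works on machines with a CUDA device, hence the guard.
if __name__ == "__main__" and torch.cuda.is_available():
    cpu_batches = [{"input": torch.randn(4, 3)} for _ in range(3)]
    loader = DataloaderAsyncGPUWrapper(cpu_batches)
    for batch in loader:
        print(batch["input"].device)  # cuda:0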
| ClassyVision-main | classy_vision/dataset/dataloader_async_gpu_wrapper.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
import numpy as np
from PIL import Image
from ...generic.util import numpy_seed
class SampleType(Enum):
DICT = "dict"
TUPLE = "tuple"
LIST = "list"
def _get_typed_sample(input, target, id, sample_type):
if sample_type == SampleType.DICT:
return {"input": input, "target": target, "id": id}
elif sample_type == SampleType.TUPLE:
return (input, target)
elif sample_type == SampleType.LIST:
return [input, target]
else:
raise TypeError("Provided sample_type is not dict, list, tuple")
class RandomImageDataset:
def __init__(
self,
crop_size,
num_channels,
num_classes,
num_samples,
seed,
sample_type=SampleType.DICT,
):
self.crop_size = crop_size
self.num_channels = num_channels
self.num_classes = num_classes
self.num_samples = num_samples
self.seed = seed
self.sample_type = sample_type
def __getitem__(self, idx):
with numpy_seed(self.seed + idx):
input = Image.fromarray(
(
np.random.standard_normal(
[self.crop_size, self.crop_size, self.num_channels]
)
* 255
).astype(np.uint8)
)
target = np.random.randint(self.num_classes)
return _get_typed_sample(input, target, idx, self.sample_type)
def __len__(self):
return self.num_samples
class RandomImageBinaryClassDataset:
def __init__(
self, crop_size, class_ratio, num_samples, seed, sample_type=SampleType.DICT
):
self.crop_size = crop_size
# user-defined class imbalance ratio
self.class_ratio = class_ratio
self.num_samples = num_samples
self.seed = seed
self.sample_type = sample_type
def __getitem__(self, idx):
with numpy_seed(self.seed + idx):
class_id = int(np.random.random() < self.class_ratio)
image = np.zeros((self.crop_size, self.crop_size, 3))
image[:, :, class_id] = np.random.random([self.crop_size, self.crop_size])
image[:, :, 2] = np.random.random([self.crop_size, self.crop_size])
input = Image.fromarray((image * 255).astype(np.uint8))
target = class_id
return _get_typed_sample(input, target, idx, self.sample_type)
def __len__(self):
return self.num_samples
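# Illustrative sketch (not part of the library): drawing deterministic synthetic
# samples; the same seed always yields the same images and targets.
if __name__ == "__main__":
    dataset = RandomImageBinaryClassDataset(
        crop_size=8, class_ratio=0.5, num_samples=4, seed=0
    )
    sample = dataset[0]
    print(sample["input"].size, sample["target"])  # (8, 8) and a binary target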
| ClassyVision-main | classy_vision/dataset/core/random_image_datasets.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .list_dataset import ListDataset
from .random_image_datasets import RandomImageBinaryClassDataset, RandomImageDataset
from .random_video_datasets import RandomVideoDataset
__all__ = [
"ListDataset",
"RandomImageBinaryClassDataset",
"RandomImageDataset",
"RandomVideoDataset",
]
| ClassyVision-main | classy_vision/dataset/core/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torchvision.datasets.folder import default_loader
class ListDataset:
"""Dataset that loads data using a list of items, a corresponding loader,
and a list of metadata. The default loader is an image file loader so this
dataset can be used directly with a list of image files.
You can use it without metadata if you set metadata parameter to None
"""
def __init__(self, files, metadata=None, loader=default_loader):
"""
metadata (List[Dict[Type]] or List[Type], Optional):
metadata to be added to each sample.
The Type can be anything that pytorch default_collate can handle.
If Type is tensor, make sure that the tensors are of same dimension.
"""
if metadata is not None:
assert isinstance(metadata, list), "metadata should be a list"
assert len(files) == len(metadata)
assert len(files) > 0, "Empty ListDataset is not allowed"
if not isinstance(metadata[0], dict):
metadata = [{"target": target} for target in metadata]
self.files = files
self.metadata = metadata
self.loader = loader
def __getitem__(self, idx):
assert idx >= 0 and idx < len(self)
img = self.loader(self.files[idx])
item = {"input": img}
if self.metadata is not None:
item.update(self.metadata[idx])
return item
def __len__(self):
return len(self.files)
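# Illustrative sketch (not part of the library): using ListDataset with a custom
# loader so no real image files are needed; targets come from the metadata list.
if __name__ == "__main__":
    files = ["a.txt", "b.txt"]  # hypothetical identifiers
    targets = [0, 1]  # wrapped into {"target": ...} dicts internally
    dataset = ListDataset(files, metadata=targets, loader=lambda path: path.upper())
    print(dataset[1])  # {'input': 'B.TXT', 'target': 1}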
| ClassyVision-main | classy_vision/dataset/core/list_dataset.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from ...generic.util import torch_seed
class RandomVideoDataset:
def __init__(
self,
num_classes,
split,
num_samples,
frames_per_clip,
video_width,
video_height,
audio_samples,
clips_per_video,
seed=10,
):
self.num_classes = num_classes
self.split = split
# video config
self.video_channels = 3
self.num_samples = num_samples
self.frames_per_clip = frames_per_clip
self.video_width = video_width
self.video_height = video_height
# audio config
self.audio_samples = audio_samples
self.clips_per_video = clips_per_video
# misc config
self.seed = seed
def __getitem__(self, idx):
if self.split == "train":
# assume we only sample 1 clip from each training video
target_seed_offset = idx
else:
# for video model testing, clips from the same video share the same
# target label
target_seed_offset = idx // self.clips_per_video
with torch_seed(self.seed + target_seed_offset):
target = torch.randint(0, self.num_classes, (1,)).item()
with torch_seed(self.seed + idx):
return {
"input": {
"video": torch.randint(
0,
256,
(
self.frames_per_clip,
self.video_height,
self.video_width,
self.video_channels,
),
dtype=torch.uint8,
),
"audio": torch.rand((self.audio_samples, 1), dtype=torch.float),
},
"target": target,
}
def __len__(self):
return self.num_samples
| ClassyVision-main | classy_vision/dataset/core/random_video_datasets.py |
#!/usr/bin/env python3
# Portions Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2020 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code modified from
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/mixup.py
import collections.abc as abc
import math
from typing import Any, Dict, Optional, Tuple
import numpy as np
import torch
from torch.distributions.beta import Beta
def one_hot(x, num_classes, on_value=1.0, off_value=0.0, device="cuda"):
x = x.long().view(-1, 1)
return torch.full((x.size()[0], num_classes), off_value, device=device).scatter_(
1, x, on_value
)
def mixup_target(target, num_classes, lam=1.0, smoothing=0.0):
if target.ndim == 1:
off_value = smoothing / num_classes
on_value = 1.0 - smoothing + off_value
y1 = one_hot(
target,
num_classes,
on_value=on_value,
off_value=off_value,
device=target.device,
)
y2 = one_hot(
target.flip(0),
num_classes,
on_value=on_value,
off_value=off_value,
device=target.device,
)
else:
# when 2D one-hot/multi-hot target tensor is already provided, skip label
# smoothing
assert target.ndim == 2, "target tensor shape must be 1D or 2D"
y1 = target
y2 = target.flip(0)
return y1 * lam + y2 * (1.0 - lam)
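# A small worked example of mixup_target (illustrative values): with lam=0.7 and
# no smoothing, each row blends its own one-hot label (weight 0.7) with the
# label of the sample at the mirrored batch position (weight 0.3).
def _mixup_target_example():
    targets = torch.tensor([0, 2])
    mixed = mixup_target(targets, num_classes=3, lam=0.7, smoothing=0.0)
    # mixed == tensor([[0.7, 0.0, 0.3],
    #                  [0.3, 0.0, 0.7]])
    return mixed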
def rand_bbox(img_shape, lam, margin=0.0, count=1):
"""Standard CutMix bounding-box
Generates a random square bbox based on lambda value. This impl includes
support for enforcing a border margin as percent of bbox dimensions.
Args:
img_shape (tuple): Image shape as tuple
lam (float): Cutmix lambda value
margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image)
count (int): Number of bbox to generate
"""
ratio = math.sqrt(1 - lam)
img_h, img_w = img_shape[-2:]
cut_h, cut_w = int(img_h * ratio), int(img_w * ratio)
margin_y, margin_x = int(margin * cut_h), int(margin * cut_w)
cy = torch.randint(0 + margin_y, img_h - margin_y, (count,))
cx = torch.randint(0 + margin_x, img_w - margin_x, (count,))
yl = torch.clamp(cy - cut_h // 2, 0, img_h)
yh = torch.clamp(cy + cut_h // 2, 0, img_h)
xl = torch.clamp(cx - cut_w // 2, 0, img_w)
xh = torch.clamp(cx + cut_w // 2, 0, img_w)
return yl, yh, xl, xh
def rand_bbox_minmax(img_shape, minmax, count=1):
"""Min-Max CutMix bounding-box
Inspired by Darknet cutmix impl, generates a random rectangular bbox
based on min/max percent values applied to each dimension of the input image.
    Typical defaults for minmax are in the 0.2-0.3 range for min and 0.8-0.9 for max.
Args:
img_shape (tuple): Image shape as tuple
minmax (tuple or list): Min and max bbox ratios (as percent of image size)
count (int): Number of bbox to generate
"""
assert len(minmax) == 2
img_h, img_w = img_shape[-2:]
cut_h = np.random.randint(
int(img_h * minmax[0]), int(img_h * minmax[1]), size=count
)
cut_w = np.random.randint(
int(img_w * minmax[0]), int(img_w * minmax[1]), size=count
)
# torch's randint does not accept a vector of max values
yl = np.random.randint(0, img_h - cut_h, size=count)
xl = np.random.randint(0, img_w - cut_w, size=count)
yu = yl + cut_h
xu = xl + cut_w
return [torch.from_numpy(a) for a in [yl, yu, xl, xu]]
def cutmix_bbox_and_lam(img_shape, lam, ratio_minmax=None, correct_lam=True, count=1):
"""Generate bbox and apply lambda correction."""
if ratio_minmax is not None:
yl, yu, xl, xu = rand_bbox_minmax(img_shape, ratio_minmax, count=count)
else:
yl, yu, xl, xu = rand_bbox(img_shape, lam, count=count)
if correct_lam or ratio_minmax is not None:
bbox_area = (yu - yl) * (xu - xl)
lam = (1.0 - bbox_area / float(img_shape[-2] * img_shape[-1])).item()
return (yl, yu, xl, xu), lam
def _recursive_mixup(sample: Any, coeff: float):
if isinstance(sample, (tuple, list)):
mixed_sample = []
for s in sample:
mixed_sample.append(_recursive_mixup(s, coeff))
return mixed_sample if isinstance(sample, list) else tuple(mixed_sample)
elif isinstance(sample, abc.Mapping):
mixed_sample = {}
for key, val in sample.items():
mixed_sample[key] = _recursive_mixup(val, coeff)
return mixed_sample
else:
assert torch.is_tensor(sample), "sample is expected to be a pytorch tensor"
        # Assume the training data is at least a 3D tensor (i.e. 1D data). We only
        # mixup content data tensors (e.g. video clip, audio spectrogram), and skip
        # other tensors, such as frame_idx and timestamp in video clip samples.
if sample.ndim >= 3:
sample = coeff * sample + (1.0 - coeff) * sample.flip(0)
return sample
class MixupTransform:
"""
This implements the mixup data augmentation in the paper
"mixup: Beyond Empirical Risk Minimization" (https://arxiv.org/abs/1710.09412)
"""
def __init__(
self,
mixup_alpha: float,
num_classes: Optional[int] = None,
cutmix_alpha: float = 0,
cutmix_minmax: Optional[Tuple[float]] = None,
mix_prob: float = 1.0,
switch_prob: float = 0.5,
mode: str = "batch",
correct_lam: bool = True,
label_smoothing: float = 0.0,
):
"""
Args:
mixup_alpha: the hyperparameter of Beta distribution used to sample mixup
coefficient.
num_classes: number of classes in the dataset.
cutmix_alpha: cutmix alpha value, cutmix is active if > 0.
            cutmix_minmax: cutmix min/max image ratio; if not None, cutmix is active and uses this instead of alpha.
mix_prob: probability of applying mixup or cutmix per batch or element
switch_prob: probability of switching to cutmix instead of mixup when both are active
            mode: how to apply mixup/cutmix params: per 'batch', 'pair' (pair of elements), or 'elem' (element)
correct_lam: apply lambda correction when cutmix bbox clipped by image borders.
label_smoothing: apply label smoothing to the mixed target tensor
"""
self.mixup_alpha = mixup_alpha
self.num_classes = num_classes
self.cutmix_alpha = cutmix_alpha
self.cutmix_minmax = cutmix_minmax
self.mix_prob = mix_prob
self.switch_prob = switch_prob
self.label_smoothing = label_smoothing
self.mode = mode
self.correct_lam = correct_lam
def _params_per_elem(self, batch_size):
lam = torch.ones(batch_size)
use_cutmix = torch.zeros(batch_size, dtype=torch.bool)
if self.mixup_alpha > 0.0 and self.cutmix_alpha > 0.0:
use_cutmix = torch.rand(batch_size) < self.switch_prob
lam_mix = torch.where(
use_cutmix,
Beta(self.cutmix_alpha, self.cutmix_alpha).sample((batch_size,)),
Beta(self.mixup_alpha, self.mixup_alpha).sample((batch_size,)),
)
elif self.mixup_alpha > 0.0:
lam_mix = Beta(self.mixup_alpha, self.mixup_alpha).sample((batch_size,))
elif self.cutmix_alpha > 0.0:
use_cutmix = torch.ones(batch_size, dtype=torch.bool)
lam_mix = Beta(self.cutmix_alpha, self.cutmix_alpha).sample((batch_size,))
else:
raise ValueError(
"One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
)
lam = torch.where(torch.rand(batch_size) < self.mix_prob, lam_mix, lam)
return lam, use_cutmix
def _params_per_batch(self):
lam = 1.0
use_cutmix = False
if torch.rand(1) < self.mix_prob:
if self.mixup_alpha > 0.0 and self.cutmix_alpha > 0.0:
use_cutmix = torch.rand(1) < self.switch_prob
lam_mix = (
Beta(self.cutmix_alpha, self.cutmix_alpha).sample()
if use_cutmix
else Beta(self.mixup_alpha, self.mixup_alpha).sample()
)
elif self.mixup_alpha > 0.0:
lam_mix = Beta(self.mixup_alpha, self.mixup_alpha).sample()
elif self.cutmix_alpha > 0.0:
use_cutmix = True
lam_mix = Beta(self.cutmix_alpha, self.cutmix_alpha).sample()
else:
raise ValueError(
"One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
)
lam = float(lam_mix)
return lam, use_cutmix
def _mix_elem(self, x):
batch_size = len(x)
lam_batch, use_cutmix = self._params_per_elem(batch_size)
x_orig = x.clone() # need to keep an unmodified original for mixing source
for i in range(batch_size):
j = batch_size - i - 1
lam = lam_batch[i]
if lam != 1.0:
if use_cutmix[i]:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x[i].shape,
lam,
ratio_minmax=self.cutmix_minmax,
correct_lam=self.correct_lam,
)
x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
lam_batch[i] = lam
else:
x[i] = x[i] * lam + x_orig[j] * (1 - lam)
return lam_batch.to(x).unsqueeze(1)
def _mix_pair(self, x):
batch_size = len(x)
lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)
x_orig = x.clone() # need to keep an unmodified original for mixing source
for i in range(batch_size // 2):
j = batch_size - i - 1
lam = lam_batch[i]
if lam != 1.0:
if use_cutmix[i]:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x[i].shape,
lam,
ratio_minmax=self.cutmix_minmax,
correct_lam=self.correct_lam,
)
x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh]
lam_batch[i] = lam
else:
x[i] = x[i] * lam + x_orig[j] * (1 - lam)
x[j] = x[j] * lam + x_orig[i] * (1 - lam)
lam_batch = torch.cat((lam_batch, lam_batch.flip(0)))
return lam_batch.to(x).unsqueeze(1)
def _mix_batch(self, x):
lam, use_cutmix = self._params_per_batch()
if lam == 1.0:
return 1.0
if use_cutmix:
(yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
x.shape,
lam,
ratio_minmax=self.cutmix_minmax,
correct_lam=self.correct_lam,
)
x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]
else:
x_flipped = x.flip(0).mul_(1.0 - lam)
x.mul_(lam).add_(x_flipped)
return lam
def __call__(self, sample: Dict[str, Any]) -> Dict[str, Any]:
"""
Args:
sample: the batch data.
"""
assert len(sample["target"]) % 2 == 0, "Batch size should be even"
if torch.is_tensor(sample["input"]) and sample["input"].ndim == 4:
# This is the simple case of image data batch (i.e. 4D tensor).
            # We support more advanced joint mixup and cutmix in this case.
if self.mode == "elem":
lam = self._mix_elem(sample["input"])
elif self.mode == "pair":
lam = self._mix_pair(sample["input"])
else:
lam = self._mix_batch(sample["input"])
sample["target"] = mixup_target(
sample["target"],
self.num_classes,
lam=lam,
smoothing=self.label_smoothing,
)
else:
# This is the complex case of video data batch (i.e. 5D tensor) or more complex
# data batch. We only support mixup augmentation in batch mode.
if sample["target"].ndim == 1:
assert (
self.num_classes is not None
), "num_classes is expected for 1D target"
off_value = self.label_smoothing / self.num_classes
on_value = 1.0 - self.label_smoothing + off_value
sample["target"] = one_hot(
sample["target"],
self.num_classes,
on_value=on_value,
off_value=off_value,
device=sample["target"].device,
)
else:
assert (
sample["target"].ndim == 2
), "target tensor shape must be 1D or 2D"
c = Beta(self.mixup_alpha, self.mixup_alpha).sample()
sample["target"] = c * sample["target"] + (1.0 - c) * sample["target"].flip(
0
)
sample["input"] = _recursive_mixup(sample["input"], c)
return sample
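# A minimal usage sketch with illustrative batch shapes and alpha values: for a
# 4D image batch the transform mixes the inputs and produces a soft-label
# target of shape (batch_size, num_classes).
def _mixup_transform_usage_sketch():
    transform = MixupTransform(mixup_alpha=0.2, cutmix_alpha=1.0, num_classes=10)
    batch = {
        "input": torch.rand(8, 3, 224, 224),      # batch size must be even
        "target": torch.randint(0, 10, (8,)),
    }
    mixed = transform(batch)
    # mixed["target"].shape == (8, 10)
    return mixed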
| ClassyVision-main | classy_vision/dataset/transforms/mixup.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torchvision.transforms as transforms
from . import build_transforms, ClassyTransform, register_transform
class ImagenetConstants:
"""Constant variables related to the image classification.
MEAN: often used to be subtracted from image RGB value. Computed on ImageNet.
STD: often used to divide the image RGB value after mean centering. Computed
on ImageNet.
    CROP_SIZE: the size of the image crop, which is often the input to the deep network.
RESIZE: the size of rescaled image.
"""
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
CROP_SIZE = 224
RESIZE = 256
@register_transform("apply_transform_to_key")
class ApplyTransformToKey:
"""Serializable class that applies a transform to a key specified field in samples."""
def __init__(self, transform: Callable, key: Union[int, str] = "input") -> None:
"""The constructor method of ApplyTransformToKey class.
Args:
            transform: a callable that will be applied to the value stored at the specified key
key: the key in sample whose corresponding value will undergo
the transform
"""
self.key: Union[int, str] = key
self.transform: Callable = transform
@classmethod
def from_config(cls, config: Dict[str, Any]):
transform = build_transforms(config["transforms"])
return cls(transform=transform, key=config["key"])
def __call__(
self, sample: Union[Tuple[Any], Dict[str, Any]]
) -> Union[Tuple[Any], Dict[str, Any]]:
"""Updates sample by applying a transform to the value at the specified key.
Args:
sample: input sample which will be transformed
"""
if sample is None:
return sample
# Asserts + deal with tuple immutability
convert_to_tuple = False
if isinstance(sample, dict):
assert (
self.key in sample
), "This transform only supports dicts with key '{}'".format(self.key)
        elif isinstance(sample, (tuple, list)):
            assert self.key < len(sample), (
                "This transform only supports tuples / lists with key less "
                "than {length}, key provided {key}".format(
                    length=len(sample), key=self.key
                )
            )
# Convert to list for transformation
if isinstance(sample, tuple):
convert_to_tuple = True
sample = list(sample)
sample[self.key] = self.transform(sample[self.key])
if convert_to_tuple:
sample = tuple(sample)
return sample
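# A minimal usage sketch (the crop size is an arbitrary illustrative choice):
# only the value stored under "input" is transformed, "target" is left as is.
def _apply_transform_to_key_sketch():
    transform = ApplyTransformToKey(transforms.CenterCrop(224), key="input")
    # transform({"input": pil_image, "target": 3}) center-crops only the image
    return transform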
@register_transform("imagenet_augment")
class ImagenetAugmentTransform(ClassyTransform):
"""The default image transform with data augmentation.
It is often useful for training models on Imagenet. It sequentially resizes
the image into a random scale, takes a random spatial cropping, randomly flips
the image horizontally, transforms PIL image data into a torch.Tensor and
normalizes the pixel values by mean subtraction and standard deviation division.
"""
def __init__(
self,
crop_size: int = ImagenetConstants.CROP_SIZE,
mean: List[float] = ImagenetConstants.MEAN,
std: List[float] = ImagenetConstants.STD,
):
"""The constructor method of ImagenetAugmentTransform class.
Args:
crop_size: expected output size per dimension after random cropping
mean: a 3-tuple denoting the pixel RGB mean
std: a 3-tuple denoting the pixel RGB standard deviation
"""
self.transform = transforms.Compose(
[
transforms.RandomResizedCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std),
]
)
def __call__(self, img):
"""Callable function which applies the tranform to the input image.
Args:
image: input image that will undergo the transform
"""
return self.transform(img)
@register_transform("imagenet_no_augment")
class ImagenetNoAugmentTransform(ClassyTransform):
"""The default image transform without data augmentation.
It is often useful for testing models on Imagenet. It sequentially resizes
the image, takes a central cropping, transforms PIL image data into a
torch.Tensor and normalizes the pixel values by mean subtraction and standard
deviation division.
"""
def __init__(
self,
resize: int = ImagenetConstants.RESIZE,
crop_size: int = ImagenetConstants.CROP_SIZE,
mean: List[float] = ImagenetConstants.MEAN,
std: List[float] = ImagenetConstants.STD,
):
"""The constructor method of ImagenetNoAugmentTransform class.
Args:
resize: expected image size per dimension after resizing
crop_size: expected size for a dimension of central cropping
mean: a 3-tuple denoting the pixel RGB mean
std: a 3-tuple denoting the pixel RGB standard deviation
"""
self.transform = transforms.Compose(
[
transforms.Resize(resize),
transforms.CenterCrop(crop_size),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std),
]
)
def __call__(self, img):
"""Callable function which applies the tranform to the input image.
Args:
image: input image that will undergo the transform
"""
return self.transform(img)
@register_transform("generic_image_transform")
class GenericImageTransform(ClassyTransform):
"""Default transform for images used in the classification task
This transform does several things. First, it expects a tuple or
list input (torchvision datasets supply tuples / lists). Second,
it applies a user-provided image transforms to the first entry in
the tuple (again, matching the torchvision tuple format). Third,
it transforms the tuple to a dict sample with entries "input" and
"target".
The defaults are for the standard imagenet augmentations
This is just a convenience wrapper to cover the common
use-case. You can get the same behavior by composing `torchvision
transforms <https://pytorch.org/docs/stable/torchvision/transforms.html>`_
+ :class:`ApplyTransformToKey` + :class:`TupleToMapTransform`.
"""
def __init__(
self, transform: Optional[Callable] = None, split: Optional[str] = None
):
"""Constructor for GenericImageTransfrom
Only one of the two arguments (*transform*, *split*) should be specified.
Args:
transform: A callable or ClassyTransform to be applied to the image only
split: 'train' or 'test'
"""
assert (
split is None or transform is None
), "If split is not None then transform must be None"
assert split in [None, "train", "test"], (
"If specified, split should be either 'train' or 'test', "
"instead got {}".format(split)
)
self._transform = transform
if split is not None:
self._transform = (
ImagenetAugmentTransform()
if split == "train"
else ImagenetNoAugmentTransform()
)
@classmethod
def from_config(cls, config: Dict[str, Any]):
transform = None
if "transforms" in config:
transform = build_transforms(config["transforms"])
split = config.get("split")
return cls(transform, split)
def __call__(self, sample: Tuple[Any]):
"""Applied transform to sample
Args:
sample: A tuple with length >= 2. The first entry should
be the image data, the second entry should be the
target data.
"""
image = sample[0]
transformed_image = (
self._transform(image) if self._transform is not None else image
)
new_sample = {"input": transformed_image, "target": sample[1]}
        # Any additional metadata is stored under its (stringified) tuple index
if len(sample) > 2:
for i in range(2, len(sample)):
new_sample[str(i)] = sample[i]
return new_sample
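# A minimal usage sketch: a torchvision-style (image, target) tuple is converted
# into the {"input": ..., "target": ...} dict format, with the transform applied
# to the image only. Passing split="train" instead would use the default
# ImagenetAugmentTransform.
def _generic_image_transform_sketch():
    transform = GenericImageTransform(transform=ImagenetNoAugmentTransform())
    # transform((pil_image, 7)) -> {"input": <normalized tensor>, "target": 7}
    return transform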
@register_transform("tuple_to_map")
class TupleToMapTransform(ClassyTransform):
"""A transform which maps image data from tuple to dict.
This transform has a list of keys (key1, key2, ...),
takes a sample of the form (data1, data2, ...) and
returns a sample of the form {key1: data1, key2: data2, ...}
If duplicate keys are used, the corresponding values are merged into a list.
It is useful for mapping output from datasets like the `PyTorch
ImageFolder <https://github.com/pytorch/vision/blob/master/torchvision/
datasets/folder.py#L177>`_ dataset (tuple) to dict with named data fields.
If sample is already a dict with the required keys, pass sample through.
"""
def __init__(self, list_of_map_keys: List[str]):
"""The constructor method of TupleToMapTransform class.
Args:
list_of_map_keys: a list of dict keys that in order will be mapped
to items in the input data sample list
"""
self._map_keys = list_of_map_keys
def __call__(self, sample):
"""Transform sample from type tuple to type dict.
Args:
sample: input sample which will be transformed
"""
# If already a dict/map with appropriate keys, exit early
if isinstance(sample, dict):
for key in self._map_keys:
assert (
key in sample
), "Sample {sample} must be a tuple or a dict with keys {keys}".format(
sample=str(sample), keys=str(self._map_keys)
)
return sample
assert len(sample) == len(self._map_keys), (
"Provided sample tuple must have same number of keys "
"as provided to transform"
)
output_sample = collections.defaultdict(list)
for idx, s in enumerate(sample):
output_sample[self._map_keys[idx]].append(s)
# Unwrap list if only one item in dict.
for k, v in output_sample.items():
if len(v) == 1:
output_sample[k] = v[0]
return output_sample
DEFAULT_KEY_MAP = TupleToMapTransform(["input", "target"])
def build_field_transform_default_imagenet(
config: Optional[List[Dict[str, Any]]],
default_transform: Optional[Callable] = None,
    split: Optional[str] = None,
key: Union[int, str] = "input",
key_map_transform: Optional[Callable] = DEFAULT_KEY_MAP,
) -> Callable:
"""Returns a ApplyTransformToKey which applies a transform on the specified key.
The transform is built from the config, if it is not None.
Otherwise, uses one of the two mutually exclusive args: If
default_transform is not None, it is used. If split is not None,
imagenet transforms are used, using augmentation for "train", no
augmentation otherwise.
    This function can also prepend a transform that maps tuples (or other
    keys) to a desired set of keys.
Args:
config: field transform config
default_transform: used if config is None
split: split for dataset, e.g. "train" or "test"
key: Key to apply transform to
key_map_transform: Used to produce desired map / keys
(e.g. for torchvision datasets, default samples is a
tuple so this argument can be used to map
(input, target) -> {"input": input, "target": target})
"""
assert (
default_transform is None or split is None
), "Can only specify one of default_transform and split"
if config is None:
if default_transform is not None:
transform = default_transform
elif split is not None:
transform = (
ImagenetAugmentTransform()
if split == "train"
else ImagenetNoAugmentTransform()
)
else:
raise ValueError("No transform config provided with no defaults")
else:
transform = build_transforms(config)
transform = ApplyTransformToKey(transform, key=key)
if key_map_transform is None:
return transform
return transforms.Compose([key_map_transform, transform])
def default_unnormalize(img):
"""Default unnormalization transform which undo the "transforms.Normalize".
Specially, it cancels out mean subtraction and standard deviation division.
Args:
img (torch.Tensor): image data to which the transform will be applied
"""
# TODO T39752655: Allow this to be configurable
img = img.clone()
for channel, std, mean in zip(img, ImagenetConstants.STD, ImagenetConstants.MEAN):
channel.mul_(std).add_(mean)
return img
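# A minimal usage sketch of build_field_transform_default_imagenet: with no
# config and split="test", an (image, target) tuple is first mapped to
# {"input": ..., "target": ...} and the default no-augment ImageNet transform is
# then applied to the "input" field.
def _build_field_transform_sketch():
    transform = build_field_transform_default_imagenet(config=None, split="test")
    # transform((pil_image, 1))["input"] is a normalized 3 x 224 x 224 tensor
    return transform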
| ClassyVision-main | classy_vision/dataset/transforms/util.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
from typing import Any, Callable, Dict, List, Optional, Union
import torch
import torchvision.transforms as transforms
import torchvision.transforms._transforms_video as transforms_video
from . import build_transforms, ClassyTransform, register_transform
from .util import ApplyTransformToKey, ImagenetConstants, TupleToMapTransform
class VideoConstants:
"""Constant variables related to the video classification.
Use the same mean/std from image classification to enable the parameter
inflation where parameters of 2D conv in image model can be inflated into
3D conv in video model.
MEAN: often used to be subtracted from pixel RGB value.
STD: often used to divide the pixel RGB value after mean centering.
SIZE_RANGE: a (min_size, max_size) tuple which denotes the range of
size of the rescaled video clip.
CROP_SIZE: the size of spatial cropping in the video clip.
"""
    MEAN = ImagenetConstants.MEAN
STD = ImagenetConstants.STD
SIZE_RANGE = (128, 160)
CROP_SIZE = 112
def _get_rescaled_size(scale, h, w):
if h < w:
new_h = scale
new_w = int(scale * w / h)
else:
new_w = scale
new_h = int(scale * h / w)
return new_h, new_w
@register_transform("video_clip_random_resize_crop")
class VideoClipRandomResizeCrop(ClassyTransform):
"""A video clip transform that is often useful for trainig data.
Given a size range, randomly choose a size. Rescale the clip so that
its short edge equals to the chosen size. Then randomly crop the video
clip with the specified size.
Such training data augmentation is used in VGG net
(https://arxiv.org/abs/1409.1556).
Also see reference implementation `Kinetics.spatial_sampling` in SlowFast
codebase.
"""
def __init__(
self,
crop_size: Union[int, List[int]],
size_range: List[int],
interpolation_mode: str = "bilinear",
):
"""The constructor method of VideoClipRandomResizeCrop class.
Args:
crop_size: int or 2-tuple as the expected output crop_size (height, width)
            size_range: a 2-tuple denoting the min and max size for the rescaled short edge
interpolation_mode: Default: "bilinear"
"""
if isinstance(crop_size, tuple):
assert len(crop_size) == 2, "crop_size should be tuple (height, width)"
self.crop_size = crop_size
else:
self.crop_size = (crop_size, crop_size)
self.interpolation_mode = interpolation_mode
self.size_range = size_range
def __call__(self, clip):
"""Callable function which applies the tranform to the input clip.
Args:
clip (torch.Tensor): input clip tensor
"""
# clip size: C x T x H x W
rand_size = random.randint(self.size_range[0], self.size_range[1])
new_h, new_w = _get_rescaled_size(rand_size, clip.size()[2], clip.size()[3])
clip = torch.nn.functional.interpolate(
clip, size=(new_h, new_w), mode=self.interpolation_mode
)
assert (
self.crop_size[0] <= new_h and self.crop_size[1] <= new_w
), "crop size can not be larger than video frame size"
i = random.randint(0, new_h - self.crop_size[0])
j = random.randint(0, new_w - self.crop_size[1])
clip = clip[:, :, i : i + self.crop_size[0], j : j + self.crop_size[1]]
return clip
@register_transform("video_clip_resize")
class VideoClipResize(ClassyTransform):
"""A video clip transform that is often useful for testing data.
Given an input size, rescale the clip so that its short edge equals to
the input size while aspect ratio is preserved.
"""
def __init__(self, size: int, interpolation_mode: str = "bilinear"):
"""The constructor method of VideoClipResize class.
Args:
size: input size
interpolation_mode: Default: "bilinear". See valid values in
(https://pytorch.org/docs/stable/nn.functional.html#torch.nn.
functional.interpolate)
"""
self.interpolation_mode = interpolation_mode
self.size = size
def __call__(self, clip):
"""Callable function which applies the tranform to the input clip.
Args:
clip (torch.Tensor): input clip tensor
"""
# clip size: C x T x H x W
if not min(clip.size()[2], clip.size()[3]) == self.size:
new_h, new_w = _get_rescaled_size(self.size, clip.size()[2], clip.size()[3])
clip = torch.nn.functional.interpolate(
clip, size=(new_h, new_w), mode=self.interpolation_mode
)
return clip
@register_transform("video_default_augment")
class VideoDefaultAugmentTransform(ClassyTransform):
"""This is the default video transform with data augmentation which is useful for
training.
It sequentially prepares a torch.Tensor of video data, randomly
resizes the video clip, takes a random spatial cropping, randomly flips the
video clip horizontally, and normalizes the pixel values by mean subtraction
and standard deviation division.
"""
def __init__(
self,
crop_size: Union[int, List[int]] = VideoConstants.CROP_SIZE,
size_range: List[int] = VideoConstants.SIZE_RANGE,
mean: List[float] = VideoConstants.MEAN,
std: List[float] = VideoConstants.STD,
):
"""The constructor method of VideoDefaultAugmentTransform class.
Args:
crop_size: expected output crop_size (height, width)
            size_range: a 2-tuple denoting the min and max size
mean: a 3-tuple denoting the pixel RGB mean
std: a 3-tuple denoting the pixel RGB standard deviation
"""
self._transform = transforms.Compose(
[
transforms_video.ToTensorVideo(),
# TODO(zyan3): migrate VideoClipRandomResizeCrop to TorchVision
VideoClipRandomResizeCrop(crop_size, size_range),
transforms_video.RandomHorizontalFlipVideo(),
transforms_video.NormalizeVideo(mean=mean, std=std),
]
)
def __call__(self, video):
"""Apply the default transform with data augmentation to video.
Args:
video: input video that will undergo the transform
"""
return self._transform(video)
@register_transform("video_default_no_augment")
class VideoDefaultNoAugmentTransform(ClassyTransform):
"""This is the default video transform without data augmentation which is useful
for testing.
    It sequentially prepares a torch.Tensor of video data, resizes the
    video clip to have the specified short edge, and normalizes the pixel values
    by mean subtraction and standard deviation division.
"""
def __init__(
self,
size: int = VideoConstants.SIZE_RANGE[0],
mean: List[float] = VideoConstants.MEAN,
std: List[float] = VideoConstants.STD,
):
"""The constructor method of VideoDefaultNoAugmentTransform class.
Args:
size: the short edge of rescaled video clip
mean: a 3-tuple denoting the pixel RGB mean
std: a 3-tuple denoting the pixel RGB standard deviation
"""
self._transform = transforms.Compose(
# At testing stage, central cropping is not used because we
# conduct fully convolutional-style testing
[
transforms_video.ToTensorVideo(),
# TODO(zyan3): migrate VideoClipResize to TorchVision
VideoClipResize(size),
transforms_video.NormalizeVideo(mean=mean, std=std),
]
)
def __call__(self, video):
"""Apply the default transform without data augmentation to video.
Args:
video: input video that will undergo the transform
"""
return self._transform(video)
@register_transform("dummy_audio_transform")
class DummyAudioTransform(ClassyTransform):
"""This is a dummy audio transform.
    It ignores the actual audio data and returns an empty tensor. It is useful when
    the actual audio data is a raw waveform with a varying number of samples,
    which makes minibatch assembly impossible.
"""
def __init__(self):
"""The constructor method of DummyAudioTransform class."""
pass
def __call__(self, _audio):
"""Callable function which applies the tranform to the input audio data.
Args:
audio: input audio data that will undergo the dummy transform
"""
return torch.zeros(0, 1, dtype=torch.float)
# Maps (video, audio, target) tuple to {'input': (video, audio), 'target': target}
DEFAULT_KEY_MAP = TupleToMapTransform(["input", "input", "target"])
def build_video_field_transform_default(
config: Optional[Dict[str, List[Dict[str, Any]]]],
split: str = "train",
key: Optional[str] = "input",
key_map_transform: Optional[Callable] = DEFAULT_KEY_MAP,
) -> Callable:
"""Returns transform that first maps sample to video keys, then
returns a transform on the specified key in dict.
Converts tuple (list, etc) sample to dict with input / target keys.
For a dict sample, verifies that dict has input / target keys.
For all other samples throws.
Args:
config: If provided, it is a dict where key is the data modality, and
value is a dict specifying the transform config
split: the split of the data to which the transform will be applied
key: the key in data sample of type dict whose corresponding value will
undergo the transform
"""
if config is None and split is None:
raise ValueError("No transform config provided with no defaults")
transforms_for_type = {
"video": VideoDefaultAugmentTransform()
if split == "train"
else VideoDefaultNoAugmentTransform(),
"audio": DummyAudioTransform(),
}
if config is not None:
transforms_for_type.update(
{
mode: build_transforms(modal_config)
for mode, modal_config in config.items()
}
)
transform = transforms.Compose(
[
ApplyTransformToKey(default_transform, key=mode)
for mode, default_transform in transforms_for_type.items()
]
)
if key is not None:
transform = ApplyTransformToKey(
transforms.Compose([TupleToMapTransform(["video", "audio"]), transform]),
key=key,
)
if key_map_transform is None:
return transform
return transforms.Compose([key_map_transform, transform])
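# A minimal usage sketch: with no config and split="test", a
# (video, audio, target) tuple is mapped to
# {"input": {"video": ..., "audio": ...}, "target": ...}; the video is rescaled
# and normalized and the audio is replaced by an empty tensor via
# DummyAudioTransform.
def _build_video_field_transform_sketch():
    transform = build_video_field_transform_default(config=None, split="test")
    # sample = transform((video_tensor, audio_tensor, 5))
    # sample["input"]["video"] is a normalized C x T x H x W tensor
    return transform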
| ClassyVision-main | classy_vision/dataset/transforms/util_video.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
from typing import Any, Dict
class ClassyTransform(ABC):
"""
Class representing a data transform abstraction.
Data transform is most often needed to pre-process input data (e.g. image, video)
before sending it to a model. But it can also be used for other purposes.
"""
@abstractmethod
def __call__(self, image):
"""
The interface `__call__` is used to transform the input data. It should contain
the actual implementation of data transform.
Args:
image: input image data
"""
pass
@classmethod
def from_config(cls, config: Dict[str, Any]):
return cls(**config)
| ClassyVision-main | classy_vision/dataset/transforms/classy_transform.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List
import torchvision.transforms as transforms
from classy_vision.generic.registry_utils import import_all_modules
from classy_vision.generic.util import log_class_usage
from .classy_transform import ClassyTransform
FILE_ROOT = Path(__file__).parent
TRANSFORM_REGISTRY = {}
TRANSFORM_REGISTRY_TB = {}
TRANSFORM_VIDEO = [
"RandomCropVideo",
"RandomResizedCropVideo",
"CenterCropVideo",
"NormalizeVideo",
"ToTensorVideo",
"Totensorvideo",
"RandomHorizontalFlipVideo",
]
def build_transform(transform_config: Dict[str, Any]) -> Callable:
"""Builds a :class:`ClassyTransform` from a config.
This assumes a 'name' key in the config which is used to determine what
transform class to instantiate. For instance, a config `{"name":
"my_transform", "foo": "bar"}` will find a class that was registered as
"my_transform" (see :func:`register_transform`) and call .from_config on
it.
In addition to transforms registered with :func:`register_transform`, we
also support instantiating transforms available in the
`torchvision.transforms <https://pytorch.org/docs/stable/torchvision/
transforms.html>`_ module. Any keys in the config will get expanded
to parameters of the transform constructor. For instance, the following
call will instantiate a :class:`torchvision.transforms.CenterCrop`:
.. code-block:: python
build_transform({"name": "CenterCrop", "size": 224})
"""
assert (
"name" in transform_config
), f"name not provided for transform: {transform_config}"
name = transform_config["name"]
transform_args = {k: v for k, v in transform_config.items() if k != "name"}
if name in TRANSFORM_REGISTRY:
transform = TRANSFORM_REGISTRY[name].from_config(transform_args)
else:
# the name should be available in torchvision.transforms
# if users specify the torchvision transform name in snake case,
# we need to convert it to title case.
if not (hasattr(transforms, name) or (name in TRANSFORM_VIDEO)):
name = name.title().replace("_", "")
assert hasattr(transforms, name) or (name in TRANSFORM_VIDEO), (
f"{name} isn't a registered tranform"
", nor is it available in torchvision.transforms"
)
if hasattr(transforms, name):
transform = getattr(transforms, name)(**transform_args)
else:
import torchvision.transforms._transforms_video as transforms_video
transform = getattr(transforms_video, name)(**transform_args)
log_class_usage("Transform", transform.__class__)
return transform
def build_transforms(transforms_config: List[Dict[str, Any]]) -> Callable:
"""
Builds a transform from the list of transform configurations.
"""
transform_list = [build_transform(config) for config in transforms_config]
return transforms.Compose(transform_list)
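# A minimal sketch of a list config for build_transforms: the first entry is a
# transform registered in this package, the second resolves to
# torchvision.transforms.RandomHorizontalFlip; the probability value is an
# illustrative choice.
def _build_transforms_sketch():
    return build_transforms(
        [
            {"name": "imagenet_no_augment"},
            {"name": "RandomHorizontalFlip", "p": 0.5},
        ]
    )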
def register_transform(name: str, bypass_checks=False):
"""Registers a :class:`ClassyTransform` subclass.
This decorator allows Classy Vision to instantiate a subclass of
:class:`ClassyTransform` from a configuration file, even if the class itself is not
part of the Classy Vision framework. To use it, apply this decorator to a
ClassyTransform subclass like this:
.. code-block:: python
@register_transform("my_transform")
class MyTransform(ClassyTransform):
...
To instantiate a transform from a configuration file, see
:func:`build_transform`."""
def register_transform_cls(cls: Callable[..., Callable]):
if not bypass_checks:
if name in TRANSFORM_REGISTRY:
msg = "Cannot register duplicate transform ({}). Already registered at \n{}\n"
raise ValueError(msg.format(name, TRANSFORM_REGISTRY_TB[name]))
if hasattr(transforms, name) or (name in TRANSFORM_VIDEO):
raise ValueError(
"{} has existed in torchvision.transforms, Please change the name!".format(
name
)
)
TRANSFORM_REGISTRY[name] = cls
tb = "".join(traceback.format_stack())
TRANSFORM_REGISTRY_TB[name] = tb
return cls
return register_transform_cls
# automatically import any Python files in the transforms/ directory
import_all_modules(FILE_ROOT, "classy_vision.dataset.transforms")
from .lighting_transform import LightingTransform # isort:skip
from .util import ApplyTransformToKey # isort:skip
from .util import ImagenetAugmentTransform # isort:skip
from .util import ImagenetNoAugmentTransform # isort:skip
from .util import GenericImageTransform # isort:skip
from .util import TupleToMapTransform # isort:skip
__all__ = [
"ClassyTransform",
"ImagenetAugmentTransform",
"ImagenetNoAugmentTransform",
"GenericImageTransform",
"ApplyTransformToKey",
"TupleToMapTransform",
"LightingTransform",
"register_transform",
"build_transform",
"build_transforms",
]
| ClassyVision-main | classy_vision/dataset/transforms/__init__.py |
#!/usr/bin/env python3
# Portions Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# MIT License
#
# Copyright (c) 2018 Philip Popien
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Code modified from
# https://github.com/DeepVoltaire/AutoAugment/blob/master/autoaugment.py
import random
from enum import auto, Enum
from functools import partial
from typing import Any, Callable, NamedTuple, Sequence, Tuple
import numpy as np
from classy_vision.dataset.transforms import ClassyTransform, register_transform
from PIL import Image, ImageEnhance, ImageOps
MIDDLE_GRAY = (128, 128, 128)
class ImageOp(Enum):
SHEAR_X = auto()
SHEAR_Y = auto()
TRANSLATE_X = auto()
TRANSLATE_Y = auto()
ROTATE = auto()
AUTO_CONTRAST = auto()
INVERT = auto()
EQUALIZE = auto()
SOLARIZE = auto()
POSTERIZE = auto()
CONTRAST = auto()
COLOR = auto()
BRIGHTNESS = auto()
SHARPNESS = auto()
class ImageOpSetting(NamedTuple):
ranges: Sequence
function: Callable
def shear_x(img: Any, magnitude: int, fillcolor: Any = None) -> Any:
return img.transform(
img.size,
Image.AFFINE,
(1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
Image.BICUBIC,
fillcolor=fillcolor,
)
def shear_y(img: Any, magnitude: int, fillcolor: Any = None) -> Any:
return img.transform(
img.size,
Image.AFFINE,
(1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
Image.BICUBIC,
fillcolor=fillcolor,
)
def translate_x(img: Any, magnitude: int, fillcolor: Any = None) -> Any:
return img.transform(
img.size,
Image.AFFINE,
(1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
fillcolor=fillcolor,
)
def translate_y(img: Any, magnitude: int, fillcolor: Any = None) -> Any:
return img.transform(
img.size,
Image.AFFINE,
(1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
fillcolor=fillcolor,
)
# from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand # noqa
def rotate_with_fill(img: Any, magnitude: int) -> Any:
rot = img.convert("RGBA").rotate(magnitude)
return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4), rot).convert(
img.mode
)
def color(img: Any, magnitude: int) -> Any:
return ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1]))
def posterize(img: Any, magnitude: int) -> Any:
return ImageOps.posterize(img, magnitude)
def solarize(img: Any, magnitude: int) -> Any:
return ImageOps.solarize(img, magnitude)
def contrast(img: Any, magnitude: int) -> Any:
return ImageEnhance.Contrast(img).enhance(1 + magnitude * random.choice([-1, 1]))
def sharpness(img: Any, magnitude: int) -> Any:
return ImageEnhance.Sharpness(img).enhance(1 + magnitude * random.choice([-1, 1]))
def brightness(img: Any, magnitude: int) -> Any:
return ImageEnhance.Brightness(img).enhance(1 + magnitude * random.choice([-1, 1]))
def auto_contrast(img: Any, magnitude: int) -> Any:
return ImageOps.autocontrast(img)
def equalize(img: Any, magnitude: int) -> Any:
return ImageOps.equalize(img)
def invert(img: Any, magnitude: int) -> Any:
return ImageOps.invert(img)
def get_image_op_settings(
image_op: ImageOp, fillcolor: Tuple[int, int, int] = MIDDLE_GRAY
):
return {
ImageOp.SHEAR_X: ImageOpSetting(
np.linspace(0, 0.3, 10), partial(shear_x, fillcolor=fillcolor)
),
ImageOp.SHEAR_Y: ImageOpSetting(
np.linspace(0, 0.3, 10), partial(shear_y, fillcolor=fillcolor)
),
ImageOp.TRANSLATE_X: ImageOpSetting(
np.linspace(0, 150 / 331, 10), partial(translate_x, fillcolor=fillcolor)
),
ImageOp.TRANSLATE_Y: ImageOpSetting(
np.linspace(0, 150 / 331, 10), partial(translate_y, fillcolor=fillcolor)
),
ImageOp.ROTATE: ImageOpSetting(np.linspace(0, 30, 10), rotate_with_fill),
ImageOp.COLOR: ImageOpSetting(np.linspace(0.0, 0.9, 10), color),
ImageOp.POSTERIZE: ImageOpSetting(
            np.round(np.linspace(8, 4, 10), 0).astype(int), posterize  # builtin int: the np.int alias was removed from NumPy
),
ImageOp.SOLARIZE: ImageOpSetting(np.linspace(256, 0, 10), solarize),
ImageOp.CONTRAST: ImageOpSetting(np.linspace(0.0, 0.9, 10), contrast),
ImageOp.SHARPNESS: ImageOpSetting(np.linspace(0.0, 0.9, 10), sharpness),
ImageOp.BRIGHTNESS: ImageOpSetting(np.linspace(0.0, 0.9, 10), brightness),
ImageOp.AUTO_CONTRAST: ImageOpSetting([0] * 10, auto_contrast),
ImageOp.EQUALIZE: ImageOpSetting([0] * 10, equalize),
ImageOp.INVERT: ImageOpSetting([0] * 10, invert),
}[image_op]
class SubPolicy:
def __init__(
self,
operation1: ImageOp,
magnitude_idx1: int,
p1: float,
operation2: ImageOp,
magnitude_idx2: int,
p2: float,
fillcolor: Tuple[int, int, int] = MIDDLE_GRAY,
) -> None:
operation1_settings = get_image_op_settings(operation1, fillcolor)
self.operation1 = operation1_settings.function
self.magnitude1 = operation1_settings.ranges[magnitude_idx1]
self.p1 = p1
operation2_settings = get_image_op_settings(operation2, fillcolor)
self.operation2 = operation2_settings.function
self.magnitude2 = operation2_settings.ranges[magnitude_idx2]
self.p2 = p2
def __call__(self, img: Any) -> Any:
if random.random() < self.p1:
img = self.operation1(img, self.magnitude1)
if random.random() < self.p2:
img = self.operation2(img, self.magnitude2)
return img
@register_transform("imagenet_autoaugment")
class ImagenetAutoAugment(ClassyTransform):
"""Randomly choose one of the best 24 Sub-policies on ImageNet.
Example:
>>> policy = ImageNetPolicy()
>>> transformed = policy(image)
Example as a PyTorch Transform:
>>> transform=transforms.Compose([
>>> transforms.Resize(256),
>>> ImageNetPolicy(),
>>> transforms.ToTensor()])
"""
def __init__(self, fillcolor: Tuple[int, int, int] = MIDDLE_GRAY) -> None:
self.policies = [
SubPolicy(ImageOp.POSTERIZE, 8, 0.4, ImageOp.ROTATE, 9, 0.6, fillcolor),
SubPolicy(
ImageOp.SOLARIZE, 5, 0.6, ImageOp.AUTO_CONTRAST, 5, 0.6, fillcolor
),
SubPolicy(ImageOp.EQUALIZE, 8, 0.8, ImageOp.EQUALIZE, 3, 0.6, fillcolor),
SubPolicy(ImageOp.POSTERIZE, 7, 0.6, ImageOp.POSTERIZE, 6, 0.6, fillcolor),
SubPolicy(ImageOp.EQUALIZE, 7, 0.4, ImageOp.SOLARIZE, 4, 0.2, fillcolor),
SubPolicy(ImageOp.EQUALIZE, 4, 0.4, ImageOp.ROTATE, 8, 0.8, fillcolor),
SubPolicy(ImageOp.SOLARIZE, 3, 0.6, ImageOp.EQUALIZE, 7, 0.6, fillcolor),
SubPolicy(ImageOp.POSTERIZE, 5, 0.8, ImageOp.EQUALIZE, 2, 1.0, fillcolor),
SubPolicy(ImageOp.ROTATE, 3, 0.2, ImageOp.SOLARIZE, 8, 0.6, fillcolor),
SubPolicy(ImageOp.EQUALIZE, 8, 0.6, ImageOp.POSTERIZE, 6, 0.4, fillcolor),
SubPolicy(ImageOp.ROTATE, 8, 0.8, ImageOp.COLOR, 0, 0.4, fillcolor),
SubPolicy(ImageOp.ROTATE, 9, 0.4, ImageOp.EQUALIZE, 2, 0.6, fillcolor),
SubPolicy(ImageOp.EQUALIZE, 7, 0.0, ImageOp.EQUALIZE, 8, 0.8, fillcolor),
SubPolicy(ImageOp.INVERT, 4, 0.6, ImageOp.EQUALIZE, 8, 1.0, fillcolor),
SubPolicy(ImageOp.COLOR, 4, 0.6, ImageOp.CONTRAST, 8, 1.0, fillcolor),
SubPolicy(ImageOp.ROTATE, 8, 0.8, ImageOp.COLOR, 2, 1.0, fillcolor),
SubPolicy(ImageOp.COLOR, 8, 0.8, ImageOp.SOLARIZE, 7, 0.8, fillcolor),
SubPolicy(ImageOp.SHARPNESS, 7, 0.4, ImageOp.INVERT, 8, 0.6, fillcolor),
SubPolicy(ImageOp.SHEAR_X, 5, 0.6, ImageOp.EQUALIZE, 9, 1.0, fillcolor),
SubPolicy(ImageOp.COLOR, 0, 0.4, ImageOp.EQUALIZE, 3, 0.6, fillcolor),
SubPolicy(ImageOp.EQUALIZE, 7, 0.4, ImageOp.SOLARIZE, 4, 0.2, fillcolor),
SubPolicy(
ImageOp.SOLARIZE, 5, 0.6, ImageOp.AUTO_CONTRAST, 5, 0.6, fillcolor
),
SubPolicy(ImageOp.INVERT, 4, 0.6, ImageOp.EQUALIZE, 8, 1.0, fillcolor),
SubPolicy(ImageOp.COLOR, 4, 0.6, ImageOp.CONTRAST, 8, 1.0, fillcolor),
]
def __call__(self, img: Any) -> Any:
policy_idx = random.randint(0, len(self.policies) - 1)
return self.policies[policy_idx](img)
| ClassyVision-main | classy_vision/dataset/transforms/autoaugment.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import register_transform
from .classy_transform import ClassyTransform
_IMAGENET_EIGEN_VAL = [0.2175, 0.0188, 0.0045]
_IMAGENET_EIGEN_VEC = [
[-144.7125, 183.396, 102.2295],
[-148.104, -1.1475, -207.57],
[-148.818, -177.174, 107.1765],
]
_DEFAULT_COLOR_LIGHTING_STD = 0.1
@register_transform("lighting")
class LightingTransform(ClassyTransform):
"""
    Lighting noise (AlexNet-style, PCA-based noise).
This trick was originally used in `AlexNet paper
<https://papers.nips.cc/paper/4824-imagenet-classification
-with-deep-convolutional-neural-networks.pdf>`_
    The eigenvalues and eigenvectors are taken from caffe2 `ImageInputOp.h
<https://github.com/pytorch/pytorch/blob/master/caffe2/image/
image_input_op.h#L265>`_.
"""
def __init__(
self,
alphastd=_DEFAULT_COLOR_LIGHTING_STD,
eigval=_IMAGENET_EIGEN_VAL,
eigvec=_IMAGENET_EIGEN_VEC,
):
self.alphastd = alphastd
self.eigval = torch.tensor(eigval)
# Divide by 255 as the Lighting operation is expected to be applied
# on `img` pixels ranging between [0.0, 1.0]
self.eigvec = torch.tensor(eigvec) / 255.0
def __call__(self, img):
"""
img: (C x H x W) Tensor with values in range [0.0, 1.0]
"""
assert (
img.min() >= 0.0 and img.max() <= 1.0
), "Image should be normalized by 255 and be in range [0.0, 1.0]"
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = (
self.eigvec.type_as(img)
.clone()
.mul(alpha.view(1, 3).expand(3, 3))
.mul(self.eigval.view(1, 3).expand(3, 3))
.sum(1)
.squeeze()
)
return img.add(rgb.view(3, 1, 1).expand_as(img))
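# A minimal pipeline sketch (crop size and normalization values follow the usual
# ImageNet choices and are illustrative here): LightingTransform expects tensors
# in [0.0, 1.0], so it is placed after ToTensor and before Normalize.
def _lighting_pipeline_sketch():
    import torchvision.transforms as transforms

    return transforms.Compose(
        [
            transforms.RandomResizedCrop(224),
            transforms.ToTensor(),
            LightingTransform(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
            ),
        ]
    )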
| ClassyVision-main | classy_vision/dataset/transforms/lighting_transform.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import torch.optim
from classy_vision.generic.util import is_pos_float
from . import ClassyOptimizer, register_optimizer
@register_optimizer("rmsprop")
class RMSProp(ClassyOptimizer):
def __init__(
self,
lr: float = 0.1,
momentum: float = 0,
weight_decay: float = 0,
alpha: float = 0.99,
eps: float = 1e-8,
centered: bool = False,
) -> None:
super().__init__()
self._lr = lr
self._momentum = momentum
self._weight_decay = weight_decay
self._alpha = alpha
self._eps = eps
self._centered = centered
def prepare(self, param_groups):
self.optimizer = torch.optim.RMSprop(
param_groups,
lr=self._lr,
momentum=self._momentum,
weight_decay=self._weight_decay,
alpha=self._alpha,
eps=self._eps,
centered=self._centered,
)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "RMSProp":
"""Instantiates a RMSProp from a configuration.
Args:
config: A configuration for a RMSProp.
See :func:`__init__` for parameters expected in the config.
Returns:
A RMSProp instance.
"""
# Default params
config.setdefault("lr", 0.1)
config.setdefault("momentum", 0.0)
config.setdefault("weight_decay", 0.0)
config.setdefault("alpha", 0.99)
config.setdefault("eps", 1e-8)
config.setdefault("centered", False)
for key in ["momentum", "alpha"]:
assert (
config[key] >= 0.0 and config[key] < 1.0 and type(config[key]) == float
), f"Config must contain a '{key}' in [0, 1) for RMSProp optimizer"
assert is_pos_float(
config["eps"]
), f"Config must contain a positive 'eps' for RMSProp optimizer"
assert isinstance(
config["centered"], bool
), "Config must contain a boolean 'centered' param for RMSProp optimizer"
return cls(
lr=config["lr"],
momentum=config["momentum"],
weight_decay=config["weight_decay"],
alpha=config["alpha"],
eps=config["eps"],
centered=config["centered"],
)
| ClassyVision-main | classy_vision/optim/rmsprop.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, Optional
from classy_vision.generic.util import log_class_usage
from .param_scheduler import ConstantParamScheduler, ParamScheduler, UpdateInterval
class OptionsView:
"""Convenience object to retrieve options from the optimizer param_groups.
For instance, to get the current learning rate in the optimizer, instead of
traversing optimizer.param_groups and finding all values for the "lr" key,
you can just read options_view.lr. This means we don't need to keep an
extra copy of optimizer options (such as lr, momentum) that might become
inconsistent with the actual values used.
"""
def __init__(self, optimizer):
self.optimizer = optimizer
def __getattr__(self, name):
values = []
for pg in self.optimizer.param_groups:
if name in pg and (pg[name] not in values):
# we only add unique values to the view
# this means the length returned can vary if multiple (different)
# schedulers return the same value at a certain point
values.append(pg[name])
if len(values) == 0:
raise AttributeError
elif len(values) == 1:
return values.pop()
return values
class ClassyOptimizer(ABC):
"""
Base class for optimizers.
This wraps a :class:`torch.optim.Optimizer` instance and provides support
for parameter scheduling. Typical PyTorch optimizers are used like this:
optim = SGD(model.parameters(), lr=0.1)
but the user is responsible for updating lr over the course of training.
ClassyOptimizers extend PyTorch optimizers and allow specifying
ParamSchedulers instead:
optim = SGD()
optim.set_param_groups(model.parameters(), lr=LinearParamScheduler(1, 2))
This means that as you step through the optimizer, the learning rate will
automatically get updated with the given schedule. To access the current
learning rate value (or any other optimizer option), you can read
`optim.options_view.lr`. Similar to other Classy abstractions, you can also
instantiate ClassyOptimizers from a configuration file.
"""
def __init__(self) -> None:
"""Constructor for ClassyOptimizer.
:var options_view: provides convenient access to current values of
learning rate, momentum etc.
:var _param_group_schedulers: list of dictionaries in the param_groups
format, containing all ParamScheduler instances needed. Constant
values are converted to ConstantParamScheduler before being inserted
here.
"""
self.options_view = OptionsView(self)
self.optimizer = None
self._param_group_schedulers = None
log_class_usage("Optimizer", self.__class__)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ClassyOptimizer":
"""Instantiates a ClassyOptimizer from a configuration.
Args:
config: A configuration for the ClassyOptimizer.
Returns:
A ClassyOptimizer instance.
"""
raise NotImplementedError
@abstractmethod
def prepare(self, param_groups):
"""
Prepares the optimizer for training.
Deriving classes should initialize the underlying PyTorch
:class:`torch.optim.Optimizer` in this call. The param_groups argument
follows the same format supported by PyTorch (list of parameters, or
list of param group dictionaries).
Warning:
            This should be called only after the model has been moved to the correct
device.
"""
raise NotImplementedError
def set_param_groups(self, param_groups, **kwargs):
"""
Specifies what parameters will be optimized.
This is the public API where users of ClassyOptimizer can specify what
parameters will get optimized. Unlike PyTorch optimizers, we don't
require the list of param_groups in the constructor.
Args:
param_groups: this is either a list of Tensors (e.g.
model.parameters()) or a list of dictionaries. If a dictionary,
must contain a key "params" having the same format and semantics as
PyTorch.
"""
def cast_param_groups(params):
"""Converts a list/dict to the PyTorch param_groups format."""
if params is None:
return []
if isinstance(params, dict):
assert "params" in params
return [params]
pg = list(params)
if len(pg) == 0:
raise ValueError("optimizer got an empty parameter list")
if not isinstance(pg[0], dict):
pg = [{"params": pg}]
return pg
self._param_group_schedulers = cast_param_groups(param_groups)
# Convert constant values to constant param schedulers. Use kwargs
# values as defaults.
for pg in self._param_group_schedulers:
for k, v in kwargs.items():
if isinstance(v, (int, float)):
pg[k] = ConstantParamScheduler(v)
else:
# This is necessary to copy values from kwargs to pg
pg[k] = v
for k, v in pg.items():
if isinstance(v, (int, float)):
pg[k] = ConstantParamScheduler(v)
self.prepare(self._run_schedulers(0, None))
def _run_schedulers(self, where: float, update_interval: Optional[UpdateInterval]):
"""Goes over schedulers and gets actual values for a particular choice of where.
        If UpdateInterval is None, updates all schedulers, regardless of whether
they are configured as epoch or step. Returns a list of dictionaries in
the param_groups format."""
param_groups = []
for pg in self._param_group_schedulers:
param_group = {}
for k, v in pg.items():
if k == "params":
param_group[k] = v
elif update_interval is None or v.update_interval == update_interval:
assert isinstance(
v, ParamScheduler
), f"Elements in param_groups must inherit from ParamScheduler, found: {v}"
param_group[k] = v(where)
param_groups.append(param_group)
return param_groups
@property
def param_groups(self):
return self.optimizer.param_groups
def get_classy_state(self) -> Dict[str, Any]:
"""Get the state of the ClassyOptimizer.
The returned state is used for checkpointing.
Returns:
A state dictionary containing the state of the optimizer.
"""
# The "optim" key is redundant and only kept for checkpoint
# backwards-compatibility.
return {"optim": self.optimizer.state_dict()}
def set_classy_state(self, state: Dict[str, Any]) -> None:
"""Set the state of the ClassyOptimizer.
Args:
state_dict: The state dictionary. Must be the output of a call to
:func:`get_classy_state`.
This is used to load the state of the optimizer from a checkpoint.
"""
self.optimizer.load_state_dict(state["optim"])
def on_epoch(self, where: float) -> None:
"""
Called at the end of a phase.
        Updates the param schedule at the end of a phase, while training is in progress.
        This should be called by the task at the end of every epoch to update the
        schedule of epoch-based param schedulers (see
:class:`param_scheduler.ParamScheduler` for more information).
Args:
where: where we are in terms of training progress (output of
:func:`tasks.ClassyTask.where`)
"""
if where < 1:
# do not update the schedule on final on_epoch call when where == 1
self._update_schedule(self._run_schedulers(where, UpdateInterval.EPOCH))
def step(
self, *args, closure: Optional[Callable] = None, where: float = None
) -> None:
"""
Perform the optimization updates for a given training step.
The optimization options (such as learning rate) performed during this
step will correspond to the where value given as an argument to this
function. The exact values used can be read via the options_view
property in the optimizer.
Args:
            where: where we are in terms of training progress (output of
                :func:`ClassyTask.where`). Must be a float in the [0, 1)
                interval; this dictates parameter scheduling.
"""
if where is None:
raise RuntimeError(
"ClassyOptimizer.step requires `where` argument to be provided"
)
assert where >= 0 and where < 1, f"Invalid where: {where}"
if self._param_group_schedulers is None:
raise RuntimeError(
"ClassyOptimizer.set_param_groups must be called before step()"
)
self._update_schedule(self._run_schedulers(where, UpdateInterval.STEP))
if closure is None:
self.optimizer.step()
else:
self.optimizer.step(closure)
def _update_schedule(self, param_groups) -> None:
"""Update optimizer based on a new set of param_groups."""
assert len(self.optimizer.param_groups) == len(param_groups)
for group, new_group in zip(self.optimizer.param_groups, param_groups):
assert group["params"] == new_group["params"]
group.update(new_group)
def zero_grad(self):
"""
Clears the gradients of all optimized parameters.
See `torch.optim.Optimizer.zero_grad <https://pytorch.org/docs/stable/
optim.html#torch.optim.Optimizer.zero_grad>`_ for more information.
"""
self.optimizer.zero_grad()
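# Editor's usage sketch (hedged, not part of the original module): it shows the
# intended interplay of set_param_groups and step(where=...) described above. Numeric
# kwargs such as weight_decay become ConstantParamSchedulers, while real schedulers
# (here a LinearParamScheduler, used purely for illustration) are re-evaluated on
# every step. The SGD subclass is assumed to be available from classy_vision.optim.
if __name__ == "__main__":
    import torch
    import torch.nn as nn
    from classy_vision.optim import SGD
    from classy_vision.optim.param_scheduler import LinearParamScheduler
    model = nn.Linear(10, 2)
    optimizer = SGD(momentum=0.9)
    optimizer.set_param_groups(
        model.parameters(), lr=LinearParamScheduler(0.1, 0.0), weight_decay=1e-4
    )
    model(torch.randn(4, 10)).sum().backward()
    optimizer.step(where=0.5)  # lr evaluated halfway through training: 0.05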
| ClassyVision-main | classy_vision/optim/classy_optimizer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import torch.optim
from . import ClassyOptimizer, register_optimizer
@register_optimizer("sgd")
class SGD(ClassyOptimizer):
def __init__(
self,
larc_config: Dict[str, Any] = None,
lr: float = 0.1,
momentum: float = 0.0,
weight_decay: float = 0.0,
nesterov: bool = False,
use_larc: bool = False,
):
super().__init__()
self._lr = lr
self._momentum = momentum
self._weight_decay = weight_decay
self._nesterov = nesterov
self._use_larc = use_larc
self._larc_config = larc_config
def prepare(self, param_groups):
self.optimizer = torch.optim.SGD(
param_groups,
lr=self._lr,
nesterov=self._nesterov,
momentum=self._momentum,
weight_decay=self._weight_decay,
)
if self._use_larc:
try:
from apex.parallel.LARC import LARC
except ImportError:
raise RuntimeError("Apex needed for LARC")
self.optimizer = LARC(optimizer=self.optimizer, **self._larc_config)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "SGD":
"""Instantiates a SGD from a configuration.
Args:
config: A configuration for a SGD.
See :func:`__init__` for parameters expected in the config.
Returns:
A SGD instance.
"""
# Default params
config.setdefault("lr", 0.1)
config.setdefault("momentum", 0.0)
config.setdefault("weight_decay", 0.0)
config.setdefault("nesterov", False)
config.setdefault("use_larc", False)
config.setdefault(
"larc_config", {"clip": True, "eps": 1e-08, "trust_coefficient": 0.02}
)
assert (
config["momentum"] >= 0.0
and config["momentum"] < 1.0
and type(config["momentum"]) == float
), "Config must contain a 'momentum' in [0, 1) for SGD optimizer"
assert isinstance(
config["nesterov"], bool
), "Config must contain a boolean 'nesterov' param for SGD optimizer"
assert isinstance(
config["use_larc"], bool
), "Config must contain a boolean 'use_larc' param for SGD optimizer"
return cls(
larc_config=config["larc_config"],
lr=config["lr"],
momentum=config["momentum"],
weight_decay=config["weight_decay"],
nesterov=config["nesterov"],
use_larc=config["use_larc"],
)
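# Editor's usage sketch (hedged, not part of the original file): builds an SGD
# instance directly from a config dict, exercising the defaults set in from_config.
# The key names mirror the config keys read above.
if __name__ == "__main__":
    sgd = SGD.from_config({"lr": 0.1, "momentum": 0.9, "weight_decay": 1e-4})
    print(sgd._momentum, sgd._nesterov)  # 0.9 False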
| ClassyVision-main | classy_vision/optim/sgd.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import traceback
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
from .classy_optimizer import ClassyOptimizer
from .param_scheduler import build_param_scheduler
FILE_ROOT = Path(__file__).parent
OPTIMIZER_REGISTRY = {}
OPTIMIZER_CLASS_NAMES = set()
OPTIMIZER_REGISTRY_TB = {}
OPTIMIZER_CLASS_NAMES_TB = {}
def build_optimizer(config):
"""Builds a ClassyOptimizer from a config.
This assumes a 'name' key in the config which is used to determine what
optimizer class to instantiate. For instance, a config `{"name": "my_optimizer",
"foo": "bar"}` will find a class that was registered as "my_optimizer"
(see :func:`register_optimizer`) and call .from_config on it.
    Note that param schedulers are not built here; the optional "param_schedulers"
    key in the config (a dictionary of param scheduler configs, keyed by the
    parameter they control) is handled by :func:`build_optimizer_schedulers`,
    which adds "num_epochs" to each scheduler config and calls
    :func:`build_param_scheduler` on it.
"""
return OPTIMIZER_REGISTRY[config["name"]].from_config(config)
def build_optimizer_schedulers(config):
# create a deepcopy since we will be modifying the param scheduler config
param_scheduler_config = copy.deepcopy(config.get("param_schedulers", {}))
# build the param schedulers
for cfg in param_scheduler_config.values():
cfg["num_epochs"] = config["num_epochs"]
param_schedulers = {
param: build_param_scheduler(cfg)
for param, cfg in param_scheduler_config.items()
}
return param_schedulers
def register_optimizer(name, bypass_checks=False):
"""Registers a ClassyOptimizer subclass.
This decorator allows Classy Vision to instantiate a subclass of
ClassyOptimizer from a configuration file, even if the class itself is not
part of the Classy Vision framework. To use it, apply this decorator to a
ClassyOptimizer subclass, like this:
.. code-block:: python
@register_optimizer('my_optimizer')
class MyOptimizer(ClassyOptimizer):
...
To instantiate an optimizer from a configuration file, see
:func:`build_optimizer`."""
def register_optimizer_cls(cls):
if not bypass_checks:
if name in OPTIMIZER_REGISTRY:
msg = "Cannot register duplicate optimizer ({}). Already registered at \n{}\n"
raise ValueError(msg.format(name, OPTIMIZER_REGISTRY_TB[name]))
if not issubclass(cls, ClassyOptimizer):
raise ValueError(
"Optimizer ({}: {}) must extend ClassyVisionOptimizer".format(
name, cls.__name__
)
)
if cls.__name__ in OPTIMIZER_CLASS_NAMES:
msg = (
"Cannot register optimizer with duplicate class name({})."
+ "Previously registered at \n{}\n"
)
raise ValueError(
msg.format(cls.__name__, OPTIMIZER_CLASS_NAMES_TB[cls.__name__])
)
tb = "".join(traceback.format_stack())
OPTIMIZER_REGISTRY[name] = cls
OPTIMIZER_CLASS_NAMES.add(cls.__name__)
OPTIMIZER_REGISTRY_TB[name] = tb
OPTIMIZER_CLASS_NAMES_TB[cls.__name__] = tb
return cls
return register_optimizer_cls
# automatically import any Python files in the optim/ directory
import_all_modules(FILE_ROOT, "classy_vision.optim")
from .adam import Adam # isort:skip
from .adamw import AdamW  # isort:skip
from .adamw_mt import AdamWMT  # isort:skip
from .rmsprop import RMSProp # isort:skip
from .rmsprop_tf import RMSPropTF # isort:skip
from .sgd import SGD # isort:skip
from .zero import ZeRO # isort:skip
__all__ = [
"Adam",
"AdamW",
"AdamWMT",
"ClassyOptimizer",
"RMSProp",
"RMSPropTF",
"SGD",
"ZeRO",
"build_optimizer",
"build_optimizer_schedulers",
"register_optimizer",
]
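# Editor's usage sketch (hedged, not part of the original file): build_optimizer only
# instantiates the optimizer named in the config; per-parameter schedulers come from
# build_optimizer_schedulers, which propagates "num_epochs" into each scheduler config.
if __name__ == "__main__":
    config = {
        "name": "sgd",
        "lr": 0.1,
        "momentum": 0.9,
        "num_epochs": 90,
        "param_schedulers": {
            "lr": {"name": "cosine", "start_value": 0.1, "end_value": 0.0}
        },
    }
    optimizer = build_optimizer(config)
    schedulers = build_optimizer_schedulers(config)
    print(type(optimizer).__name__, schedulers["lr"](0.5))  # SGD 0.05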
| ClassyVision-main | classy_vision/optim/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Tuple
import torch.optim
from . import ClassyOptimizer, register_optimizer
@register_optimizer("adamw_mt")
class AdamWMT(ClassyOptimizer):
def __init__(
self,
lr: float = 0.001,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-8,
weight_decay: float = 0.01,
amsgrad: bool = False,
) -> None:
super().__init__()
self._lr = lr
self._betas = betas
self._eps = eps
self._weight_decay = weight_decay
self._amsgrad = amsgrad
def prepare(self, param_groups) -> None:
self.optimizer = torch.optim._multi_tensor.AdamW(
param_groups,
lr=self._lr,
betas=self._betas,
eps=self._eps,
weight_decay=self._weight_decay,
amsgrad=self._amsgrad,
)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "AdamWMT":
"""Instantiates a multi-tensor AdamW optimizer from a configuration.
Args:
config: A configuration for AdamWMT.
See :func:`__init__` for parameters expected in the config.
Returns:
An AdamWMT instance.
"""
# Default params
config.setdefault("lr", 0.01)
config.setdefault("betas", [0.9, 0.999])
config.setdefault("eps", 1e-8)
config.setdefault("weight_decay", 0.01)
config.setdefault("amsgrad", False)
# Check if betas is a list and convert it to a tuple
# since a JSON config can only have lists
if type(config["betas"]) == list:
config["betas"] = tuple(config["betas"])
assert (
type(config["betas"]) == tuple
and len(config["betas"]) == 2
and type(config["betas"][0]) == float
and type(config["betas"][1]) == float
and config["betas"][0] >= 0.0
and config["betas"][0] < 1.0
and config["betas"][1] >= 0.0
and config["betas"][1] < 1.0
), "Config must contain a tuple 'betas' in [0, 1) for AdamWMT optimizer"
return cls(
lr=config["lr"],
betas=config["betas"],
eps=config["eps"],
weight_decay=config["weight_decay"],
amsgrad=config["amsgrad"],
)
| ClassyVision-main | classy_vision/optim/adamw_mt.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any
import torch.distributed as dist
from classy_vision.generic.distributed_util import get_primary_rank
from classy_vision.optim import build_optimizer, ClassyOptimizer, register_optimizer
try:
from fairscale.optim.oss import OSS
fairscale_available = True
except ImportError:
fairscale_available = False
@register_optimizer("zero")
class ZeRO(ClassyOptimizer):
def __init__(self, base_optimizer: ClassyOptimizer):
"""Wraps an arbitrary :class:`ClassyOptimizer <classy_vision.optim.ClassyOptimizer>`
optimizer and shards its state as described by ZeRO_.
::
opt = OSS(params, optim=torch.optim.Adam, lr=0.01)
.. _ZeRO: https://arxiv.org/abs/1910.02054
        This instance holds all of the parameters for the model (in the .param_groups attribute)
        but relies on a wrapped optimizer, which only processes its own shard of the parameters.
        After every step, all the parameters are synced across the replicas. The fairscale
        library is used: https://github.com/facebookresearch/fairscale
"""
assert (
fairscale_available
), "The Fairscale library needs to be installed to use this optimizer."
super().__init__()
self.base_optimizer = base_optimizer
def prepare(self, param_groups) -> None:
assert (
dist.is_initialized()
), "torch.distributed needs to be initialized to prepare this rank"
def optimizer_constructor(param_groups: Any, *args, **kwargs):
            # ClassyOptimizer instances have deferred initialization, while OSS needs
            # access to the raw optimizer instance, hence the trampoline
logging.debug("Building a ZeRO enabled optimizer")
self.base_optimizer.prepare(param_groups)
return self.base_optimizer.optimizer
self.optimizer = OSS(params=param_groups, optim=optimizer_constructor)
@classmethod
def from_config(cls, config):
return cls(base_optimizer=build_optimizer(config["base_optimizer"]))
def on_epoch(self, where: float) -> None:
# Run the normal LR schedulers
super().on_epoch(where)
# Materialize the optimizer state on the replica in charge of checkpointing
logging.info("Consolidating sharded state on primary rank. Where: %d" % where)
self.consolidate_state_dict()
def consolidate_state_dict(self) -> None:
self.optimizer.consolidate_state_dict(recipient_rank=get_primary_rank())
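# Editor's config sketch (hedged, not part of the original file): ZeRO nests the base
# optimizer's config under "base_optimizer". Constructing it requires fairscale to be
# installed, and preparing it additionally requires torch.distributed to be
# initialized, so this only exercises from_config.
if __name__ == "__main__":
    zero_config = {
        "name": "zero",
        "base_optimizer": {"name": "sgd", "lr": 0.1, "momentum": 0.9},
    }
    optimizer = ZeRO.from_config(zero_config)
    print(type(optimizer.base_optimizer).__name__)  # SGD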
| ClassyVision-main | classy_vision/optim/zero.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Tuple
import torch.optim
from . import ClassyOptimizer, register_optimizer
@register_optimizer("adamw")
class AdamW(ClassyOptimizer):
def __init__(
self,
lr: float = 0.001,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-8,
weight_decay: float = 0.01,
amsgrad: bool = False,
) -> None:
super().__init__()
self._lr = lr
self._betas = betas
self._eps = eps
self._weight_decay = weight_decay
self._amsgrad = amsgrad
def prepare(self, param_groups) -> None:
self.optimizer = torch.optim.AdamW(
param_groups,
lr=self._lr,
betas=self._betas,
eps=self._eps,
weight_decay=self._weight_decay,
amsgrad=self._amsgrad,
)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "AdamW":
"""Instantiates an AdamW optimizer from a configuration.
Args:
config: A configuration for AdamW.
See :func:`__init__` for parameters expected in the config.
Returns:
An AdamW instance.
"""
# Default params
config.setdefault("lr", 0.01)
config.setdefault("betas", [0.9, 0.999])
config.setdefault("eps", 1e-8)
config.setdefault("weight_decay", 0.01)
config.setdefault("amsgrad", False)
# Check if betas is a list and convert it to a tuple
# since a JSON config can only have lists
if type(config["betas"]) == list:
config["betas"] = tuple(config["betas"])
assert (
type(config["betas"]) == tuple
and len(config["betas"]) == 2
and type(config["betas"][0]) == float
and type(config["betas"][1]) == float
and config["betas"][0] >= 0.0
and config["betas"][0] < 1.0
and config["betas"][1] >= 0.0
and config["betas"][1] < 1.0
), "Config must contain a tuple 'betas' in [0, 1) for AdamW optimizer"
return cls(
lr=config["lr"],
betas=config["betas"],
eps=config["eps"],
weight_decay=config["weight_decay"],
amsgrad=config["amsgrad"],
)
| ClassyVision-main | classy_vision/optim/adamw.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Tuple
import torch.optim
from . import ClassyOptimizer, register_optimizer
@register_optimizer("adam")
class Adam(ClassyOptimizer):
def __init__(
self,
lr: float = 0.1,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-8,
weight_decay: float = 0.0,
amsgrad: bool = False,
) -> None:
super().__init__()
self._lr = lr
self._betas = betas
self._eps = eps
self._weight_decay = weight_decay
self._amsgrad = amsgrad
def prepare(self, param_groups) -> None:
self.optimizer = torch.optim.Adam(
param_groups,
lr=self._lr,
betas=self._betas,
eps=self._eps,
weight_decay=self._weight_decay,
amsgrad=self._amsgrad,
)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "Adam":
"""Instantiates a Adam from a configuration.
Args:
config: A configuration for a Adam.
See :func:`__init__` for parameters expected in the config.
Returns:
A Adam instance.
"""
# Default params
config.setdefault("lr", 0.1)
config.setdefault("betas", [0.9, 0.999])
config.setdefault("eps", 1e-8)
config.setdefault("weight_decay", 0.0)
config.setdefault("amsgrad", False)
# Check if betas is a list and convert it to a tuple
# since a JSON config can only have lists
if type(config["betas"]) == list:
config["betas"] = tuple(config["betas"])
assert (
type(config["betas"]) == tuple
and len(config["betas"]) == 2
and type(config["betas"][0]) == float
and type(config["betas"][1]) == float
and config["betas"][0] >= 0.0
and config["betas"][0] < 1.0
and config["betas"][1] >= 0.0
and config["betas"][1] < 1.0
), "Config must contain a tuple 'betas' in [0, 1) for Adam optimizer"
return cls(
lr=config["lr"],
betas=config["betas"],
eps=config["eps"],
weight_decay=config["weight_decay"],
amsgrad=config["amsgrad"],
)
| ClassyVision-main | classy_vision/optim/adam.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import torch.optim
from classy_vision.generic.util import is_pos_float
from torch.optim import Optimizer
from . import ClassyOptimizer, register_optimizer
class RMSpropTFOptimizer(Optimizer):
r"""Implements RMSprop algorithm.
    NOTE: This code is copied from :class:`torch.optim.RMSprop`, with the epsilon
moved inside the square root to match tensorflow's implementation.
Proposed by G. Hinton in his
`course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.
The centered version first appears in `Generating Sequences
With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.
    The implementation here adds epsilon to the gradient average before taking the
    square root (note that the stock PyTorch implementation interchanges these two
    operations). The effective learning rate is thus
    :math:`\alpha/\sqrt{v + \epsilon}` where :math:`\alpha` is the scheduled learning
    rate and :math:`v` is the weighted moving average of the squared gradient.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
momentum (float, optional): momentum factor (default: 0)
alpha (float, optional): smoothing constant (default: 0.99)
        eps (float, optional): term added inside the square root in the denominator
            to improve numerical stability (default: 1e-8)
centered (bool, optional) : if ``True``, compute the centered RMSProp,
the gradient is normalized by an estimation of its variance
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
"""
def __init__(
self,
params,
lr=1e-2,
alpha=0.99,
eps=1e-8,
weight_decay=0,
momentum=0,
centered=False,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= momentum:
raise ValueError("Invalid momentum value: {}".format(momentum))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if not 0.0 <= alpha:
raise ValueError("Invalid alpha value: {}".format(alpha))
defaults = dict(
lr=lr,
momentum=momentum,
alpha=alpha,
eps=eps,
centered=centered,
weight_decay=weight_decay,
)
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault("momentum", 0)
group.setdefault("centered", False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("RMSprop does not support sparse gradients")
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
state["square_avg"] = torch.zeros_like(
p.data, memory_format=torch.preserve_format
)
if group["momentum"] > 0:
state["momentum_buffer"] = torch.zeros_like(
p.data, memory_format=torch.preserve_format
)
if group["centered"]:
state["grad_avg"] = torch.zeros_like(
p.data, memory_format=torch.preserve_format
)
square_avg = state["square_avg"]
alpha = group["alpha"]
state["step"] += 1
if group["weight_decay"] != 0:
grad = grad.add(group["weight_decay"], p.data)
square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad)
if group["centered"]:
grad_avg = state["grad_avg"]
grad_avg.mul_(alpha).add_(1 - alpha, grad)
avg = (
square_avg.addcmul(-1, grad_avg, grad_avg)
.add_(group["eps"])
.sqrt_()
)
else:
avg = square_avg.add_(group["eps"]).sqrt_()
if group["momentum"] > 0:
buf = state["momentum_buffer"]
buf.mul_(group["momentum"]).addcdiv_(grad, avg)
p.data.add_(-group["lr"], buf)
else:
p.data.addcdiv_(-group["lr"], grad, avg)
return loss
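# Editor's illustration (hedged, not part of the original file): the TF-style update
# above divides by sqrt(v + eps) rather than the stock PyTorch sqrt(v) + eps, which
# matters most when the running average v is on the order of eps.
if __name__ == "__main__":
    import math
    v, eps = 1e-8, 1e-8
    print(math.sqrt(v + eps))  # TF-style denominator, ~1.41e-4
    print(math.sqrt(v) + eps)  # stock PyTorch denominator, ~1.00e-4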
@register_optimizer("rmsprop_tf")
class RMSPropTF(ClassyOptimizer):
def __init__(
self,
lr: float = 0.1,
momentum: float = 0,
weight_decay: float = 0,
alpha: float = 0.99,
eps: float = 1e-8,
centered: bool = False,
) -> None:
super().__init__()
self._lr = lr
self._momentum = momentum
self._weight_decay = weight_decay
self._alpha = alpha
self._eps = eps
self._centered = centered
def prepare(self, param_groups):
self.optimizer = RMSpropTFOptimizer(
param_groups,
lr=self._lr,
momentum=self._momentum,
weight_decay=self._weight_decay,
alpha=self._alpha,
eps=self._eps,
centered=self._centered,
)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "RMSPropTF":
"""Instantiates a RMSPropTF from a configuration.
Args:
config: A configuration for a RMSPropTF.
See :func:`__init__` for parameters expected in the config.
Returns:
A RMSPropTF instance.
"""
# Default params
config.setdefault("lr", 0.1)
config.setdefault("momentum", 0.0)
config.setdefault("weight_decay", 0.0)
config.setdefault("alpha", 0.99)
config.setdefault("eps", 1e-8)
config.setdefault("centered", False)
for key in ["momentum", "alpha"]:
assert (
config[key] >= 0.0 and config[key] < 1.0 and type(config[key]) == float
), f"Config must contain a '{key}' in [0, 1) for RMSPropTF optimizer"
assert is_pos_float(
config["eps"]
), f"Config must contain a positive 'eps' for RMSPropTF optimizer"
assert isinstance(
config["centered"], bool
), "Config must contain a boolean 'centered' param for RMSPropTF optimizer"
return cls(
lr=config["lr"],
momentum=config["momentum"],
weight_decay=config["weight_decay"],
alpha=config["alpha"],
eps=config["eps"],
centered=config["centered"],
)
| ClassyVision-main | classy_vision/optim/rmsprop_tf.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import traceback
from pathlib import Path
from typing import Any, Dict
from classy_vision.generic.registry_utils import import_all_modules
from fvcore.common.param_scheduler import ParamScheduler
from .classy_vision_param_scheduler import ( # noqa F401
ClassyParamScheduler,
UpdateInterval,
)
FILE_ROOT = Path(__file__).parent
PARAM_SCHEDULER_REGISTRY = {}
PARAM_SCHEDULER_REGISTRY_TB = {}
def build_param_scheduler(config: Dict[str, Any]) -> ParamScheduler:
"""Builds a :class:`ParamScheduler` from a config.
This assumes a 'name' key in the config which is used to determine what
param scheduler class to instantiate. For instance, a config `{"name":
"my_scheduler", "foo": "bar"}` will find a class that was registered as
"my_scheduler" (see :func:`register_param_scheduler`) and call .from_config
on it."""
return PARAM_SCHEDULER_REGISTRY[config["name"]].from_config(config)
def register_param_scheduler(name, bypass_checks=False):
"""Registers a :class:`ParamScheduler` subclass.
This decorator allows Classy Vision to instantiate a subclass of
ParamScheduler from a configuration file, even if the class itself is not
part of the Classy Vision framework. To use it, apply this decorator to a
ParamScheduler subclass that implements a `from_config` classmethod, like
this:
.. code-block:: python
@register_param_scheduler('my_scheduler')
class MyParamScheduler(ParamScheduler):
...
To instantiate a param scheduler from a configuration file, see
:func:`build_param_scheduler`."""
def register_param_scheduler_cls(cls):
if not bypass_checks:
if name in PARAM_SCHEDULER_REGISTRY:
msg = "Cannot register duplicate param scheduler ({}). Already registered at \n{}\n"
raise ValueError(msg.format(name, PARAM_SCHEDULER_REGISTRY_TB[name]))
if not issubclass(cls, ParamScheduler):
raise ValueError(
"Param Scheduler ({}: {}) must extend ParamScheduler".format(
name, cls.__name__
)
)
tb = "".join(traceback.format_stack())
PARAM_SCHEDULER_REGISTRY[name] = cls
PARAM_SCHEDULER_REGISTRY_TB[name] = tb
return cls
return register_param_scheduler_cls
# automatically import any Python files in the optim/param_scheduler/ directory
import_all_modules(FILE_ROOT, "classy_vision.optim.param_scheduler")
from .composite_scheduler import CompositeParamScheduler, IntervalScaling # isort:skip
from .fvcore_schedulers import (
ConstantParamScheduler,
CosineParamScheduler,
LinearParamScheduler,
MultiStepParamScheduler,
PolynomialDecayParamScheduler,
StepParamScheduler,
StepWithFixedGammaParamScheduler,
) # isort:skip
__all__ = [
"ParamScheduler",
"ClassyParamScheduler",
"CompositeParamScheduler",
"ConstantParamScheduler",
"CosineParamScheduler",
"LinearParamScheduler",
"MultiStepParamScheduler",
"PolynomialDecayParamScheduler",
"StepParamScheduler",
"UpdateInterval",
"IntervalScaling",
"StepWithFixedGammaParamScheduler",
"build_param_scheduler",
"register_param_scheduler",
]
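# Editor's usage sketch (hedged, not part of the original file): schedulers are plain
# callables mapping training progress ("where" in [0, 1)) to a parameter value.
if __name__ == "__main__":
    cosine = build_param_scheduler(
        {"name": "cosine", "start_value": 0.1, "end_value": 0.0}
    )
    print(cosine(0.0), cosine(0.5))  # 0.1 at the start, 0.05 halfway through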
| ClassyVision-main | classy_vision/optim/param_scheduler/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import auto, Enum
from typing import Any, Dict, Sequence, Union
from fvcore.common import param_scheduler
from . import build_param_scheduler, register_param_scheduler, UpdateInterval
class IntervalScaling(Enum):
RESCALED = auto()
FIXED = auto()
@register_param_scheduler("composite")
class CompositeParamScheduler(param_scheduler.CompositeParamScheduler):
__doc__ = param_scheduler.CompositeParamScheduler.__doc__
def __init__(
self,
schedulers: Sequence[param_scheduler.ParamScheduler],
lengths: Sequence[float],
interval_scaling: Sequence[Union[IntervalScaling, str]],
update_interval: UpdateInterval = UpdateInterval.STEP,
):
scaling_name = {
IntervalScaling.RESCALED: "rescaled",
IntervalScaling.FIXED: "fixed",
}
interval_scaling = [
scaling_name[s] if isinstance(s, IntervalScaling) else s
for s in interval_scaling
]
super().__init__(schedulers, lengths, interval_scaling)
self.update_interval = update_interval
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "CompositeParamScheduler":
"""Instantiates a CompositeParamScheduler from a configuration.
Args:
config: A configuration for a CompositeParamScheduler.
See :func:`__init__` for parameters expected in the config.
Returns:
A CompositeParamScheduler instance.
"""
assert (
"schedulers" in config and "lengths" in config
), "Composite scheduler needs both a list of schedulers and lengths"
interval_scaling = []
if "interval_scaling" in config:
assert len(config["schedulers"]) == len(
config["interval_scaling"]
), "Schedulers and interval scaling must be the same length"
for interval_scale in config["interval_scaling"]:
assert interval_scale in {
"fixed",
"rescaled",
}, "Choices for interval scaling are 'fixed' or 'rescaled'"
interval_scaling.append(IntervalScaling[interval_scale.upper()])
else:
interval_scaling = [IntervalScaling.RESCALED] * len(config["schedulers"])
if "num_epochs" in config: # Propagate value to intermediate schedulers
config["schedulers"] = [
dict(schedule, **{"num_epochs": config["num_epochs"]})
for schedule in config["schedulers"]
]
return cls(
schedulers=[
build_param_scheduler(scheduler) for scheduler in config["schedulers"]
],
lengths=config["lengths"],
update_interval=UpdateInterval.from_config(config, UpdateInterval.STEP),
interval_scaling=interval_scaling,
)
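# Editor's config sketch (hedged, not part of the original file): a short linear
# warmup followed by a cosine decay, with both sub-schedulers rescaled to their
# slice of training.
if __name__ == "__main__":
    warmup_cosine = CompositeParamScheduler.from_config(
        {
            "name": "composite",
            "schedulers": [
                {"name": "linear", "start_value": 0.01, "end_value": 0.1},
                {"name": "cosine", "start_value": 0.1, "end_value": 0.0},
            ],
            "lengths": [0.1, 0.9],
            "interval_scaling": ["rescaled", "rescaled"],
            "update_interval": "step",
        }
    )
    print(warmup_cosine(0.05))  # 0.055: halfway through the warmup ramp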
| ClassyVision-main | classy_vision/optim/param_scheduler/composite_scheduler.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import Any, Dict
from classy_vision.generic.util import log_class_usage
from fvcore.common import param_scheduler
class UpdateInterval(Enum):
"""
Enum for specifying update frequency for scheduler.
Attributes:
EPOCH (str): Update param before each epoch
STEP (str): Update param before each optimizer step
"""
EPOCH = "epoch"
STEP = "step"
@classmethod
def from_config(
cls, config: Dict[str, Any], default: "UpdateInterval" = None
) -> "UpdateInterval":
"""Fetches the update interval from a config
Args:
config: The config for the parameter scheduler
default: The value to use if the config doesn't specify an update interval.
If not set, STEP is used.
"""
if default is None:
default = cls.STEP
if "update_interval" not in config:
return default
if config.get("update_interval").lower() not in ["step", "epoch"]:
raise ValueError("Choices for update interval are 'step' or 'epoch'")
return cls[config["update_interval"].upper()]
class ClassyParamScheduler(param_scheduler.ParamScheduler):
"""
Base class for Classy parameter schedulers.
Attributes:
update_interval: Specifies how often to update each parameter
(before each epoch or each batch)
"""
def __init__(self, update_interval: UpdateInterval):
"""
Constructor for ClassyParamScheduler
Args:
update_interval: Specifies the frequency of the param updates
"""
self.update_interval = update_interval
log_class_usage("ParamScheduler", self.__class__)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ClassyParamScheduler":
"""Instantiates a ClassyParamScheduler from a configuration.
Args:
config: A configuration for the ClassyParamScheduler.
Returns:
A ClassyParamScheduler instance.
"""
raise NotImplementedError
| ClassyVision-main | classy_vision/optim/param_scheduler/classy_vision_param_scheduler.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import inspect
from typing import Any, Dict
from fvcore.common import param_scheduler
from . import ClassyParamScheduler, register_param_scheduler, UpdateInterval
"""
The implementations of the scheduler classes have been moved to fvcore.
This file creates wrappers around the fvcore implementations to add back
Classy Vision functionality.
"""
def _create_classy_scheduler_class(base_class, register_name, default_update_interval):
"""
Add back the following functionalities to the fvcore schedulers:
1. Add `from_config` classmethod that constructs the scheduler from a dict
2. Add `update_interval` attribute
3. Add the class to the scheduler registry
"""
def from_config(cls, config: Dict[str, Any]) -> param_scheduler.ParamScheduler:
config = copy.copy(config)
assert register_name == config.pop("name")
update_interval = UpdateInterval.from_config(config, default_update_interval)
param_names = inspect.signature(base_class).parameters.keys()
# config might contain values that are not used by constructor
kwargs = {p: config[p] for p in param_names if p in config}
# This argument was renamed when moving to fvcore
if "num_updates" in param_names and "num_epochs" in config:
kwargs["num_updates"] = config["num_epochs"]
scheduler = cls(**kwargs)
scheduler.update_interval = update_interval
return scheduler
cls = type(
base_class.__name__,
(base_class, ClassyParamScheduler),
{
"from_config": classmethod(from_config),
"update_interval": default_update_interval,
},
)
if hasattr(base_class, "__doc__"):
cls.__doc__ = base_class.__doc__.replace("num_updates", "num_epochs")
register_param_scheduler(register_name)(cls)
return cls
ConstantParamScheduler = _create_classy_scheduler_class(
param_scheduler.ConstantParamScheduler,
"constant",
default_update_interval=UpdateInterval.EPOCH,
)
CosineParamScheduler = _create_classy_scheduler_class(
param_scheduler.CosineParamScheduler,
"cosine",
default_update_interval=UpdateInterval.STEP,
)
LinearParamScheduler = _create_classy_scheduler_class(
param_scheduler.LinearParamScheduler,
"linear",
default_update_interval=UpdateInterval.STEP,
)
MultiStepParamScheduler = _create_classy_scheduler_class(
param_scheduler.MultiStepParamScheduler,
"multistep",
default_update_interval=UpdateInterval.EPOCH,
)
PolynomialDecayParamScheduler = _create_classy_scheduler_class(
param_scheduler.PolynomialDecayParamScheduler,
"polynomial",
default_update_interval=UpdateInterval.STEP,
)
StepParamScheduler = _create_classy_scheduler_class(
param_scheduler.StepParamScheduler,
"step",
default_update_interval=UpdateInterval.EPOCH,
)
StepWithFixedGammaParamScheduler = _create_classy_scheduler_class(
param_scheduler.StepWithFixedGammaParamScheduler,
"step_with_fixed_gamma",
default_update_interval=UpdateInterval.STEP,
)
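# Editor's usage sketch (hedged, not part of the original file): the generated
# wrappers accept "num_epochs" in their configs and map it onto fvcore's
# "num_updates" argument; the default update interval comes from the registration.
if __name__ == "__main__":
    step = StepParamScheduler.from_config(
        {"name": "step", "values": [0.1, 0.01, 0.001], "num_epochs": 90}
    )
    print(step(0.5), step.update_interval)  # 0.01 UpdateInterval.EPOCH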
| ClassyVision-main | classy_vision/optim/param_scheduler/fvcore_schedulers.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections.abc import Sequence
from typing import Any, Dict, List, Optional
import torch.nn as nn
from classy_vision.generic.util import is_pos_int
from classy_vision.heads import ClassyHead, register_head
class FullyConvolutionalLinear(nn.Module):
def __init__(self, dim_in, num_classes, act_func="softmax"):
super(FullyConvolutionalLinear, self).__init__()
# Perform FC in a fully convolutional manner. The FC layer will be
# initialized with a different std comparing to convolutional layers.
self.projection = nn.Linear(dim_in, num_classes, bias=True)
# Softmax for evaluation and testing.
if act_func == "softmax":
self.act = nn.Softmax(dim=4)
elif act_func == "sigmoid":
self.act = nn.Sigmoid()
elif act_func == "identity":
# for some tasks eg. regression, we don't want an activation function
self.act = nn.Identity()
else:
raise NotImplementedError(
"{} is not supported as an activation" "function.".format(act_func)
)
def forward(self, x):
# (N, C, T, H, W) -> (N, T, H, W, C).
x = x.permute((0, 2, 3, 4, 1))
x = self.projection(x)
        # Performs fully convolutional inference.
if not self.training:
x = self.act(x)
x = x.mean([1, 2, 3])
x = x.flatten(start_dim=1)
return x
@register_head("fully_convolutional_linear")
class FullyConvolutionalLinearHead(ClassyHead):
"""
This head defines a 3d average pooling layer (:class:`torch.nn.AvgPool3d` or
:class:`torch.nn.AdaptiveAvgPool3d` if pool_size is None) followed by a fully
convolutional linear layer. This layer performs a fully-connected projection
during training, when the input size is 1x1x1.
It performs a convolutional projection during testing when the input size
is larger than 1x1x1.
"""
def __init__(
self,
unique_id: str,
num_classes: int,
in_plane: int,
pool_size: Optional[List[int]],
activation_func: str,
use_dropout: Optional[bool] = None,
dropout_ratio: float = 0.5,
):
"""
Constructor for FullyConvolutionalLinearHead.
Args:
unique_id: A unique identifier for the head. Multiple instances of
the same head might be attached to a model, and unique_id is used
to refer to them.
num_classes: Number of classes for the head.
in_plane: Input size for the fully connected layer.
pool_size: Optional kernel size for the 3d pooling layer. If None, use
:class:`torch.nn.AdaptiveAvgPool3d` with output size (1, 1, 1).
activation_func: activation function to use. 'softmax': applies
softmax on the output. 'sigmoid': applies sigmoid on the output.
use_dropout: Whether to apply dropout after the pooling layer.
dropout_ratio: dropout ratio.
"""
super().__init__(unique_id, num_classes)
if pool_size is not None:
self.final_avgpool = nn.AvgPool3d(pool_size, stride=1)
else:
self.final_avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
if use_dropout:
self.dropout = nn.Dropout(p=dropout_ratio)
# we separate average pooling from the fully-convolutional linear projection
        # because for multi-path models such as the SlowFast model, the input can be
        # more than one tensor. In such cases, we can define a new head to combine multiple
        # tensors via concat or addition, do average pooling, but still reuse
# FullyConvolutionalLinear inside of it.
self.head_fcl = FullyConvolutionalLinear(
in_plane, num_classes, act_func=activation_func
)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "FullyConvolutionalLinearHead":
"""Instantiates a FullyConvolutionalLinearHead from a configuration.
Args:
config: A configuration for a FullyConvolutionalLinearHead.
See :func:`__init__` for parameters expected in the config.
Returns:
A FullyConvolutionalLinearHead instance.
"""
required_args = ["in_plane", "num_classes"]
for arg in required_args:
assert arg in config, "argument %s is required" % arg
config.update({"activation_func": config.get("activation_func", "softmax")})
config.update({"use_dropout": config.get("use_dropout", False)})
pool_size = config.get("pool_size", None)
if pool_size is not None:
assert isinstance(pool_size, Sequence) and len(pool_size) == 3
for pool_size_dim in pool_size:
assert is_pos_int(pool_size_dim)
assert is_pos_int(config["in_plane"])
assert is_pos_int(config["num_classes"])
num_classes = config.get("num_classes", None)
in_plane = config["in_plane"]
return cls(
config["unique_id"],
num_classes,
in_plane,
pool_size,
config["activation_func"],
config["use_dropout"],
config.get("dropout_ratio", 0.5),
)
def forward(self, x):
out = self.final_avgpool(x)
if hasattr(self, "dropout"):
out = self.dropout(out)
out = self.head_fcl(out)
return out
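# Editor's shape sketch (hedged, not part of the original file): in training mode the
# head maps a (N, C, T, H, W) clip feature map to (N, num_classes) class scores.
if __name__ == "__main__":
    import torch
    head = FullyConvolutionalLinearHead(
        "default_head",
        num_classes=400,
        in_plane=2048,
        pool_size=None,
        activation_func="softmax",
    )
    clip_features = torch.randn(2, 2048, 4, 7, 7)  # (N, C, T, H, W)
    print(head(clip_features).shape)  # torch.Size([2, 400])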
| ClassyVision-main | classy_vision/heads/fully_convolutional_linear_head.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import traceback
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
from .classy_head import ClassyHead
FILE_ROOT = Path(__file__).parent
HEAD_REGISTRY = {}
HEAD_CLASS_NAMES = set()
HEAD_REGISTRY_TB = {}
HEAD_CLASS_NAMES_TB = {}
def register_head(name, bypass_checks=False):
"""Registers a ClassyHead subclass.
This decorator allows Classy Vision to instantiate a subclass of
ClassyHead from a configuration file, even if the class itself is not
part of the Classy Vision framework. To use it, apply this decorator to a
ClassyHead subclass, like this:
.. code-block:: python
@register_head("my_head")
class MyHead(ClassyHead):
...
To instantiate a head from a configuration file, see
:func:`build_head`."""
def register_head_cls(cls):
if not bypass_checks:
if name in HEAD_REGISTRY:
msg = (
"Cannot register duplicate head ({}). Already registered at \n{}\n"
)
raise ValueError(msg.format(name, HEAD_REGISTRY_TB[name]))
if not issubclass(cls, ClassyHead):
raise ValueError(
"Head ({}: {}) must extend ClassyHead".format(name, cls.__name__)
)
if cls.__name__ in HEAD_CLASS_NAMES:
msg = (
"Cannot register head with duplicate class name({})."
+ "Previously registered at \n{}\n"
)
raise ValueError(
msg.format(cls.__name__, HEAD_CLASS_NAMES_TB[cls.__name__])
)
tb = "".join(traceback.format_stack())
HEAD_REGISTRY[name] = cls
HEAD_CLASS_NAMES.add(cls.__name__)
HEAD_REGISTRY_TB[name] = tb
HEAD_CLASS_NAMES_TB[cls.__name__] = tb
return cls
return register_head_cls
def build_head(config):
"""Builds a ClassyHead from a config.
This assumes a 'name' key in the config which is used to determine what
head class to instantiate. For instance, a config `{"name": "my_head",
"foo": "bar"}` will find a class that was registered as "my_head"
(see :func:`register_head`) and call .from_config on it."""
assert "name" in config, "Expect name in config"
assert "unique_id" in config, "Expect a global unique id in config"
assert config["name"] in HEAD_REGISTRY, "unknown head {}".format(config["name"])
name = config["name"]
head_config = copy.deepcopy(config)
del head_config["name"]
return HEAD_REGISTRY[name].from_config(head_config)
# automatically import any Python files in the heads/ directory
import_all_modules(FILE_ROOT, "classy_vision.heads")
from .fully_connected_head import FullyConnectedHead # isort:skip
from .fully_convolutional_linear_head import FullyConvolutionalLinearHead # isort:skip
from .identity_head import IdentityHead # isort:skip
from .vision_transformer_head import VisionTransformerHead # isort:skip
__all__ = [
"ClassyHead",
"FullyConnectedHead",
"FullyConvolutionalLinearHead",
"IdentityHead",
"VisionTransformerHead",
"build_head",
"register_head",
]
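# Editor's usage sketch (hedged, not part of the original file): build_head dispatches
# on "name" and forwards the rest of the config to the registered head's from_config.
if __name__ == "__main__":
    head = build_head(
        {
            "name": "fully_connected",
            "unique_id": "default_head",
            "num_classes": 1000,
            "in_plane": 2048,
        }
    )
    print(type(head).__name__)  # FullyConnectedHead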
| ClassyVision-main | classy_vision/heads/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional
import torch.nn as nn
from classy_vision.generic.util import get_torch_version, is_pos_int
from classy_vision.heads import ClassyHead, register_head
NORMALIZE_L2 = "l2"
RELU_IN_PLACE = True
@register_head("fully_connected")
class FullyConnectedHead(ClassyHead):
"""This head defines a 2d average pooling layer
(:class:`torch.nn.AdaptiveAvgPool2d`) followed by a fully connected
layer (:class:`torch.nn.Linear`).
"""
def __init__(
self,
unique_id: str,
num_classes: Optional[int],
in_plane: int,
conv_planes: Optional[int] = None,
activation: Optional[nn.Module] = None,
zero_init_bias: bool = False,
normalize_inputs: Optional[str] = None,
):
"""Constructor for FullyConnectedHead
Args:
unique_id: A unique identifier for the head. Multiple instances of
the same head might be attached to a model, and unique_id is used
to refer to them.
num_classes: Number of classes for the head. If None, then the fully
connected layer is not applied.
in_plane: Input size for the fully connected layer.
conv_planes: If specified, applies a 1x1 convolutional layer to the input
before passing it to the average pooling layer. The convolution is also
followed by a BatchNorm and an activation.
activation: The activation to be applied after the convolutional layer.
Unused if `conv_planes` is not specified.
zero_init_bias: Zero initialize the bias
normalize_inputs: If specified, normalize the inputs after performing
average pooling using the specified method. Supports "l2" normalization.
"""
super().__init__(unique_id, num_classes)
assert num_classes is None or is_pos_int(num_classes)
assert is_pos_int(in_plane)
if conv_planes is not None and activation is None:
raise TypeError("activation cannot be None if conv_planes is specified")
if normalize_inputs is not None and normalize_inputs != NORMALIZE_L2:
raise ValueError(
f"Unsupported value for normalize_inputs: {normalize_inputs}"
)
self.conv = (
nn.Conv2d(in_plane, conv_planes, kernel_size=1, bias=False)
if conv_planes
else None
)
self.bn = nn.BatchNorm2d(conv_planes) if conv_planes else None
self.activation = activation
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = (
None
if num_classes is None
else nn.Linear(
in_plane if conv_planes is None else conv_planes, num_classes
)
)
self.normalize_inputs = normalize_inputs
        if zero_init_bias and self.fc is not None:
            self.fc.bias.data.zero_()
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "FullyConnectedHead":
"""Instantiates a FullyConnectedHead from a configuration.
Args:
config: A configuration for a FullyConnectedHead.
See :func:`__init__` for parameters expected in the config.
Returns:
A FullyConnectedHead instance.
"""
num_classes = config.get("num_classes", None)
in_plane = config["in_plane"]
silu = None if get_torch_version() < [1, 7] else nn.SiLU()
activation = {"relu": nn.ReLU(RELU_IN_PLACE), "silu": silu}[
config.get("activation", "relu")
]
if activation is None:
raise RuntimeError("SiLU activation is only supported since PyTorch 1.7")
return cls(
config["unique_id"],
num_classes,
in_plane,
conv_planes=config.get("conv_planes", None),
activation=activation,
zero_init_bias=config.get("zero_init_bias", False),
normalize_inputs=config.get("normalize_inputs", None),
)
def forward(self, x):
out = x
if self.conv is not None:
out = self.activation(self.bn(self.conv(x)))
out = self.avgpool(out)
out = out.flatten(start_dim=1)
if self.normalize_inputs is not None:
if self.normalize_inputs == NORMALIZE_L2:
out = nn.functional.normalize(out, p=2.0, dim=1)
if self.fc is not None:
out = self.fc(out)
return out
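# Editor's shape sketch (hedged, not part of the original file): the head pools a
# (N, C, H, W) trunk feature map to (N, C) and projects it to (N, num_classes).
if __name__ == "__main__":
    import torch
    head = FullyConnectedHead("default_head", num_classes=1000, in_plane=2048)
    trunk_features = torch.randn(8, 2048, 7, 7)
    print(head(trunk_features).shape)  # torch.Size([8, 1000])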
| ClassyVision-main | classy_vision/heads/fully_connected_head.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional
import torch.nn as nn
class ClassyHead(nn.Module):
"""
Base class for heads that can be attached to :class:`ClassyModel`.
A head is a regular :class:`torch.nn.Module` that can be attached to a
pretrained model. This enables a form of transfer learning: utilizing a
model trained for one dataset to extract features that can be used for
other problems. A head must be attached to a :class:`models.ClassyBlock`
within a :class:`models.ClassyModel`.
"""
def __init__(
self, unique_id: Optional[str] = None, num_classes: Optional[int] = None
):
"""
Constructs a ClassyHead.
Args:
unique_id: A unique identifier for the head. Multiple instances of
the same head might be attached to a model, and unique_id is used
to refer to them.
num_classes: Number of classes for the head.
"""
super().__init__()
self.unique_id = unique_id or self.__class__.__name__
self.num_classes = num_classes
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ClassyHead":
"""Instantiates a ClassyHead from a configuration.
Args:
config: A configuration for the ClassyHead.
Returns:
A ClassyHead instance.
"""
raise NotImplementedError
def forward(self, x):
"""
Performs inference on the head.
This is a regular PyTorch method, refer to :class:`torch.nn.Module` for
more details
"""
raise NotImplementedError
| ClassyVision-main | classy_vision/heads/classy_head.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Vision Transformer head implementation from https://arxiv.org/abs/2010.11929.
References:
https://github.com/google-research/vision_transformer
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import copy
from collections import OrderedDict
from typing import Optional
import torch.nn as nn
from classy_vision.heads import ClassyHead, register_head
from ..models.lecun_normal_init import lecun_normal_init
NORMALIZE_L2 = "l2"
@register_head("vision_transformer_head")
class VisionTransformerHead(ClassyHead):
def __init__(
self,
unique_id: str,
in_plane: int,
num_classes: Optional[int] = None,
hidden_dim: Optional[int] = None,
normalize_inputs: Optional[str] = None,
):
"""
Args:
unique_id: A unique identifier for the head
in_plane: Input size for the fully connected layer
num_classes: Number of output classes for the head
hidden_dim: If not None, a hidden layer with the specific dimension is added
normalize_inputs: If specified, normalize the inputs using the specified
method. Supports "l2" normalization.
"""
super().__init__(unique_id, num_classes)
if normalize_inputs is not None and normalize_inputs != NORMALIZE_L2:
raise ValueError(
f"Unsupported value for normalize_inputs: {normalize_inputs}"
)
if num_classes is None:
layers = []
elif hidden_dim is None:
layers = [("head", nn.Linear(in_plane, num_classes))]
else:
layers = [
("pre_logits", nn.Linear(in_plane, hidden_dim)),
("act", nn.Tanh()),
("head", nn.Linear(hidden_dim, num_classes)),
]
self.layers = nn.Sequential(OrderedDict(layers))
self.normalize_inputs = normalize_inputs
self.init_weights()
def init_weights(self):
if hasattr(self.layers, "pre_logits"):
lecun_normal_init(
self.layers.pre_logits.weight, fan_in=self.layers.pre_logits.in_features
)
nn.init.zeros_(self.layers.pre_logits.bias)
if hasattr(self.layers, "head"):
nn.init.zeros_(self.layers.head.weight)
nn.init.zeros_(self.layers.head.bias)
@classmethod
def from_config(cls, config):
config = copy.deepcopy(config)
return cls(**config)
def forward(self, x):
if self.normalize_inputs is not None:
if self.normalize_inputs == NORMALIZE_L2:
x = nn.functional.normalize(x, p=2.0, dim=1)
return self.layers(x)
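# Editor's shape sketch (hedged, not part of the original file): the head projects the
# transformer's class-token embedding, optionally through a tanh "pre_logits" layer.
if __name__ == "__main__":
    import torch
    head = VisionTransformerHead(
        "default_head", in_plane=768, num_classes=1000, hidden_dim=3072
    )
    cls_token = torch.randn(4, 768)
    print(head(cls_token).shape)  # torch.Size([4, 1000])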
| ClassyVision-main | classy_vision/heads/vision_transformer_head.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
from classy_vision.heads import ClassyHead, register_head
@register_head("identity")
class IdentityHead(ClassyHead):
"""This head returns the input without changing it. This can
be attached to a model, if the output of the model is the
desired result.
"""
def forward(self, x):
return x
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "IdentityHead":
"""Instantiates a IdentityHead from a configuration.
Args:
config: A configuration for a IdentityHead.
See :func:`__init__` for parameters expected in the config.
Returns:
A IdentityHead instance.
"""
return cls(config["unique_id"])
| ClassyVision-main | classy_vision/heads/identity_head.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import types
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from classy_vision.generic.util import log_class_usage
from classy_vision.heads.classy_head import ClassyHead
from .classy_block import ClassyBlock
class _ClassyModelMeta(type):
"""Metaclass to return a ClassyModel instance wrapped by a ClassyModelWrapper."""
def __call__(cls, *args, **kwargs):
"""Override the __call__ function for the metaclass.
This is called when a new instance of a class with this class as its metaclass
is initialized. For example -
.. code-block:: python
class MyClass(metaclass=_ClassyModelMeta):
wrapper_cls = MyWrapper
my_class_instance = MyClass() # returned instance will be a MyWrapper
"""
classy_model = super().__call__(*args, **kwargs)
wrapper_cls = cls.wrapper_cls
if wrapper_cls is not None:
# wrap the ClassyModel instance with a wrapper class and return that instead
classy_model = wrapper_cls(classy_model)
return classy_model
class _ClassyModelMethod:
"""Class to override ClassyModel method calls to ensure the wrapper is returned.
This helps override calls like model.cuda() which return self, to return the
wrapper instead of the underlying classy_model.
"""
def __init__(self, wrapper, classy_method):
self.wrapper = wrapper
self.classy_method = classy_method
def __call__(self, *args, **kwargs):
ret_val = self.classy_method(*args, **kwargs)
if ret_val is self.wrapper.classy_model:
# if the method is returning the classy_model, return the wrapper instead
ret_val = self.wrapper
return ret_val
class ClassyModelWrapper:
"""Base ClassyModel wrapper class.
This class acts as a thin pass through wrapper which lets users modify the behavior
of ClassyModels, such as changing the return output of the forward() call.
This wrapper acts as a ClassyModel by itself and the underlying model can be
accessed by the `classy_model` attribute.
"""
# TODO: Make this torchscriptable by inheriting from nn.Module / ClassyModel
def __init__(self, classy_model):
self.classy_model = classy_model
def __getattr__(self, name):
if name != "classy_model" and hasattr(self, "classy_model"):
attr = getattr(self.classy_model, name)
if isinstance(attr, types.MethodType):
attr = _ClassyModelMethod(self, attr)
return attr
else:
return super().__getattr__(name)
def __setattr__(self, name, value):
# __setattr__ works differently from __getattr__ and is called even when the
# attribute is a method, like forward.
if name not in ["classy_model", "forward"] and hasattr(self, "classy_model"):
setattr(self.classy_model, name, value)
else:
super().__setattr__(name, value)
def __delattr__(self, name):
if name != "classy_model" and hasattr(self, "classy_model"):
delattr(self.classy_model, name)
else:
return super().__delattr__(name)
def forward(self, *args, **kwargs):
return self.classy_model(*args, **kwargs)
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
def __repr__(self):
return f"Classy {type(self.classy_model)}:\n{self.classy_model.__repr__()}"
@property
def __class__(self):
return self.classy_model.__class__
class ClassyModelHeadExecutorWrapper(ClassyModelWrapper):
"""Wrapper which changes the forward to also execute and return head output."""
def forward(self, *args, **kwargs):
out = self.classy_model(*args, **kwargs)
if len(self._heads) == 0:
return out
# heads have been attached to the model, return their output instead
head_outputs = self.execute_heads()
if len(head_outputs) == 1:
return list(head_outputs.values())[0]
else:
return head_outputs
class ClassyModel(nn.Module, metaclass=_ClassyModelMeta):
"""Base class for models in classy vision.
A model refers either to a specific architecture (e.g. ResNet50) or a
family of architectures (e.g. ResNet). Models can take arguments in the
constructor in order to configure different behavior (e.g.
hyperparameters). Classy Models must implement :func:`from_config` in
order to allow instantiation from a configuration file. Like regular
PyTorch models, Classy Models must also implement :func:`forward`, where
the bulk of the inference logic lives.
Classy Models also have some advanced functionality for production
fine-tuning systems. For example, we allow users to train a trunk
model and then attach heads to the model via the attachable
blocks. Making your model support the trunk-heads paradigm is
completely optional.
NOTE: Advanced users can modify the behavior of their implemented models by
specifying the `wrapper_cls` class attribute, which should be a class
derived from :class:`ClassyModelWrapper` (see the documentation for that class
for more information). Users can set it to `None` to skip wrapping their model
and to make their model torchscriptable. This is set to
:class:`ClassyModelHeadExecutorWrapper` by default.
"""
wrapper_cls = ClassyModelHeadExecutorWrapper
_attachable_block_names: List[str]
__jit_unused_properties__ = ["attachable_block_names", "head_outputs"]
def __init__(self):
"""Constructor for ClassyModel."""
super().__init__()
self._attachable_blocks = {}
self._attachable_block_names = []
self._heads = nn.ModuleDict()
self._head_outputs = {}
log_class_usage("Model", self.__class__)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ClassyModel":
"""Instantiates a ClassyModel from a configuration.
Args:
config: A configuration for the ClassyModel.
Returns:
A ClassyModel instance.
"""
raise NotImplementedError
@classmethod
def from_model(
cls,
model: nn.Module,
input_shape: Optional[Tuple] = None,
model_depth: Optional[int] = None,
):
"""Converts an :class:`nn.Module` to a `ClassyModel`.
Args:
model: The model to convert
For the remaining args, look at the corresponding properties of ClassyModel
Returns:
A ClassyModel instance.
"""
return _ClassyModelAdapter(
model, input_shape=input_shape, model_depth=model_depth
)
@classmethod
def from_checkpoint(cls, checkpoint):
from . import build_model
model = build_model(checkpoint["input_args"]["config"]["model"])
model.set_classy_state(checkpoint["classy_state_dict"]["base_model"])
return model
def get_classy_state(self, deep_copy=False):
"""Get the state of the ClassyModel.
The returned state is used for checkpointing.
NOTE: For advanced users, the structure of the returned dict is -
`{"model": {"trunk": trunk_state, "heads": heads_state}}`.
The trunk state is the state of the model when no heads are attached.
Args:
deep_copy: If True, creates a deep copy of the state Dict. Otherwise, the
returned Dict's state will be tied to the object's.
Returns:
A state dictionary containing the state of the model.
"""
attached_heads = self.get_heads()
# clear heads to get the state of the model without any heads, which we refer to
# as the trunk state. If the model doesn't have heads attached, all of the
# model's state lives in the trunk.
self.clear_heads()
trunk_state_dict = self.state_dict()
self.set_heads(attached_heads)
head_state_dict = {}
for block, heads in attached_heads.items():
head_state_dict[block] = {
head.unique_id: head.state_dict() for head in heads
}
model_state_dict = {
"model": {"trunk": trunk_state_dict, "heads": head_state_dict}
}
if deep_copy:
model_state_dict = copy.deepcopy(model_state_dict)
return model_state_dict
def load_head_states(self, state, strict=True):
"""Load only the state (weights) of the heads.
For a trunk-heads model, this function allows the user to
only update the head state of the model. Useful for attaching
fine-tuned heads to a pre-trained trunk.
Args:
state (Dict): Contains the classy model state under key "model"
"""
for block_name, head_states in state["model"]["heads"].items():
for head_name, head_state in head_states.items():
self._heads[block_name][head_name].load_state_dict(head_state, strict)
def set_classy_state(self, state, strict=True):
"""Set the state of the ClassyModel.
Args:
state_dict: The state dictionary. Must be the output of a call to
:func:`get_classy_state`.
This is used to load the state of the model from a checkpoint.
"""
# load the state for heads
self.load_head_states(state, strict)
# clear the heads to set the trunk's state. This is done because when heads are
# attached to modules, we wrap them by ClassyBlocks, thereby changing the
# structure of the model and its state dict. So, the trunk state is always
# fetched / set when there are no blocks attached.
attached_heads = self.get_heads()
self.clear_heads()
self.load_state_dict(state["model"]["trunk"], strict)
# set the heads back again
self.set_heads(attached_heads)
def forward(self, x):
"""
        Perform computation of blocks in the order defined in get_blocks.
"""
raise NotImplementedError
def extract_features(self, x):
"""
Extract features from the model.
Derived classes can implement this method to extract the features before
applying the final fully connected layer.
"""
return self.forward(x)
def _build_attachable_block(self, name, module):
"""
Wrap the module so that heads can be attached to it.
"""
if name in self._attachable_blocks:
raise ValueError("Found duplicated block name {}".format(name))
block = ClassyBlock(name, module)
self._attachable_blocks[name] = block
self._attachable_block_names.append(name)
return block
@property
def attachable_block_names(self):
"""
Return names of all attachable blocks.
"""
return self._attachable_block_names
def clear_heads(self):
# clear all existing heads
self._heads.clear()
self._head_outputs.clear()
self._strip_classy_blocks(self)
self._attachable_blocks = {}
self._attachable_block_names = []
def _strip_classy_blocks(self, module):
for name, child_module in module.named_children():
if isinstance(child_module, ClassyBlock):
module.add_module(name, child_module.wrapped_module())
self._strip_classy_blocks(child_module)
def _make_module_attachable(self, module, module_name):
found = False
for name, child_module in module.named_children():
if name == module_name:
module.add_module(
name, self._build_attachable_block(name, child_module)
)
found = True
# do not exit - we will check all possible modules and raise an
# exception if there are duplicates
found_in_child = self._make_module_attachable(child_module, module_name)
found = found or found_in_child
return found
def set_heads(self, heads: Dict[str, List[ClassyHead]]):
"""Attach all the heads to corresponding blocks.
A head is expected to be a ClassyHead object. For more
details, see :class:`classy_vision.heads.ClassyHead`.
Args:
heads (Dict): a mapping between attachable block name
and a list of heads attached to that block. For
example, if you have two different teams that want to
attach two different heads for downstream classifiers to
the 15th block, then they would use:
.. code-block:: python
heads = {"block15":
[classifier_head1, classifier_head2]
}
"""
self.clear_heads()
head_ids = set()
for block_name, block_heads in heads.items():
if not self._make_module_attachable(self, block_name):
raise KeyError(f"{block_name} not found in the model")
for head in block_heads:
if head.unique_id in head_ids:
raise ValueError("head id {} already exists".format(head.unique_id))
head_ids.add(head.unique_id)
self._heads[block_name] = nn.ModuleDict(
{head.unique_id: head for head in block_heads}
)
def get_heads(self):
"""Returns the heads on the model
Function returns the heads a dictionary of block names to
`nn.Modules <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_
attached to that block.
"""
return {
block_name: list(heads.values())
for block_name, heads in self._heads.items()
}
@property
def head_outputs(self):
"""Return outputs of all heads in the format of Dict[head_id, output]
Head outputs are cached during a forward pass.
"""
return self._head_outputs.copy()
def get_block_outputs(self) -> Dict[str, torch.Tensor]:
outputs = {}
for name, block in self._attachable_blocks.items():
outputs[name] = block.output
return outputs
def execute_heads(self) -> Dict[str, torch.Tensor]:
block_outs = self.get_block_outputs()
outputs = {}
for block_name, heads in self._heads.items():
for head in heads.values():
outputs[head.unique_id] = head(block_outs[block_name])
self._head_outputs = outputs
return outputs
@property
def input_shape(self):
"""Returns the input shape that the model can accept, excluding the batch dimension.
By default it returns (3, 224, 224).
"""
return (3, 224, 224)
class _ClassyModelAdapter(ClassyModel):
"""
Class which adapts an `nn.Module <https://pytorch.org/docs/stable/
nn.html#torch.nn.Module>`_ to a ClassyModel by wrapping the model.
The only required argument is the model; the remaining args enable some
additional Classy Vision capabilities.
"""
def __init__(
self,
model: nn.Module,
input_shape: Optional[Tuple] = None,
model_depth: Optional[int] = None,
):
super().__init__()
self.model = model
self._input_shape = input_shape
self._model_depth = model_depth
def forward(self, x):
return self.model(x)
def extract_features(self, x):
if hasattr(self.model, "extract_features"):
return self.model.extract_features(x)
return super().extract_features(x)
@property
def input_shape(self):
if self._input_shape is not None:
return self._input_shape
return super().input_shape
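# --- Editor's note: an illustrative sketch, not part of the original source. ---
# It only uses the APIs defined above (`ClassyModel.from_model`, `get_classy_state`,
# `set_classy_state`) to show how a plain nn.Module gets checkpoint-compatible state
# handling without writing a ClassyModel subclass.
def _example_wrap_plain_module():
    trunk = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())
    classy_model = ClassyModel.from_model(trunk, input_shape=(3, 224, 224))
    # The state dict follows {"model": {"trunk": ..., "heads": ...}}; with no heads
    # attached, all of the state lives under "trunk".
    state = classy_model.get_classy_state(deep_copy=True)
    classy_model.set_classy_state(state)  # round-trips cleanly
    return classy_model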
| ClassyVision-main | classy_vision/models/classy_model.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch.nn as nn
def r2plus1_unit(
dim_in,
dim_out,
temporal_stride,
spatial_stride,
groups,
inplace_relu,
bn_eps,
bn_mmt,
dim_mid=None,
):
"""
Implementation of `R(2+1)D unit <https://arxiv.org/abs/1711.11248>`_.
Decompose one 3D conv into one 2D spatial conv and one 1D temporal conv.
Choose the middle dimensionality so that the total number of parameters
in the 2D spatial conv and 1D temporal conv matches that of the original 3D conv.
Args:
dim_in (int): the channel dimensions of the input.
dim_out (int): the channel dimension of the output.
temporal_stride (int): the temporal stride of the bottleneck.
spatial_stride (int): the spatial stride of the bottleneck.
groups (int): number of groups for the convolution.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
bn_eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Note that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
dim_mid (Optional[int]): If not None, use the provided channel dimension
for the output of the 2D spatial conv. If None, compute the output
channel dimension of the 2D spatial conv so that the total No. of
model parameters remains unchanged.
"""
if dim_mid is None:
dim_mid = int(dim_out * dim_in * 3 * 3 * 3 / (dim_in * 3 * 3 + dim_out * 3))
logging.info(
"dim_in: %d, dim_out: %d. Set dim_mid to %d" % (dim_in, dim_out, dim_mid)
)
# 1x3x3 group conv, BN, ReLU
conv_middle = nn.Conv3d(
dim_in,
dim_mid,
[1, 3, 3], # kernel
stride=[1, spatial_stride, spatial_stride],
padding=[0, 1, 1],
groups=groups,
bias=False,
)
conv_middle_bn = nn.BatchNorm3d(dim_mid, eps=bn_eps, momentum=bn_mmt)
conv_middle_relu = nn.ReLU(inplace=inplace_relu)
# 3x1x1 group conv
conv = nn.Conv3d(
dim_mid,
dim_out,
[3, 1, 1], # kernel
stride=[temporal_stride, 1, 1],
padding=[1, 0, 0],
groups=groups,
bias=False,
)
return nn.Sequential(conv_middle, conv_middle_bn, conv_middle_relu, conv)
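# --- Editor's note: an illustrative sketch, not part of the original source. ---
# For dim_in = dim_out = 64 the default dim_mid works out to
# int(64 * 64 * 27 / (64 * 9 + 64 * 3)) = int(110592 / 768) = 144, so the factorized
# (2+1)D unit keeps roughly the parameter count of a single full 3x3x3 convolution.
def _example_r2plus1_unit():
    unit = r2plus1_unit(
        dim_in=64,
        dim_out=64,
        temporal_stride=1,
        spatial_stride=1,
        groups=1,
        inplace_relu=True,
        bn_eps=1e-5,
        bn_mmt=0.1,
    )
    return unit  # Sequential: 1x3x3 conv, BN, ReLU, 3x1x1 conv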
| ClassyVision-main | classy_vision/models/r2plus1_util.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import warnings
import torch
import torch.nn as nn
from classy_vision.generic.util import get_torch_version
from torch import Tensor
def lecun_normal_init(tensor, fan_in):
if get_torch_version() >= [1, 7]:
trunc_normal_ = nn.init.trunc_normal_
else:
def trunc_normal_(
tensor: Tensor,
mean: float = 0.0,
std: float = 1.0,
a: float = -2.0,
b: float = 2.0,
) -> Tensor:
# code copied from https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
# commit: e9b369c
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2,
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
trunc_normal_(tensor, std=math.sqrt(1 / fan_in))
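# --- Editor's note: an illustrative sketch, not part of the original source. ---
# `fan_in` is the number of inputs feeding each output unit: `in_features` for a
# Linear layer, `in_channels * kernel_h * kernel_w` for a Conv2d.
def _example_lecun_normal_init():
    linear = nn.Linear(256, 128)
    lecun_normal_init(linear.weight, fan_in=linear.in_features)
    conv = nn.Conv2d(3, 16, kernel_size=3)
    lecun_normal_init(conv.weight, fan_in=3 * 3 * 3)
    return linear, conv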
| ClassyVision-main | classy_vision/models/lecun_normal_init.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import math
from typing import Any, Dict
import numpy as np
import torch.nn as nn
from classy_vision.models import register_model
from .anynet import (
ActivationType,
AnyNet,
AnyNetParams,
BlockType,
RELU_IN_PLACE,
StemType,
)
def _quantize_float(f, q):
"""Converts a float to closest non-zero int divisible by q."""
return int(round(f / q) * q)
def _adjust_widths_groups_compatibility(stage_widths, bottleneck_ratios, group_widths):
"""Adjusts the compatibility of widths and groups,
depending on the bottleneck ratio."""
# Compute all widths for the current settings
widths = [int(w * b) for w, b in zip(stage_widths, bottleneck_ratios)]
group_widths_min = [min(g, w_bot) for g, w_bot in zip(group_widths, widths)]
# Compute the adjusted widths so that stage and group widths fit
ws_bot = [_quantize_float(w_bot, g) for w_bot, g in zip(widths, group_widths_min)]
stage_widths = [int(w_bot / b) for w_bot, b in zip(ws_bot, bottleneck_ratios)]
return stage_widths, group_widths_min
class RegNetParams(AnyNetParams):
def __init__(
self,
depth: int,
w_0: int,
w_a: float,
w_m: float,
group_width: int,
bottleneck_multiplier: float = 1.0,
stem_type: StemType = StemType.SIMPLE_STEM_IN,
stem_width: int = 32,
block_type: BlockType = BlockType.RES_BOTTLENECK_BLOCK,
activation: ActivationType = ActivationType.RELU,
use_se: bool = True,
se_ratio: float = 0.25,
bn_epsilon: float = 1e-05,
bn_momentum: float = 0.1,
):
assert (
w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % 8 == 0
), "Invalid RegNet settings"
self.depth = depth
self.w_0 = w_0
self.w_a = w_a
self.w_m = w_m
self.group_width = group_width
self.bottleneck_multiplier = bottleneck_multiplier
self.stem_type = stem_type
self.block_type = block_type
self.activation = activation
self.stem_width = stem_width
self.use_se = use_se
self.se_ratio = se_ratio if use_se else None
self.bn_epsilon = bn_epsilon
self.bn_momentum = bn_momentum
self.relu_in_place = RELU_IN_PLACE
def get_expanded_params(self):
"""Programatically compute all the per-block settings,
given the RegNet parameters.
The first step is to compute the quantized linear block parameters,
in log space. Key parameters are:
- `w_a` is the width progression slope
- `w_0` is the initial width
- `w_m` is the width stepping in the log space
In other terms,
`log(block_width) = log(w_0) + block_capacity * log(w_m)`,
with `block_capacity` ramping up following the w_0 and w_a params.
This block width is finally quantized to multiples of 8.
The second step is to compute the parameters per stage,
taking into account the skip connection and the final 1x1 convolutions.
We use the fact that the output width is constant within a stage
"""
QUANT = 8
STRIDE = 2
# Compute the block widths. Each stage has one unique block width
widths_cont = np.arange(self.depth) * self.w_a + self.w_0
block_capacity = np.round(np.log(widths_cont / self.w_0) / np.log(self.w_m))
block_widths = (
np.round(np.divide(self.w_0 * np.power(self.w_m, block_capacity), QUANT))
* QUANT
)
num_stages = len(np.unique(block_widths))
block_widths = block_widths.astype(int).tolist()
# Convert to per stage parameters
split_helper = zip(
block_widths + [0],
[0] + block_widths,
block_widths + [0],
[0] + block_widths,
)
splits = [w != wp or r != rp for w, wp, r, rp in split_helper]
stage_widths = [w for w, t in zip(block_widths, splits[:-1]) if t]
stage_depths = np.diff([d for d, t in enumerate(splits) if t]).tolist()
strides = [STRIDE] * num_stages
bottleneck_multipliers = [self.bottleneck_multiplier] * num_stages
group_widths = [self.group_width] * num_stages
# Adjust the compatibility of stage widths and group widths
stage_widths, group_widths = _adjust_widths_groups_compatibility(
stage_widths, bottleneck_multipliers, group_widths
)
return zip(
stage_widths, strides, stage_depths, group_widths, bottleneck_multipliers
)
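# --- Editor's note: an illustrative helper, not part of the original source. ---
# It iterates `get_expanded_params` to show the per-stage
# (width, stride, depth, group_width, bottleneck_multiplier) tuples produced by the
# quantized-linear parameterization above.
def _example_print_expanded_params(params: RegNetParams) -> None:
    for stage, (width, stride, depth, group_width, bottleneck) in enumerate(
        params.get_expanded_params(), start=1
    ):
        print(
            f"stage {stage}: width={width}, stride={stride}, depth={depth}, "
            f"group_width={group_width}, bottleneck={bottleneck}"
        )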
@register_model("regnet")
class RegNet(AnyNet):
"""Implementation of RegNet, a particular form of AnyNets.
See https://arxiv.org/abs/2003.13678 for introduction to RegNets, and details about
RegNetX and RegNetY models.
See https://arxiv.org/abs/2103.06877 for details about RegNetZ models.
"""
def __init__(self, params: RegNetParams):
super().__init__(params)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "RegNet":
"""Instantiates a RegNet from a configuration.
Args:
config: A configuration for a RegNet.
See `RegNetParams` for parameters expected in the config.
Returns:
A RegNet instance.
"""
params = RegNetParams(
depth=config["depth"],
w_0=config["w_0"],
w_a=config["w_a"],
w_m=config["w_m"],
group_width=config["group_width"],
bottleneck_multiplier=config.get("bottleneck_multiplier", 1.0),
stem_type=StemType[config.get("stem_type", "simple_stem_in").upper()],
stem_width=config.get("stem_width", 32),
block_type=BlockType[
config.get("block_type", "res_bottleneck_block").upper()
],
activation=ActivationType[config.get("activation", "relu").upper()],
use_se=config.get("use_se", True),
se_ratio=config.get("se_ratio", 0.25),
bn_epsilon=config.get("bn_epsilon", 1e-05),
bn_momentum=config.get("bn_momentum", 0.1),
)
return cls(params)
def forward(self, x, *args, **kwargs):
x = self.stem(x)
x = self.trunk_output(x)
return x
def init_weights(self):
# Performs ResNet-style weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
# Note that there is no bias due to BN
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(mean=0.0, std=0.01)
m.bias.data.zero_()
# Register some "classic" RegNets
class _RegNet(RegNet):
def __init__(self, params: RegNetParams):
super().__init__(params)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "RegNet":
config = copy.deepcopy(config)
config.pop("name")
if "heads" in config:
config.pop("heads")
return cls(**config)
@register_model("regnet_y_400mf")
class RegNetY400mf(_RegNet):
def __init__(self, **kwargs):
# Output size: 440 feature maps
super().__init__(
RegNetParams(depth=16, w_0=48, w_a=27.89, w_m=2.09, group_width=8, **kwargs)
)
@register_model("regnet_y_800mf")
class RegNetY800mf(_RegNet):
def __init__(self, **kwargs):
# Output size: 768 feature maps
super().__init__(
RegNetParams(depth=14, w_0=56, w_a=38.84, w_m=2.4, group_width=16, **kwargs)
)
@register_model("regnet_y_1.6gf")
class RegNetY1_6gf(_RegNet):
def __init__(self, **kwargs):
# Output size: 888 feature maps
super().__init__(
RegNetParams(
depth=27, w_0=48, w_a=20.71, w_m=2.65, group_width=24, **kwargs
)
)
@register_model("regnet_y_3.2gf")
class RegNetY3_2gf(_RegNet):
def __init__(self, **kwargs):
# Output size: 1512 feature maps
super().__init__(
RegNetParams(
depth=21, w_0=80, w_a=42.63, w_m=2.66, group_width=24, **kwargs
)
)
@register_model("regnet_y_8gf")
class RegNetY8gf(_RegNet):
def __init__(self, **kwargs):
# Output size: 2016 feature maps
super().__init__(
RegNetParams(
depth=17, w_0=192, w_a=76.82, w_m=2.19, group_width=56, **kwargs
)
)
@register_model("regnet_y_16gf")
class RegNetY16gf(_RegNet):
def __init__(self, **kwargs):
# Output size: 3024 feature maps
super().__init__(
RegNetParams(
depth=18, w_0=200, w_a=106.23, w_m=2.48, group_width=112, **kwargs
)
)
@register_model("regnet_y_32gf")
class RegNetY32gf(_RegNet):
def __init__(self, **kwargs):
# Output size: 3712 feature maps
super().__init__(
RegNetParams(
depth=20, w_0=232, w_a=115.89, w_m=2.53, group_width=232, **kwargs
)
)
@register_model("regnet_x_400mf")
class RegNetX400mf(_RegNet):
def __init__(self, **kwargs):
super().__init__(
RegNetParams(
depth=22,
w_0=24,
w_a=24.48,
w_m=2.54,
group_width=16,
use_se=False,
**kwargs,
)
)
@register_model("regnet_x_800mf")
class RegNetX800mf(_RegNet):
def __init__(self, **kwargs):
super().__init__(
RegNetParams(
depth=16,
w_0=56,
w_a=35.73,
w_m=2.28,
group_width=16,
use_se=False,
**kwargs,
)
)
@register_model("regnet_x_1.6gf")
class RegNetX1_6gf(_RegNet):
def __init__(self, **kwargs):
super().__init__(
RegNetParams(
depth=18,
w_0=80,
w_a=34.01,
w_m=2.25,
group_width=24,
use_se=False,
**kwargs,
)
)
@register_model("regnet_x_3.2gf")
class RegNetX3_2gf(_RegNet):
def __init__(self, **kwargs):
super().__init__(
RegNetParams(
depth=25,
w_0=88,
w_a=26.31,
w_m=2.25,
group_width=48,
use_se=False,
**kwargs,
)
)
@register_model("regnet_x_8gf")
class RegNetX8gf(_RegNet):
def __init__(self, **kwargs):
super().__init__(
RegNetParams(
depth=23,
w_0=80,
w_a=49.56,
w_m=2.88,
group_width=120,
use_se=False,
**kwargs,
)
)
@register_model("regnet_x_16gf")
class RegNetX16gf(_RegNet):
def __init__(self, **kwargs):
super().__init__(
RegNetParams(
depth=22,
w_0=216,
w_a=55.59,
w_m=2.1,
group_width=128,
use_se=False,
**kwargs,
)
)
@register_model("regnet_x_32gf")
class RegNetX32gf(_RegNet):
def __init__(self, **kwargs):
super().__init__(
RegNetParams(
depth=23,
w_0=320,
w_a=69.86,
w_m=2.0,
group_width=168,
use_se=False,
**kwargs,
)
)
# note that RegNetZ models are trained with a convolutional head, i.e. the
# fully_connected ClassyHead with conv_planes > 0.
@register_model("regnet_z_500mf")
class RegNetZ500mf(_RegNet):
def __init__(self, **kwargs):
super().__init__(
RegNetParams(
depth=21,
w_0=16,
w_a=10.7,
w_m=2.51,
group_width=4,
bottleneck_multiplier=4.0,
block_type=BlockType.RES_BOTTLENECK_LINEAR_BLOCK,
activation=ActivationType.SILU,
**kwargs,
)
)
# this is supposed to be trained with a resolution of 256x256
@register_model("regnet_z_4gf")
class RegNetZ4gf(_RegNet):
def __init__(self, **kwargs):
super().__init__(
RegNetParams(
depth=28,
w_0=48,
w_a=14.5,
w_m=2.226,
group_width=8,
bottleneck_multiplier=4.0,
block_type=BlockType.RES_BOTTLENECK_LINEAR_BLOCK,
activation=ActivationType.SILU,
**kwargs,
)
)
# -----------------------------------------------------------------------------------
# The following models were not part of the original publication
# (https://arxiv.org/abs/2003.13678v1), but are larger versions of the
# published models, obtained in the same manner.
@register_model("regnet_y_64gf")
class RegNetY64gf(_RegNet):
def __init__(self, **kwargs):
# Output size: 2976 feature maps
super().__init__(
RegNetParams(
depth=20, w_0=352, w_a=147.48, w_m=2.4, group_width=328, **kwargs
)
)
@register_model("regnet_y_128gf")
class RegNetY128gf(_RegNet):
def __init__(self, **kwargs):
# Output size: 7392 feature maps
super().__init__(
RegNetParams(
depth=27, w_0=456, w_a=160.83, w_m=2.52, group_width=264, **kwargs
)
)
@register_model("regnet_y_256gf")
class RegNetY256gf(_RegNet):
def __init__(self, **kwargs):
# Output size: 5088 feature maps
super().__init__(
RegNetParams(
depth=27, w_0=640, w_a=124.47, w_m=2.04, group_width=848, **kwargs
)
)
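# --- Editor's note: an illustrative sketch, not part of the original source. ---
# The generic "regnet" model is configured through the RegNetParams keys used in
# `RegNet.from_config`, while the registered variants above hard-code those keys and
# only accept the remaining RegNetParams kwargs.
def _example_build_regnets():
    generic = RegNet.from_config(
        {"depth": 16, "w_0": 48, "w_a": 27.89, "w_m": 2.09, "group_width": 8}
    )
    preset = RegNetY400mf()  # the same architecture, selected by its registry name
    return generic, preset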
| ClassyVision-main | classy_vision/models/regnet.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections import OrderedDict
from enum import auto, Enum
from typing import Any, Dict, Optional, Sequence
import torch.nn as nn
from classy_vision.generic.util import get_torch_version
from classy_vision.models import ClassyModel, register_model
from classy_vision.models.squeeze_and_excitation_layer import SqueezeAndExcitationLayer
RELU_IN_PLACE = True
# The different possible blocks
class BlockType(Enum):
VANILLA_BLOCK = auto()
RES_BASIC_BLOCK = auto()
RES_BOTTLENECK_BLOCK = auto()
RES_BOTTLENECK_LINEAR_BLOCK = auto()
# The different possible Stems
class StemType(Enum):
RES_STEM_CIFAR = auto()
RES_STEM_IN = auto()
SIMPLE_STEM_IN = auto()
# The different possible activations
class ActivationType(Enum):
RELU = auto()
SILU = auto()
class BasicTransform(nn.Sequential):
"""Basic transformation: [3x3 conv, BN, Relu] x2."""
def __init__(
self,
width_in: int,
width_out: int,
stride: int,
bn_epsilon: float,
bn_momentum: float,
activation: nn.Module,
):
super().__init__()
self.a = nn.Sequential(
nn.Conv2d(width_in, width_out, 3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(width_out, eps=bn_epsilon, momentum=bn_momentum),
activation,
nn.Conv2d(width_out, width_out, 3, stride=1, padding=1, bias=False),
)
self.final_bn = nn.BatchNorm2d(width_out, eps=bn_epsilon, momentum=bn_momentum)
self.depth = 2
class ResStemCifar(nn.Sequential):
"""ResNet stem for CIFAR: 3x3, BN, ReLU."""
def __init__(
self,
width_in: int,
width_out: int,
bn_epsilon: float,
bn_momentum: float,
activation: nn.Module,
):
super().__init__()
self.stem = nn.Sequential(
nn.Conv2d(width_in, width_out, 3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(width_out, eps=bn_epsilon, momentum=bn_momentum),
activation,
)
self.depth = 2
class ResStemIN(nn.Sequential):
"""ResNet stem for ImageNet: 7x7, BN, ReLU, MaxPool."""
def __init__(
self,
width_in: int,
width_out: int,
bn_epsilon: float,
bn_momentum: float,
activation: nn.Module,
):
super().__init__()
self.stem = nn.Sequential(
nn.Conv2d(width_in, width_out, 7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(width_out, eps=bn_epsilon, momentum=bn_momentum),
activation,
nn.MaxPool2d(3, stride=2, padding=1),
)
self.depth = 3
class SimpleStemIN(nn.Sequential):
"""Simple stem for ImageNet: 3x3, BN, ReLU."""
def __init__(
self,
width_in: int,
width_out: int,
bn_epsilon: float,
bn_momentum: float,
activation: nn.Module,
):
super().__init__()
self.stem = nn.Sequential(
nn.Conv2d(width_in, width_out, 3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(width_out, eps=bn_epsilon, momentum=bn_momentum),
activation,
)
self.depth = 2
class VanillaBlock(nn.Sequential):
"""Vanilla block: [3x3 conv, BN, Relu] x2."""
def __init__(
self,
width_in: int,
width_out: int,
stride: int,
bn_epsilon: float,
bn_momentum: float,
activation: nn.Module,
*args,
**kwargs,
):
super().__init__()
self.a = nn.Sequential(
nn.Conv2d(width_in, width_out, 3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(width_out, eps=bn_epsilon, momentum=bn_momentum),
activation,
)
self.b = nn.Sequential(
nn.Conv2d(width_out, width_out, 3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(width_out, eps=bn_epsilon, momentum=bn_momentum),
activation,
)
self.depth = 2
class ResBasicBlock(nn.Module):
"""Residual basic block: x + F(x), F = basic transform."""
def __init__(
self,
width_in: int,
width_out: int,
stride: int,
bn_epsilon: float,
bn_momentum: float,
activation: nn.Module,
*args,
**kwargs,
):
super().__init__()
self.proj_block = (width_in != width_out) or (stride != 1)
if self.proj_block:
self.proj = nn.Conv2d(
width_in, width_out, 1, stride=stride, padding=0, bias=False
)
self.bn = nn.BatchNorm2d(width_out, eps=bn_epsilon, momentum=bn_momentum)
self.f = BasicTransform(
width_in, width_out, stride, bn_epsilon, bn_momentum, activation
)
self.activation = activation
# The projection and transform happen in parallel,
# and ReLU is not counted with respect to depth
self.depth = self.f.depth
def forward(self, x):
if self.proj_block:
x = self.bn(self.proj(x)) + self.f(x)
else:
x = x + self.f(x)
return self.activation(x)
class BottleneckTransform(nn.Sequential):
"""Bottleneck transformation: 1x1, 3x3 [+SE], 1x1."""
def __init__(
self,
width_in: int,
width_out: int,
stride: int,
bn_epsilon: float,
bn_momentum: float,
activation: nn.Module,
group_width: int,
bottleneck_multiplier: float,
se_ratio: Optional[float],
):
super().__init__()
w_b = int(round(width_out * bottleneck_multiplier))
g = w_b // group_width
self.a = nn.Sequential(
nn.Conv2d(width_in, w_b, 1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(w_b, eps=bn_epsilon, momentum=bn_momentum),
activation,
)
self.b = nn.Sequential(
nn.Conv2d(w_b, w_b, 3, stride=stride, padding=1, groups=g, bias=False),
nn.BatchNorm2d(w_b, eps=bn_epsilon, momentum=bn_momentum),
activation,
)
if se_ratio:
# The SE reduction ratio is defined with respect to the
# beginning of the block
width_se_out = int(round(se_ratio * width_in))
self.se = SqueezeAndExcitationLayer(
in_planes=w_b,
reduction_ratio=None,
reduced_planes=width_se_out,
activation=activation,
)
self.c = nn.Conv2d(w_b, width_out, 1, stride=1, padding=0, bias=False)
self.final_bn = nn.BatchNorm2d(width_out, eps=bn_epsilon, momentum=bn_momentum)
self.depth = 3 if not se_ratio else 4
class ResBottleneckBlock(nn.Module):
"""Residual bottleneck block: x + F(x), F = bottleneck transform."""
def __init__(
self,
width_in: int,
width_out: int,
stride: int,
bn_epsilon: float,
bn_momentum: float,
activation: nn.Module,
group_width: int = 1,
bottleneck_multiplier: float = 1.0,
se_ratio: Optional[float] = None,
):
super().__init__()
# Use skip connection with projection if shape changes
self.proj_block = (width_in != width_out) or (stride != 1)
if self.proj_block:
self.proj = nn.Conv2d(
width_in, width_out, 1, stride=stride, padding=0, bias=False
)
self.bn = nn.BatchNorm2d(width_out, eps=bn_epsilon, momentum=bn_momentum)
self.f = BottleneckTransform(
width_in,
width_out,
stride,
bn_epsilon,
bn_momentum,
activation,
group_width,
bottleneck_multiplier,
se_ratio,
)
self.activation = activation
# The projection and transform happen in parallel,
# and activation is not counted with respect to depth
self.depth = self.f.depth
def forward(self, x, *args):
if self.proj_block:
x = self.bn(self.proj(x)) + self.f(x)
else:
x = x + self.f(x)
return self.activation(x)
class ResBottleneckLinearBlock(nn.Module):
"""Residual linear bottleneck block: x + F(x), F = bottleneck transform."""
def __init__(
self,
width_in: int,
width_out: int,
stride: int,
bn_epsilon: float,
bn_momentum: float,
activation: nn.Module,
group_width: int = 1,
bottleneck_multiplier: float = 4.0,
se_ratio: Optional[float] = None,
):
super().__init__()
self.has_skip = (width_in == width_out) and (stride == 1)
self.f = BottleneckTransform(
width_in,
width_out,
stride,
bn_epsilon,
bn_momentum,
activation,
group_width,
bottleneck_multiplier,
se_ratio,
)
self.depth = self.f.depth
def forward(self, x):
return x + self.f(x) if self.has_skip else self.f(x)
class AnyStage(nn.Sequential):
"""AnyNet stage (sequence of blocks w/ the same output shape)."""
def __init__(
self,
width_in: int,
width_out: int,
stride: int,
depth: int,
block_constructor: nn.Module,
activation: nn.Module,
group_width: int,
bottleneck_multiplier: float,
params: "AnyNetParams",
stage_index: int = 0,
):
super().__init__()
self.stage_depth = 0
for i in range(depth):
block = block_constructor(
width_in if i == 0 else width_out,
width_out,
stride if i == 0 else 1,
params.bn_epsilon,
params.bn_momentum,
activation,
group_width,
bottleneck_multiplier,
params.se_ratio,
)
self.stage_depth += block.depth
self.add_module(f"block{stage_index}-{i}", block)
class AnyNetParams:
def __init__(
self,
depths: Sequence[int],
widths: Sequence[int],
group_widths: Sequence[int],
bottleneck_multipliers: Sequence[float],
strides: Sequence[int],
stem_type: StemType = StemType.SIMPLE_STEM_IN,
stem_width: int = 32,
block_type: BlockType = BlockType.RES_BOTTLENECK_BLOCK,
activation: ActivationType = ActivationType.RELU,
use_se: bool = True,
se_ratio: float = 0.25,
bn_epsilon: float = 1e-05,
bn_momentum: float = 0.1,
):
self.depths = depths
self.widths = widths
self.group_widths = group_widths
self.bottleneck_multipliers = bottleneck_multipliers
self.strides = strides
self.stem_type = stem_type
self.stem_width = stem_width
self.block_type = block_type
self.activation = activation
self.use_se = use_se
self.se_ratio = se_ratio if use_se else None
self.bn_epsilon = bn_epsilon
self.bn_momentum = bn_momentum
self.relu_in_place = RELU_IN_PLACE
def get_expanded_params(self):
"""Return an iterator over AnyNet parameters for each stage."""
return zip(
self.widths,
self.strides,
self.depths,
self.group_widths,
self.bottleneck_multipliers,
)
@register_model("anynet")
class AnyNet(ClassyModel):
"""Implementation of an AnyNet.
See https://arxiv.org/abs/2003.13678 for details.
"""
def __init__(self, params: AnyNetParams):
super().__init__()
silu = None if get_torch_version() < [1, 7] else nn.SiLU()
activation = {
ActivationType.RELU: nn.ReLU(params.relu_in_place),
ActivationType.SILU: silu,
}[params.activation]
if activation is None:
raise RuntimeError("SiLU activation is only supported since PyTorch 1.7")
# Ad hoc stem
self.stem = {
StemType.RES_STEM_CIFAR: ResStemCifar,
StemType.RES_STEM_IN: ResStemIN,
StemType.SIMPLE_STEM_IN: SimpleStemIN,
}[params.stem_type](
3,
params.stem_width,
params.bn_epsilon,
params.bn_momentum,
activation,
)
# Instantiate all the AnyNet blocks in the trunk
block_fun = {
BlockType.VANILLA_BLOCK: VanillaBlock,
BlockType.RES_BASIC_BLOCK: ResBasicBlock,
BlockType.RES_BOTTLENECK_BLOCK: ResBottleneckBlock,
BlockType.RES_BOTTLENECK_LINEAR_BLOCK: ResBottleneckLinearBlock,
}[params.block_type]
current_width = params.stem_width
self.trunk_depth = 0
blocks = []
for i, (
width_out,
stride,
depth,
group_width,
bottleneck_multiplier,
) in enumerate(params.get_expanded_params()):
blocks.append(
(
f"block{i+1}",
AnyStage(
current_width,
width_out,
stride,
depth,
block_fun,
activation,
group_width,
bottleneck_multiplier,
params,
stage_index=i + 1,
),
)
)
self.trunk_depth += blocks[-1][1].stage_depth
current_width = width_out
self.trunk_output = nn.Sequential(OrderedDict(blocks))
# Init weights and good to go
self.init_weights()
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "AnyNet":
"""Instantiates an AnyNet from a configuration.
Args:
config: A configuration for an AnyNet.
See `AnyNetParams` for parameters expected in the config.
Returns:
An AnyNet instance.
"""
params = AnyNetParams(
depths=config["depths"],
widths=config["widths"],
group_widths=config["group_widths"],
bottleneck_multipliers=config["bottleneck_multipliers"],
strides=config["strides"],
stem_type=StemType[config.get("stem_type", "simple_stem_in").upper()],
stem_width=config.get("stem_width", 32),
block_type=BlockType[
config.get("block_type", "res_bottleneck_block").upper()
],
activation=ActivationType[config.get("activation", "relu").upper()],
use_se=config.get("use_se", True),
se_ratio=config.get("se_ratio", 0.25),
bn_epsilon=config.get("bn_epsilon", 1e-05),
bn_momentum=config.get("bn_momentum", 0.1),
)
return cls(params)
def forward(self, x, *args, **kwargs):
x = self.stem(x)
x = self.trunk_output(x)
return x
def init_weights(self):
# Performs ResNet-style weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
# Note that there is no bias due to BN
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(mean=0.0, std=0.01)
m.bias.data.zero_()
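# --- Editor's note: an illustrative sketch, not part of the original source. ---
# AnyNetParams takes explicit per-stage sequences (one entry per stage); RegNet, by
# contrast, derives these sequences from a handful of scalar parameters.
def _example_build_tiny_anynet():
    return AnyNet.from_config(
        {
            "depths": [1, 2],
            "widths": [32, 64],
            "group_widths": [8, 8],
            "bottleneck_multipliers": [1.0, 1.0],
            "strides": [2, 2],
        }
    )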
| ClassyVision-main | classy_vision/models/anynet.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Vision Transformer implementation from https://arxiv.org/abs/2010.11929.
References:
https://github.com/google-research/vision_transformer
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import copy
import logging
import math
from collections import OrderedDict
from functools import partial
from typing import Dict, List, Mapping, NamedTuple, Union
import torch
import torch.nn as nn
from classy_vision.models import ClassyModel, register_model
from .lecun_normal_init import lecun_normal_init
LayerNorm = partial(nn.LayerNorm, eps=1e-6)
class ConvStemLayer(NamedTuple):
kernel: int
stride: int
out_channels: int
def get_same_padding_for_kernel_size(kernel_size):
"""
Returns the required padding for "same" style convolutions
"""
if kernel_size % 2 == 0:
raise ValueError(f"Only odd sized kernels are supported, got {kernel_size}")
return (kernel_size - 1) // 2
class MLPBlock(nn.Sequential):
"""Transformer MLP block."""
def __init__(self, in_dim, mlp_dim, dropout_rate):
super().__init__()
self.linear_1 = nn.Linear(in_dim, mlp_dim)
self.act = nn.GELU()
self.dropout_1 = nn.Dropout(dropout_rate)
self.linear_2 = nn.Linear(mlp_dim, in_dim)
self.dropout_2 = nn.Dropout(dropout_rate)
self.init_weights()
def init_weights(self):
nn.init.xavier_uniform_(self.linear_1.weight)
nn.init.xavier_uniform_(self.linear_2.weight)
nn.init.normal_(self.linear_1.bias, std=1e-6)
nn.init.normal_(self.linear_2.bias, std=1e-6)
class EncoderBlock(nn.Module):
"""Transformer encoder block.
From @myleott -
There are at least three common structures.
1) Attention is all you need had the worst one, where the layernorm came after each
block and was in the residual path.
2) BERT improved upon this by moving the layernorm to the beginning of each block
(and adding an extra layernorm at the end).
3) There's a further improved version that also moves the layernorm outside of the
residual path, which is what this implementation does.
Figure 1 of this paper compares versions 1 and 3:
https://openreview.net/pdf?id=B1x8anVFPr
Figure 7 of this paper compares versions 2 and 3 for BERT:
https://arxiv.org/abs/1909.08053
"""
def __init__(
self, num_heads, hidden_dim, mlp_dim, dropout_rate, attention_dropout_rate
):
super().__init__()
self.ln_1 = LayerNorm(hidden_dim)
self.self_attention = nn.MultiheadAttention(
hidden_dim, num_heads, dropout=attention_dropout_rate
) # uses correct initialization by default
self.dropout = nn.Dropout(dropout_rate)
self.ln_2 = LayerNorm(hidden_dim)
self.mlp = MLPBlock(hidden_dim, mlp_dim, dropout_rate)
self.num_heads = num_heads
def forward(self, input):
x = self.ln_1(input)
x, _ = self.self_attention(query=x, key=x, value=x, need_weights=False)
x = self.dropout(x)
x = x + input
y = self.ln_2(x)
y = self.mlp(y)
return x + y
def flops(self, x):
flops = 0
seq_len, batch_size, hidden_dim = x.shape
num_elems = x.numel() // batch_size
flops += num_elems * 6 # ln_1 (* 2), x + input, ln_2 (* 2), x + y
# self_attention
# calculations are based on the fact that head_dim * num_heads = hidden_dim
# so we collapse (hidden_dim // num_heads) * num_heads to hidden_dim
flops += 3 * seq_len * (hidden_dim + 1) * hidden_dim # projection with bias
flops += hidden_dim * seq_len # scaling
flops += hidden_dim * seq_len * seq_len # attention weights
flops += self.num_heads * seq_len * seq_len # softmax
flops += hidden_dim * seq_len * seq_len # attention application
flops += seq_len * (hidden_dim + 1) * hidden_dim # out projection with bias
# mlp
mlp_dim = self.mlp.linear_1.out_features
flops += seq_len * (hidden_dim + 1) * mlp_dim # linear_1
flops += seq_len * mlp_dim # act
flops += seq_len * (mlp_dim + 1) * hidden_dim # linear_2
return flops * batch_size
def activations(self, out, x):
# we only count activations for matrix multiplications
activations = 0
seq_len, batch_size, hidden_dim = x.shape
# self_attention
activations += 3 * seq_len * hidden_dim # projection
activations += self.num_heads * seq_len * seq_len # attention weights
activations += hidden_dim * seq_len # attention application
activations += hidden_dim * seq_len # out projection
# mlp
mlp_dim = self.mlp.linear_1.out_features
activations += seq_len * mlp_dim # linear_1
activations += seq_len * hidden_dim # linear_2
return activations
class Encoder(nn.Module):
"""Transformer Encoder."""
def __init__(
self,
seq_length,
num_layers,
num_heads,
hidden_dim,
mlp_dim,
dropout_rate,
attention_dropout_rate,
):
super().__init__()
self.pos_embedding = nn.Parameter(
torch.empty(seq_length, 1, hidden_dim).normal_(std=0.02)
)
self.dropout = nn.Dropout(dropout_rate)
layers = []
for i in range(num_layers):
layers.append(
(
f"layer_{i}",
EncoderBlock(
num_heads,
hidden_dim,
mlp_dim,
dropout_rate,
attention_dropout_rate,
),
)
)
self.layers = nn.Sequential(OrderedDict(layers))
self.ln = LayerNorm(hidden_dim)
def forward(self, x):
x = x + self.pos_embedding # should broadcast to the same shape
return self.ln(self.layers(self.dropout(x)))
@register_model("vision_transformer")
class VisionTransformer(ClassyModel):
"""Vision Transformer as per https://arxiv.org/abs/2010.11929."""
def __init__(
self,
image_size,
patch_size,
num_layers,
num_heads,
hidden_dim,
mlp_dim,
dropout_rate=0,
attention_dropout_rate=0,
classifier="token",
conv_stem_layers: Union[List[ConvStemLayer], List[Dict], None] = None,
):
super().__init__()
assert image_size % patch_size == 0, "Input shape indivisible by patch size"
assert classifier in ["token", "gap"], "Unexpected classifier mode"
self.image_size = image_size
self.patch_size = patch_size
self.hidden_dim = hidden_dim
self.mlp_dim = mlp_dim
self.attention_dropout_rate = attention_dropout_rate
self.dropout_rate = dropout_rate
self.classifier = classifier
input_channels = 3
self.conv_stem_layers = conv_stem_layers
if conv_stem_layers is None:
# conv_proj is a more efficient version of reshaping, permuting and projecting
# the input
self.conv_proj = nn.Conv2d(
input_channels, hidden_dim, kernel_size=patch_size, stride=patch_size
)
else:
prev_channels = input_channels
self.conv_proj = nn.Sequential()
for i, conv_stem_layer in enumerate(conv_stem_layers):
if isinstance(conv_stem_layer, Mapping):
conv_stem_layer = ConvStemLayer(**conv_stem_layer)
kernel = conv_stem_layer.kernel
stride = conv_stem_layer.stride
out_channels = conv_stem_layer.out_channels
padding = get_same_padding_for_kernel_size(kernel)
self.conv_proj.add_module(
f"conv_{i}",
nn.Conv2d(
prev_channels,
out_channels,
kernel_size=kernel,
stride=stride,
padding=padding,
bias=False,
),
)
self.conv_proj.add_module(f"bn_{i}", nn.BatchNorm2d(out_channels))
self.conv_proj.add_module(f"relu_{i}", nn.ReLU())
prev_channels = out_channels
self.conv_proj.add_module(
f"conv_{i + 1}", nn.Conv2d(prev_channels, hidden_dim, kernel_size=1)
)
seq_length = (image_size // patch_size) ** 2
if self.classifier == "token":
# add a class token
self.class_token = nn.Parameter(torch.zeros(1, 1, hidden_dim))
seq_length += 1
self.encoder = Encoder(
seq_length,
num_layers,
num_heads,
hidden_dim,
mlp_dim,
dropout_rate,
attention_dropout_rate,
)
self.trunk_output = nn.Identity()
self.seq_length = seq_length
self.init_weights()
def init_weights(self):
if self.conv_stem_layers is None:
lecun_normal_init(
self.conv_proj.weight,
fan_in=self.conv_proj.in_channels
* self.conv_proj.kernel_size[0]
* self.conv_proj.kernel_size[1],
)
nn.init.zeros_(self.conv_proj.bias)
@classmethod
def from_config(cls, config):
config = copy.deepcopy(config)
config.pop("name")
config.pop("heads", None)
return cls(**config)
def forward(self, x: torch.Tensor):
assert x.ndim == 4, "Unexpected input shape"
n, c, h, w = x.shape
p = self.patch_size
assert h == w == self.image_size
n_h = h // p
n_w = w // p
# (n, c, h, w) -> (n, hidden_dim, n_h, n_w)
x = self.conv_proj(x)
# (n, hidden_dim, n_h, n_w) -> (n, hidden_dim, (n_h * n_w))
x = x.reshape(n, self.hidden_dim, n_h * n_w)
# (n, hidden_dim, (n_h * n_w)) -> ((n_h * n_w), n, hidden_dim)
# the self attention layer expects inputs in the format (S, N, E)
# where S is the source sequence length, N is the batch size, E is the
# embedding dimension
x = x.permute(2, 0, 1)
if self.classifier == "token":
# expand the class token to the full batch
batch_class_token = self.class_token.expand(-1, n, -1)
x = torch.cat([batch_class_token, x], dim=0)
x = self.encoder(x)
if self.classifier == "token":
# just return the output for the class token
x = x[0, :, :]
else:
x = x.mean(dim=0)
return self.trunk_output(x)
def set_classy_state(self, state, strict=True):
# shape of pos_embedding is (seq_length, 1, hidden_dim)
pos_embedding = state["model"]["trunk"]["encoder.pos_embedding"]
seq_length, n, hidden_dim = pos_embedding.shape
if n != 1:
raise ValueError(
f"Unexpected position embedding shape: {pos_embedding.shape}"
)
if hidden_dim != self.hidden_dim:
raise ValueError(
f"Position embedding hidden_dim incorrect: {hidden_dim}"
f", expected: {self.hidden_dim}"
)
new_seq_length = self.seq_length
if new_seq_length != seq_length:
# need to interpolate the weights for the position embedding
# we do this by reshaping the positions embeddings to a 2d grid, performing
# an interpolation in the (h, w) space and then reshaping back to a 1d grid
if self.classifier == "token":
# the class token embedding shouldn't be interpolated so we split it up
seq_length -= 1
new_seq_length -= 1
pos_embedding_token = pos_embedding[:1, :, :]
pos_embedding_img = pos_embedding[1:, :, :]
else:
pos_embedding_token = pos_embedding[:0, :, :] # empty data
pos_embedding_img = pos_embedding
# (seq_length, 1, hidden_dim) -> (1, hidden_dim, seq_length)
pos_embedding_img = pos_embedding_img.permute(1, 2, 0)
seq_length_1d = int(math.sqrt(seq_length))
assert (
seq_length_1d * seq_length_1d == seq_length
), "seq_length is not a perfect square"
logging.info(
"Interpolating the position embeddings from image "
f"{seq_length_1d * self.patch_size} to size {self.image_size}"
)
# (1, hidden_dim, seq_length) -> (1, hidden_dim, seq_l_1d, seq_l_1d)
pos_embedding_img = pos_embedding_img.reshape(
1, hidden_dim, seq_length_1d, seq_length_1d
)
new_seq_length_1d = self.image_size // self.patch_size
# use bicubic interpolation - it gives significantly better results in
# the test `test_resolution_change`
new_pos_embedding_img = torch.nn.functional.interpolate(
pos_embedding_img,
size=new_seq_length_1d,
mode="bicubic",
align_corners=True,
)
# (1, hidden_dim, new_seq_l_1d, new_seq_l_1d) -> (1, hidden_dim, new_seq_l)
new_pos_embedding_img = new_pos_embedding_img.reshape(
1, hidden_dim, new_seq_length
)
# (1, hidden_dim, new_seq_length) -> (new_seq_length, 1, hidden_dim)
new_pos_embedding_img = new_pos_embedding_img.permute(2, 0, 1)
new_pos_embedding = torch.cat(
[pos_embedding_token, new_pos_embedding_img], dim=0
)
state["model"]["trunk"]["encoder.pos_embedding"] = new_pos_embedding
super().set_classy_state(state, strict=strict)
@property
def input_shape(self):
return (3, self.image_size, self.image_size)
@register_model("vit_b_32")
class ViTB32(VisionTransformer):
def __init__(
self,
image_size=224,
dropout_rate=0,
attention_dropout_rate=0,
classifier="token",
):
super().__init__(
image_size=image_size,
patch_size=32,
num_layers=12,
num_heads=12,
hidden_dim=768,
mlp_dim=3072,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate,
classifier=classifier,
)
@register_model("vit_b_16")
class ViTB16(VisionTransformer):
def __init__(
self,
image_size=224,
dropout_rate=0,
attention_dropout_rate=0,
classifier="token",
):
super().__init__(
image_size=image_size,
patch_size=16,
num_layers=12,
num_heads=12,
hidden_dim=768,
mlp_dim=3072,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate,
classifier=classifier,
)
@register_model("vit_l_32")
class ViTL32(VisionTransformer):
def __init__(
self,
image_size=224,
dropout_rate=0,
attention_dropout_rate=0,
classifier="token",
):
super().__init__(
image_size=image_size,
patch_size=32,
num_layers=24,
num_heads=16,
hidden_dim=1024,
mlp_dim=4096,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate,
classifier=classifier,
)
@register_model("vit_l_16")
class ViTL16(VisionTransformer):
def __init__(
self,
image_size=224,
dropout_rate=0,
attention_dropout_rate=0,
classifier="token",
):
super().__init__(
image_size=image_size,
patch_size=16,
num_layers=24,
num_heads=16,
hidden_dim=1024,
mlp_dim=4096,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate,
classifier=classifier,
)
@register_model("vit_h_14")
class ViTH14(VisionTransformer):
def __init__(
self,
image_size=224,
dropout_rate=0,
attention_dropout_rate=0,
classifier="token",
):
super().__init__(
image_size=image_size,
patch_size=14,
num_layers=32,
num_heads=16,
hidden_dim=1280,
mlp_dim=5120,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate,
classifier=classifier,
)
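# --- Editor's note: an illustrative shape-check sketch, not part of the original source. ---
# With patch_size=32 and image_size=224, the encoder sees (224 // 32) ** 2 = 49 patch
# tokens plus one class token; the trunk output is the hidden_dim-sized class-token
# embedding, on top of which a classification head is normally attached.
def _example_vit_forward():
    model = ViTB32(image_size=224)
    features = model(torch.randn(2, 3, 224, 224))
    assert features.shape == (2, 768)
    return features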
| ClassyVision-main | classy_vision/models/vision_transformer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
import torch.nn as nn
class ClassyBlock(nn.Module):
"""
This is a thin wrapper for head execution, which records the output of
wrapped module for executing the heads forked from this module.
"""
def __init__(self, name, module):
super().__init__()
self.name = name
self.output = torch.zeros(0)
self._module = module
# `ClassyBlock` isn't thread safe since it saves state. To avoid
# doing this, the recommended workflow is to set `Model.wrapper_cls = None`
# before instantiation (see the docs for `ClassyModel`). We support this
# environment variable for older use cases but using it is not recommended.
self._is_output_stateless = os.environ.get("CLASSY_BLOCK_STATELESS") == "1"
def wrapped_module(self):
return self._module
def forward(self, input):
if hasattr(self, "_is_output_stateless"):
if self._is_output_stateless:
return self._module(input)
output = self._module(input)
self.output = output
return output
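# --- Editor's note: an illustrative sketch, not part of the original source. ---
# The wrapper is transparent for the forward pass; it only records the wrapped
# module's output so that heads forked from this block can consume it later.
def _example_classy_block():
    block = ClassyBlock("layer1", nn.Linear(4, 2))
    out = block(torch.randn(3, 4))
    # The cached output is what the heads read later, unless CLASSY_BLOCK_STATELESS=1.
    assert out is block.output
    return out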
| ClassyVision-main | classy_vision/models/classy_block.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch.nn as nn
class SqueezeAndExcitationLayer(nn.Module):
"""Squeeze and excitation layer, as per https://arxiv.org/pdf/1709.01507.pdf"""
def __init__(
self,
in_planes,
reduction_ratio: Optional[int] = 16,
reduced_planes: Optional[int] = None,
activation: Optional[nn.Module] = None,
):
super().__init__()
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# Exactly one of reduction_ratio and reduced_planes must be defined,
# not both and not neither
assert bool(reduction_ratio) != bool(reduced_planes)
if activation is None:
activation = nn.ReLU()
reduced_planes = (
in_planes // reduction_ratio if reduced_planes is None else reduced_planes
)
self.excitation = nn.Sequential(
nn.Conv2d(in_planes, reduced_planes, kernel_size=1, stride=1, bias=True),
activation,
nn.Conv2d(reduced_planes, in_planes, kernel_size=1, stride=1, bias=True),
nn.Sigmoid(),
)
def forward(self, x):
x_squeezed = self.avgpool(x)
x_excited = self.excitation(x_squeezed)
x_scaled = x * x_excited
return x_scaled
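# --- Editor's note: an illustrative sketch, not part of the original source. ---
# Exactly one of `reduction_ratio` and `reduced_planes` may be given; here the 64
# input channels are squeezed down to 64 // 16 = 4 channels inside the excitation MLP,
# and the output has the same shape as the input.
def _example_squeeze_and_excitation():
    import torch  # not imported at module level; only needed for this sketch

    se = SqueezeAndExcitationLayer(in_planes=64, reduction_ratio=16)
    x = torch.randn(2, 64, 8, 8)
    out = se(x)
    assert out.shape == x.shape
    return out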
| ClassyVision-main | classy_vision/models/squeeze_and_excitation_layer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# TODO: Some batch-normalization operations are superfluous and can be removed.
# dependencies:
import math
from collections import OrderedDict
from typing import Any, Dict, Sequence
import torch
import torch.nn as nn
from classy_vision.generic.util import is_pos_int
from . import register_model
from .classy_model import ClassyModel
from .squeeze_and_excitation_layer import SqueezeAndExcitationLayer
# global setting for in-place ReLU:
INPLACE = True
class _DenseLayer(nn.Sequential):
"""Single layer of a DenseNet."""
def __init__(
self,
in_planes,
growth_rate=32,
expansion=4,
use_se=False,
se_reduction_ratio=16,
):
# assertions:
assert is_pos_int(in_planes)
assert is_pos_int(growth_rate)
assert is_pos_int(expansion)
# add all layers to layer
super(_DenseLayer, self).__init__()
intermediate = expansion * growth_rate
self.add_module("norm-1", nn.BatchNorm2d(in_planes))
self.add_module("relu-1", nn.ReLU(inplace=INPLACE))
self.add_module(
"conv-1",
nn.Conv2d(in_planes, intermediate, kernel_size=1, stride=1, bias=False),
)
self.add_module("norm-2", nn.BatchNorm2d(intermediate))
self.add_module("relu-2", nn.ReLU(inplace=INPLACE))
self.add_module(
"conv-2",
nn.Conv2d(
intermediate,
growth_rate,
kernel_size=3,
stride=1,
padding=1,
bias=False,
),
)
if use_se:
self.add_module(
"se",
SqueezeAndExcitationLayer(
growth_rate, reduction_ratio=se_reduction_ratio
),
)
def forward(self, x):
new_features = super(_DenseLayer, self).forward(x)
return torch.cat([x, new_features], 1)
class _Transition(nn.Sequential):
"""
Transition layer to reduce spatial resolution.
"""
def __init__(self, in_planes, out_planes, reduction=2):
# assertions:
assert is_pos_int(in_planes)
assert is_pos_int(out_planes)
assert is_pos_int(reduction)
# create layers for pooling:
super(_Transition, self).__init__()
self.add_module("pool-norm", nn.BatchNorm2d(in_planes))
self.add_module("pool-relu", nn.ReLU(inplace=INPLACE))
self.add_module(
"pool-conv",
nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, bias=False),
)
self.add_module(
"pool-pool", nn.AvgPool2d(kernel_size=reduction, stride=reduction)
)
@register_model("densenet")
class DenseNet(ClassyModel):
def __init__(
self,
num_blocks,
num_classes,
init_planes,
growth_rate,
expansion,
small_input,
final_bn_relu,
use_se=False,
se_reduction_ratio=16,
):
"""
Implementation of a standard densely connected network (DenseNet).
Contains the following attachable blocks:
block{block_idx}-{idx}: This is the output of each dense block,
indexed by the block index and the index of the dense layer
transition-{idx}: This is the output of the transition layers
trunk_output: The final output of the `DenseNet`. This is
where a `fully_connected` head is normally attached.
Args:
small_input: set to `True` for 32x32 sized image inputs.
final_bn_relu: set to `False` to exclude the final batchnorm and
ReLU layers. These settings are useful when training Siamese
networks.
use_se: Enable squeeze and excitation
se_reduction_ratio: The reduction ratio to apply in the excitation
stage. Only used if `use_se` is `True`.
"""
super().__init__()
# assertions:
assert isinstance(num_blocks, Sequence)
assert all(is_pos_int(b) for b in num_blocks)
assert num_classes is None or is_pos_int(num_classes)
assert is_pos_int(init_planes)
assert is_pos_int(growth_rate)
assert is_pos_int(expansion)
assert type(small_input) == bool
# initial convolutional block:
self._num_classes = num_classes
self.num_blocks = num_blocks
self.small_input = small_input
if self.small_input:
self.initial_block = nn.Sequential(
nn.Conv2d(
3, init_planes, kernel_size=3, stride=1, padding=1, bias=False
),
nn.BatchNorm2d(init_planes),
nn.ReLU(inplace=INPLACE),
)
else:
self.initial_block = nn.Sequential(
nn.Conv2d(
3, init_planes, kernel_size=7, stride=2, padding=3, bias=False
),
nn.BatchNorm2d(init_planes),
nn.ReLU(inplace=INPLACE),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
# loop over spatial resolutions:
num_planes = init_planes
blocks = nn.Sequential()
for idx, num_layers in enumerate(num_blocks):
# add dense block
block = self._make_dense_block(
num_layers,
num_planes,
idx,
growth_rate=growth_rate,
expansion=expansion,
use_se=use_se,
se_reduction_ratio=se_reduction_ratio,
)
blocks.add_module(f"block_{idx}", block)
num_planes = num_planes + num_layers * growth_rate
# add transition layer:
if idx != len(num_blocks) - 1:
trans = _Transition(num_planes, num_planes // 2)
blocks.add_module(f"transition-{idx}", trans)
num_planes = num_planes // 2
blocks.add_module(
"trunk_output", self._make_trunk_output_block(num_planes, final_bn_relu)
)
self.features = blocks
# initialize weights of convolutional and batchnorm layers:
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _make_trunk_output_block(self, num_planes, final_bn_relu):
layers = nn.Sequential()
if final_bn_relu:
# final batch normalization:
layers.add_module("norm-final", nn.BatchNorm2d(num_planes))
layers.add_module("relu-final", nn.ReLU(inplace=INPLACE))
return layers
def _make_dense_block(
self,
num_layers,
in_planes,
block_idx,
growth_rate=32,
expansion=4,
use_se=False,
se_reduction_ratio=16,
):
assert is_pos_int(in_planes)
assert is_pos_int(growth_rate)
assert is_pos_int(expansion)
# create a block of dense layers at same resolution:
layers = OrderedDict()
for idx in range(num_layers):
layers[f"block{block_idx}-{idx}"] = _DenseLayer(
in_planes + idx * growth_rate,
growth_rate=growth_rate,
expansion=expansion,
use_se=use_se,
se_reduction_ratio=se_reduction_ratio,
)
return nn.Sequential(layers)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "DenseNet":
"""Instantiates a DenseNet from a configuration.
Args:
config: A configuration for a DenseNet.
See :func:`__init__` for parameters expected in the config.
Returns:
A DenseNet instance.
"""
assert "num_blocks" in config
config = {
"num_blocks": config["num_blocks"],
"num_classes": config.get("num_classes"),
"init_planes": config.get("init_planes", 64),
"growth_rate": config.get("growth_rate", 32),
"expansion": config.get("expansion", 4),
"small_input": config.get("small_input", False),
"final_bn_relu": config.get("final_bn_relu", True),
"use_se": config.get("use_se", False),
"se_reduction_ratio": config.get("se_reduction_ratio", 16),
}
return cls(**config)
# forward pass in DenseNet:
def forward(self, x):
# initial convolutional block:
out = self.initial_block(x)
# evaluate all dense blocks:
out = self.features(out)
return out
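# --- Editor's note: an illustrative sketch, not part of the original source. ---
# The block sizes below follow the DenseNet-121 layout. With the default init_planes=64
# and growth_rate=32, the trunk output for a 224x224 input is a (N, 1024, 7, 7) feature
# map; a `fully_connected` head is normally attached on top of it.
def _example_densenet121_trunk():
    densenet = DenseNet.from_config(
        {"num_blocks": [6, 12, 24, 16], "small_input": False}
    )
    out = densenet(torch.randn(1, 3, 224, 224))
    assert out.shape == (1, 1024, 7, 7)
    return out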
| ClassyVision-main | classy_vision/models/densenet.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import traceback
from collections import defaultdict
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
from classy_vision.heads import build_head
from .classy_model import ClassyModel
FILE_ROOT = Path(__file__).parent
MODEL_REGISTRY = {}
MODEL_CLASS_NAMES = set()
MODEL_REGISTRY_TB = {}
MODEL_CLASS_NAMES_TB = {}
def register_model(name, bypass_checks=False):
"""Registers a :class:`ClassyModel` subclass.
This decorator allows Classy Vision to instantiate a subclass of
:class:`ClassyModel` from a configuration file, even if the class itself is
not part of the Classy Vision framework. To use it, apply this decorator to
a ClassyModel subclass, like this:
.. code-block:: python
@register_model('resnet')
class ResidualNet(ClassyModel):
...
To instantiate a model from a configuration file, see
:func:`build_model`."""
def register_model_cls(cls):
if not bypass_checks:
if name in MODEL_REGISTRY:
msg = (
"Cannot register duplicate model ({}). Already registered at \n{}\n"
)
raise ValueError(msg.format(name, MODEL_REGISTRY_TB[name]))
if not issubclass(cls, ClassyModel):
raise ValueError(
"Model ({}: {}) must extend ClassyModel".format(name, cls.__name__)
)
if cls.__name__ in MODEL_CLASS_NAMES:
msg = (
"Cannot register model with duplicate class name({})."
+ "Previously registered at \n{}\n"
)
raise ValueError(
msg.format(cls.__name__, MODEL_CLASS_NAMES_TB[cls.__name__])
)
tb = "".join(traceback.format_stack())
MODEL_REGISTRY[name] = cls
MODEL_CLASS_NAMES.add(cls.__name__)
MODEL_REGISTRY_TB[name] = tb
MODEL_CLASS_NAMES_TB[cls.__name__] = tb
return cls
return register_model_cls
def build_model(config):
"""Builds a ClassyModel from a config.
This assumes a 'name' key in the config which is used to determine what
model class to instantiate. For instance, a config `{"name": "my_model",
"foo": "bar"}` will find a class that was registered as "my_model"
(see :func:`register_model`) and call .from_config on it."""
assert config["name"] in MODEL_REGISTRY, f"unknown model: {config['name']}"
model = MODEL_REGISTRY[config["name"]].from_config(config)
if "heads" in config:
heads = defaultdict(list)
for head_config in config["heads"]:
assert "fork_block" in head_config, "Expect fork_block in config"
fork_block = head_config["fork_block"]
updated_config = copy.deepcopy(head_config)
del updated_config["fork_block"]
head = build_head(updated_config)
heads[fork_block].append(head)
model.set_heads(heads)
return model
# automatically import any Python files in the models/ directory
import_all_modules(FILE_ROOT, "classy_vision.models")
from .anynet import AnyNet # isort:skip
from .classy_block import ClassyBlock
from .classy_model import ( # isort:skip
ClassyModelWrapper, # isort:skip
ClassyModelHeadExecutorWrapper, # isort:skip
) # isort:skip
from .densenet import DenseNet # isort:skip
from .efficientnet import EfficientNet # isort:skip
from .lecun_normal_init import lecun_normal_init # isort:skip
from .mlp import MLP # isort:skip
from .regnet import RegNet # isort:skip
from .resnet import ResNet # isort:skip
from .resnext import ResNeXt # isort:skip
from .resnext3d import ResNeXt3D # isort:skip
from .squeeze_and_excitation_layer import SqueezeAndExcitationLayer # isort:skip
from .vision_transformer import VisionTransformer # isort:skip
__all__ = [
"AnyNet",
"ClassyBlock",
"ClassyModel",
"ClassyModelHeadExecutorWrapper",
"ClassyModelWrapper",
"DenseNet",
"EfficientNet",
"MLP",
"RegNet",
"ResNet",
"ResNeXt",
"ResNeXt3D",
"SqueezeAndExcitationLayer",
"VisionTransformer",
"build_model",
"lecun_normal_init",
"register_model",
]
| ClassyVision-main | classy_vision/models/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""MLP model."""
from typing import Any, Dict
import torch.nn as nn
from . import register_model
from .classy_model import ClassyModel
@register_model("mlp")
class MLP(ClassyModel):
"""MLP model using ReLU. Useful for testing on CPUs."""
def __init__(
self,
input_dim,
output_dim,
hidden_dims,
dropout,
first_dropout,
use_batchnorm,
first_batchnorm,
):
super().__init__()
layers = []
# If first_batchnorm is set, must be using batchnorm
assert not first_batchnorm or use_batchnorm
self._num_inputs = input_dim
self._num_classes = output_dim
self._model_depth = len(hidden_dims) + 1
if dropout > 0 and first_dropout:
layers.append(nn.Dropout(p=dropout))
if use_batchnorm and first_batchnorm:
layers.append(nn.BatchNorm1d(input_dim))
for dim in hidden_dims:
layers.append(nn.Linear(input_dim, dim))
if use_batchnorm:
layers.append(nn.BatchNorm1d(dim))
if dropout > 0:
layers.append(nn.Dropout(p=dropout))
layers.append(nn.ReLU(inplace=True))
input_dim = dim
layers.append(nn.Linear(input_dim, output_dim))
self.mlp = nn.Sequential(*layers)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "MLP":
"""Instantiates a MLP from a configuration.
Args:
config: A configuration for a MLP.
See :func:`__init__` for parameters expected in the config.
Returns:
A MLP instance.
"""
        assert all(
            key in config for key in ["input_dim", "output_dim", "hidden_dims"]
        ), "MLP config requires input_dim, output_dim and hidden_dims"
output_dim = config["output_dim"]
return cls(
input_dim=config["input_dim"],
output_dim=output_dim,
hidden_dims=config["hidden_dims"],
dropout=config.get("dropout", 0),
first_dropout=config.get("first_dropout", False),
use_batchnorm=config.get("use_batchnorm", False),
first_batchnorm=config.get("first_batchnorm", False),
)
def forward(self, x):
batchsize_per_replica = x.shape[0]
out = x.view(batchsize_per_replica, -1)
out = self.mlp(out)
return out
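# Minimal usage sketch (illustrative only; assumes the classy_vision package and
# torch are importable). It builds a small MLP from a config dict and checks the
# output shape of one forward pass.
if __name__ == "__main__":
    import torch
    example_model = MLP.from_config(
        {"input_dim": 16, "output_dim": 4, "hidden_dims": [32]}
    )
    example_inputs = torch.randn(8, 16)
    example_outputs = example_model(example_inputs)
    # the trailing Linear layer maps the hidden dimension to output_dim
    print("MLP output shape:", tuple(example_outputs.shape))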
| ClassyVision-main | classy_vision/models/mlp.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import math
from collections import OrderedDict
from typing import Any, Dict, NamedTuple, Optional
import torch
import torch.nn as nn
from classy_vision.generic.util import get_torch_version
from classy_vision.models import ClassyModel, register_model
class BlockParams(NamedTuple):
num_repeat: int
kernel_size: int
stride: int
expand_ratio: float
input_filters: int
output_filters: int
se_ratio: float
id_skip: bool
class EfficientNetParams(NamedTuple):
width_coefficient: float
depth_coefficient: float
resolution: int
dropout_rate: float
BLOCK_PARAMS = [
BlockParams(1, 3, 1, 1, 32, 16, 0.25, True),
BlockParams(2, 3, 2, 6, 16, 24, 0.25, True),
BlockParams(2, 5, 2, 6, 24, 40, 0.25, True),
BlockParams(3, 3, 2, 6, 40, 80, 0.25, True),
BlockParams(3, 5, 1, 6, 80, 112, 0.25, True),
BlockParams(4, 5, 2, 6, 112, 192, 0.25, True),
BlockParams(1, 3, 1, 6, 192, 320, 0.25, True),
]
MODEL_PARAMS = {
"B0": EfficientNetParams(1.0, 1.0, 224, 0.2),
"B1": EfficientNetParams(1.0, 1.1, 240, 0.2),
"B2": EfficientNetParams(1.1, 1.2, 260, 0.3),
"B3": EfficientNetParams(1.2, 1.4, 300, 0.3),
"B4": EfficientNetParams(1.4, 1.8, 380, 0.4),
"B5": EfficientNetParams(1.6, 2.2, 456, 0.4),
"B6": EfficientNetParams(1.8, 2.6, 528, 0.5),
"B7": EfficientNetParams(2.0, 3.1, 600, 0.5),
}
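# Note (added for clarity): each entry pairs the compound-scaling coefficients
# with the training resolution and dropout rate. For example, "B0" uses width
# x1.0, depth x1.0, 224x224 inputs and dropout 0.2, while "B7" scales width by
# 2.0 and depth by 3.1 at 600x600 with dropout 0.5. BLOCK_PARAMS above is the
# unscaled B0 backbone that these coefficients expand via scale_width and
# scale_depth below.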
def swish(x):
"""
Swish activation function.
"""
return x * torch.sigmoid(x)
def drop_connect(inputs, is_training, drop_connect_rate):
"""
Apply drop connect to random inputs in a batch.
"""
if not is_training:
return inputs
keep_prob = 1 - drop_connect_rate
# compute drop connect tensor
batch_size = inputs.shape[0]
random_tensor = keep_prob
random_tensor += torch.rand(
[batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device
)
binary_tensor = torch.floor(random_tensor)
outputs = (inputs / keep_prob) * binary_tensor
return outputs
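# Worked example (added for clarity): with drop_connect_rate=0.2, keep_prob is
# 0.8, so during training each sample in the batch is zeroed with probability
# 0.2 and the surviving samples are rescaled by 1 / 0.8 to keep the expected
# activation unchanged; at evaluation time the input is returned untouched.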
def scale_width(num_filters, width_coefficient, width_divisor, min_width):
"""
Calculates the scaled number of filters based on the width coefficient and
rounds the result by the width divisor.
"""
if not width_coefficient:
return num_filters
num_filters *= width_coefficient
min_width = min_width or width_divisor
new_filters = max(
min_width,
(int(num_filters + width_divisor / 2) // width_divisor) * width_divisor,
)
# Do not round down by more than 10%
if new_filters < 0.9 * num_filters:
new_filters += width_divisor
return int(new_filters)
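# Worked example (added for clarity): scale_width(32, 1.1, 8, None) scales
# 32 -> 35.2 and rounds to the nearest multiple of the divisor,
# (int(35.2 + 4) // 8) * 8 = 32; since 32 is within 10% of 35.2, no extra
# divisor is added and 32 is returned.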
def scale_depth(num_repeats, depth_coefficient):
"""
Calculates the scaled number of repeats based on the depth coefficient.
"""
if not depth_coefficient:
return num_repeats
return int(math.ceil(depth_coefficient * num_repeats))
def get_same_padding_for_kernel_size(kernel_size):
"""
Returns the required padding for "same" style convolutions
"""
if kernel_size % 2 == 0:
raise ValueError(f"Only odd sized kernels are supported, got {kernel_size}")
return (kernel_size - 1) // 2
class MBConvBlock(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block.
"""
def __init__(
self,
input_filters: int,
output_filters: int,
expand_ratio: float,
kernel_size: int,
stride: int,
se_ratio: float,
id_skip: bool,
use_se: bool,
bn_momentum: float,
bn_epsilon: float,
):
assert se_ratio is None or (0 < se_ratio <= 1)
super().__init__()
self.bn_momentum = bn_momentum
self.bn_epsilon = bn_epsilon
self.has_se = use_se and se_ratio is not None
self.se_avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.id_skip = id_skip
self.expand_ratio = expand_ratio
self.stride = stride
self.input_filters = input_filters
self.output_filters = output_filters
self.relu_fn = swish if get_torch_version() < [1, 7] else nn.SiLU()
# used to track the depth of the block
self.depth = 0
# Expansion phase
expanded_filters = input_filters * expand_ratio
if expand_ratio != 1:
self.expand_conv = nn.Conv2d(
in_channels=input_filters,
out_channels=expanded_filters,
kernel_size=1,
stride=1,
padding=0,
bias=False,
)
self.bn0 = nn.BatchNorm2d(
num_features=expanded_filters,
momentum=self.bn_momentum,
eps=self.bn_epsilon,
)
self.depth += 1
# Depthwise convolution phase
self.depthwise_conv = nn.Conv2d(
in_channels=expanded_filters,
out_channels=expanded_filters,
groups=expanded_filters,
kernel_size=kernel_size,
stride=stride,
padding=get_same_padding_for_kernel_size(kernel_size),
bias=False,
)
self.bn1 = nn.BatchNorm2d(
num_features=expanded_filters,
momentum=self.bn_momentum,
eps=self.bn_epsilon,
)
self.depth += 1
if self.has_se:
# Squeeze and Excitation layer
num_reduced_filters = max(1, int(input_filters * se_ratio))
self.se_reduce = nn.Conv2d(
in_channels=expanded_filters,
out_channels=num_reduced_filters,
kernel_size=1,
stride=1,
padding=0,
bias=True,
)
self.se_expand = nn.Conv2d(
in_channels=num_reduced_filters,
out_channels=expanded_filters,
kernel_size=1,
stride=1,
padding=0,
bias=True,
)
self.depth += 2
# Output phase
self.project_conv = nn.Conv2d(
in_channels=expanded_filters,
out_channels=output_filters,
kernel_size=1,
stride=1,
padding=0,
bias=False,
)
self.bn2 = nn.BatchNorm2d(
num_features=output_filters, momentum=self.bn_momentum, eps=self.bn_epsilon
)
self.depth += 1
def forward(self, inputs, drop_connect_rate=None):
# Expansion and Depthwise Convolution
if self.expand_ratio != 1:
x = self.relu_fn(self.bn0(self.expand_conv(inputs)))
else:
x = inputs
x = self.relu_fn(self.bn1(self.depthwise_conv(x)))
# Squeeze and Excitation
if self.has_se:
# squeeze x in the spatial dimensions
x_squeezed = self.se_avgpool(x)
x_expanded = self.se_expand(self.relu_fn(self.se_reduce(x_squeezed)))
x = torch.sigmoid(x_expanded) * x
x = self.bn2(self.project_conv(x))
# Skip connection and Drop Connect
if self.id_skip:
if self.stride == 1 and self.input_filters == self.output_filters:
# only apply drop connect if a skip connection is present
if drop_connect_rate:
x = drop_connect(x, self.training, drop_connect_rate)
x = x + inputs
return x
@register_model("efficientnet")
class EfficientNet(ClassyModel):
"""
Implementation of EfficientNet, https://arxiv.org/pdf/1905.11946.pdf
References:
https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
https://github.com/lukemelas/EfficientNet-PyTorch
NOTE: the original implementation uses the names depth_divisor and min_depth
to refer to the number of channels, which is confusing, since the paper
refers to the channel dimension as width. We use the width_divisor and
min_width names instead.
"""
def __init__(
self,
num_classes: int,
model_params: EfficientNetParams,
bn_momentum: float,
bn_epsilon: float,
width_divisor: int,
min_width: Optional[int],
drop_connect_rate: float,
use_se: bool,
):
super().__init__()
self.num_classes = num_classes
self.image_resolution = model_params.resolution
# use the swish activation function
self.relu_fn = swish
# width and depth parameters
width_coefficient = model_params.width_coefficient
depth_coefficient = model_params.depth_coefficient
# drop connect rate
self.drop_connect_rate = drop_connect_rate
# input dimensions
in_channels = 3
# Stem
out_channels = 32
out_channels = scale_width(
out_channels, width_coefficient, width_divisor, min_width
)
self.conv_stem = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=2,
padding=1,
bias=False,
)
self.bn0 = nn.BatchNorm2d(
num_features=out_channels, momentum=bn_momentum, eps=bn_epsilon
)
# Build blocks
blocks = OrderedDict()
for block_idx, block_params in enumerate(BLOCK_PARAMS):
assert block_params.num_repeat > 0, "num_repeat has to be > 0"
# Update block input and output filters based on the width_coefficient,
# and the number of repeats based on the depth_coefficient
block_params = block_params._replace(
input_filters=scale_width(
block_params.input_filters,
width_coefficient,
width_divisor,
min_width,
),
output_filters=scale_width(
block_params.output_filters,
width_coefficient,
width_divisor,
min_width,
),
num_repeat=scale_depth(block_params.num_repeat, depth_coefficient),
)
block_name = f"block{block_idx}-0"
# The first block needs to take care of the stride and filter size increase
blocks[block_name] = MBConvBlock(
block_params.input_filters,
block_params.output_filters,
block_params.expand_ratio,
block_params.kernel_size,
block_params.stride,
block_params.se_ratio,
block_params.id_skip,
use_se,
bn_momentum,
bn_epsilon,
)
if block_params.num_repeat > 1:
block_params = block_params._replace(
input_filters=block_params.output_filters, stride=1
)
for i in range(1, block_params.num_repeat):
block_name = f"block{block_idx}-{i}"
blocks[block_name] = MBConvBlock(
block_params.input_filters,
block_params.output_filters,
block_params.expand_ratio,
block_params.kernel_size,
block_params.stride,
block_params.se_ratio,
block_params.id_skip,
use_se,
bn_momentum,
bn_epsilon,
)
self.blocks = nn.Sequential(blocks)
# Head
in_channels = block_params.output_filters
out_channels = 1280
out_channels = scale_width(
out_channels, width_coefficient, width_divisor, min_width
)
self.conv_head = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
bias=False,
)
self.bn1 = nn.BatchNorm2d(
num_features=out_channels, momentum=bn_momentum, eps=bn_epsilon
)
# add a trunk_output module to attach heads to
self.trunk_output = nn.Identity()
self.avg_pooling = nn.AdaptiveAvgPool2d((1, 1))
# Fully connected layer
self.fc = nn.Linear(out_channels, num_classes)
if model_params.dropout_rate > 0:
self.dropout = nn.Dropout(p=model_params.dropout_rate)
else:
self.dropout = None
# initialize weights
self.init_weights()
@classmethod
def from_config(cls, config):
"""Instantiates an EfficientNet from a configuration.
Args:
config: A configuration for an EfficientNet.
See :func:`__init__` for parameters expected in the config.
Returns:
            An EfficientNet instance.
"""
config = copy.deepcopy(config)
del config["name"]
if "heads" in config:
del config["heads"]
if "model_name" in config:
assert (
config["model_name"] in MODEL_PARAMS
), f"Unknown model_name: {config['model_name']}"
model_params = MODEL_PARAMS[config["model_name"]]
del config["model_name"]
else:
assert "model_params" in config, "Need either model_name or model_params"
model_params = EfficientNetParams(**config["model_params"])
config["model_params"] = model_params
return cls(**config)
def init_weights(self):
for module in self.modules():
if isinstance(module, nn.Conv2d):
kernel_height, kernel_width = module.kernel_size
out_channels = module.out_channels
fan_out = kernel_height * kernel_width * out_channels
nn.init.normal_(module.weight, mean=0.0, std=math.sqrt(2.0 / fan_out))
if module.bias is not None:
nn.init.constant_(module.bias, 0)
elif isinstance(module, nn.Linear):
init_range = 1.0 / math.sqrt(module.out_features)
nn.init.uniform_(module.weight, -init_range, init_range)
elif isinstance(module, nn.BatchNorm2d):
nn.init.constant_(module.weight, 1)
nn.init.constant_(module.bias, 0)
def forward(self, inputs):
# Stem
outputs = self.relu_fn(self.bn0(self.conv_stem(inputs)))
# Blocks
for idx, block in enumerate(self.blocks):
drop_connect_rate = self.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self.blocks)
outputs = block(outputs, drop_connect_rate=drop_connect_rate)
# Conv head
outputs = self.relu_fn(self.bn1(self.conv_head(outputs)))
# Trunk output (identity function)
outputs = self.trunk_output(outputs)
# Average Pooling
outputs = self.avg_pooling(outputs).view(outputs.size(0), -1)
# Dropout
if self.dropout is not None:
outputs = self.dropout(outputs)
# Fully connected layer
outputs = self.fc(outputs)
return outputs
@property
def input_shape(self):
return (3, self.image_resolution, self.image_resolution)
class _EfficientNet(EfficientNet):
def __init__(self, **kwargs):
super().__init__(
bn_momentum=0.01,
bn_epsilon=1e-3,
drop_connect_rate=0.2,
num_classes=1000,
width_divisor=8,
min_width=None,
use_se=True,
**kwargs,
)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "EfficientNet":
config = copy.deepcopy(config)
config.pop("name")
if "heads" in config:
config.pop("heads")
return cls(**config)
@register_model("efficientnet_b0")
class EfficientNetB0(_EfficientNet):
def __init__(self, **kwargs):
super().__init__(model_params=MODEL_PARAMS["B0"])
@register_model("efficientnet_b1")
class EfficientNetB1(_EfficientNet):
def __init__(self, **kwargs):
super().__init__(model_params=MODEL_PARAMS["B1"])
@register_model("efficientnet_b2")
class EfficientNetB2(_EfficientNet):
def __init__(self, **kwargs):
super().__init__(model_params=MODEL_PARAMS["B2"])
@register_model("efficientnet_b3")
class EfficientNetB3(_EfficientNet):
def __init__(self, **kwargs):
super().__init__(model_params=MODEL_PARAMS["B3"])
@register_model("efficientnet_b4")
class EfficientNetB4(_EfficientNet):
def __init__(self, **kwargs):
super().__init__(model_params=MODEL_PARAMS["B4"])
@register_model("efficientnet_b5")
class EfficientNetB5(_EfficientNet):
def __init__(self, **kwargs):
super().__init__(model_params=MODEL_PARAMS["B5"])
@register_model("efficientnet_b6")
class EfficientNetB6(_EfficientNet):
def __init__(self, **kwargs):
super().__init__(model_params=MODEL_PARAMS["B6"])
@register_model("efficientnet_b7")
class EfficientNetB7(_EfficientNet):
def __init__(self, **kwargs):
super().__init__(model_params=MODEL_PARAMS["B7"])
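# Illustrative smoke test (a sketch, not from the original file). It builds the
# registered "efficientnet_b0" model from a minimal config and runs one forward
# pass on a random batch at the B0 native resolution of 224x224.
if __name__ == "__main__":
    example_model = EfficientNetB0.from_config({"name": "efficientnet_b0"})
    example_images = torch.randn(2, 3, 224, 224)
    example_logits = example_model(example_images)
    # with the default fully connected layer the output is one logit per ImageNet class
    print("EfficientNet-B0 output shape:", tuple(example_logits.shape))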
| ClassyVision-main | classy_vision/models/efficientnet.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Implementation of ResNeXt (https://arxiv.org/pdf/1611.05431.pdf)
"""
import copy
import math
import re
import warnings
from collections import OrderedDict
from typing import Any, Dict, Optional, Sequence
import torch.nn as nn
from classy_vision.generic.util import is_pos_int, is_pos_int_tuple
from . import register_model
from .classy_model import ClassyModel
from .squeeze_and_excitation_layer import SqueezeAndExcitationLayer
# version number for the current implementation
VERSION = 0.2
# global setting for in-place ReLU:
INPLACE = True
def conv3x3(in_planes, out_planes, stride=1, groups=1):
"""helper function for constructing 3x3 grouped convolution"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
groups=groups,
bias=False,
)
def conv1x1(in_planes, out_planes, stride=1):
"""helper function for constructing 1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class GenericLayer(nn.Module):
"""
Parent class for 2-layer (BasicLayer) and 3-layer (BottleneckLayer)
bottleneck layer class
"""
def __init__(
self,
convolutional_block,
in_planes,
out_planes,
stride=1,
mid_planes_and_cardinality=None,
reduction=4,
final_bn_relu=True,
use_se=False,
se_reduction_ratio=16,
):
# assertions on inputs:
assert is_pos_int(in_planes) and is_pos_int(out_planes)
assert (is_pos_int(stride) or is_pos_int_tuple(stride)) and is_pos_int(
reduction
)
# set object fields:
super(GenericLayer, self).__init__()
self.convolutional_block = convolutional_block
self.final_bn_relu = final_bn_relu
# final batchnorm and relu layer:
if final_bn_relu:
self.bn = nn.BatchNorm2d(out_planes)
self.relu = nn.ReLU(inplace=INPLACE)
# define down-sampling layer (if direct residual impossible):
self.downsample = None
if (stride != 1 and stride != (1, 1)) or in_planes != out_planes:
self.downsample = nn.Sequential(
conv1x1(in_planes, out_planes, stride=stride),
nn.BatchNorm2d(out_planes),
)
self.se = (
SqueezeAndExcitationLayer(out_planes, reduction_ratio=se_reduction_ratio)
if use_se
else None
)
def forward(self, x):
# if required, perform downsampling along shortcut connection:
if self.downsample is None:
residual = x
else:
residual = self.downsample(x)
# forward pass through convolutional block:
out = self.convolutional_block(x)
if self.final_bn_relu:
out = self.bn(out)
if self.se is not None:
out = self.se(out)
        # add residual connection, perform relu + batchnorm, and return result:
out += residual
if self.final_bn_relu:
out = self.relu(out)
return out
class BasicLayer(GenericLayer):
"""
ResNeXt layer with `in_planes` input planes and `out_planes`
output planes.
"""
def __init__(
self,
in_planes,
out_planes,
stride=1,
mid_planes_and_cardinality=None,
reduction=1,
final_bn_relu=True,
use_se=False,
se_reduction_ratio=16,
):
# assertions on inputs:
assert is_pos_int(in_planes) and is_pos_int(out_planes)
assert (is_pos_int(stride) or is_pos_int_tuple(stride)) and is_pos_int(
reduction
)
# define convolutional block:
convolutional_block = nn.Sequential(
conv3x3(in_planes, out_planes, stride=stride),
nn.BatchNorm2d(out_planes),
nn.ReLU(inplace=INPLACE),
conv3x3(out_planes, out_planes),
)
# call constructor of generic layer:
super().__init__(
convolutional_block,
in_planes,
out_planes,
stride=stride,
reduction=reduction,
final_bn_relu=final_bn_relu,
use_se=use_se,
se_reduction_ratio=se_reduction_ratio,
)
class BottleneckLayer(GenericLayer):
"""
ResNeXt bottleneck layer with `in_planes` input planes, `out_planes`
output planes, and a bottleneck `reduction`.
"""
def __init__(
self,
in_planes,
out_planes,
stride=1,
mid_planes_and_cardinality=None,
reduction=4,
final_bn_relu=True,
use_se=False,
se_reduction_ratio=16,
):
# assertions on inputs:
assert is_pos_int(in_planes) and is_pos_int(out_planes)
assert (is_pos_int(stride) or is_pos_int_tuple(stride)) and is_pos_int(
reduction
)
# define convolutional layers:
bottleneck_planes = int(math.ceil(out_planes / reduction))
cardinality = 1
if mid_planes_and_cardinality is not None:
mid_planes, cardinality = mid_planes_and_cardinality
bottleneck_planes = mid_planes * cardinality
convolutional_block = nn.Sequential(
conv1x1(in_planes, bottleneck_planes),
nn.BatchNorm2d(bottleneck_planes),
nn.ReLU(inplace=INPLACE),
conv3x3(
bottleneck_planes, bottleneck_planes, stride=stride, groups=cardinality
),
nn.BatchNorm2d(bottleneck_planes),
nn.ReLU(inplace=INPLACE),
conv1x1(bottleneck_planes, out_planes),
)
# call constructor of generic layer:
super(BottleneckLayer, self).__init__(
convolutional_block,
in_planes,
out_planes,
stride=stride,
reduction=reduction,
final_bn_relu=final_bn_relu,
use_se=use_se,
se_reduction_ratio=se_reduction_ratio,
)
class SmallInputInitialBlock(nn.Module):
"""
ResNeXt initial block for small input with `in_planes` input planes
"""
def __init__(self, init_planes):
super().__init__()
self._module = nn.Sequential(
conv3x3(3, init_planes, stride=1),
nn.BatchNorm2d(init_planes),
nn.ReLU(inplace=INPLACE),
)
def forward(self, x):
return self._module(x)
class InitialBlock(nn.Module):
"""
ResNeXt initial block with `in_planes` input planes
"""
def __init__(self, init_planes):
super().__init__()
self._module = nn.Sequential(
nn.Conv2d(3, init_planes, kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(init_planes),
nn.ReLU(inplace=INPLACE),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
def forward(self, x):
return self._module(x)
@register_model("resnext")
class ResNeXt(ClassyModel):
__jit_unused_properties__ = ClassyModel.__jit_unused_properties__ + ["model_depth"]
def __init__(
self,
num_blocks,
init_planes: int = 64,
reduction: int = 4,
small_input: bool = False,
zero_init_bn_residuals: bool = False,
base_width_and_cardinality: Optional[Sequence] = None,
basic_layer: bool = False,
final_bn_relu: bool = True,
use_se: bool = False,
se_reduction_ratio: int = 16,
):
"""
Implementation of `ResNeXt <https://arxiv.org/pdf/1611.05431.pdf>`_.
Args:
small_input: set to `True` for 32x32 sized image inputs.
final_bn_relu: set to `False` to exclude the final batchnorm and
ReLU layers. These settings are useful when training Siamese
networks.
use_se: Enable squeeze and excitation
se_reduction_ratio: The reduction ratio to apply in the excitation
stage. Only used if `use_se` is `True`.
"""
super().__init__()
# assertions on inputs:
assert isinstance(num_blocks, Sequence)
assert all(is_pos_int(n) for n in num_blocks)
assert is_pos_int(init_planes) and is_pos_int(reduction)
assert type(small_input) == bool
assert (
type(zero_init_bn_residuals) == bool
), "zero_init_bn_residuals must be a boolean, set to true if gamma of last\
BN of residual block should be initialized to 0.0, false for 1.0"
assert base_width_and_cardinality is None or (
isinstance(base_width_and_cardinality, Sequence)
and len(base_width_and_cardinality) == 2
and is_pos_int(base_width_and_cardinality[0])
and is_pos_int(base_width_and_cardinality[1])
)
assert isinstance(use_se, bool), "use_se has to be a boolean"
# initial convolutional block:
self.num_blocks = num_blocks
self.small_input = small_input
self._make_initial_block(small_input, init_planes, basic_layer)
# compute number of planes at each spatial resolution:
out_planes = [init_planes * 2**i * reduction for i in range(len(num_blocks))]
in_planes = [init_planes] + out_planes[:-1]
# create subnetworks for each spatial resolution:
blocks = []
for idx in range(len(out_planes)):
mid_planes_and_cardinality = None
if base_width_and_cardinality is not None:
w, c = base_width_and_cardinality
mid_planes_and_cardinality = (w * 2**idx, c)
new_block = self._make_resolution_block(
in_planes[idx],
out_planes[idx],
idx,
num_blocks[idx], # num layers
stride=1 if idx == 0 else 2,
mid_planes_and_cardinality=mid_planes_and_cardinality,
reduction=reduction,
final_bn_relu=final_bn_relu or (idx != (len(out_planes) - 1)),
use_se=use_se,
se_reduction_ratio=se_reduction_ratio,
)
blocks.append(new_block)
self.blocks = nn.Sequential(*blocks)
self.out_planes = out_planes[-1]
self._num_classes = out_planes
# initialize weights:
self._initialize_weights(zero_init_bn_residuals)
def _initialize_weights(self, zero_init_bn_residuals):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Init BatchNorm gamma to 0.0 for last BN layer, it gets 0.2-0.3% higher
# final val top1 for larger batch sizes.
if zero_init_bn_residuals:
for m in self.modules():
if isinstance(m, GenericLayer):
if hasattr(m, "bn"):
nn.init.constant_(m.bn.weight, 0)
def _make_initial_block(self, small_input, init_planes, basic_layer):
if small_input:
self.initial_block = SmallInputInitialBlock(init_planes)
self.layer_type = BasicLayer
else:
self.initial_block = InitialBlock(init_planes)
self.layer_type = BasicLayer if basic_layer else BottleneckLayer
# helper function that creates ResNet blocks at single spatial resolution:
def _make_resolution_block(
self,
in_planes,
out_planes,
resolution_idx,
num_blocks,
stride=1,
mid_planes_and_cardinality=None,
reduction=4,
final_bn_relu=True,
use_se=False,
se_reduction_ratio=16,
):
# add the desired number of residual blocks:
blocks = OrderedDict()
for idx in range(num_blocks):
block_name = "block{}-{}".format(resolution_idx, idx)
blocks[block_name] = self.layer_type(
in_planes if idx == 0 else out_planes,
out_planes,
stride=stride if idx == 0 else 1, # only first block has stride
mid_planes_and_cardinality=mid_planes_and_cardinality,
reduction=reduction,
final_bn_relu=final_bn_relu or (idx != (num_blocks - 1)),
use_se=use_se,
se_reduction_ratio=se_reduction_ratio,
)
return nn.Sequential(blocks)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ResNeXt":
"""Instantiates a ResNeXt from a configuration.
Args:
config: A configuration for a ResNeXt.
See :func:`__init__` for parameters expected in the config.
Returns:
A ResNeXt instance.
"""
assert "num_blocks" in config
basic_layer = config.get("basic_layer", False)
config = {
"num_blocks": config["num_blocks"],
"init_planes": config.get("init_planes", 64),
"reduction": config.get("reduction", 1 if basic_layer else 4),
"base_width_and_cardinality": config.get("base_width_and_cardinality"),
"small_input": config.get("small_input", False),
"basic_layer": basic_layer,
"final_bn_relu": config.get("final_bn_relu", True),
"zero_init_bn_residuals": config.get("zero_init_bn_residuals", False),
"use_se": config.get("use_se", False),
"se_reduction_ratio": config.get("se_reduction_ratio", 16),
}
return cls(**config)
# forward pass in residual network:
def forward(self, x):
# initial convolutional block:
out = self.initial_block(x)
# evaluate all residual blocks:
# TODO: (kaizh) T43794289 exit early if there is no block that has heads
out = self.blocks(out)
return out
def _convert_model_state(self, state):
"""Convert model state from the old implementation to the current format.
Updates the state dict in place and returns True if the state dict was updated.
"""
pattern = r"blocks\.(?P<block_id_0>[0-9]*)\.(?P<block_id_1>[0-9]*)\._module\."
repl = r"blocks.\g<block_id_0>.block\g<block_id_0>-\g<block_id_1>."
trunk_dict = state["model"]["trunk"]
new_trunk_dict = {}
replaced_keys = False
for key, value in trunk_dict.items():
new_key = re.sub(pattern, repl, key)
if new_key != key:
replaced_keys = True
new_trunk_dict[new_key] = value
state["model"]["trunk"] = new_trunk_dict
state["version"] = VERSION
return replaced_keys
def get_classy_state(self, deep_copy=False):
state = super().get_classy_state(deep_copy=deep_copy)
state["version"] = VERSION
return state
def set_classy_state(self, state, strict=True):
version = state.get("version")
if version is None:
# convert the weights from the previous implementation of ResNeXt to the
# current one
if not self._convert_model_state(state):
raise RuntimeError("ResNeXt state conversion failed")
message = (
"Provided state dict is from an old implementation of ResNeXt. "
"This has been deprecated and will be removed soon."
)
warnings.warn(message, DeprecationWarning, stacklevel=2)
elif version != VERSION:
raise ValueError(
f"Unsupported ResNeXt version: {version}. Expected: {VERSION}"
)
super().set_classy_state(state, strict)
class _ResNeXt(ResNeXt):
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ResNeXt":
config = copy.deepcopy(config)
config.pop("name")
if "heads" in config:
config.pop("heads")
return cls(**config)
@register_model("resnet18")
class ResNet18(_ResNeXt):
def __init__(self, **kwargs):
super().__init__(
num_blocks=[2, 2, 2, 2],
basic_layer=True,
zero_init_bn_residuals=True,
reduction=1,
**kwargs,
)
@register_model("resnet34")
class ResNet34(_ResNeXt):
def __init__(self, **kwargs):
super().__init__(
num_blocks=[3, 4, 6, 3],
basic_layer=True,
zero_init_bn_residuals=True,
reduction=1,
**kwargs,
)
@register_model("resnet50")
class ResNet50(_ResNeXt):
def __init__(self, **kwargs):
super().__init__(
num_blocks=[3, 4, 6, 3],
basic_layer=False,
zero_init_bn_residuals=True,
**kwargs,
)
@register_model("resnet101")
class ResNet101(_ResNeXt):
def __init__(self, **kwargs):
super().__init__(
num_blocks=[3, 4, 23, 3],
basic_layer=False,
zero_init_bn_residuals=True,
**kwargs,
)
@register_model("resnet152")
class ResNet152(_ResNeXt):
def __init__(self, **kwargs):
super().__init__(
num_blocks=[3, 8, 36, 3],
basic_layer=False,
zero_init_bn_residuals=True,
**kwargs,
)
# Note, the ResNeXt models all have weight decay enabled for the batch
# norm parameters. We have found empirically that this gives better
# results when training on ImageNet (~0.5pp of top-1 acc) and brings
# our results on track with reported ImageNet results...but for
# training on other datasets, we have observed losses in accuracy (for
# example, the dataset used in https://arxiv.org/abs/1805.00932).
@register_model("resnext50_32x4d")
class ResNeXt50(_ResNeXt):
def __init__(self, **kwargs):
super().__init__(
num_blocks=[3, 4, 6, 3],
basic_layer=False,
zero_init_bn_residuals=True,
base_width_and_cardinality=(4, 32),
**kwargs,
)
@register_model("resnext101_32x4d")
class ResNeXt101(_ResNeXt):
def __init__(self, **kwargs):
super().__init__(
num_blocks=[3, 4, 23, 3],
basic_layer=False,
zero_init_bn_residuals=True,
base_width_and_cardinality=(4, 32),
**kwargs,
)
@register_model("resnext152_32x4d")
class ResNeXt152(_ResNeXt):
def __init__(self, **kwargs):
super().__init__(
num_blocks=[3, 8, 36, 3],
basic_layer=False,
zero_init_bn_residuals=True,
base_width_and_cardinality=(4, 32),
**kwargs,
)
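# Illustrative usage sketch (not from the original file). It builds the registered
# ResNeXt-50 (32x4d) trunk from a config dict and runs one forward pass; without
# attached heads the trunk returns the final feature map.
if __name__ == "__main__":
    import torch
    example_model = ResNeXt50.from_config({"name": "resnext50_32x4d"})
    example_features = example_model(torch.randn(2, 3, 224, 224))
    # for a 224x224 input the last stage produces 2048 channels at 7x7 resolution
    print("ResNeXt-50 trunk output shape:", tuple(example_features.shape))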
| ClassyVision-main | classy_vision/models/resnext.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Dict, Sequence
import torch
import torch.nn as nn
from classy_vision.generic.util import is_pos_int, is_pos_int_list
from . import register_model
from .classy_model import ClassyModel
from .resnext3d_stage import ResStage
from .resnext3d_stem import R2Plus1DStem, ResNeXt3DStem
model_stems = {
"r2plus1d_stem": R2Plus1DStem,
"resnext3d_stem": ResNeXt3DStem,
# For more types of model stem, add them below
}
class ResNeXt3DBase(ClassyModel):
def __init__(
self,
input_key,
input_planes,
clip_crop_size,
frames_per_clip,
num_blocks,
stem_name,
stem_planes,
stem_temporal_kernel,
stem_spatial_kernel,
stem_maxpool,
):
"""
ResNeXt3DBase implements everything in ResNeXt3D model except the
construction of 4 stages. See more details in ResNeXt3D.
"""
super(ResNeXt3DBase, self).__init__()
self._input_key = input_key
self.input_planes = input_planes
self.clip_crop_size = clip_crop_size
self.frames_per_clip = frames_per_clip
self.num_blocks = num_blocks
assert stem_name in model_stems, "unknown stem: %s" % stem_name
self.stem = model_stems[stem_name](
stem_temporal_kernel,
stem_spatial_kernel,
input_planes,
stem_planes,
stem_maxpool,
)
@staticmethod
def _parse_config(config):
ret_config = {}
required_args = [
"input_planes",
"clip_crop_size",
"skip_transformation_type",
"residual_transformation_type",
"frames_per_clip",
"num_blocks",
]
for arg in required_args:
assert arg in config, "resnext3d model requires argument %s" % arg
ret_config[arg] = config[arg]
# Default setting for model stem, which is considered as stage 0. Stage
# index starts from 0 as implemented in ResStageBase._block_name() method.
        # stem_planes: No. of output channels of conv op in stem
# stem_temporal_kernel: temporal size of conv op in stem
# stem_spatial_kernel: spatial size of conv op in stem
# stem_maxpool: by default, spatial maxpool op is disabled in stem
ret_config.update(
{
"input_key": config.get("input_key", None),
"stem_name": config.get("stem_name", "resnext3d_stem"),
"stem_planes": config.get("stem_planes", 64),
"stem_temporal_kernel": config.get("stem_temporal_kernel", 3),
"stem_spatial_kernel": config.get("stem_spatial_kernel", 7),
"stem_maxpool": config.get("stem_maxpool", False),
}
)
# Default setting for model stages 1, 2, 3 and 4
# stage_planes: No. of output channel of 1st conv op in stage 1
# stage_temporal_kernel_basis: Basis of temporal kernel sizes for each of
# the stage.
        # temporal_conv_1x1: if True, do temporal convolution in the first
        # 1x1 Conv3d. Otherwise, do it in the second 3x3 Conv3d (default setting)
# stage_temporal_stride: temporal stride for each stage
# stage_spatial_stride: spatial stride for each stage
# num_groups: No. of groups in 2nd (group) conv in the residual transformation
# width_per_group: No. of channels per group in 2nd (group) conv in the
# residual transformation
ret_config.update(
{
"stage_planes": config.get("stage_planes", 256),
"stage_temporal_kernel_basis": config.get(
"stage_temporal_kernel_basis", [[3], [3], [3], [3]]
),
"temporal_conv_1x1": config.get(
"temporal_conv_1x1", [False, False, False, False]
),
"stage_temporal_stride": config.get(
"stage_temporal_stride", [1, 2, 2, 2]
),
"stage_spatial_stride": config.get(
"stage_spatial_stride", [1, 2, 2, 2]
),
"num_groups": config.get("num_groups", 1),
"width_per_group": config.get("width_per_group", 64),
}
)
# Default setting for model parameter initialization
ret_config.update(
{
"zero_init_residual_transform": config.get(
"zero_init_residual_transform", False
)
}
)
assert is_pos_int_list(ret_config["num_blocks"])
assert is_pos_int(ret_config["stem_planes"])
assert is_pos_int(ret_config["stem_temporal_kernel"])
assert is_pos_int(ret_config["stem_spatial_kernel"])
assert type(ret_config["stem_maxpool"]) == bool
assert is_pos_int(ret_config["stage_planes"])
assert isinstance(ret_config["stage_temporal_kernel_basis"], Sequence)
assert all(
is_pos_int_list(l) for l in ret_config["stage_temporal_kernel_basis"]
)
assert isinstance(ret_config["temporal_conv_1x1"], Sequence)
assert is_pos_int_list(ret_config["stage_temporal_stride"])
assert is_pos_int_list(ret_config["stage_spatial_stride"])
assert is_pos_int(ret_config["num_groups"])
assert is_pos_int(ret_config["width_per_group"])
return ret_config
def _init_parameter(self, zero_init_residual_transform):
for m in self.modules():
if isinstance(m, nn.Conv3d):
if (
hasattr(m, "final_transform_op")
and m.final_transform_op
and zero_init_residual_transform
):
nn.init.constant_(m.weight, 0)
else:
nn.init.kaiming_normal_(
m.weight, mode="fan_out", nonlinearity="relu"
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm3d) and m.affine:
if (
hasattr(m, "final_transform_op")
and m.final_transform_op
and zero_init_residual_transform
):
batchnorm_weight = 0.0
else:
batchnorm_weight = 1.0
nn.init.constant_(m.weight, batchnorm_weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0.0, std=0.01)
nn.init.constant_(m.bias, 0)
def set_classy_state(self, state, strict=True):
# We need to support both regular checkpoint loading and 2D conv weight
# inflation into 3D conv weight in this function.
self.load_head_states(state)
# clear the heads to set the trunk state
attached_heads = self.get_heads()
self.clear_heads()
current_state = self.state_dict()
for name, weight_src in state["model"]["trunk"].items():
if name not in current_state:
logging.warning(
f"weight {name} is not found in current ResNeXt3D state"
)
continue
weight_tgt = current_state[name]
assert (
weight_src.dim() == weight_tgt.dim()
), "weight of source- and target 3D convolution should have same dimension"
if (
weight_src.dim() == 5
and weight_src.shape[2] == 1
and weight_tgt.shape[2] > 1
):
# Find a source weight tensor where temporal dimension is 1. If the
# temporal dimension of the current weight tensor with the same name
# is larger than 1, we inflate the source weight tensor before
# loading it. Such parameter inflation was first introduced in
# the paper (https://arxiv.org/abs/1705.07750). It can achieve a
# better initialization compared to random initialization.
assert (
weight_src.shape[-2:] == weight_tgt.shape[-2:]
and weight_src.shape[:2] == weight_tgt.shape[:2]
), "weight shapes of source- and target 3D convolution mismatch"
weight_src_inflated = (
weight_src.repeat(1, 1, weight_tgt.shape[2], 1, 1)
/ weight_tgt.shape[2]
)
weight_src = weight_src_inflated
else:
assert all(
weight_src.size(d) == weight_tgt.size(d)
for d in range(weight_src.dim())
), "the shapes of source and target weight mismatch: %s Vs %s" % (
str(weight_src.size()),
str(weight_tgt.size()),
)
current_state[name] = weight_src.clone()
self.load_state_dict(current_state, strict=strict)
# set the heads back again
self.set_heads(attached_heads)
def forward(self, x):
"""
Args:
x (dict or torch.Tensor): video input.
When its type is dict, the dataset is a video dataset, and its
content is like {"video": torch.tensor, "audio": torch.tensor}.
When its type is torch.Tensor, the dataset is an image dataset.
"""
assert isinstance(x, dict) or isinstance(
x, torch.Tensor
), "x must be either a dictionary or a torch.Tensor"
if isinstance(x, dict):
assert self._input_key is not None and self._input_key in x, (
"input key (%s) not in the input" % self._input_key
)
x = x[self._input_key]
else:
assert (
self._input_key is None
), "when input of forward pass is a tensor, input key should not be set"
assert x.dim() == 4 or x.dim() == 5, "tensor x must be 4D/5D tensor"
if x.dim() == 4:
# x is a 4D tensor of size N x C x H x W and is prepared from an
            # image dataset. We insert a temporal axis to make it 5D of size
# N x C x T x H x W
x = torch.unsqueeze(x, 2)
out = self.stem([x])
out = self.stages(out)
return out
@property
def input_shape(self):
"""
        Shape of video model input can vary in the following cases:
        - At training stage, inputs are video frame crops of fixed size.
        - At test stage, inputs are original video frames to support Fully Convolutional
          evaluation, and their size can vary from video to video.
"""
# Input shape is used by tensorboard hook. We put the input shape at
# training stage for profiling and visualization purpose.
return (
self.input_planes,
self.frames_per_clip,
self.clip_crop_size,
self.clip_crop_size,
)
@property
def input_key(self):
return self._input_key
@register_model("resnext3d")
class ResNeXt3D(ResNeXt3DBase):
"""
Implementation of:
1. Conventional `post-activated 3D ResNe(X)t <https://arxiv.org/
abs/1812.03982>`_.
2. `Pre-activated 3D ResNe(X)t <https://arxiv.org/abs/1811.12814>`_.
The model consists of one stem, a number of stages, and one or multiple
heads that are attached to different blocks in the stage.
"""
def __init__(
self,
input_key,
input_planes,
clip_crop_size,
skip_transformation_type,
residual_transformation_type,
frames_per_clip,
num_blocks,
stem_name,
stem_planes,
stem_temporal_kernel,
stem_spatial_kernel,
stem_maxpool,
stage_planes,
stage_temporal_kernel_basis,
temporal_conv_1x1,
stage_temporal_stride,
stage_spatial_stride,
num_groups,
width_per_group,
zero_init_residual_transform,
):
"""
Args:
input_key (str): a key that can index into model input that is
of dict type.
input_planes (int): the channel dimension of the input. Normally 3 is used
for rgb input.
clip_crop_size (int): spatial cropping size of video clip at train time.
skip_transformation_type (str): the type of skip transformation.
residual_transformation_type (str): the type of residual transformation.
frames_per_clip (int): Number of frames in a video clip.
num_blocks (list): list of the number of blocks in stages.
stem_name (str): name of model stem.
stem_planes (int): the output dimension of the convolution in the model
stem.
stem_temporal_kernel (int): the temporal kernel size of the convolution
in the model stem.
stem_spatial_kernel (int): the spatial kernel size of the convolution
in the model stem.
stem_maxpool (bool): If true, perform max pooling.
stage_planes (int): the output channel dimension of the 1st residual stage
stage_temporal_kernel_basis (list): Basis of temporal kernel sizes for
each of the stage.
            temporal_conv_1x1 (list): Only useful for BottleneckTransformation.
                In a pathway, if True, do temporal convolution in the first 1x1
                Conv3d. Otherwise, do it in the second 3x3 Conv3d.
            stage_temporal_stride (list): the temporal stride of the residual
                transformation in each stage.
            stage_spatial_stride (list): the spatial stride of the residual
                transformation in each stage.
num_groups (int): number of groups for the convolution.
num_groups = 1 is for standard ResNet like networks, and
num_groups > 1 is for ResNeXt like networks.
width_per_group (int): Number of channels per group in 2nd (group)
conv in the residual transformation in the first stage
zero_init_residual_transform (bool): if true, the weight of last
operation, which could be either BatchNorm3D in post-activated
transformation or Conv3D in pre-activated transformation, in the
residual transformation is initialized to zero
"""
super(ResNeXt3D, self).__init__(
input_key,
input_planes,
clip_crop_size,
frames_per_clip,
num_blocks,
stem_name,
stem_planes,
stem_temporal_kernel,
stem_spatial_kernel,
stem_maxpool,
)
num_stages = len(num_blocks)
out_planes = [stage_planes * 2**i for i in range(num_stages)]
in_planes = [stem_planes] + out_planes[:-1]
inner_planes = [
num_groups * width_per_group * 2**i for i in range(num_stages)
]
stages = []
for s in range(num_stages):
stage = ResStage(
s + 1, # stem is viewed as stage 0, and following stages start from 1
[in_planes[s]],
[out_planes[s]],
[inner_planes[s]],
[stage_temporal_kernel_basis[s]],
[temporal_conv_1x1[s]],
[stage_temporal_stride[s]],
[stage_spatial_stride[s]],
[num_blocks[s]],
[num_groups],
skip_transformation_type,
residual_transformation_type,
disable_pre_activation=(s == 0),
final_stage=(s == (num_stages - 1)),
)
stages.append(stage)
self.stages = nn.Sequential(*stages)
self._init_parameter(zero_init_residual_transform)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ResNeXt3D":
"""Instantiates a ResNeXt3D from a configuration.
Args:
config: A configuration for a ResNeXt3D.
See :func:`__init__` for parameters expected in the config.
Returns:
A ResNeXt3D instance.
"""
ret_config = ResNeXt3D._parse_config(config)
return cls(**ret_config)
| ClassyVision-main | classy_vision/models/resnext3d.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Implementation of ResNet (https://arxiv.org/pdf/1512.03385.pdf) as a special
case of ResNeXt (https://arxiv.org/pdf/1611.05431.pdf)
"""
from . import register_model
from .resnext import ResNeXt
# global setting for in-place ReLU:
INPLACE = True
@register_model("resnet")
class ResNet(ResNeXt):
"""
ResNet is a special case of :class:`ResNeXt`.
"""
def __init__(self, **kwargs):
"""
See :func:`ResNeXt.__init__`
"""
assert (
kwargs["base_width_and_cardinality"] is None
), "base_width_and_cardinality should be None for ResNet"
super().__init__(**kwargs)
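# Usage note (illustrative only): because ResNet reuses ResNeXt.from_config, a
# registered ResNet can be built from a config dict, e.g.
# build_model({"name": "resnet", "num_blocks": [2, 2, 2, 2], "basic_layer": True});
# the config parser fills base_width_and_cardinality with None, which is exactly
# what the assertion above requires.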
| ClassyVision-main | classy_vision/models/resnet.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import torch.nn as nn
from .resnext3d_block import ResBlock
class ResStageBase(nn.Module):
def __init__(
self,
stage_idx,
dim_in,
dim_out,
dim_inner,
temporal_kernel_basis,
temporal_conv_1x1,
temporal_stride,
spatial_stride,
num_blocks,
num_groups,
):
super(ResStageBase, self).__init__()
assert (
len(
{
len(dim_in),
len(dim_out),
len(temporal_kernel_basis),
len(temporal_conv_1x1),
len(temporal_stride),
len(spatial_stride),
len(num_blocks),
len(dim_inner),
len(num_groups),
}
)
== 1
)
self.stage_idx = stage_idx
self.num_blocks = num_blocks
self.num_pathways = len(self.num_blocks)
self.temporal_kernel_sizes = [
(temporal_kernel_basis[i] * num_blocks[i])[: num_blocks[i]]
for i in range(len(temporal_kernel_basis))
]
def _block_name(self, pathway_idx, stage_idx, block_idx):
return "pathway{}-stage{}-block{}".format(pathway_idx, stage_idx, block_idx)
def _pathway_name(self, pathway_idx):
return "pathway{}".format(pathway_idx)
def forward(self, inputs):
output = []
for p in range(self.num_pathways):
x = inputs[p]
pathway_module = getattr(self, self._pathway_name(p))
output.append(pathway_module(x))
return output
class ResStage(ResStageBase):
"""
Stage of 3D ResNet. It expects to have one or more tensors as input for
single pathway (C2D, I3D, SlowOnly), and multi-pathway (SlowFast) cases.
More details can be found here:
"Slowfast networks for video recognition."
https://arxiv.org/pdf/1812.03982.pdf
"""
def __init__(
self,
stage_idx,
dim_in,
dim_out,
dim_inner,
temporal_kernel_basis,
temporal_conv_1x1,
temporal_stride,
spatial_stride,
num_blocks,
num_groups,
skip_transformation_type,
residual_transformation_type,
inplace_relu=True,
bn_eps=1e-5,
bn_mmt=0.1,
disable_pre_activation=False,
final_stage=False,
):
"""
The `__init__` method of any subclass should also contain these arguments.
ResStage builds p streams, where p can be greater or equal to one.
Args:
stage_idx (int): integer index of stage.
            dim_in (list): list of the p channel dimensions of the input.
                Different channel dimensions control the input dimension of
                different pathways.
            dim_out (list): list of the p channel dimensions of the output.
                Different channel dimensions control the output dimension of
                different pathways.
dim_inner (list): list of the p inner channel dimensions of the
input.
Different channel dimensions control the input dimension of
different pathways.
temporal_kernel_basis (list): Basis of temporal kernel sizes for each of
the stage.
temporal_conv_1x1 (list): Only useful for BottleneckBlock.
                In a pathway, if True, do temporal convolution in the first 1x1 Conv3d.
Otherwise, do it in the second 3x3 Conv3d
temporal_stride (list): the temporal stride of the bottleneck.
spatial_stride (list): the spatial_stride of the bottleneck.
num_blocks (list): list of p numbers of blocks for each of the
pathway.
num_groups (list): list of number of p groups for the convolution.
num_groups=1 is for standard ResNet like networks, and
num_groups>1 is for ResNeXt like networks.
skip_transformation_type (str): the type of skip transformation
residual_transformation_type (str): the type of residual transformation
disable_pre_activation (bool): If true, disable the preactivation,
which includes BatchNorm3D and ReLU.
final_stage (bool): If true, this is the last stage in the model.
"""
super(ResStage, self).__init__(
stage_idx,
dim_in,
dim_out,
dim_inner,
temporal_kernel_basis,
temporal_conv_1x1,
temporal_stride,
spatial_stride,
num_blocks,
num_groups,
)
for p in range(self.num_pathways):
blocks = []
for i in range(self.num_blocks[p]):
# Retrieve the transformation function.
# Construct the block.
block_disable_pre_activation = (
True if disable_pre_activation and i == 0 else False
)
res_block = ResBlock(
dim_in[p] if i == 0 else dim_out[p],
dim_out[p],
dim_inner[p],
self.temporal_kernel_sizes[p][i],
temporal_conv_1x1[p],
temporal_stride[p] if i == 0 else 1,
spatial_stride[p] if i == 0 else 1,
skip_transformation_type,
residual_transformation_type,
num_groups=num_groups[p],
inplace_relu=inplace_relu,
bn_eps=bn_eps,
bn_mmt=bn_mmt,
disable_pre_activation=block_disable_pre_activation,
)
block_name = self._block_name(p, stage_idx, i)
blocks.append((block_name, res_block))
if final_stage and (
residual_transformation_type == "preactivated_bottleneck_transformation"
):
# For pre-activation residual transformation, we conduct
# activation in the final stage before continuing forward pass
# through the head
activate_bn = nn.BatchNorm3d(dim_out[p])
activate_relu = nn.ReLU(inplace=True)
activate_bn_name = "-".join([block_name, "bn"])
activate_relu_name = "-".join([block_name, "relu"])
blocks.append((activate_bn_name, activate_bn))
blocks.append((activate_relu_name, activate_relu))
self.add_module(self._pathway_name(p), nn.Sequential(OrderedDict(blocks)))
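# Worked example (added for clarity): ResStageBase tiles the temporal kernel
# basis over the blocks of a pathway, so a basis of [3] with 4 blocks yields
# per-block temporal kernel sizes [3, 3, 3, 3], and a basis of [3, 1] with 5
# blocks yields [3, 1, 3, 1, 3].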
| ClassyVision-main | classy_vision/models/resnext3d_stage.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Sequence
import torch.nn as nn
from .r2plus1_util import r2plus1_unit
class ResNeXt3DStemSinglePathway(nn.Module):
"""
    ResNe(X)t 3D basic stem module. Assumes a single pathway.
    Performs spatiotemporal Convolution, BN, and ReLU, followed by a
    spatiotemporal pooling.
"""
def __init__(
self,
dim_in,
dim_out,
kernel,
stride,
padding,
maxpool=True,
inplace_relu=True,
bn_eps=1e-5,
bn_mmt=0.1,
):
"""
The `__init__` method of any subclass should also contain these arguments.
Args:
dim_in (int): the channel dimension of the input. Normally 3 is used
for rgb input
dim_out (int): the output dimension of the convolution in the stem
layer.
kernel (list): the kernel size of the convolution in the stem layer.
temporal kernel size, height kernel size, width kernel size in
order.
stride (list): the stride size of the convolution in the stem layer.
                temporal stride, height stride, width stride in order.
padding (int): the padding size of the convolution in the stem
layer, temporal padding size, height padding size, width
padding size in order.
maxpool (bool): If true, perform max pooling.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
bn_eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Note that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
"""
super(ResNeXt3DStemSinglePathway, self).__init__()
self.kernel = kernel
self.stride = stride
self.padding = padding
self.inplace_relu = inplace_relu
self.bn_eps = bn_eps
self.bn_mmt = bn_mmt
self.maxpool = maxpool
# Construct the stem layer.
self._construct_stem(dim_in, dim_out)
def _construct_stem(self, dim_in, dim_out):
self.conv = nn.Conv3d(
dim_in,
dim_out,
self.kernel,
stride=self.stride,
padding=self.padding,
bias=False,
)
self.bn = nn.BatchNorm3d(dim_out, eps=self.bn_eps, momentum=self.bn_mmt)
self.relu = nn.ReLU(self.inplace_relu)
if self.maxpool:
self.pool_layer = nn.MaxPool3d(
kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1]
)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
if self.maxpool:
x = self.pool_layer(x)
return x
class R2Plus1DStemSinglePathway(ResNeXt3DStemSinglePathway):
"""
    R(2+1)D basic stem module. Assumes a single pathway.
    Performs spatial convolution, temporal convolution, BN, and ReLU, followed
    by a spatiotemporal pooling.
"""
def __init__(
self,
dim_in,
dim_out,
kernel,
stride,
padding,
maxpool=True,
inplace_relu=True,
bn_eps=1e-5,
bn_mmt=0.1,
):
"""
The `__init__` method of any subclass should also contain these arguments.
Args:
dim_in (int): the channel dimension of the input. Normally 3 is used
for rgb input
dim_out (int): the output dimension of the convolution in the stem
layer.
kernel (list): the kernel size of the convolution in the stem layer.
temporal kernel size, height kernel size, width kernel size in
order.
stride (list): the stride size of the convolution in the stem layer.
                temporal stride, height stride, width stride in order.
padding (int): the padding size of the convolution in the stem
layer, temporal padding size, height padding size, width
padding size in order.
maxpool (bool): If true, perform max pooling.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
bn_eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Note that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
"""
super(R2Plus1DStemSinglePathway, self).__init__(
dim_in,
dim_out,
kernel,
stride,
padding,
maxpool=maxpool,
inplace_relu=inplace_relu,
bn_eps=bn_eps,
bn_mmt=bn_mmt,
)
def _construct_stem(self, dim_in, dim_out):
assert (
self.stride[1] == self.stride[2]
), "Only support identical height stride and width stride"
self.conv = r2plus1_unit(
dim_in,
dim_out,
self.stride[0], # temporal_stride
self.stride[1], # spatial_stride
1, # groups
self.inplace_relu,
self.bn_eps,
self.bn_mmt,
dim_mid=45, # hard-coded middle channels
)
self.bn = nn.BatchNorm3d(dim_out, eps=self.bn_eps, momentum=self.bn_mmt)
self.relu = nn.ReLU(self.inplace_relu)
if self.maxpool:
self.pool_layer = nn.MaxPool3d(
kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1]
)
class ResNeXt3DStemMultiPathway(nn.Module):
"""
Video 3D stem module. Provides stem operations of Conv, BN, ReLU, MaxPool
on input data tensor for one or multiple pathways.
"""
def __init__(
self,
dim_in,
dim_out,
kernel,
stride,
padding,
inplace_relu=True,
bn_eps=1e-5,
bn_mmt=0.1,
maxpool=(True,),
):
"""
The `__init__` method of any subclass should also contain these
        arguments. List size of 1 for single pathway models (C2D, I3D, SlowOnly,
        etc.), list size of 2 for two pathway models (SlowFast).
Args:
dim_in (list): the list of channel dimensions of the inputs.
dim_out (list): the output dimension of the convolution in the stem
layer.
kernel (list): the kernels' size of the convolutions in the stem
layers. Temporal kernel size, height kernel size, width kernel
size in order.
stride (list): the stride sizes of the convolutions in the stem
                layer. Temporal stride, height stride, width stride in order.
padding (list): the paddings' sizes of the convolutions in the stem
layer. Temporal padding size, height padding size, width padding
size in order.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
bn_eps (float): epsilon for batch norm.
            bn_mmt (float): momentum for batch norm. Note that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
maxpool (iterable): At training time, when crop size is 224 x 224, do max
pooling. When crop size is 112 x 112, skip max pooling.
                Defaults to (True,).
"""
super(ResNeXt3DStemMultiPathway, self).__init__()
assert (
len({len(dim_in), len(dim_out), len(kernel), len(stride), len(padding)})
== 1
), "Input pathway dimensions are not consistent."
self.num_pathways = len(dim_in)
self.kernel = kernel
self.stride = stride
self.padding = padding
self.inplace_relu = inplace_relu
self.bn_eps = bn_eps
self.bn_mmt = bn_mmt
self.maxpool = maxpool
# Construct the stem layer.
self._construct_stem(dim_in, dim_out)
def _construct_stem(self, dim_in, dim_out):
assert isinstance(dim_in, Sequence)
assert all(dim > 0 for dim in dim_in)
assert isinstance(dim_out, Sequence)
assert all(dim > 0 for dim in dim_out)
self.blocks = {}
for p in range(len(dim_in)):
stem = ResNeXt3DStemSinglePathway(
dim_in[p],
dim_out[p],
self.kernel[p],
self.stride[p],
self.padding[p],
inplace_relu=self.inplace_relu,
bn_eps=self.bn_eps,
bn_mmt=self.bn_mmt,
maxpool=self.maxpool[p],
)
stem_name = self._stem_name(p)
self.add_module(stem_name, stem)
self.blocks[stem_name] = stem
def _stem_name(self, path_idx):
return "stem-path{}".format(path_idx)
def forward(self, x):
assert (
len(x) == self.num_pathways
), "Input tensor does not contain {} pathway".format(self.num_pathways)
for p in range(len(x)):
stem_name = self._stem_name(p)
x[p] = self.blocks[stem_name](x[p])
return x
class R2Plus1DStemMultiPathway(ResNeXt3DStemMultiPathway):
"""
Video R(2+1)D stem module. Provides stem operations of Conv, BN, ReLU, MaxPool
on input data tensor for one or multiple pathways.
"""
def __init__(
self,
dim_in,
dim_out,
kernel,
stride,
padding,
inplace_relu=True,
bn_eps=1e-5,
bn_mmt=0.1,
maxpool=(True,),
):
"""
The `__init__` method of any subclass should also contain these
arguments. Use a list of size 1 for single-pathway models (C2D, I3D,
SlowOnly, etc.) and a list of size 2 for two-pathway models (SlowFast).
Args:
dim_in (list): the list of channel dimensions of the inputs.
dim_out (list): the output dimension of the convolution in the stem
layer.
kernel (list): the kernels' size of the convolutions in the stem
layers. Temporal kernel size, height kernel size, width kernel
size in order.
stride (list): the stride sizes of the convolutions in the stem
layer. Temporal stride, height stride, width stride
in order.
padding (list): the paddings' sizes of the convolutions in the stem
layer. Temporal padding size, height padding size, width padding
size in order.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
bn_eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Note that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
maxpool (iterable): At training time, when crop size is 224 x 224, do max
pooling. When crop size is 112 x 112, skip max pooling.
Default value is (True,).
"""
super(R2Plus1DStemMultiPathway, self).__init__(
dim_in,
dim_out,
kernel,
stride,
padding,
inplace_relu=inplace_relu,
bn_eps=bn_eps,
bn_mmt=bn_mmt,
maxpool=maxpool,
)
def _construct_stem(self, dim_in, dim_out):
assert isinstance(dim_in, Sequence)
assert all(dim > 0 for dim in dim_in)
assert isinstance(dim_out, Sequence)
assert all(dim > 0 for dim in dim_out)
self.blocks = {}
for p in range(len(dim_in)):
stem = R2Plus1DStemSinglePathway(
dim_in[p],
dim_out[p],
self.kernel[p],
self.stride[p],
self.padding[p],
inplace_relu=self.inplace_relu,
bn_eps=self.bn_eps,
bn_mmt=self.bn_mmt,
maxpool=self.maxpool[p],
)
stem_name = self._stem_name(p)
self.add_module(stem_name, stem)
self.blocks[stem_name] = stem
class ResNeXt3DStem(nn.Module):
def __init__(
self, temporal_kernel, spatial_kernel, input_planes, stem_planes, maxpool
):
super(ResNeXt3DStem, self).__init__()
self._construct_stem(
temporal_kernel, spatial_kernel, input_planes, stem_planes, maxpool
)
def _construct_stem(
self, temporal_kernel, spatial_kernel, input_planes, stem_planes, maxpool
):
self.stem = ResNeXt3DStemMultiPathway(
[input_planes],
[stem_planes],
[[temporal_kernel, spatial_kernel, spatial_kernel]],
[[1, 2, 2]], # stride
[
[temporal_kernel // 2, spatial_kernel // 2, spatial_kernel // 2]
], # padding
maxpool=[maxpool],
)
def forward(self, x):
return self.stem(x)
class R2Plus1DStem(ResNeXt3DStem):
def __init__(
self, temporal_kernel, spatial_kernel, input_planes, stem_planes, maxpool
):
super(R2Plus1DStem, self).__init__(
temporal_kernel, spatial_kernel, input_planes, stem_planes, maxpool
)
def _construct_stem(
self, temporal_kernel, spatial_kernel, input_planes, stem_planes, maxpool
):
self.stem = R2Plus1DStemMultiPathway(
[input_planes],
[stem_planes],
[[temporal_kernel, spatial_kernel, spatial_kernel]],
[[1, 2, 2]], # stride
[
[temporal_kernel // 2, spatial_kernel // 2, spatial_kernel // 2]
], # padding
maxpool=[maxpool],
)
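# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): how a
# single-pathway ResNeXt3D stem might be constructed and applied to a dummy
# clip. All shapes below are assumptions chosen only for this example.
if __name__ == "__main__":
    import torch

    stem = ResNeXt3DStem(
        temporal_kernel=3,
        spatial_kernel=7,
        input_planes=3,
        stem_planes=64,
        maxpool=True,
    )
    clip = torch.randn(2, 3, 8, 224, 224)  # (N, C, T, H, W)
    # Stems operate on a list of tensors, one entry per pathway.
    out = stem([clip])
    print(out[0].shape)  # expected: torch.Size([2, 64, 8, 56, 56])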
| ClassyVision-main | classy_vision/models/resnext3d_stem.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from .r2plus1_util import r2plus1_unit
class BasicTransformation(nn.Module):
"""
Basic transformation: 3x3x3 group conv, 3x3x3 group conv
"""
def __init__(
self,
dim_in,
dim_out,
temporal_stride,
spatial_stride,
groups,
inplace_relu=True,
bn_eps=1e-5,
bn_mmt=0.1,
**kwargs,
):
"""
Args:
dim_in (int): the channel dimensions of the input.
dim_out (int): the channel dimension of the output.
temporal_stride (int): the temporal stride of the bottleneck.
spatial_stride (int): the spatial_stride of the bottleneck.
groups (int): number of groups for the convolution.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
bn_eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Note that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
"""
super(BasicTransformation, self).__init__()
self._construct_model(
dim_in,
dim_out,
temporal_stride,
spatial_stride,
groups,
inplace_relu,
bn_eps,
bn_mmt,
)
def _construct_model(
self,
dim_in,
dim_out,
temporal_stride,
spatial_stride,
groups,
inplace_relu,
bn_eps,
bn_mmt,
):
# 3x3x3 group conv, BN, ReLU.
branch2a = nn.Conv3d(
dim_in,
dim_out,
[3, 3, 3], # kernel
stride=[temporal_stride, spatial_stride, spatial_stride],
padding=[1, 1, 1],
groups=groups,
bias=False,
)
branch2a_bn = nn.BatchNorm3d(dim_out, eps=bn_eps, momentum=bn_mmt)
branch2a_relu = nn.ReLU(inplace=inplace_relu)
# 3x3x3 group conv, BN, ReLU.
branch2b = nn.Conv3d(
dim_out,
dim_out,
[3, 3, 3], # kernel
stride=[1, 1, 1],
padding=[1, 1, 1],
groups=groups,
bias=False,
)
branch2b_bn = nn.BatchNorm3d(dim_out, eps=bn_eps, momentum=bn_mmt)
branch2b_bn.final_transform_op = True
self.transform = nn.Sequential(
branch2a, branch2a_bn, branch2a_relu, branch2b, branch2b_bn
)
def forward(self, x):
return self.transform(x)
class BasicR2Plus1DTransformation(BasicTransformation):
"""
Basic R(2+1)D transformation: 3x3x3 group conv, 3x3x3 group conv, with
each 3D conv factorized into a 2D spatial conv and a 1D temporal conv.
"""
def __init__(
self,
dim_in,
dim_out,
temporal_stride,
spatial_stride,
groups,
inplace_relu=True,
bn_eps=1e-5,
bn_mmt=0.1,
**kwargs,
):
"""
Args:
dim_in (int): the channel dimensions of the input.
dim_out (int): the channel dimension of the output.
temporal_stride (int): the temporal stride of the bottleneck.
spatial_stride (int): the spatial_stride of the bottleneck.
groups (int): number of groups for the convolution.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
bn_eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Note that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
"""
super(BasicR2Plus1DTransformation, self).__init__(
dim_in,
dim_out,
temporal_stride,
spatial_stride,
groups,
inplace_relu=inplace_relu,
bn_eps=bn_eps,
bn_mmt=bn_mmt,
)
def _construct_model(
self,
dim_in,
dim_out,
temporal_stride,
spatial_stride,
groups,
inplace_relu,
bn_eps,
bn_mmt,
):
# Implementation of R(2+1)D operation <https://arxiv.org/abs/1711.11248>.
# decompose the original 3D conv into one 2D spatial conv and one
# 1D temporal conv
branch2a = r2plus1_unit(
dim_in,
dim_out,
temporal_stride,
spatial_stride,
groups,
inplace_relu,
bn_eps,
bn_mmt,
)
branch2a_bn = nn.BatchNorm3d(dim_out, eps=bn_eps, momentum=bn_mmt)
branch2a_relu = nn.ReLU(inplace=inplace_relu)
branch2b = r2plus1_unit(
dim_out,
dim_out,
1, # temporal_stride
1, # spatial_stride
groups,
inplace_relu,
bn_eps,
bn_mmt,
)
branch2b_bn = nn.BatchNorm3d(dim_out, eps=bn_eps, momentum=bn_mmt)
branch2b_bn.final_transform_op = True
self.transform = nn.Sequential(
branch2a, branch2a_bn, branch2a_relu, branch2b, branch2b_bn
)
class PostactivatedBottleneckTransformation(nn.Module):
"""
Bottleneck transformation: Tx1x1, 1x3x3, 1x1x1, where T is the size of
temporal kernel.
"""
def __init__(
self,
dim_in,
dim_out,
temporal_stride,
spatial_stride,
num_groups,
dim_inner,
temporal_kernel_size=3,
temporal_conv_1x1=True,
spatial_stride_1x1=False,
inplace_relu=True,
bn_eps=1e-5,
bn_mmt=0.1,
**kwargs,
):
"""
Args:
dim_in (int): the channel dimensions of the input.
dim_out (int): the channel dimension of the output.
temporal_kernel_size (int): the temporal kernel sizes of the middle
convolution in the bottleneck.
temporal_conv_1x1 (bool): if True, do temporal convolution in the first
1x1 Conv3d. Otherwise, do it in the second 3x3 Conv3d.
temporal_stride (int): the temporal stride of the bottleneck.
spatial_stride (int): the spatial_stride of the bottleneck.
num_groups (int): number of groups for the convolution. num_groups=1
is for standard ResNet like networks, and num_groups>1 is for
ResNeXt like networks.
dim_inner (int): the inner dimension of the block.
spatial_stride_1x1 (bool): if True, apply spatial_stride to 1x1 conv.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
bn_eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Note that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
"""
super(PostactivatedBottleneckTransformation, self).__init__()
(temporal_kernel_size_1x1, temporal_kernel_size_3x3) = (
(temporal_kernel_size, 1)
if temporal_conv_1x1
else (1, temporal_kernel_size)
)
# MSRA -> stride=2 is on 1x1; TH/C2 -> stride=2 is on 3x3.
(str1x1, str3x3) = (
(spatial_stride, 1) if spatial_stride_1x1 else (1, spatial_stride)
)
# Tx1x1 conv, BN, ReLU.
self.branch2a = nn.Conv3d(
dim_in,
dim_inner,
kernel_size=[temporal_kernel_size_1x1, 1, 1],
stride=[1, str1x1, str1x1],
padding=[temporal_kernel_size_1x1 // 2, 0, 0],
bias=False,
)
self.branch2a_bn = nn.BatchNorm3d(dim_inner, eps=bn_eps, momentum=bn_mmt)
self.branch2a_relu = nn.ReLU(inplace=inplace_relu)
# Tx3x3 group conv, BN, ReLU.
self.branch2b = nn.Conv3d(
dim_inner,
dim_inner,
[temporal_kernel_size_3x3, 3, 3],
stride=[temporal_stride, str3x3, str3x3],
padding=[temporal_kernel_size_3x3 // 2, 1, 1],
groups=num_groups,
bias=False,
)
self.branch2b_bn = nn.BatchNorm3d(dim_inner, eps=bn_eps, momentum=bn_mmt)
self.branch2b_relu = nn.ReLU(inplace=inplace_relu)
# 1x1x1 conv, BN.
self.branch2c = nn.Conv3d(
dim_inner,
dim_out,
kernel_size=[1, 1, 1],
stride=[1, 1, 1],
padding=[0, 0, 0],
bias=False,
)
self.branch2c_bn = nn.BatchNorm3d(dim_out, eps=bn_eps, momentum=bn_mmt)
self.branch2c_bn.final_transform_op = True
def forward(self, x):
# Explicitly forward every layer.
# Branch2a.
x = self.branch2a(x)
x = self.branch2a_bn(x)
x = self.branch2a_relu(x)
# Branch2b.
x = self.branch2b(x)
x = self.branch2b_bn(x)
x = self.branch2b_relu(x)
# Branch2c
x = self.branch2c(x)
x = self.branch2c_bn(x)
return x
class PreactivatedBottleneckTransformation(nn.Module):
"""
Bottleneck transformation with pre-activation, which includes BatchNorm3D
and ReLU. Conv3D kernels are Tx1x1, 1x3x3, 1x1x1, where T is the size of
the temporal kernel (https://arxiv.org/abs/1603.05027).
"""
def __init__(
self,
dim_in,
dim_out,
temporal_stride,
spatial_stride,
num_groups,
dim_inner,
temporal_kernel_size=3,
temporal_conv_1x1=True,
spatial_stride_1x1=False,
inplace_relu=True,
bn_eps=1e-5,
bn_mmt=0.1,
disable_pre_activation=False,
**kwargs,
):
"""
Args:
dim_in (int): the channel dimensions of the input.
dim_out (int): the channel dimension of the output.
temporal_kernel_size (int): the temporal kernel sizes of the middle
convolution in the bottleneck.
temporal_conv_1x1 (bool): if True, do temporal convolution in the first
1x1 Conv3d. Otherwise, do it in the second 3x3 Conv3d.
temporal_stride (int): the temporal stride of the bottleneck.
spatial_stride (int): the spatial_stride of the bottleneck.
num_groups (int): number of groups for the convolution. num_groups=1
is for standard ResNet like networks, and num_groups>1 is for
ResNeXt like networks.
dim_inner (int): the inner dimension of the block.
spatial_stride_1x1 (bool): if True, apply spatial_stride to 1x1 conv.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
bn_eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Note that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
disable_pre_activation (bool): If true, disable pre activation,
including BatchNorm3D and ReLU.
"""
super(PreactivatedBottleneckTransformation, self).__init__()
(temporal_kernel_size_1x1, temporal_kernel_size_3x3) = (
(temporal_kernel_size, 1)
if temporal_conv_1x1
else (1, temporal_kernel_size)
)
(str1x1, str3x3) = (
(spatial_stride, 1) if spatial_stride_1x1 else (1, spatial_stride)
)
self.disable_pre_activation = disable_pre_activation
if not disable_pre_activation:
self.branch2a_bn = nn.BatchNorm3d(dim_in, eps=bn_eps, momentum=bn_mmt)
self.branch2a_relu = nn.ReLU(inplace=inplace_relu)
self.branch2a = nn.Conv3d(
dim_in,
dim_inner,
kernel_size=[temporal_kernel_size_1x1, 1, 1],
stride=[1, str1x1, str1x1],
padding=[temporal_kernel_size_1x1 // 2, 0, 0],
bias=False,
)
# Tx3x3 group conv, BN, ReLU.
self.branch2b_bn = nn.BatchNorm3d(dim_inner, eps=bn_eps, momentum=bn_mmt)
self.branch2b_relu = nn.ReLU(inplace=inplace_relu)
self.branch2b = nn.Conv3d(
dim_inner,
dim_inner,
[temporal_kernel_size_3x3, 3, 3],
stride=[temporal_stride, str3x3, str3x3],
padding=[temporal_kernel_size_3x3 // 2, 1, 1],
groups=num_groups,
bias=False,
)
# 1x1x1 conv, BN.
self.branch2c_bn = nn.BatchNorm3d(dim_inner, eps=bn_eps, momentum=bn_mmt)
self.branch2c_relu = nn.ReLU(inplace=inplace_relu)
self.branch2c = nn.Conv3d(
dim_inner,
dim_out,
kernel_size=[1, 1, 1],
stride=[1, 1, 1],
padding=[0, 0, 0],
bias=False,
)
self.branch2c.final_transform_op = True
def forward(self, x):
# Branch2a
if not self.disable_pre_activation:
x = self.branch2a_bn(x)
x = self.branch2a_relu(x)
x = self.branch2a(x)
# Branch2b
x = self.branch2b_bn(x)
x = self.branch2b_relu(x)
x = self.branch2b(x)
# Branch2c
x = self.branch2c_bn(x)
x = self.branch2c_relu(x)
x = self.branch2c(x)
return x
residual_transformations = {
"basic_r2plus1d_transformation": BasicR2Plus1DTransformation,
"basic_transformation": BasicTransformation,
"postactivated_bottleneck_transformation": PostactivatedBottleneckTransformation,
"preactivated_bottleneck_transformation": PreactivatedBottleneckTransformation,
# For more types of residual transformations, add them below
}
class PostactivatedShortcutTransformation(nn.Module):
"""
Skip connection used in ResNet3D model.
"""
def __init__(
self,
dim_in,
dim_out,
temporal_stride,
spatial_stride,
bn_eps=1e-5,
bn_mmt=0.1,
**kwargs,
):
super(PostactivatedShortcutTransformation, self).__init__()
# Use skip connection with projection if dim or spatial/temporal res change.
assert (dim_in != dim_out) or (spatial_stride != 1) or (temporal_stride != 1)
self.branch1 = nn.Conv3d(
dim_in,
dim_out,
kernel_size=1,
stride=[temporal_stride, spatial_stride, spatial_stride],
padding=0,
bias=False,
)
self.branch1_bn = nn.BatchNorm3d(dim_out, eps=bn_eps, momentum=bn_mmt)
def forward(self, x):
return self.branch1_bn(self.branch1(x))
class PreactivatedShortcutTransformation(nn.Module):
"""
Skip connection with pre-activation, which includes BatchNorm3D and ReLU,
in ResNet3D model (https://arxiv.org/abs/1603.05027).
"""
def __init__(
self,
dim_in,
dim_out,
temporal_stride,
spatial_stride,
inplace_relu=True,
bn_eps=1e-5,
bn_mmt=0.1,
disable_pre_activation=False,
**kwargs,
):
super(PreactivatedShortcutTransformation, self).__init__()
# Use skip connection with projection if dim or spatial/temporal res change.
assert (dim_in != dim_out) or (spatial_stride != 1) or (temporal_stride != 1)
if not disable_pre_activation:
self.branch1_bn = nn.BatchNorm3d(dim_in, eps=bn_eps, momentum=bn_mmt)
self.branch1_relu = nn.ReLU(inplace=inplace_relu)
self.branch1 = nn.Conv3d(
dim_in,
dim_out,
kernel_size=1,
stride=[temporal_stride, spatial_stride, spatial_stride],
padding=0,
bias=False,
)
def forward(self, x):
if hasattr(self, "branch1_bn") and hasattr(self, "branch1_relu"):
x = self.branch1_relu(self.branch1_bn(x))
x = self.branch1(x)
return x
skip_transformations = {
"postactivated_shortcut": PostactivatedShortcutTransformation,
"preactivated_shortcut": PreactivatedShortcutTransformation,
# For more types of skip transformations, add them below
}
class ResBlock(nn.Module):
"""
Residual block with skip connection.
"""
def __init__(
self,
dim_in,
dim_out,
dim_inner,
temporal_kernel_size,
temporal_conv_1x1,
temporal_stride,
spatial_stride,
skip_transformation_type,
residual_transformation_type,
num_groups=1,
inplace_relu=True,
bn_eps=1e-5,
bn_mmt=0.1,
disable_pre_activation=False,
):
"""
ResBlock class constructs residual blocks. More details can be found in:
"Deep residual learning for image recognition."
https://arxiv.org/abs/1512.03385
Args:
dim_in (int): the channel dimensions of the input.
dim_out (int): the channel dimension of the output.
dim_inner (int): the inner dimension of the block.
temporal_kernel_size (int): the temporal kernel sizes of the middle
convolution in the bottleneck.
temporal_conv_1x1 (bool): only used by PostactivatedBottleneckTransformation.
If True, do temporal convolution in the first 1x1 Conv3d.
Otherwise, do it in the second 3x3 Conv3d.
temporal_stride (int): the temporal stride of the bottleneck.
spatial_stride (int): the spatial_stride of the bottleneck.
skip_transformation_type (str): the type of skip transformation
residual_transformation_type (str): the type of residual transformation
num_groups (int): number of groups for the convolution. num_groups=1
is for standard ResNet like networks, and num_groups>1 is for
ResNeXt like networks.
disable_pre_activation (bool): If true, disable the preactivation,
which includes BatchNorm3D and ReLU.
"""
super(ResBlock, self).__init__()
assert skip_transformation_type in skip_transformations, (
"unknown skip transformation: %s" % skip_transformation_type
)
if (dim_in != dim_out) or (spatial_stride != 1) or (temporal_stride != 1):
self.skip = skip_transformations[skip_transformation_type](
dim_in,
dim_out,
temporal_stride,
spatial_stride,
bn_eps=bn_eps,
bn_mmt=bn_mmt,
disable_pre_activation=disable_pre_activation,
)
assert residual_transformation_type in residual_transformations, (
"unknown residual transformation: %s" % residual_transformation_type
)
self.residual = residual_transformations[residual_transformation_type](
dim_in,
dim_out,
temporal_stride,
spatial_stride,
num_groups,
dim_inner,
temporal_kernel_size=temporal_kernel_size,
temporal_conv_1x1=temporal_conv_1x1,
disable_pre_activation=disable_pre_activation,
)
self.relu = nn.ReLU(inplace_relu)
def forward(self, x):
if hasattr(self, "skip"):
x = self.skip(x) + self.residual(x)
else:
x = x + self.residual(x)
x = self.relu(x)
return x
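# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): building one
# post-activated bottleneck block through the registries above and running a
# dummy clip through it. Every dimension below is an assumption chosen only
# for this example.
if __name__ == "__main__":
    import torch

    block = ResBlock(
        dim_in=64,
        dim_out=256,
        dim_inner=64,
        temporal_kernel_size=3,
        temporal_conv_1x1=True,
        temporal_stride=1,
        spatial_stride=1,
        skip_transformation_type="postactivated_shortcut",
        residual_transformation_type="postactivated_bottleneck_transformation",
        num_groups=1,
    )
    x = torch.randn(2, 64, 4, 56, 56)  # (N, C, T, H, W)
    print(block(x).shape)  # expected: torch.Size([2, 256, 4, 56, 56])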
| ClassyVision-main | classy_vision/models/resnext3d_block.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Sequence
import torch
from classy_vision.generic.distributed_util import all_reduce_sum
from classy_vision.generic.util import is_pos_int, maybe_convert_to_one_hot
from classy_vision.meters import ClassyMeter
from . import register_meter
@register_meter("recall_at_k")
class RecallAtKMeter(ClassyMeter):
"""Meter to calculate top-k recall for single-label or multi-label
image classification tasks.
"""
def __init__(self, topk, target_is_one_hot=True, num_classes=None):
"""
args:
topk: list of int `k` values.
"""
super().__init__()
assert isinstance(topk, Sequence), "topk must be a sequence"
assert len(topk) > 0, "topk list should have at least one element"
assert [is_pos_int(x) for x in topk], "each value in topk must be >= 1"
self._topk = topk
# _total_* variables store running, in-sync totals for the
# metrics. These should not be communicated / summed.
self._total_correct_predictions_k = None
self._total_correct_targets = None
# _curr_* variables store counts since the last sync. Only
# these should be summed across workers and they are reset
# after each communication
self._curr_correct_predictions_k = None
self._curr_correct_targets = None
# Initialize all values properly
self.reset()
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "RecallAtKMeter":
"""Instantiates a RecallAtKMeter from a configuration.
Args:
config: A configuration for a RecallAtKMeter.
See :func:`__init__` for parameters expected in the config.
Returns:
A RecallAtKMeter instance.
"""
return cls(topk=config["topk"])
@property
def name(self):
return "recall_at_k"
def sync_state(self):
# Communications
self._curr_correct_predictions_k = all_reduce_sum(
self._curr_correct_predictions_k
)
self._curr_correct_targets = all_reduce_sum(self._curr_correct_targets)
# Store results
self._total_correct_predictions_k += self._curr_correct_predictions_k
self._total_correct_targets += self._curr_correct_targets
# Reset values until next sync
self._curr_correct_predictions_k.zero_()
self._curr_correct_targets.zero_()
@property
def value(self):
# Return value based on the local state of meter which
# includes the local sample count since last sync and the total global sample
# count obtained at the last sync
correct_predictions = {
k: self._curr_correct_predictions_k[i]
+ self._total_correct_predictions_k[i]
for i, k in enumerate(self._topk)
}
correct_targets = self._total_correct_targets + self._curr_correct_targets
return {
"top_{}".format(k): ((correct_predictions[k] / correct_targets).item())
if correct_targets
else 0.0
for k in self._topk
}
def get_classy_state(self):
"""Contains the states of the meter."""
return {
"name": self.name,
"top_k": self._topk,
"total_correct_predictions": self._total_correct_predictions_k.clone(),
"total_correct_targets": self._total_correct_targets.clone(),
"curr_correct_targets": self._curr_correct_targets.clone(),
"curr_correct_predictions_k": self._curr_correct_predictions_k.clone(),
}
def set_classy_state(self, state):
assert (
self.name == state["name"]
), "State name {state_name} does not match meter name {obj_name}".format(
state_name=state["name"], obj_name=self.name
)
assert (
self._topk == state["top_k"]
), "top-k of state {state_k} does not match object's top-k {obj_k}".format(
state_k=state["top_k"], obj_k=self._topk
)
# Restore the state -- correct_predictions and correct_targets.
self.reset()
self._total_correct_predictions_k = state["total_correct_predictions"].clone()
self._total_correct_targets = state["total_correct_targets"].clone()
self._curr_correct_predictions_k = state["curr_correct_predictions_k"].clone()
self._curr_correct_targets = state["curr_correct_targets"].clone()
def update(self, model_output, target, **kwargs):
"""
args:
model_output: tensor of shape (B, C) where each value is
either logit or class probability.
target: tensor of shape (B, C), which is one-hot /
multi-label encoded, or tensor of shape (B) /
(B, 1), integer encoded
"""
# Convert target to 0/1 encoding if isn't
target = maybe_convert_to_one_hot(target, model_output)
# If PyTorch AMP is being used, model outputs are probably fp16
# Since .topk() is not compatible with fp16, we promote the model outputs to full precision
_, pred_classes = model_output.float().topk(
max(self._topk), dim=1, largest=True, sorted=True
)
pred_mask_tensor = torch.zeros(target.size())
for i, k in enumerate(self._topk):
pred_mask_tensor.zero_()
self._curr_correct_predictions_k[i] += torch.sum(
# torch.min is used to simulate AND between binary
# tensors. If tensors are not binary, this will fail.
torch.min(
pred_mask_tensor.scatter_(1, pred_classes[:, :k], 1.0),
target.float(),
)
).item()
self._curr_correct_targets += target.sum().item()
def reset(self):
self._total_correct_predictions_k = torch.zeros(len(self._topk))
self._total_correct_targets = torch.zeros(1)
self._curr_correct_predictions_k = torch.zeros(len(self._topk))
self._curr_correct_targets = torch.zeros(1)
def validate(self, model_output_shape, target_shape):
assert (
len(model_output_shape) == 2
), "model_output_shape must be (B, C) \
Found shape {}".format(
model_output_shape
)
assert (
len(target_shape) > 0 and len(target_shape) < 3
), "target_shape must be (B) or (B, C) \
Found shape {}".format(
target_shape
)
assert (
max(self._topk) < model_output_shape[1]
), "k in top_k, for \
recall_meter cannot be larger than num_classes: \
{}".format(
model_output_shape[1]
)
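# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a tiny worked
# example of top-1 recall on two samples; the logits and labels are made up
# purely for illustration.
if __name__ == "__main__":
    meter = RecallAtKMeter(topk=[1])
    model_output = torch.tensor([[0.1, 0.9, 0.0], [0.8, 0.1, 0.1]])
    target = torch.tensor([1, 0])  # integer labels, converted to one-hot internally
    meter.update(model_output, target)
    # Both top-1 predictions hit their targets and there are two positive
    # targets in total, so top-1 recall is 2 / 2 = 1.0.
    print(meter.value)  # expected: {'top_1': 1.0}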
| ClassyVision-main | classy_vision/meters/recall_meter.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import traceback
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
from .classy_meter import ClassyMeter
FILE_ROOT = Path(__file__).parent
METER_REGISTRY = {}
METER_REGISTRY_TB = {}
def build_meter(config):
"""Builds a :class:`ClassyMeter` from a config.
This assumes a 'name' key in the config which is used to determine what
meter class to instantiate. For instance, a config `{"name": "my_meter",
"foo": "bar"}` will find a class that was registered as "my_meter" (see
:func:`register_meter`) and call .from_config on it."""
return METER_REGISTRY[config["name"]].from_config(config)
def build_meters(config):
configs = [{"name": name, **args} for name, args in config.items()]
return [build_meter(config) for config in configs]
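# Illustrative config sketch (not part of the original module): `build_meters`
# turns a dict keyed by meter name into a list of meter instances, e.g.
#
#     meters = build_meters({"accuracy": {"topk": [1, 5]}, "recall_at_k": {"topk": [1]}})
#
# which is equivalent to calling build_meter({"name": "accuracy", "topk": [1, 5]})
# and build_meter({"name": "recall_at_k", "topk": [1]}).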
def register_meter(name, bypass_checks=False):
"""Registers a :class:`ClassyMeter` subclass.
This decorator allows Classy Vision to instantiate a subclass of
ClassyMeter from a configuration file, even if the class itself is not
part of the Classy Vision framework. To use it, apply this decorator to a
ClassyMeter subclass, like this:
.. code-block:: python
@register_meter('accuracy')
class AccuracyMeter(ClassyMeter):
...
To instantiate a meter from a configuration file, see
:func:`build_meter`."""
def register_meter_cls(cls):
if not bypass_checks:
if name in METER_REGISTRY:
msg = (
"Cannot register duplicate meter ({}). Already registered at \n{}\n"
)
raise ValueError(msg.format(name, METER_REGISTRY_TB[name]))
if not issubclass(cls, ClassyMeter):
raise ValueError(
"Meter ({}: {}) must extend \
ClassyMeter".format(
name, cls.__name__
)
)
tb = "".join(traceback.format_stack())
METER_REGISTRY[name] = cls
METER_REGISTRY_TB[name] = tb
return cls
return register_meter_cls
# automatically import any Python files in the meters/ directory
import_all_modules(FILE_ROOT, "classy_vision.meters")
from .accuracy_meter import AccuracyMeter # isort:skip
from .precision_meter import PrecisionAtKMeter # isort:skip
from .recall_meter import RecallAtKMeter # isort:skip
from .video_accuracy_meter import VideoAccuracyMeter # isort:skip
__all__ = [
"AccuracyMeter",
"ClassyMeter",
"PrecisionAtKMeter",
"RecallAtKMeter",
"VideoAccuracyMeter",
"build_meter",
"build_meters",
"register_meter",
]
| ClassyVision-main | classy_vision/meters/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
from classy_vision.meters import ClassyMeter
class VideoMeter(ClassyMeter):
"""An abstraction of meter for evaluating video models.
The video-level metric is computed by averaging clip-level predictions and
comparing the result with the video-level ground-truth label.
This meter abstraction can wrap conventional classy meters by passing the
averaged clip-level predictions to the wrapped meter to compute video-level metrics.
"""
def __init__(self, clips_per_video_train, clips_per_video_test):
"""Constructor of VideoMeter class.
Args:
clips_per_video_train: No. of clips sampled per video at train time
clips_per_video_test: No. of clips sampled per video at test time
"""
super().__init__()
self._clips_per_video_train = clips_per_video_train
self._clips_per_video_test = clips_per_video_test
@property
def value(self):
return self.meter.value
def sync_state(self):
self.meter.sync_state()
@property
def meter(self) -> "ClassyMeter":
"""Every video meter should implement to have its own internal meter.
It consumes the video level predictions and ground truth label, and compute
the actual metrics.
Returns:
An instance of ClassyMeter.
"""
raise NotImplementedError
def get_classy_state(self):
"""Contains the states of the meter."""
state = {}
state["meter_state"] = self.meter.get_classy_state()
state["name"] = self.name
state["clips_per_video_train"] = self._clips_per_video_train
state["clips_per_video_test"] = self._clips_per_video_test
return state
def set_classy_state(self, state):
assert (
self.name == state["name"]
), "State name {state_name} does not match meter name {obj_name}".format(
state_name=state["name"], obj_name=self.name
)
assert (
self._clips_per_video_train == state["clips_per_video_train"]
), "incompatible clips_per_video_train for video accuracy"
assert (
self._clips_per_video_test == state["clips_per_video_test"]
), "incompatible clips_per_video_test for video accuracy"
# Restore the state -- correct_predictions and sample_count.
self.reset()
self.meter.set_classy_state(state["meter_state"])
def update(self, model_output, target, is_train, **kwargs):
"""Updates any internal state of meter with new model output and target.
Args:
model_output: tensor of shape (B * clips_per_video, C) where each value is
either logit or class probability.
target: tensor of shape (B * clips_per_video).
is_train: if True, the meter is being updated during the training stage
Note: For binary classification, C=2.
"""
num_clips = len(model_output)
clips_per_video = (
self._clips_per_video_train if is_train else self._clips_per_video_test
)
if not num_clips % clips_per_video == 0:
logging.info(
"Skip meter update. Because for video model testing, batch size "
"is expected to be a multplier of No. of clips per video. "
"num_clips: %d, clips_per_video: %d" % (num_clips, clips_per_video)
)
return
num_videos = num_clips // clips_per_video
for i in range(num_videos):
clip_labels = target[i * clips_per_video : (i + 1) * clips_per_video]
if clip_labels.ndim == 1:
# single label
assert (
len(torch.unique(clip_labels)) == 1
), "all clips from the same video should have same label"
elif clip_labels.ndim == 2:
# multi-hot label
for j in range(1, clip_labels.shape[0]):
assert torch.equal(
clip_labels[0], clip_labels[j]
), "all clips from the same video should have the same labels"
else:
raise ValueError(
"dimension of clip label matrix should be either 1 or 2"
)
video_model_output = torch.mean(
torch.reshape(model_output, (num_videos, clips_per_video, -1)), 1
)
video_target = target[::clips_per_video]
self.meter.update(video_model_output, video_target)
def reset(self):
self.meter.reset()
def validate(self, model_output_shape, target_shape):
self.meter.validate(model_output_shape, target_shape)
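# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module) of the clip-to-video
# reduction performed in `update`: clip-level scores are reshaped to
# (num_videos, clips_per_video, C), averaged over the clip axis, and the
# target of the first clip of each video is kept. The numbers are made up.
if __name__ == "__main__":
    clips_per_video = 2
    clip_output = torch.tensor([[0.2, 0.8], [0.6, 0.4], [0.9, 0.1], [0.7, 0.3]])
    clip_target = torch.tensor([1, 1, 0, 0])
    num_videos = len(clip_output) // clips_per_video
    video_output = torch.mean(
        torch.reshape(clip_output, (num_videos, clips_per_video, -1)), 1
    )
    video_target = clip_target[::clips_per_video]
    print(video_output)  # tensor([[0.4000, 0.6000], [0.8000, 0.2000]])
    print(video_target)  # tensor([1, 0])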
| ClassyVision-main | classy_vision/meters/video_meter.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Sequence
from classy_vision.generic.util import is_pos_int
from classy_vision.meters.accuracy_meter import AccuracyMeter
from . import register_meter
from .video_meter import VideoMeter
@register_meter("video_accuracy")
class VideoAccuracyMeter(VideoMeter):
"""Meter to calculate top-k video-level accuracy for single/multi label
video classification task.
Video-level accuarcy is computed by averaging clip-level predictions and
compare the reslt with video-level groundtruth label.
"""
def __init__(self, topk, clips_per_video_train, clips_per_video_test):
"""
Args:
topk: list of int `k` values.
clips_per_video_train: No. of clips sampled per video at train time
clips_per_video_test: No. of clips sampled per video at test time
"""
super().__init__(clips_per_video_train, clips_per_video_test)
assert isinstance(topk, Sequence), "topk must be a sequence"
assert len(topk) > 0, "topk list should have at least one element"
assert [is_pos_int(x) for x in topk], "each value in topk must be >= 1"
self._accuracy_meter = AccuracyMeter(topk)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "VideoAccuracyMeter":
"""Instantiates a VideoAccuracyMeter from a configuration.
Args:
config: A configuration for a VideoAccuracyMeter.
See :func:`__init__` for parameters expected in the config.
Returns:
A VideoAccuracyMeter instance.
"""
return cls(
topk=config["topk"],
clips_per_video_train=config.get("clips_per_video_train", 1),
clips_per_video_test=config["clips_per_video_test"],
)
@property
def name(self):
return "video_accuracy"
@property
def meter(self):
return self._accuracy_meter
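# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); the topk and
# clip counts below are assumptions chosen only for this example.
if __name__ == "__main__":
    import torch

    meter = VideoAccuracyMeter.from_config(
        {"topk": [1], "clips_per_video_train": 1, "clips_per_video_test": 2}
    )
    # Four clip-level predictions = two videos with two clips each.
    clip_output = torch.tensor([[0.2, 0.8], [0.6, 0.4], [0.9, 0.1], [0.7, 0.3]])
    clip_target = torch.tensor([1, 1, 0, 0])
    meter.update(clip_output, clip_target, is_train=False)
    print(meter.value)  # expected: {'top_1': 1.0}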
| ClassyVision-main | classy_vision/meters/video_accuracy_meter.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Tuple
import torch
from classy_vision.generic.util import log_class_usage
class ClassyMeter:
"""
Base class to measure various metrics during training and testing phases.
This can include meters like Accuracy, Precision and Recall, etc.
"""
def __init__(self):
log_class_usage("Meter", self.__class__)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ClassyMeter":
"""Instantiates a ClassyMeter using a configuration.
Args:
config: A configuration for a ClassyMeter.
Returns:
A ClassyMeter instance.
"""
raise NotImplementedError
@property
def name(self) -> str:
"""The name of the meter."""
raise NotImplementedError
@property
def value(self) -> Any:
"""
Value of meter based on local state, can be any python object.
Note:
If there are multiple training processes then this
represents the local state of the meter. If :func:`sync_state` is
implemented, then value will return the global state since the
last sync PLUS any local unsynced updates that have occurred
in the local process.
"""
raise NotImplementedError
def sync_state(self) -> None:
"""
Syncs state with all other meters in distributed training.
If not provided by child class this does nothing by default
and meter only provides the local process stats. If
implemented then the meter provides the global stats at last
sync + any local updates since the last sync.
Warning:
Calls to sync_state could involve communications via
:mod:`torch.distributed` which can result in a loss of performance or
deadlocks if not coordinated among threads.
"""
pass
def reset(self):
"""
Resets any internal meter state.
Should normally be called at the end of a phase.
"""
raise NotImplementedError
def update(
self, model_output: torch.Tensor, target: torch.Tensor, **kwargs
) -> None:
"""
Updates any internal state of meter.
Should be called after each batch processing of each phase.
Args:
model_output: Output of a :class:`ClassyModel`.
target: Target provided by a dataloader from :class:`ClassyDataset`.
"""
raise NotImplementedError
def validate(self, model_output_shape: Tuple, target_shape: Tuple) -> None:
"""
Validate the meter.
Checks if the meter can be calculated on the given ``model_output_shape``
and ``target_shape``.
"""
raise NotImplementedError
def get_classy_state(self) -> Dict[str, Any]:
"""Get the state of the ClassyMeter.
The returned state is used for checkpointing.
Returns:
A state dictionary containing the state of the meter.
"""
raise NotImplementedError
def set_classy_state(self, state: Dict[str, Any]) -> None:
"""Set the state of the ClassyMeter.
Args:
state_dict: The state dictionary. Must be the output of a call to
:func:`get_classy_state`.
This is used to load the state of the meter from a checkpoint.
"""
raise NotImplementedError
def __repr__(self):
"""Returns a string representation of the meter, used for logging.
The default implementation assumes value is a dict. value is not
required to be a dict, and in that case you should override this
method."""
if not isinstance(self.value, dict):
return super().__repr__()
values = ",".join([f"{key}={value:.6f}" for key, value in self.value.items()])
return f"{self.name}_meter({values})"
| ClassyVision-main | classy_vision/meters/classy_meter.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Sequence
import torch
from classy_vision.generic.distributed_util import all_reduce_sum
from classy_vision.generic.util import is_pos_int, maybe_convert_to_one_hot
from classy_vision.meters import ClassyMeter
from . import register_meter
@register_meter("precision_at_k")
class PrecisionAtKMeter(ClassyMeter):
"""
Meter to calculate top-k precision for single-label or multi-label
image classification tasks. Note that ties are resolved randomly.
"""
def __init__(self, topk):
"""
args:
topk: list of int `k` values.
"""
super().__init__()
assert isinstance(topk, Sequence), "topk must be a sequence"
assert len(topk) > 0, "topk list should have at least one element"
assert [is_pos_int(x) for x in topk], "each value in topk must be >= 1"
self._topk = topk
# _total_* variables store running, in-sync totals for the
# metrics. These should not be communicated / summed.
self._total_correct_predictions_k = None
self._total_sample_count = None
# _curr_* variables store counts since the last sync. Only
# these should be summed across workers and they are reset
# after each communication
self._curr_correct_predictions_k = None
self._curr_sample_count = None
# Initialize all values properly
self.reset()
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "PrecisionAtKMeter":
"""Instantiates a PrecisionAtKMeter from a configuration.
Args:
config: A configuration for a PrecisionAtKMeter.
See :func:`__init__` for parameters expected in the config.
Returns:
A PrecisionAtKMeter instance.
"""
return cls(topk=config["topk"])
@property
def name(self):
return "precision_at_k"
def sync_state(self):
# Communications
self._curr_correct_predictions_k = all_reduce_sum(
self._curr_correct_predictions_k
)
self._curr_sample_count = all_reduce_sum(self._curr_sample_count)
# Store results
self._total_correct_predictions_k += self._curr_correct_predictions_k
self._total_sample_count += self._curr_sample_count
# Reset values until next sync
self._curr_correct_predictions_k.zero_()
self._curr_sample_count.zero_()
@property
def value(self):
# Return value based on the local state of meter which
# includes the local sample count since last sync and the total global sample
# count obtained at the last sync
correct_predictions = {
k: self._curr_correct_predictions_k[i]
+ self._total_correct_predictions_k[i]
for i, k in enumerate(self._topk)
}
sample_count = self._total_sample_count + self._curr_sample_count
return {
"top_{}".format(k): (correct_predictions[k] / (k * sample_count)).item()
if sample_count
else 0.0
for k in self._topk
}
def get_classy_state(self):
"""Contains the states of the meter."""
return {
"name": self.name,
"top_k": self._topk,
"total_correct_predictions": self._total_correct_predictions_k.clone(),
"total_sample_count": self._total_sample_count.clone(),
"curr_sample_count": self._curr_sample_count.clone(),
"curr_correct_predictions_k": self._curr_correct_predictions_k.clone(),
}
def set_classy_state(self, state):
assert (
self.name == state["name"]
), "State name {state_name} does not match meter name {obj_name}".format(
state_name=state["name"], obj_name=self.name
)
assert (
self._topk == state["top_k"]
), "top-k of state {state_k} does not match object's top-k {obj_k}".format(
state_k=state["top_k"], obj_k=self._topk
)
# Restore the state -- correct_predictions and sample_count.
self.reset()
self._total_correct_predictions_k = state["total_correct_predictions"].clone()
self._total_sample_count = state["total_sample_count"].clone()
self._curr_correct_predictions_k = state["curr_correct_predictions_k"].clone()
self._curr_sample_count = state["curr_sample_count"].clone()
def update(self, model_output, target, **kwargs):
"""
args:
model_output: tensor of shape (B, C) where each value is
either logit or class probability.
target: tensor of shape (B, C), which is one-hot /
multi-label encoded, or tensor of shape (B) /
(B, 1), integer encoded
"""
# Convert target to 0/1 encoding if isn't
target = maybe_convert_to_one_hot(target, model_output)
# If PyTorch AMP is being used, model outputs are probably fp16
# Since .topk() is not compatible with fp16, we promote the model outputs to full precision
_, pred_classes = model_output.float().topk(
max(self._topk), dim=1, largest=True, sorted=True
)
pred_mask_tensor = torch.zeros(target.size())
for i, k in enumerate(self._topk):
pred_mask_tensor.zero_()
self._curr_correct_predictions_k[i] += torch.sum(
# torch.min is used to simulate AND between binary
# tensors. If tensors are not binary, this will fail.
torch.min(
pred_mask_tensor.scatter_(1, pred_classes[:, :k], 1.0),
target.float(),
)
).item()
self._curr_sample_count += model_output.shape[0]
def reset(self):
self._total_correct_predictions_k = torch.zeros(len(self._topk))
self._total_sample_count = torch.zeros(1)
self._curr_correct_predictions_k = torch.zeros(len(self._topk))
self._curr_sample_count = torch.zeros(1)
def validate(self, model_output_shape, target_shape):
assert (
len(model_output_shape) == 2
), "model_output_shape must be (B, C) \
Found shape {}".format(
model_output_shape
)
assert (
len(target_shape) > 0 and len(target_shape) < 3
), "target_shape must be (B) or (B, C) \
Found shape {}".format(
target_shape
)
assert (
max(self._topk) < model_output_shape[1]
), "k in top_k, for \
precision_meter cannot be larger than num_classes: \
{}".format(
model_output_shape[1]
)
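# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): unlike recall,
# precision@k divides by k * sample_count, so with a single correct class per
# sample the best possible top-2 precision is 0.5. The numbers are made up.
if __name__ == "__main__":
    meter = PrecisionAtKMeter(topk=[2])
    meter.update(
        torch.tensor([[0.7, 0.2, 0.1], [0.1, 0.6, 0.3]]), torch.tensor([0, 1])
    )
    print(meter.value)  # expected: {'top_2': 0.5}, i.e. 2 hits / (2 * 2 samples)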
| ClassyVision-main | classy_vision/meters/precision_meter.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Sequence
import torch
from classy_vision.generic.distributed_util import all_reduce_sum
from classy_vision.generic.util import is_pos_int, maybe_convert_to_one_hot
from classy_vision.meters import ClassyMeter
from . import register_meter
@register_meter("accuracy")
class AccuracyMeter(ClassyMeter):
"""Meter to calculate top-k accuracy for single label/ multi label
image classification task.
"""
def __init__(self, topk):
"""
args:
topk: list of int `k` values.
"""
super().__init__()
assert isinstance(topk, Sequence), "topk must be a sequence"
assert len(topk) > 0, "topk list should have at least one element"
assert [is_pos_int(x) for x in topk], "each value in topk must be >= 1"
self._topk = topk
# _total_* variables store running, in-sync totals for the
# metrics. These should not be communicated / summed.
self._total_correct_predictions_k = None
self._total_sample_count = None
# _curr_* variables store counts since the last sync. Only
# these should be summed across workers and they are reset
# after each communication
self._curr_correct_predictions_k = None
self._curr_sample_count = None
# Initialize all values properly
self.reset()
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "AccuracyMeter":
"""Instantiates a AccuracyMeter from a configuration.
Args:
config: A configuration for a AccuracyMeter.
See :func:`__init__` for parameters expected in the config.
Returns:
A AccuracyMeter instance.
"""
return cls(topk=config["topk"])
@property
def name(self):
return "accuracy"
def sync_state(self):
# Communications
self._curr_correct_predictions_k = all_reduce_sum(
self._curr_correct_predictions_k
)
self._curr_sample_count = all_reduce_sum(self._curr_sample_count)
# Store results
self._total_correct_predictions_k += self._curr_correct_predictions_k
self._total_sample_count += self._curr_sample_count
# Reset values until next sync
self._curr_correct_predictions_k.zero_()
self._curr_sample_count.zero_()
@property
def value(self):
# Return value based on the local state of meter which
# includes the local sample count since last sync and the total global sample
# count obtained at the last sync
correct_predictions = {
k: self._curr_correct_predictions_k[i]
+ self._total_correct_predictions_k[i]
for i, k in enumerate(self._topk)
}
sample_count = self._total_sample_count + self._curr_sample_count
return {
"top_{}".format(k): (correct_predictions[k] / sample_count).item()
if sample_count
else 0.0
for k in self._topk
}
def get_classy_state(self):
"""Contains the states of the meter."""
return {
"name": self.name,
"top_k": self._topk,
"total_correct_predictions": self._total_correct_predictions_k.clone(),
"total_sample_count": self._total_sample_count.clone(),
"curr_sample_count": self._curr_sample_count.clone(),
"curr_correct_predictions_k": self._curr_correct_predictions_k.clone(),
}
def set_classy_state(self, state):
assert (
self.name == state["name"]
), "State name {state_name} does not match meter name {obj_name}".format(
state_name=state["name"], obj_name=self.name
)
assert (
self._topk == state["top_k"]
), "top-k of state {state_k} does not match object's top-k {obj_k}".format(
state_k=state["top_k"], obj_k=self._topk
)
# Restore the state -- correct_predictions and sample_count.
self.reset()
self._total_correct_predictions_k = state["total_correct_predictions"].clone()
self._total_sample_count = state["total_sample_count"].clone()
self._curr_correct_predictions_k = state["curr_correct_predictions_k"].clone()
self._curr_sample_count = state["curr_sample_count"].clone()
def update(self, model_output, target, **kwargs):
"""
args:
model_output: tensor of shape (B, C) where each value is
either logit or class probability.
target: tensor of shape (B, C), which is one-hot /
multi-label encoded, or tensor of shape (B) /
(B, 1), integer encoded
"""
# Convert target to 0/1 encoding if isn't
target = maybe_convert_to_one_hot(target, model_output)
# If PyTorch AMP is being used, model outputs are probably fp16
# Since .topk() is not compatible with fp16, we promote the model outputs to full precision
_, pred = model_output.float().topk(
max(self._topk), dim=1, largest=True, sorted=True
)
for i, k in enumerate(self._topk):
self._curr_correct_predictions_k[i] += (
torch.gather(target, dim=1, index=pred[:, :k])
.max(dim=1)
.values.sum()
.item()
)
self._curr_sample_count += model_output.shape[0]
def reset(self):
self._total_correct_predictions_k = torch.zeros(len(self._topk))
self._total_sample_count = torch.zeros(1)
self._curr_correct_predictions_k = torch.zeros(len(self._topk))
self._curr_sample_count = torch.zeros(1)
def validate(self, model_output_shape, target_shape):
assert (
len(model_output_shape) == 2
), "model_output_shape must be (B, C) \
Found shape {}".format(
model_output_shape
)
assert (
len(target_shape) > 0 and len(target_shape) < 3
), "target_shape must be (B) or (B, C) \
Found shape {}".format(
target_shape
)
assert (
max(self._topk) < model_output_shape[1]
), "k in top_k, for \
accuracy_meter cannot be larger than num_classes: \
{}".format(
model_output_shape[1]
)
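# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a tiny top-1 / top-2
# accuracy computation on two made-up samples, showing the gather-over-one-hot
# counting used in `update`.
if __name__ == "__main__":
    meter = AccuracyMeter(topk=[1, 2])
    meter.update(
        torch.tensor([[0.1, 0.7, 0.2], [0.6, 0.3, 0.1]]), torch.tensor([2, 0])
    )
    print(meter.value)  # expected: {'top_1': 0.5, 'top_2': 1.0}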
| ClassyVision-main | classy_vision/meters/accuracy_meter.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
from typing import Any, Dict, Iterable, Tuple
import torch
import torch.nn as nn
from classy_vision.generic.util import get_torch_version
from classy_vision.hooks import register_hook
from classy_vision.hooks.classy_hook import ClassyHook
@register_hook("ema_model_weights")
class ExponentialMovingAverageModelHook(ClassyHook):
"""
Hook which keeps a track of the exponential moving average (EMA) of the model's
parameters and applies the EMA params to the model during the test phases.
Saving the state on the CPU saves GPU memory, but makes training slower, since
the model parameters need to be moved to the CPU before averaging.
Note:
This hook stores two additional copies of the model's parameters, which will
increase memory usage significantly.
"""
on_end = ClassyHook._noop
def __init__(
self, decay: float, consider_bn_buffers: bool = True, device: str = "gpu"
) -> None:
"""The constructor method of ExponentialMovingAverageModelHook.
Args:
decay: EMA decay factor, should be in [0, 1]. A decay of 0 corresponds to
always using the latest value (no EMA) and a decay of 1 corresponds to
not updating weights after initialization.
consider_bn_buffers: Whether to apply EMA to batch norm buffers
device: Device to store the model state.
"""
super().__init__()
assert 0 <= decay <= 1, "Decay should be between 0 and 1"
assert device in ["cpu", "gpu"], "Device should be one of cpu or gpu"
self.decay: float = decay
self.consider_bn_buffers = consider_bn_buffers
self.device = "cuda" if device == "gpu" else "cpu"
self.state.model_state = {}
self.state.ema_model_state = {}
self.ema_model_state_list = []
self.param_list = []
logging.info(
f"{self.__class__.__name__} initialized with a decay of "
f"{decay} on device {device}"
)
def get_model_state_iterator(self, model: nn.Module) -> Iterable[Tuple[str, Any]]:
"""Get an iterator over the model state to apply EMA to."""
iterable = model.named_parameters()
if self.consider_bn_buffers:
# also add batch norm buffers to the list of state params to iterate over
buffers_iterable = (
(f"{module_name}_buffer_{name}", buffer)
for module_name, module in model.named_modules()
for name, buffer in module.named_buffers()
if isinstance(module, nn.modules.batchnorm._BatchNorm)
)
iterable = itertools.chain(iterable, buffers_iterable)
return iterable
def _save_current_model_state(self, model: nn.Module, model_state: Dict[str, Any]):
"""Copy the model's state to the provided dict."""
for name, param in self.get_model_state_iterator(model):
model_state[name] = param.detach().clone().to(device=self.device)
def on_start(self, task) -> None:
if self.state.model_state:
# loaded state from checkpoint, do not re-initialize, only move the state
# to the right device
for name in self.state.model_state:
self.state.model_state[name] = self.state.model_state[name].to(
device=self.device
)
self.state.ema_model_state[name] = self.state.ema_model_state[name].to(
device=self.device
)
else:
self._save_current_model_state(task.base_model, self.state.model_state)
self._save_current_model_state(task.base_model, self.state.ema_model_state)
if self.use_optimization(task):
non_fp_states = []
for name in self.state.ema_model_state:
if self.state.ema_model_state[name].dtype not in [
torch.float32,
torch.float16,
]:
non_fp_states.append(name)
if non_fp_states:
logging.warning(
f"In {self.__class__.__name__}, {non_fp_states} are excluded in EMA hook"
f"because the dtype is not fp32 or fp16."
)
def on_phase_start(self, task) -> None:
# restore the right state depending on the phase type
use_ema = (
(not task.train and task.ema) if hasattr(task, "ema") else not task.train
)
self.set_model_state(task, use_ema=use_ema)
if self.use_optimization(task):
self.param_list = []
self.ema_model_state_list = []
for name, param in self.get_model_state_iterator(task.base_model):
if param.dtype in [torch.float32, torch.float16]:
self.param_list.append(param)
self.ema_model_state_list.append(self.state.ema_model_state[name])
else:
logging.warning(
"ExponentialMovingAverageModelHook has better performance since PyTorch version 1.7 "
"and the ema state is on the same device as the model params"
)
def on_phase_end(self, task) -> None:
if task.train:
# save the current model state since this will be overwritten by the ema
# state in the test phase
self._save_current_model_state(task.base_model, self.state.model_state)
def on_step(self, task) -> None:
if not task.train:
return
with torch.no_grad():
if self.use_optimization(task):
torch._foreach_mul_(self.ema_model_state_list, self.decay)
torch._foreach_add_(
self.ema_model_state_list, self.param_list, alpha=(1 - self.decay)
)
else:
for name, param in self.get_model_state_iterator(task.base_model):
self.state.ema_model_state[
name
] = self.decay * self.state.ema_model_state[name] + (
1 - self.decay
) * param.to(
device=self.device
)
def set_model_state(self, task, use_ema: bool) -> None:
"""
Depending on use_ema, set the appropriate state for the model.
"""
model_state = self.state.ema_model_state if use_ema else self.state.model_state
with torch.no_grad():
for name, param in self.get_model_state_iterator(task.base_model):
param.copy_(model_state[name])
def use_optimization(self, task):
# we can only use the optimization if we are on PyTorch >= 1.7 and the EMA state
# is on the same device as the model
return get_torch_version() >= [1, 7] and task.use_gpu == (self.device == "cuda")
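# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module) of the update rule the
# hook applies on every training step: ema <- decay * ema + (1 - decay) * param.
# The decay and tensors below are assumptions chosen only for this example.
if __name__ == "__main__":
    decay = 0.9
    ema = torch.tensor([1.0, 1.0])
    param = torch.tensor([0.0, 2.0])
    ema = decay * ema + (1 - decay) * param
    print(ema)  # tensor([0.9000, 1.1000])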
| ClassyVision-main | classy_vision/hooks/exponential_moving_average_model_hook.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional
from classy_vision.generic.distributed_util import is_primary
from classy_vision.hooks import register_hook
from classy_vision.hooks.classy_hook import ClassyHook
try:
import progressbar
progressbar_available = True
except ImportError:
progressbar_available = False
@register_hook("progress_bar")
class ProgressBarHook(ClassyHook):
"""
Displays a progress bar to show progress in processing batches.
"""
on_start = ClassyHook._noop
on_end = ClassyHook._noop
def __init__(self) -> None:
"""The constructor method of ProgressBarHook."""
super().__init__()
self.progress_bar: Optional[progressbar.ProgressBar] = None
self.bar_size: int = 0
self.batches: int = 0
def on_phase_start(self, task) -> None:
"""Create and display a progress bar with 0 progress."""
if not progressbar_available:
raise RuntimeError(
"progressbar module not installed, cannot use ProgressBarHook"
)
if is_primary():
self.bar_size = task.num_batches_per_phase
self.batches = 0
self.progress_bar = progressbar.ProgressBar(self.bar_size)
self.progress_bar.start()
def on_step(self, task) -> None:
"""Update the progress bar with the batch size."""
if task.train and is_primary() and self.progress_bar is not None:
self.batches += 1
self.progress_bar.update(min(self.batches, self.bar_size))
def on_phase_end(self, task) -> None:
"""Clear the progress bar at the end of the phase."""
if is_primary() and self.progress_bar is not None:
self.progress_bar.finish()
| ClassyVision-main | classy_vision/hooks/progress_bar_hook.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
from typing import Any, Dict
from classy_vision.generic.util import log_class_usage
class ClassyHookState:
"""Class to store state within instances of ClassyHook.
Any serializable data can be stored in the instance's attributes.
"""
def get_classy_state(self) -> Dict[str, Any]:
return self.__dict__
def set_classy_state(self, state_dict: Dict[str, Any]):
# We take a conservative approach and only update the dictionary instead of
# replacing it. This allows hooks to continue functioning in case the state
# is loaded from older implementations.
self.__dict__.update(state_dict)
class ClassyHook(ABC):
"""Base class for hooks.
Hooks allow injecting behavior at different points of the training loop, which
are listed below in chronological order.
on_start -> on_phase_start ->
on_step -> on_phase_end -> on_end
Deriving classes should call ``super().__init__()`` and store any state in
``self.state``. Any state added to this property should be serializable.
E.g. -
.. code-block:: python
class MyHook(ClassyHook):
def __init__(self, a, b):
super().__init__()
self.state.a = [1,2,3]
self.state.b = "my_hook"
# the following line is not allowed
# self.state.my_lambda = lambda x: x^2
"""
def __init__(self):
log_class_usage("Hooks", self.__class__)
self.state = ClassyHookState()
@classmethod
def from_config(cls, config) -> "ClassyHook":
return cls(**config)
def _noop(self, *args, **kwargs) -> None:
"""Derived classes can set their hook functions to this.
This is useful if they want those hook functions to not do anything.
"""
pass
@classmethod
def name(cls) -> str:
"""Returns the name of the class."""
return cls.__name__
@abstractmethod
def on_start(self, task) -> None:
"""Called at the start of training."""
pass
@abstractmethod
def on_phase_start(self, task) -> None:
"""Called at the start of each phase."""
pass
@abstractmethod
def on_step(self, task) -> None:
"""Called each time after parameters have been updated by the optimizer."""
pass
@abstractmethod
def on_phase_end(self, task) -> None:
"""Called at the end of each phase (epoch)."""
pass
@abstractmethod
def on_end(self, task) -> None:
"""Called at the end of training."""
pass
def get_classy_state(self) -> Dict[str, Any]:
"""Get the state of the ClassyHook.
The returned state is used for checkpointing.
Returns:
A state dictionary containing the state of the hook.
"""
return self.state.get_classy_state()
def set_classy_state(self, state_dict: Dict[str, Any]) -> None:
"""Set the state of the ClassyHook.
Args:
state_dict: The state dictionary. Must be the output of a call to
:func:`get_classy_state`.
This is used to load the state of the hook from a checkpoint.
"""
self.state.set_classy_state(state_dict)
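# --- Illustrative sketch (editor's addition, not part of the original file) ---
# A minimal hook following the docstring above: state is kept in ``self.state``
# so it survives a checkpoint round trip. The class and function names here are
# hypothetical and exist only for illustration.
class _ExampleCounterHook(ClassyHook):
    on_start = ClassyHook._noop
    on_phase_start = ClassyHook._noop
    on_phase_end = ClassyHook._noop
    on_end = ClassyHook._noop
    def __init__(self):
        super().__init__()
        self.state.steps_seen = 0  # serializable state only
    def on_step(self, task) -> None:
        self.state.steps_seen += 1
def _example_state_roundtrip() -> int:
    """Sketch of how hook state is checkpointed and restored."""
    hook = _ExampleCounterHook()
    hook.on_step(task=None)
    snapshot = hook.get_classy_state()  # e.g. {"steps_seen": 1}
    restored = _ExampleCounterHook()
    restored.set_classy_state(snapshot)
    return restored.state.steps_seen  # -> 1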
| ClassyVision-main | classy_vision/hooks/classy_hook.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import auto, Enum
class ClassyHookFunctions(Enum):
"""
Enumeration of all the hook functions in the ClassyHook class.
"""
on_start = auto()
on_phase_start = auto()
on_forward = auto()
on_loss_and_meter = auto()
on_step = auto()
on_phase_end = auto()
on_end = auto()
| ClassyVision-main | classy_vision/hooks/constants.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from classy_vision.hooks import register_hook
from classy_vision.hooks.classy_hook import ClassyHook
from fvcore.common.file_io import PathManager
DEFAULT_FILE_NAME = "predictions.csv"
@register_hook("output_csv")
class OutputCSVHook(ClassyHook):
    on_phase_start = ClassyHook._noop
def __init__(self, folder, id_key="id", delimiter="\t") -> None:
super().__init__()
self.output_path = f"{folder}/{DEFAULT_FILE_NAME}"
self.file = PathManager.open(self.output_path, "a")
self.id_key = id_key
self.delimiter = delimiter
def on_start(self, task) -> None:
# File header
self.file.write(
self.delimiter.join(["sample_id", "prediction", "target"]) + "\n"
)
def on_step(self, task) -> None:
"""Saves the output of the model to a CSV file.
This hook assumes the dataset provides an "id" key. It also expects the
task to provide an output of shape (B, C) where B is the batch size and
C is the number of classes. Targets can be either one-hot encoded or
single numbers."""
if self.id_key not in task.last_batch.sample:
return
if task.train:
return
assert (
len(task.last_batch.output.shape) == 2
), "First dimension must be batch size, second is the class logits"
assert len(task.last_batch.sample["target"].shape) in [
1,
2,
], "Target must be integer or one-hot encoded vectors"
sample_ids = task.last_batch.sample[self.id_key].tolist()
predictions = task.last_batch.output.argmax(dim=1).tolist()
target = task.last_batch.sample["target"]
# One-hot encoded vectors
        if len(target.shape) == 2:
            target = target.argmax(dim=1)
        targets = target.tolist()
for sample_id, prediction, target in zip(sample_ids, predictions, targets):
self.file.write(
self.delimiter.join([str(sample_id), str(prediction), str(target)])
+ "\n"
)
def on_phase_end(self, task) -> None:
self.file.flush()
def on_end(self, task) -> None:
self.file.close()
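# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Demonstrates the tensor handling documented in ``on_step``: (B, C) model
# output is argmax'd into predicted class indices, and one-hot targets are
# reduced to indices the same way. All sample ids and values below are made up.
def _example_prediction_rows():
    import torch
    output = torch.tensor([[0.1, 0.9], [0.8, 0.2]])  # (B=2, C=2) class scores
    target = torch.tensor([[0, 1], [1, 0]])  # one-hot encoded targets
    predictions = output.argmax(dim=1).tolist()  # [1, 0]
    if len(target.shape) == 2:
        target = target.argmax(dim=1)
    targets = target.tolist()  # [1, 0]
    return list(zip(["id-0", "id-1"], predictions, targets))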
| ClassyVision-main | classy_vision/hooks/output_csv_hook.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Collection, Dict, Optional # noqa
from classy_vision.generic.distributed_util import is_primary
from classy_vision.generic.util import get_checkpoint_dict, save_checkpoint
from classy_vision.hooks import register_hook
from classy_vision.hooks.classy_hook import ClassyHook
from fvcore.common.file_io import PathManager
@register_hook("checkpoint")
class CheckpointHook(ClassyHook):
"""
Hook to checkpoint a model's task.
Saves the checkpoints in checkpoint_folder.
"""
on_phase_start = ClassyHook._noop
on_step = ClassyHook._noop
on_end = ClassyHook._noop
def __init__(
self,
checkpoint_folder: str,
input_args: Any = None,
phase_types: Optional[Collection[str]] = None,
checkpoint_period: int = 1,
) -> None:
"""The constructor method of CheckpointHook.
Args:
checkpoint_folder: Folder to store checkpoints in
input_args: Any arguments to save about the runtime setup. For example,
it is useful to store the config that was used to instantiate the model.
phase_types: If `phase_types` is specified, only checkpoint on those phase
types. Each item in `phase_types` must be either "train" or "test". If
not specified, it is set to checkpoint after "train" phases.
checkpoint_period: Checkpoint at the end of every x phases (default 1)
"""
super().__init__()
assert isinstance(
checkpoint_folder, str
), "checkpoint_folder must be a string specifying the checkpoint directory"
assert (
isinstance(checkpoint_period, int) and checkpoint_period > 0
), "checkpoint_period must be a positive integer"
self.checkpoint_folder: str = checkpoint_folder
self.input_args: Any = input_args
if phase_types is None:
phase_types = ["train"]
assert len(phase_types) > 0 and all(
phase_type in ["train", "test"] for phase_type in phase_types
), "phase_types should contain one or more of ['train', 'test']"
self.phase_types: Collection[str] = phase_types
self.checkpoint_period: int = checkpoint_period
self.phase_counter: int = 0
@classmethod
def get_checkpoint_name(cls, phase_idx):
return "model_phase-{phase}_end.torch".format(phase=phase_idx)
def _save_checkpoint(self, task, filename):
if getattr(task, "test_only", False):
return
assert PathManager.exists(
self.checkpoint_folder
), "Checkpoint folder '{}' deleted unexpectedly".format(self.checkpoint_folder)
# save checkpoint:
logging.info("Saving checkpoint to '{}'...".format(self.checkpoint_folder))
checkpoint_file = save_checkpoint(
self.checkpoint_folder, get_checkpoint_dict(task, self.input_args)
)
# make copy of checkpoint that won't be overwritten:
PathManager.copy(checkpoint_file, f"{self.checkpoint_folder}/{filename}")
def on_start(self, task) -> None:
if not is_primary() or getattr(task, "test_only", False):
return
if not PathManager.exists(self.checkpoint_folder):
err_msg = "Checkpoint folder '{}' does not exist.".format(
self.checkpoint_folder
)
raise FileNotFoundError(err_msg)
def on_phase_end(self, task) -> None:
"""Checkpoint the task every checkpoint_period phases.
We do not necessarily checkpoint the task at the end of every phase.
"""
if not is_primary() or task.phase_type not in self.phase_types:
return
self.phase_counter += 1
if self.phase_counter % self.checkpoint_period != 0:
return
checkpoint_name = CheckpointHook.get_checkpoint_name(task.phase_idx)
self._save_checkpoint(task, checkpoint_name)
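# --- Illustrative sketch (editor's addition, not part of the original file) ---
# A typical configuration of this hook; the folder path is a placeholder and,
# per ``on_start``, must already exist before training begins.
def _example_checkpoint_hook() -> "CheckpointHook":
    config = {
        "checkpoint_folder": "/tmp/my_checkpoints",  # hypothetical path
        "phase_types": ["train"],
        "checkpoint_period": 2,  # checkpoint after every 2 train phases
    }
    return CheckpointHook.from_config(config)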
| ClassyVision-main | classy_vision/hooks/checkpoint_hook.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import traceback
from pathlib import Path
from typing import Any, Dict, List
from classy_vision.generic.registry_utils import import_all_modules
from .constants import ClassyHookFunctions # isort:skip
from .classy_hook import ClassyHook # isort:skip
FILE_ROOT = Path(__file__).parent
HOOK_REGISTRY = {}
HOOK_CLASS_NAMES = set()
HOOK_REGISTRY_TB = {}
HOOK_CLASS_NAMES_TB = {}
def register_hook(name, bypass_checks=False):
"""Registers a :class:`ClassyHook` subclass.
This decorator allows Classy Vision to instantiate a subclass of
:class:`ClassyHook` from a configuration file, even if the class
itself is not part of the base Classy Vision framework. To use it,
apply this decorator to a ClassyHook subclass, like this:
.. code-block:: python
@register_hook('custom_hook')
class CustomHook(ClassyHook):
...
To instantiate a hook from a configuration file, see
:func:`build_hook`.
"""
def register_hook_cls(cls):
if not bypass_checks:
if name in HOOK_REGISTRY:
msg = (
"Cannot register duplicate hook ({}). Already registered at \n{}\n"
)
raise ValueError(msg.format(name, HOOK_REGISTRY_TB[name]))
if not issubclass(cls, ClassyHook):
raise ValueError(
"Hook ({}: {}) must extend ClassyHook".format(name, cls.__name__)
)
if cls.__name__ in HOOK_CLASS_NAMES:
msg = (
"Cannot register hook with duplicate class name({})."
+ "Previously registered at \n{}\n"
)
raise ValueError(
msg.format(cls.__name__, HOOK_CLASS_NAMES_TB[cls.__name__])
)
tb = "".join(traceback.format_stack())
HOOK_REGISTRY[name] = cls
HOOK_CLASS_NAMES.add(cls.__name__)
HOOK_REGISTRY_TB[name] = tb
HOOK_CLASS_NAMES_TB[cls.__name__] = tb
return cls
return register_hook_cls
def build_hooks(hook_configs: List[Dict[str, Any]]):
return [build_hook(config) for config in hook_configs]
def build_hook(hook_config: Dict[str, Any]):
"""Builds a ClassyHook from a config.
This assumes a 'name' key in the config which is used to determine
what hook class to instantiate. For instance, a config `{"name":
"my_hook", "foo": "bar"}` will find a class that was registered as
"my_hook" (see :func:`register_hook`) and call .from_config on
it."""
assert hook_config["name"] in HOOK_REGISTRY, (
"Unregistered hook. Did you make sure to use the register_hook decorator "
"AND import the hook file before calling this function??"
)
hook_config = copy.deepcopy(hook_config)
hook_name = hook_config.pop("name")
return HOOK_REGISTRY[hook_name].from_config(hook_config)
# automatically import any Python files in the hooks/ directory
import_all_modules(FILE_ROOT, "classy_vision.hooks")
from .checkpoint_hook import CheckpointHook # isort:skip
from .torchscript_hook import TorchscriptHook # isort:skip
from .output_csv_hook import OutputCSVHook # isort:skip
from .exponential_moving_average_model_hook import ( # isort:skip
ExponentialMovingAverageModelHook,
)
from .loss_lr_meter_logging_hook import LossLrMeterLoggingHook # isort:skip
from .model_complexity_hook import ModelComplexityHook # isort:skip
from .model_tensorboard_hook import ModelTensorboardHook # isort:skip
from .precise_batch_norm_hook import PreciseBatchNormHook # isort:skip
from .profiler_hook import ProfilerHook # isort:skip
from .progress_bar_hook import ProgressBarHook # isort:skip
from .tensorboard_plot_hook import TensorboardPlotHook # isort:skip
from .visdom_hook import VisdomHook # isort:skip
__all__ = [
"build_hooks",
"build_hook",
"register_hook",
"CheckpointHook",
"ClassyHook",
"ClassyHookFunctions",
"ExponentialMovingAverageModelHook",
"LossLrMeterLoggingHook",
"OutputCSVHook",
"TensorboardPlotHook",
"TorchscriptHook",
"ModelComplexityHook",
"ModelTensorboardHook",
"PreciseBatchNormHook",
"ProfilerHook",
"ProgressBarHook",
"VisdomHook",
]
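# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Registering a hypothetical hook and rebuilding it from a config dict, as
# described in the ``register_hook`` and ``build_hook`` docstrings above.
def _example_register_and_build() -> ClassyHook:
    @register_hook("_example_noop_hook", bypass_checks=True)
    class _ExampleNoopHook(ClassyHook):
        on_start = ClassyHook._noop
        on_phase_start = ClassyHook._noop
        on_step = ClassyHook._noop
        on_phase_end = ClassyHook._noop
        on_end = ClassyHook._noop
    return build_hook({"name": "_example_noop_hook"})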
| ClassyVision-main | classy_vision/hooks/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import time
from itertools import accumulate
from typing import Any, Dict, List, Optional, Tuple
import torch
from classy_vision.generic.distributed_util import all_reduce_max, is_primary
from classy_vision.hooks import register_hook
from classy_vision.hooks.classy_hook import ClassyHook
try:
from torch.utils.tensorboard import SummaryWriter # noqa F401
tb_available = True
except ImportError:
tb_available = False
log = logging.getLogger()
@register_hook("tensorboard_plot")
class TensorboardPlotHook(ClassyHook):
"""
    Hook for writing the losses, learning rates and meters to
    `tensorboard <https://www.tensorflow.org/tensorboard>`_.
Global steps are counted in terms of the number of samples processed.
"""
on_end = ClassyHook._noop
def __init__(self, tb_writer, log_period: int = 10) -> None:
"""The constructor method of TensorboardPlotHook.
Args:
tb_writer: `Tensorboard SummaryWriter <https://tensorboardx.
readthedocs.io/en/latest/tensorboard.html#tensorboardX.
SummaryWriter>`_ instance or None (only on secondary replicas)
"""
super().__init__()
if not tb_available:
raise ModuleNotFoundError(
"tensorboard not installed, cannot use TensorboardPlotHook"
)
if not isinstance(log_period, int):
raise TypeError("log_period must be an int")
self.tb_writer = tb_writer
self.learning_rates: Optional[List[float]] = None
self.wall_times: Optional[List[float]] = None
self.sample_fetch_times: Optional[List[float]] = None
self.log_period = log_period
# need to maintain the step count at the end of every phase
# and the cumulative sample fetch time for checkpointing
self.state.step_count = {"train": 0, "test": 0}
self.state.cum_sample_fetch_time = {"train": 0, "test": 0}
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "TensorboardPlotHook":
"""The config is expected to include the key
"summary_writer" with arguments which correspond
to those listed at <https://tensorboardx.
readthedocs.io/en/latest/tensorboard.html#tensorboardX.SummaryWriter>:
"""
tb_writer = SummaryWriter(**config["summary_writer"])
log_period = config.get("log_period", 10)
return cls(tb_writer=tb_writer, log_period=log_period)
def on_start(self, task) -> None:
if is_primary():
self.tb_writer.add_text("Task", f"{task}")
def on_phase_start(self, task) -> None:
"""Initialize losses and learning_rates."""
self.learning_rates = []
self.wall_times = []
self.sample_fetch_times = []
if not is_primary():
return
if torch.cuda.is_available():
torch.cuda.reset_max_memory_allocated()
# log the parameters before training starts
if task.train and task.train_phase_idx == 0:
for name, parameter in task.base_model.named_parameters():
self.tb_writer.add_histogram(
f"Parameters/{name}", parameter, global_step=-1
)
def on_step(self, task) -> None:
"""Store the observed learning rates."""
self.state.step_count[task.phase_type] += 1
self.wall_times.append(time.time())
if "sample_fetch_time" in task.last_batch.step_data:
self.sample_fetch_times.append(
task.last_batch.step_data["sample_fetch_time"]
)
if task.train:
self.learning_rates.append(task.optimizer.options_view.lr)
    def _get_cum_sample_fetch_times(self, phase_type) -> Optional[List[float]]:
if not self.sample_fetch_times:
return None
sample_fetch_times = torch.Tensor(self.sample_fetch_times)
max_sample_fetch_times = all_reduce_max(sample_fetch_times).tolist()
cum_sample_fetch_times = list(
accumulate(
[self.state.cum_sample_fetch_time[phase_type]] + max_sample_fetch_times
)
)[1:]
self.state.cum_sample_fetch_time[phase_type] = cum_sample_fetch_times[-1]
return cum_sample_fetch_times
def on_phase_end(self, task) -> None:
"""Add the losses and learning rates to tensorboard."""
if self.learning_rates is None:
logging.warning("learning_rates is not initialized")
return
phase_type = task.phase_type
cum_sample_fetch_times = self._get_cum_sample_fetch_times(phase_type)
batches = len(task.losses)
if batches == 0 or not is_primary():
return
phase_type_idx = task.train_phase_idx if task.train else task.eval_phase_idx
logging.info(f"Plotting to Tensorboard for {phase_type} phase {phase_type_idx}")
for i in range(0, len(self.wall_times), self.log_period):
global_step = (
i + self.state.step_count[phase_type] - len(self.wall_times) + 1
)
if cum_sample_fetch_times:
self.tb_writer.add_scalar(
f"Speed/{phase_type}/cumulative_sample_fetch_time",
cum_sample_fetch_times[i],
global_step=global_step,
walltime=self.wall_times[i],
)
if task.train:
self.tb_writer.add_scalar(
"Learning Rate/train",
self.learning_rates[i],
global_step=global_step,
walltime=self.wall_times[i],
)
if task.train:
for name, parameter in task.base_model.named_parameters():
self.tb_writer.add_histogram(
f"Parameters/{name}", parameter, global_step=phase_type_idx
)
if torch.cuda.is_available():
self.tb_writer.add_scalar(
f"Memory/{phase_type}/peak_allocated",
torch.cuda.max_memory_allocated(),
global_step=phase_type_idx,
)
loss_avg = sum(task.losses) / batches
loss_key = f"Losses/{phase_type}"
self.tb_writer.add_scalar(loss_key, loss_avg, global_step=phase_type_idx)
# plot meters which return a dict
for meter in task.meters:
if not isinstance(meter.value, dict):
log.warn(f"Skipping meter {meter.name} with value: {meter.value}")
continue
for name, value in meter.value.items():
if isinstance(value, float):
meter_key = f"Meters/{phase_type}/{meter.name}/{name}"
self.tb_writer.add_scalar(
meter_key, value, global_step=phase_type_idx
)
else:
                    log.warning(
f"Skipping meter name {meter.name}/{name} with value: {value}"
)
continue
if hasattr(task, "perf_log"):
for perf in task.perf_log:
phase_idx = perf["phase_idx"]
tag = perf["tag"]
for metric_name, metric_value in perf.items():
if metric_name in ["phase_idx", "tag"]:
continue
self.tb_writer.add_scalar(
f"Speed/{tag}/{metric_name}",
metric_value,
global_step=phase_idx,
)
# flush so that the plots aren't lost if training crashes soon after
self.tb_writer.flush()
logging.info("Done plotting to Tensorboard")
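# --- Illustrative sketch (editor's addition, not part of the original file) ---
# A typical config for this hook; the "summary_writer" entries are forwarded
# to SummaryWriter, and the log_dir below is a placeholder.
def _example_tensorboard_plot_hook() -> "TensorboardPlotHook":
    config = {
        "summary_writer": {"log_dir": "/tmp/tb_logs"},  # hypothetical directory
        "log_period": 10,  # plot scalars every 10 batches
    }
    return TensorboardPlotHook.from_config(config)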
| ClassyVision-main | classy_vision/hooks/tensorboard_plot_hook.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
from classy_vision.generic.distributed_util import is_primary
from classy_vision.generic.util import eval_model, get_model_dummy_input
from classy_vision.hooks import register_hook
from classy_vision.hooks.classy_hook import ClassyHook
from fvcore.common.file_io import PathManager
# constants
TORCHSCRIPT_FILE = "torchscript.pt"
@register_hook("torchscript")
class TorchscriptHook(ClassyHook):
"""
Hook to convert a task model into torch script.
Saves the torch scripts in torchscript_folder.
"""
on_phase_start = ClassyHook._noop
on_phase_end = ClassyHook._noop
on_step = ClassyHook._noop
def __init__(
self,
torchscript_folder: str,
use_trace: bool = True,
trace_strict: bool = True,
device: str = "cpu",
) -> None:
"""The constructor method of TorchscriptHook.
Args:
torchscript_folder: Folder to store torch scripts in.
use_trace: set to true for tracing and false for scripting,
trace_strict: run the tracer in a strict mode or not
(default: ``True``). Only turn this off when you want the tracer to
record your mutable container types (currently ``list``/``dict``)
and you are sure that the container you are using in your
problem is a ``constant`` structure and does not get used as
control flow (if, for) conditions.
device: move to device before saving.
"""
super().__init__()
assert isinstance(
torchscript_folder, str
), "torchscript_folder must be a string specifying the torchscript directory"
self.torchscript_folder: str = torchscript_folder
self.use_trace: bool = use_trace
self.trace_strict: bool = trace_strict
self.device: str = device
def torchscript_using_trace(self, model):
input_shape = model.input_shape if hasattr(model, "input_shape") else None
if not input_shape:
logging.warning(
"This model doesn't implement input_shape."
"Cannot save torchscripted model."
)
return
input_data = get_model_dummy_input(
model,
input_shape,
input_key=model.input_key if hasattr(model, "input_key") else None,
)
        with eval_model(model), torch.no_grad():
torchscript = torch.jit.trace(model, input_data, strict=self.trace_strict)
return torchscript
def torchscript_using_script(self, model):
        with eval_model(model), torch.no_grad():
torchscript = torch.jit.script(model)
return torchscript
def save_torchscript(self, task) -> None:
model = task.base_model
torchscript = (
self.torchscript_using_trace(model)
if self.use_trace
else self.torchscript_using_script(model)
)
# save torchscript:
logging.info("Saving torchscript to '{}'...".format(self.torchscript_folder))
torchscript = torchscript.to(self.device)
torchscript_name = f"{self.torchscript_folder}/{TORCHSCRIPT_FILE}"
with PathManager.open(torchscript_name, "wb") as f:
torch.jit.save(torchscript, f)
def on_start(self, task) -> None:
if not is_primary():
return
if not PathManager.exists(self.torchscript_folder):
err_msg = "Torchscript folder '{}' does not exist.".format(
self.torchscript_folder
)
raise FileNotFoundError(err_msg)
def on_end(self, task) -> None:
"""Save model into torchscript by the end of training/testing."""
if not is_primary():
return
self.save_torchscript(task)
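# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Two common ways to configure this hook: tracing (the default, requires the
# model to expose ``input_shape``) or scripting. The folder is a placeholder
# and, per ``on_start``, must already exist.
def _example_torchscript_hooks():
    traced = TorchscriptHook(torchscript_folder="/tmp/torchscript_out")
    scripted = TorchscriptHook(
        torchscript_folder="/tmp/torchscript_out", use_trace=False
    )
    return traced, scripted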
| ClassyVision-main | classy_vision/hooks/torchscript_hook.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Dict
from classy_vision.generic.profiler import profile, summarize_profiler_info
from classy_vision.hooks import register_hook
from classy_vision.hooks.classy_hook import ClassyHook
@register_hook("profiler")
class ProfilerHook(ClassyHook):
"""
Hook to profile a model and to show model runtime information, such as
the time breakdown in milliseconds of forward/backward pass.
"""
on_phase_start = ClassyHook._noop
on_step = ClassyHook._noop
on_phase_end = ClassyHook._noop
on_end = ClassyHook._noop
def on_start(self, task) -> None:
"""Profile the forward pass."""
logging.info("Profiling forward pass...")
batchsize_per_replica = task.get_batchsize_per_replica()
input_shape = task.base_model.input_shape
p = profile(
task.model,
batchsize_per_replica=batchsize_per_replica,
input_shape=input_shape,
input_key=getattr(task.base_model, "input_key", None),
)
logging.info(summarize_profiler_info(p))
| ClassyVision-main | classy_vision/hooks/profiler_hook.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections.abc
import logging
from typing import Any, Dict
from classy_vision.generic.distributed_util import is_primary
from classy_vision.generic.util import flatten_dict
from classy_vision.generic.visualize import plot_learning_curves
from classy_vision.hooks import register_hook
from classy_vision.hooks.classy_hook import ClassyHook
try:
from visdom import Visdom
visdom_available = True
except ImportError:
visdom_available = False
@register_hook("visdom")
class VisdomHook(ClassyHook):
"""Plots metrics on to `Visdom <https://github.com/facebookresearch/visdom>`_.
Visdom is a flexible tool for creating, organizing, and sharing visualizations
of live, rich data. It supports Python.
"""
on_start = ClassyHook._noop
on_phase_start = ClassyHook._noop
on_step = ClassyHook._noop
on_end = ClassyHook._noop
def __init__(
self, server: str, port: str, env: str = "main", title_suffix: str = ""
) -> None:
"""
Args:
server: host name of the visdom server
port: port of visdom server, such as 8097
env: environment of visdom
title_suffix: suffix that will be appended to the title
"""
super().__init__()
if not visdom_available:
raise RuntimeError("Visdom is not installed, cannot use VisdomHook")
self.server: str = server
self.port: str = port
self.env: str = env
self.title_suffix: str = title_suffix
self.metrics: Dict = {}
self.visdom: Visdom = Visdom(self.server, self.port)
def on_phase_end(self, task) -> None:
"""
Plot the metrics on visdom.
"""
phase_type = task.phase_type
metrics = self.metrics
batches = len(task.losses)
if batches == 0:
return
# Loss for the phase
loss = sum(task.losses) / batches
loss_key = phase_type + "_loss"
if loss_key not in metrics:
metrics[loss_key] = []
metrics[loss_key].append(loss)
# Optimizer LR for the phase
optimizer_lr = task.optimizer.options_view.lr
lr_key = phase_type + "_learning_rate"
if lr_key not in metrics:
metrics[lr_key] = []
metrics[lr_key].append(optimizer_lr)
# Calculate meters
for meter in task.meters:
if isinstance(meter.value, collections.abc.MutableMapping):
flattened_meters_dict = flatten_dict(meter.value, prefix=meter.name)
for k, v in flattened_meters_dict.items():
metric_key = phase_type + "_" + k
if metric_key not in metrics:
metrics[metric_key] = []
metrics[metric_key].append(v)
else:
metric_key = phase_type + "_" + meter.name
if metric_key not in metrics:
metrics[metric_key] = []
metrics[metric_key].append(meter.value)
# update learning curve visualizations:
phase_type = "train" if task.train else "test"
title = "%s-%s" % (phase_type, task.base_model.__class__.__name__)
title += self.title_suffix
if not task.train and is_primary():
logging.info("Plotting learning curves to visdom")
plot_learning_curves(
metrics, visdom_server=self.visdom, env=self.env, win=title, title=title
)
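# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Connecting the hook to a local visdom server; the server address, port and
# environment name below are placeholders.
def _example_visdom_hook() -> "VisdomHook":
    return VisdomHook(server="http://localhost", port="8097", env="main")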
| ClassyVision-main | classy_vision/hooks/visdom_hook.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Dict
from classy_vision.generic.distributed_util import is_primary
from classy_vision.generic.visualize import plot_model
from classy_vision.hooks import register_hook
from classy_vision.hooks.classy_hook import ClassyHook
try:
from torch.utils.tensorboard import SummaryWriter # noqa F401
tb_available = True
except ImportError:
tb_available = False
@register_hook("model_tensorboard")
class ModelTensorboardHook(ClassyHook):
"""
    Shows the model graph in
    `TensorBoard <https://www.tensorflow.org/tensorboard>`_.
"""
on_phase_start = ClassyHook._noop
on_step = ClassyHook._noop
on_phase_end = ClassyHook._noop
on_end = ClassyHook._noop
def __init__(self, tb_writer) -> None:
"""The constructor method of ModelTensorboardHook.
Args:
tb_writer: `Tensorboard SummaryWriter <https://tensorboardx.
readthedocs.io/en/latest/tensorboard.html#tensorboardX.
SummaryWriter>`_ instance or None (only on secondary replicas)
"""
super().__init__()
if not tb_available:
raise ModuleNotFoundError(
"tensorboard not installed, cannot use ModelTensorboardHook"
)
self.tb_writer = tb_writer
@classmethod
    def from_config(cls, config: Dict[str, Any]) -> "ModelTensorboardHook":
"""The config is expected to include the key
"summary_writer" with arguments which correspond
to those listed at <https://tensorboardx.
readthedocs.io/en/latest/tensorboard.html#tensorboardX.SummaryWriter>:
"""
tb_writer = SummaryWriter(**config["summary_writer"])
return cls(tb_writer=tb_writer)
def on_start(self, task) -> None:
"""
Plot the model on Tensorboard.
"""
if is_primary():
try:
# Show model in tensorboard:
logging.info("Showing model graph in TensorBoard...")
plot_model(
task.base_model,
size=task.base_model.input_shape,
input_key=task.base_model.input_key
if hasattr(task.base_model, "input_key")
else None,
writer=self.tb_writer,
)
except Exception:
logging.warning("Unable to plot model to tensorboard")
logging.debug("Exception encountered:", exc_info=True)
| ClassyVision-main | classy_vision/hooks/model_tensorboard_hook.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
import torch
from classy_vision.generic.util import (
get_batchsize_per_replica,
recursive_copy_to_device,
recursive_copy_to_gpu,
)
from classy_vision.hooks import ClassyHook, register_hook
from fvcore.nn.precise_bn import update_bn_stats
def _get_iterator(data_iter, use_gpu):
for elem in data_iter:
if use_gpu:
elem = recursive_copy_to_gpu(elem, non_blocking=True)
yield elem["input"]
@register_hook("precise_bn")
class PreciseBatchNormHook(ClassyHook):
"""Hook to compute precise batch normalization statistics.
Batch norm stats are calculated and updated during training, when the weights are
also changing, which makes the calculations imprecise. This hook recomputes the
batch norm stats at the end of a train phase to make them more precise. See
`fvcore's documentation <https://github.com/facebookresearch/fvcore/blob/master/
fvcore/nn/precise_bn.py>`_ for more information.
"""
on_end = ClassyHook._noop
def __init__(self, num_samples: int, cache_samples: bool = False) -> None:
"""The constructor method of PreciseBatchNormHook.
Caches the required number of samples on the CPU during train phases
Args:
num_samples: Number of samples to calculate the batch norm stats per replica
            cache_samples: If True, samples are cached during the train phase. This
                avoids re-creating data loaders, but consumes more memory. If False,
                the data loader is re-created at the end of the phase, which can be
                slow for large datasets, but saves memory.
"""
super().__init__()
if num_samples <= 0:
raise ValueError("num_samples has to be a positive integer")
self.num_samples = num_samples
self.cache_samples = cache_samples
if cache_samples:
self.cache = []
self.current_samples = 0
else:
self.batch_size = None
@classmethod
def from_config(cls, config):
return cls(config["num_samples"], config.get("cache_samples", False))
def on_phase_start(self, task) -> None:
if self.cache_samples:
self.cache = []
self.current_samples = 0
def on_start(self, task) -> None:
logging.info(f"Use precise BatchNorm hook. Cache samples? {self.cache_samples}")
def on_step(self, task) -> None:
if not task.train:
return
if self.cache_samples:
if self.current_samples >= self.num_samples:
return
sample = recursive_copy_to_device(
task.last_batch.sample,
non_blocking=True,
device=torch.device("cpu"),
)
self.cache.append(sample)
self.current_samples += get_batchsize_per_replica(sample)
else:
if self.batch_size is not None:
return
self.batch_size = get_batchsize_per_replica(task.last_batch.sample["input"])
def on_phase_end(self, task) -> None:
if not task.train:
return
if self.cache_samples:
iterator = _get_iterator(self.cache, task.use_gpu)
num_batches = len(self.cache)
else:
num_batches = int(math.ceil(self.num_samples / self.batch_size))
task.build_dataloaders_for_current_phase()
task.create_data_iterators()
if num_batches > len(task.data_iterator):
num_batches = len(task.data_iterator)
logging.info(
f"Reduce no. of samples to {num_batches * self.batch_size}"
)
iterator = _get_iterator(task.data_iterator, task.use_gpu)
update_bn_stats(task.base_model, iterator, num_batches)
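# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Recompute BN statistics over roughly 4096 samples per replica at the end of
# each train phase, caching the samples on the CPU as they are seen.
def _example_precise_bn_hook() -> "PreciseBatchNormHook":
    return PreciseBatchNormHook.from_config(
        {"num_samples": 4096, "cache_samples": True}
    )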
| ClassyVision-main | classy_vision/hooks/precise_batch_norm_hook.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Optional
from classy_vision.generic.distributed_util import get_rank
from classy_vision.hooks import register_hook
from classy_vision.hooks.classy_hook import ClassyHook
@register_hook("loss_lr_meter_logging")
class LossLrMeterLoggingHook(ClassyHook):
"""
Logs the loss, optimizer LR, and meters. Logs at the end of a phase.
"""
on_phase_start = ClassyHook._noop
on_end = ClassyHook._noop
def __init__(self, log_freq: Optional[int] = None) -> None:
"""The constructor method of LossLrMeterLoggingHook.
Args:
log_freq: if specified, also logs every ``log_freq`` batches.
"""
super().__init__()
assert log_freq is None or isinstance(
log_freq, int
), "log_freq must be an int or None"
self.log_freq: Optional[int] = log_freq
def on_start(self, task) -> None:
logging.info(f"Starting training. Task: {task}")
def on_phase_end(self, task) -> None:
"""
Log the loss, optimizer LR, and meters for the phase.
"""
batches = len(task.losses)
if batches:
# Most trainers will sync meters on phase end, however we
# do not explicitly state this since it is possible for a
# trainer to implement an unsynced end of phase meter or
# for meters to not provide a sync function.
self._log_loss_lr_meters(task, prefix="Synced meters: ", log_batches=True)
def on_step(self, task) -> None:
"""
Log the LR every log_freq batches, if log_freq is not None.
"""
if self.log_freq is None or not task.train:
return
batches = len(task.losses)
if batches and batches % self.log_freq == 0:
self._log_loss_lr_meters(task, prefix="Approximate meters: ")
def _log_loss_lr_meters(self, task, prefix="", log_batches=False) -> None:
"""
Compute and log the loss, lr, and meters.
"""
phase_type = task.phase_type
phase_type_idx = task.train_phase_idx if task.train else task.eval_phase_idx
batches = len(task.losses)
# Loss for the phase
loss = sum(task.losses) / batches
phase_pct = batches / task.num_batches_per_phase
msg = (
f"{prefix}[{get_rank()}] {phase_type} phase {phase_type_idx} "
f"({phase_pct*100:.2f}% done), loss: {loss:.4f}, meters: {task.meters}"
)
if task.train:
msg += f", lr: {task.optimizer.options_view.lr:.4f}"
if phase_type == "test" and hasattr(task, "ema"):
msg += f", ema: {task.ema}"
if log_batches:
msg += f", processed batches: {batches}"
logging.info(msg)
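# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Besides the end-of-phase log, also emit an approximate log every 100 batches.
def _example_logging_hook() -> "LossLrMeterLoggingHook":
    return LossLrMeterLoggingHook(log_freq=100)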
| ClassyVision-main | classy_vision/hooks/loss_lr_meter_logging_hook.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from classy_vision.generic.profiler import (
ClassyProfilerNotImplementedError,
compute_activations,
compute_flops,
count_params,
)
from classy_vision.hooks import register_hook
from classy_vision.hooks.classy_hook import ClassyHook
@register_hook("model_complexity")
class ModelComplexityHook(ClassyHook):
"""
    Logs the number of parameters and forward pass FLOPs and activations of the model.
"""
on_phase_start = ClassyHook._noop
on_step = ClassyHook._noop
on_phase_end = ClassyHook._noop
on_end = ClassyHook._noop
def __init__(self) -> None:
super().__init__()
self.num_flops = None
self.num_activations = None
self.num_parameters = None
def on_start(self, task) -> None:
"""Measure number of parameters, FLOPs and activations."""
self.num_flops = 0
self.num_activations = 0
self.num_parameters = 0
try:
self.num_parameters = count_params(task.base_model)
logging.info("Number of parameters in model: %d" % self.num_parameters)
try:
self.num_flops = compute_flops(
task.base_model,
input_shape=task.base_model.input_shape,
input_key=task.base_model.input_key
if hasattr(task.base_model, "input_key")
else None,
)
if self.num_flops is None:
logging.info("FLOPs for forward pass: skipped.")
self.num_flops = 0
else:
logging.info(
"FLOPs for forward pass: %d MFLOPs"
% (float(self.num_flops) / 1e6)
)
except ClassyProfilerNotImplementedError as e:
logging.warning(f"Could not compute FLOPs for model forward pass: {e}")
try:
self.num_activations = compute_activations(
task.base_model,
input_shape=task.base_model.input_shape,
input_key=task.base_model.input_key
if hasattr(task.base_model, "input_key")
else None,
)
logging.info(f"Number of activations in model: {self.num_activations}")
except ClassyProfilerNotImplementedError as e:
logging.warning(
f"Could not compute activations for model forward pass: {e}"
)
except Exception:
logging.info("Skipping complexity calculation: Unexpected error")
logging.debug("Error trace for complexity calculation:", exc_info=True)
def get_summary(self):
return {
"FLOPS(M)": float(self.num_flops) / 1e6
if self.num_flops is not None
else 0,
"num_activations(M)": float(self.num_activations) / 1e6
if self.num_activations is not None
else 0,
"num_parameters(M)": float(self.num_parameters) / 1e6
if self.num_parameters is not None
else 0,
}
| ClassyVision-main | classy_vision/hooks/model_complexity_hook.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
FILE_ROOT = Path(__file__).parent
# Automatically import any Python files in the losses/ directory
import_all_modules(FILE_ROOT, "losses")
| ClassyVision-main | classy_vision/templates/synthetic/losses/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn.functional as F
from classy_vision.losses import ClassyLoss, register_loss
@register_loss("my_loss")
class MyLoss(ClassyLoss):
def forward(self, input, target):
labels = F.one_hot(target, num_classes=2).float()
return F.binary_cross_entropy(input, labels)
@classmethod
def from_config(cls, config):
# We don't need anything from the config
return cls()
| ClassyVision-main | classy_vision/templates/synthetic/losses/my_loss.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Dict, Optional, Union
from classy_vision.dataset import ClassyDataset, register_dataset
from classy_vision.dataset.core.random_image_datasets import (
RandomImageBinaryClassDataset,
SampleType,
)
from classy_vision.dataset.transforms import build_transforms, ClassyTransform
@register_dataset("my_dataset")
class MyDataset(ClassyDataset):
def __init__(
self,
batchsize_per_replica: int,
shuffle: bool,
transform: Optional[Union[ClassyTransform, Callable]],
num_samples: int,
crop_size: int,
class_ratio: float,
seed: int,
) -> None:
dataset = RandomImageBinaryClassDataset(
crop_size, class_ratio, num_samples, seed, SampleType.TUPLE
)
super().__init__(
dataset, batchsize_per_replica, shuffle, transform, num_samples
)
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "MyDataset":
assert all(key in config for key in ["crop_size", "class_ratio", "seed"])
crop_size = config["crop_size"]
class_ratio = config["class_ratio"]
seed = config["seed"]
(
transform_config,
batchsize_per_replica,
shuffle,
num_samples,
) = cls.parse_config(config)
transform = build_transforms(transform_config)
return cls(
batchsize_per_replica,
shuffle,
transform,
num_samples,
crop_size,
class_ratio,
seed,
)
| ClassyVision-main | classy_vision/templates/synthetic/datasets/my_dataset.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
FILE_ROOT = Path(__file__).parent
# Automatically import any Python files in the datasets/ directory
import_all_modules(FILE_ROOT, "datasets")
| ClassyVision-main | classy_vision/templates/synthetic/datasets/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
FILE_ROOT = Path(__file__).parent
# Automatically import any Python files in the models/ directory
import_all_modules(FILE_ROOT, "models")
| ClassyVision-main | classy_vision/templates/synthetic/models/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torchvision.models as models
from classy_vision.models import ClassyModel, register_model
@register_model("my_model")
class MyModel(ClassyModel):
def __init__(self):
super().__init__()
self.model = nn.Sequential(
nn.AdaptiveAvgPool2d((20, 20)),
nn.Flatten(1),
nn.Linear(3 * 20 * 20, 2),
nn.Sigmoid(),
)
def forward(self, x):
x = self.model(x)
return x
@classmethod
def from_config(cls, config):
return cls()
| ClassyVision-main | classy_vision/templates/synthetic/models/my_model.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import logging
import os
import sys
from pathlib import Path
def import_all_modules(root: str, base_module: str) -> None:
for file in os.listdir(root):
if file.endswith((".py", ".pyc")) and not file.startswith("_"):
module = file[: file.find(".py")]
if module not in sys.modules:
module_name = ".".join([base_module, module])
importlib.import_module(module_name)
def import_all_packages_from_directory(root: str) -> None:
"""Automatically imports all packages under the root directory.
For instance, if your directories look like:
root / foo / __init__.py
root / foo / abc.py
root / bar.py
root / baz / xyz.py
This function will import the package foo, but not bar or baz."""
for file in os.listdir(root):
# Try to import each file in the directory. Our previous implementation
# would look for directories here and see if there's a __init__.py
# under that directory, but that turns out to be unreliable while
# running on AWS: EFS filesystems cache metadata bits so the directory
# and existence checks fail even when the import succeeds. We should
# find a better workaround eventually, but this will do for now.
try:
file = Path(file)
module_name = file.name
# Dots have special meaning in Python packages -- it's a relative
# import or a subpackage. Skip these.
if "." not in module_name and module_name not in sys.modules:
logging.debug(f"Automatically importing {module_name}")
importlib.import_module(module_name)
except ModuleNotFoundError:
pass
| ClassyVision-main | classy_vision/generic/registry_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections.abc
import contextlib
import json
import logging
import os
import time
from functools import partial
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from classy_vision.generic.distributed_util import broadcast_object, is_primary
from fvcore.common.file_io import PathManager
try:
import apex
apex_available = True
except ImportError:
apex_available = False
# constants:
CHECKPOINT_FILE = "checkpoint.torch"
CPU_DEVICE = torch.device("cpu")
GPU_DEVICE = torch.device("cuda")
def is_pos_int(number: int) -> bool:
"""
Returns True if a number is a positive integer.
"""
return type(number) == int and number >= 0
def is_pos_float(number: float) -> bool:
"""
Returns True if a number is a positive float.
"""
return type(number) == float and number >= 0.0
def is_pos_int_list(l: List) -> bool:
"""
Returns True if a list contains positive integers
"""
return type(l) == list and all(is_pos_int(n) for n in l)
def is_pos_int_tuple(t: Tuple) -> bool:
"""
Returns True if a tuple contains positive integers
"""
return type(t) == tuple and all(is_pos_int(n) for n in t)
def is_long_tensor(tensor: torch.Tensor) -> bool:
"""
Returns True if a tensor is a long tensor.
"""
if torch.is_tensor(tensor):
return tensor.type().endswith("LongTensor")
else:
return False
def is_float_tensor(tensor: torch.Tensor) -> bool:
"""
Returns True if a tensor is a float tensor.
"""
if torch.is_tensor(tensor):
return tensor.type().endswith("FloatTensor")
else:
return False
def is_double_tensor(tensor: torch.Tensor) -> bool:
"""
Returns True if a tensor is a double tensor.
"""
if torch.is_tensor(tensor):
return tensor.type().endswith("DoubleTensor")
else:
return False
def is_leaf(module: nn.Module) -> bool:
"""
Returns True if module is leaf in the graph.
"""
assert isinstance(module, nn.Module), "module should be nn.Module"
return len(list(module.children())) == 0 or hasattr(module, "_mask")
def is_on_gpu(model: torch.nn.Module) -> bool:
"""
Returns True if all parameters of a model live on the GPU.
"""
assert isinstance(model, torch.nn.Module)
on_gpu = True
has_params = False
for param in model.parameters():
has_params = True
if not param.data.is_cuda:
on_gpu = False
return has_params and on_gpu
def is_not_none(sample: Any) -> bool:
"""
Returns True if sample is not None and constituents are not none.
"""
if sample is None:
return False
if isinstance(sample, (list, tuple)):
if any(s is None for s in sample):
return False
if isinstance(sample, dict):
if any(s is None for s in sample.values()):
return False
return True
def copy_model_to_gpu(model, loss=None):
"""
Copies a model and (optional) loss to GPU and enables cudnn benchmarking.
    For multi-GPU training, the caller should additionally wrap the returned model
    in DistributedDataParallel.
"""
if not torch.backends.cudnn.deterministic:
torch.backends.cudnn.benchmark = True
model = model.cuda()
if loss is not None:
loss = loss.cuda()
return model, loss
else:
return model
def recursive_copy_to_device(
value: Any, non_blocking: bool, device: torch.device
) -> Any:
"""
Recursively searches lists, tuples, dicts and copies any object which
supports an object.to API (e.g. tensors) to device if possible.
Other values are passed as-is in the result.
Note: These are all copies, so if there are two objects that reference
the same object, then after this call, there will be two different objects
referenced on the device.
"""
if isinstance(value, list) or isinstance(value, tuple):
device_val = []
for val in value:
device_val.append(
recursive_copy_to_device(val, non_blocking=non_blocking, device=device)
)
return device_val if isinstance(value, list) else tuple(device_val)
elif isinstance(value, collections.abc.Mapping):
device_val = {}
for key, val in value.items():
device_val[key] = recursive_copy_to_device(
val, non_blocking=non_blocking, device=device
)
return device_val
elif callable(getattr(value, "to", None)):
return value.to(device=device, non_blocking=non_blocking)
return value
def recursive_copy_to_gpu(value: Any, non_blocking: bool = True) -> Any:
"""
Recursively searches lists, tuples, dicts and copies tensors to GPU if
possible. Non-tensor values are passed as-is in the result.
Note: These are all copies, so if there are two objects that reference
the same object, then after this call, there will be two different objects
referenced on the GPU.
"""
return recursive_copy_to_device(
value=value, non_blocking=non_blocking, device=GPU_DEVICE
)
@contextlib.contextmanager
def numpy_seed(seed: Optional[int], *addl_seeds: int) -> None:
"""Context manager which seeds the NumPy PRNG with the specified seed and
restores the state afterward"""
if seed is None:
yield
return
if len(addl_seeds) > 0:
seed = int(hash((seed, *addl_seeds)) % 1e6)
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
def get_checkpoint_dict(
task, input_args: Optional[Dict], deep_copy: bool = False
) -> Dict[str, Any]:
assert input_args is None or isinstance(
input_args, dict
), f"Unexpected input_args of type: {type(input_args)}"
return {
"input_args": input_args,
"classy_state_dict": task.get_classy_state(deep_copy=deep_copy),
}
def load_and_broadcast_checkpoint(
checkpoint_path: str, device: torch.device = CPU_DEVICE
) -> Optional[Dict]:
"""Loads a checkpoint on primary and broadcasts it to all replicas.
This is a collective operation which needs to be run in sync on all replicas.
See :func:`load_checkpoint` for the arguments.
"""
if is_primary():
checkpoint = load_checkpoint(checkpoint_path, device)
else:
checkpoint = None
logging.info(f"Broadcasting checkpoint loaded from {checkpoint_path}")
return broadcast_object(checkpoint)
def load_checkpoint(
checkpoint_path: str, device: torch.device = CPU_DEVICE
) -> Optional[Dict]:
"""Loads a checkpoint from the specified checkpoint path.
Args:
checkpoint_path: The path to load the checkpoint from. Can be a file or a
directory. If it is a directory, the checkpoint is loaded from
:py:data:`CHECKPOINT_FILE` inside the directory.
device: device to load the checkpoint to
Returns:
The checkpoint, if it exists, or None.
"""
if not checkpoint_path:
return None
assert device is not None, "Please specify what device to load checkpoint on"
assert device.type in ["cpu", "cuda"], f"Unknown device: {device}"
if device.type == "cuda":
assert torch.cuda.is_available()
if not PathManager.exists(checkpoint_path):
logging.warning(f"Checkpoint path {checkpoint_path} not found")
return None
if PathManager.isdir(checkpoint_path):
checkpoint_path = f"{checkpoint_path.rstrip('/')}/{CHECKPOINT_FILE}"
if not PathManager.exists(checkpoint_path):
logging.warning(f"Checkpoint file {checkpoint_path} not found.")
return None
logging.info(f"Attempting to load checkpoint from {checkpoint_path}")
# load model on specified device and not on saved device for model and return
# the checkpoint
with PathManager.open(checkpoint_path, "rb") as f:
checkpoint = torch.load(f, map_location=device)
logging.info(f"Loaded checkpoint from {checkpoint_path}")
return checkpoint
def update_classy_model(
model, model_state_dict: Dict, reset_heads: bool, strict: bool = True
) -> bool:
"""
Updates the model with the provided model state dictionary.
Args:
model: ClassyVisionModel instance to update
model_state_dict: State dict, should be the output of a call to
ClassyVisionModel.get_classy_state().
reset_heads: if False, uses the heads' state from model_state_dict.
strict: if True, strictly match the module/buffer keys in current model and
pass-in model_state_dict
"""
try:
if reset_heads:
current_model_state_dict = model.get_classy_state()
# replace the checkpointed head states with source head states
model_state_dict["model"]["heads"] = current_model_state_dict["model"][
"heads"
]
model.set_classy_state(model_state_dict, strict=strict)
logging.info("Model state load successful")
return True
except Exception:
logging.exception("Could not load the model state")
return False
def update_classy_state(task, state_dict: Dict) -> bool:
"""
Updates the task with the provided task dictionary.
Args:
task: ClassyTask instance to update
state_dict: State dict, should be the output of a call to
ClassyTask.get_classy_state().
"""
logging.info("Loading classy state from checkpoint")
try:
task.set_classy_state(state_dict)
logging.info("Checkpoint load successful")
return True
except Exception:
logging.exception("Could not load the checkpoint")
return False
def save_checkpoint(checkpoint_folder, state, checkpoint_file=CHECKPOINT_FILE):
"""
Saves a state variable to the specified checkpoint folder. Returns the filename
of the checkpoint if successful. Raises an exception otherwise.
"""
# make sure that we have a checkpoint folder:
if not PathManager.isdir(checkpoint_folder):
try:
PathManager.mkdirs(checkpoint_folder)
except BaseException:
logging.warning("Could not create folder %s." % checkpoint_folder)
raise
# write checkpoint atomically:
try:
full_filename = f"{checkpoint_folder}/{checkpoint_file}"
with PathManager.open(full_filename, "wb") as f:
torch.save(state, f)
return full_filename
except BaseException:
logging.warning(
"Unable to write checkpoint to %s." % checkpoint_folder, exc_info=True
)
raise
def flatten_dict(value_dict: Dict, prefix="", sep="_") -> Dict:
"""
Flattens nested dict into (key, val) dict. Used for flattening meters
structure, so that they can be visualized.
"""
items = []
for k, v in value_dict.items():
key = prefix + sep + k if prefix else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten_dict(value_dict=v, prefix=key, sep=sep).items())
else:
items.append((key, v))
return dict(items)
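def _flatten_dict_example() -> Dict:
    """Editor's sketch (not part of the original API): nested meter values
    become flat, underscore-joined keys."""
    nested = {"accuracy": {"top_1": 0.5, "top_5": 0.8}, "loss": 1.2}
    # -> {"accuracy_top_1": 0.5, "accuracy_top_5": 0.8, "loss": 1.2}
    return flatten_dict(nested)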
def load_json(json_path):
"""
Loads a json config from a file.
"""
    assert os.path.exists(json_path), "Json file %s not found" % json_path
    with open(json_path) as json_file:
        json_config = json_file.read()
try:
config = json.loads(json_config)
except BaseException as err:
raise Exception("Failed to validate config with error: %s" % str(err))
return config
@contextlib.contextmanager
def torch_seed(seed: Optional[int]):
"""Context manager which seeds the PyTorch PRNG with the specified seed and
restores the state afterward. Setting seed to None is equivalent to running
the code without the context manager."""
if seed is None:
yield
return
state = torch.get_rng_state()
torch.manual_seed(seed)
try:
yield
finally:
torch.set_rng_state(state)
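def _torch_seed_example() -> bool:
    """Editor's sketch (not part of the original API): the same seed reproduces
    the same random tensor, and the global RNG state is restored afterward."""
    with torch_seed(0):
        first = torch.randn(2)
    with torch_seed(0):
        second = torch.randn(2)
    return torch.equal(first, second)  # -> True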
def convert_to_one_hot(targets: torch.Tensor, classes) -> torch.Tensor:
"""
This function converts target class indices to one-hot vectors,
given the number of classes.
"""
assert (
torch.max(targets).item() < classes
), "Class Index must be less than number of classes"
one_hot_targets = torch.zeros(
(targets.shape[0], classes), dtype=torch.long, device=targets.device
)
one_hot_targets.scatter_(1, targets.long(), 1)
return one_hot_targets
def maybe_convert_to_one_hot(
target: torch.Tensor, model_output: torch.Tensor
) -> torch.Tensor:
"""
This function infers whether target is integer or 0/1 encoded
and converts it to 0/1 encoding if necessary.
"""
target_shape_list = list(target.size())
if len(target_shape_list) == 1 or (
len(target_shape_list) == 2 and target_shape_list[1] == 1
):
target = convert_to_one_hot(target.view(-1, 1), model_output.shape[1])
# target are not necessarily hard 0/1 encoding. It can be soft
# (i.e. fractional) in some cases, such as mixup label
assert (
target.shape == model_output.shape
), "Target must of the same shape as model_output."
return target
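def _one_hot_example() -> torch.Tensor:
    """Editor's sketch (not part of the original API): integer targets are
    expanded to match a (B, C) model output."""
    model_output = torch.randn(2, 3)  # batch of 2 samples, 3 classes
    target = torch.tensor([0, 2])  # integer class indices
    # -> tensor([[1, 0, 0],
    #            [0, 0, 1]])
    return maybe_convert_to_one_hot(target, model_output)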
def get_model_dummy_input(
model,
input_shape: Any,
input_key: Union[str, List[str]],
batchsize: int = 1,
non_blocking: bool = False,
) -> Any:
# input_shape with type dict of dict
# e.g. {"key_1": {"key_1_1": [2, 3], "key_1_2": [4, 5, 6], "key_1_3": []}
if isinstance(input_shape, dict):
input = {}
for key, value in input_shape.items():
input[key] = get_model_dummy_input(
model, value, input_key, batchsize, non_blocking
)
elif isinstance(input_key, list):
# av mode, with multiple input keys
input = {}
for i, key in enumerate(input_key):
shape = (batchsize,) + tuple(input_shape[i])
cur_input = torch.zeros(shape)
if next(model.parameters()).is_cuda:
cur_input = cur_input.cuda(non_blocking=non_blocking)
input[key] = cur_input
else:
# add a dimension to represent minibatch axis
shape = (batchsize,) + tuple(input_shape)
input = torch.zeros(shape)
if next(model.parameters()).is_cuda:
input = input.cuda(non_blocking=non_blocking)
if input_key:
input = {input_key: input}
return input
def get_batchsize_per_replica(x: Union[Tuple, List, Dict]) -> int:
"""
Some layer may take tuple/list/dict/list[dict] as input in forward function. We
recursively dive into the tuple/list until we meet a tensor and infer the batch size
"""
while isinstance(x, (list, tuple)):
assert len(x) > 0, "input x of tuple/list type must have at least one element"
x = x[0]
if isinstance(x, (dict,)):
# index zero is always equal to batch size. select an arbitrary key.
key_list = list(x.keys())
x = x[key_list[0]]
return x.size()[0]
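def _batchsize_example() -> int:
    """Editor's sketch (not part of the original API): the batch size is read
    off the first tensor found inside the (possibly nested) sample."""
    sample = {"input": torch.zeros(8, 3, 224, 224), "target": torch.zeros(8)}
    return get_batchsize_per_replica(sample)  # -> 8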
def split_batchnorm_params(model: nn.Module):
"""Finds the set of BatchNorm parameters in the model.
Recursively traverses all parameters in the given model and returns a tuple
of lists: the first element is the set of batchnorm parameters, the second
list contains all other parameters of the model."""
batchnorm_params = []
other_params = []
for module in model.modules():
# If module has children (i.e. internal node of constructed DAG) then
# only add direct parameters() to the list of params, else go over
# children node to find if they are BatchNorm or have "bias".
if list(module.children()) != []:
for params in module.parameters(recurse=False):
if params.requires_grad:
other_params.append(params)
elif isinstance(module, nn.modules.batchnorm._BatchNorm):
for params in module.parameters():
if params.requires_grad:
batchnorm_params.append(params)
else:
for params in module.parameters():
if params.requires_grad:
other_params.append(params)
return batchnorm_params, other_params
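def _split_batchnorm_params_example():
    """Editor's sketch (not part of the original API): BatchNorm weights and
    biases are separated from all other trainable parameters."""
    model = nn.Sequential(nn.Conv2d(3, 4, kernel_size=3), nn.BatchNorm2d(4))
    bn_params, other_params = split_batchnorm_params(model)
    return len(bn_params), len(other_params)  # -> (2, 2)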
class Timer:
"""Timer context manager to get the elapsed time for a code block.
Example:
.. code-block:: python
with Timer() as timer:
do_something()
elapsed_time = timer.elapsed_time
"""
def __init__(self):
self.start = 0
self.elapsed_time = 0
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, *args, **kwargs):
self.elapsed_time = time.perf_counter() - self.start
@contextlib.contextmanager
def _train_mode(model: nn.Module, train_mode: bool):
"""Context manager which sets the train mode of a model. After returning, it
restores the state of every sub-module individually."""
train_modes = {}
for name, module in model.named_modules():
train_modes[name] = module.training
try:
model.train(train_mode)
yield
finally:
for name, module in model.named_modules():
module.training = train_modes[name]
def log_class_usage(component_type, klass):
"""This function is used to log the usage of different Classy components."""
identifier = "ClassyVision"
if klass and hasattr(klass, "__name__"):
identifier += f".{component_type}.{klass.__name__}"
torch._C._log_api_usage_once(identifier)
def get_torch_version():
"""Get the torch version as [major, minor].
All comparisons must be done with the two version values. Revisions are not
supported.
"""
version_list = torch.__version__.split(".")[:2]
return [int(version_str) for version_str in version_list]
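# Editor's sketch (not part of the original source): gating a code path on the
# [major, minor] list returned by get_torch_version; the 1.6 threshold is hypothetical.
def _example_requires_recent_torch() -> bool:
    # Python list comparison is lexicographic, so [1, 10] >= [1, 6] behaves as expected.
    return get_torch_version() >= [1, 6]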
train_model = partial(_train_mode, train_mode=True)
train_model.__doc__ = """Context manager which puts the model in train mode.
After returning, it restores the state of every sub-module individually.
"""
eval_model = partial(_train_mode, train_mode=False)
eval_model.__doc__ = """Context manager which puts the model in eval mode.
After returning, it restores the state of every sub-module individually.
"""
def master_params(optimizer):
"""Generator to iterate over all parameters in the optimizer param_groups.
When apex is available, apex.amp.master_params is used to guarantee that the FP32
master copy of the parameters is returned when O2 is enabled. Otherwise, the
param_groups are iterated over directly."""
if apex_available:
yield from apex.amp.master_params(optimizer)
else:
for group in optimizer.param_groups:
for p in group["params"]:
yield p
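# Editor's sketch (not part of the original source): clipping gradients over the
# (possibly FP32 master) parameters yielded by master_params; the optimizer and
# max_norm value are hypothetical.
def _example_clip_grads(optimizer: "torch.optim.Optimizer", max_norm: float = 1.0) -> None:
    torch.nn.utils.clip_grad_norm_(list(master_params(optimizer)), max_norm)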
| ClassyVision-main | classy_vision/generic/util.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| ClassyVision-main | classy_vision/generic/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn.modules as nn
from classy_vision.generic.util import get_model_dummy_input, is_pos_int
from classy_vision.models import ClassyModel
from PIL import Image
try:
import visdom
from torch.utils.tensorboard import SummaryWriter
except ImportError:
pass
# global handle to the visdom connection:
vis = []  # using a list makes this work as an upvalue
# define loss types:
UNSUPPORTED_LOSSES = (
nn.CosineEmbeddingLoss,
nn.PoissonNLLLoss,
nn.KLDivLoss,
nn.TripletMarginLoss,
)
REGRESSION_LOSSES = (nn.L1Loss, nn.SmoothL1Loss, nn.MSELoss)
# connection to visdom:
def visdom_connect(server: Optional[str] = None, port: Optional[int] = None) -> None:
"""Connects to a visdom server if not currently connected."""
if not visdom_connected():
vis.append(visdom.Visdom(server=server, port=port))
# check if we are connected to visdom:
def visdom_connected() -> bool:
"""Returns True if the client is connected to a visdom server."""
return (
len(vis) > 0
and hasattr(vis[-1], "check_connection")
and vis[-1].check_connection()
)
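# Editor's sketch (not part of the original source): connecting to a locally running
# visdom server before plotting; the host and port below are hypothetical defaults.
def _example_connect() -> bool:
    visdom_connect(server="http://localhost", port=8097)
    return visdom_connected()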
# function that plots learning curve:
def plot_learning_curves(
curves: Dict[str, List],
visdom_server: Optional["visdom.Visdom"] = None,
env: Optional[str] = None,
win: Optional[str] = None,
title: str = "",
) -> Any:
"""Plots the specified dict of learning curves in visdom. Optionally, the
environment, window handle, and title for the visdom plot can be specified.
"""
if visdom_server is None and visdom_connected():
visdom_server = vis[-1]
# return if we are not connected to visdom server:
if not visdom_server or not visdom_server.check_connection():
print("WARNING: Not connected to visdom. Skipping plotting.")
return
# assertions:
assert type(curves) == dict
assert all(type(curve) == list for _, curve in curves.items())
# remove batch time curves:
_curves = {k: curves[k] for k in curves.keys() if "batch time" not in k}
# show plot:
X = torch.stack([torch.FloatTensor(curve) for _, curve in _curves.items()], dim=1)
Y = torch.arange(0, X.size(0))
Y = Y.view(Y.numel(), 1).expand(Y.numel(), X.size(1))
opts = {"title": title, "legend": list(_curves.keys()), "xlabel": "Epochs"}
return visdom_server.line(X, Y, env=env, win=win, opts=opts)
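# Editor's sketch (not part of the original source): plotting two hypothetical
# per-epoch curves; assumes a connection was established via visdom_connect().
def _example_plot_curves() -> Any:
    curves = {
        "train_accuracy": [0.42, 0.58, 0.66, 0.71],
        "test_accuracy": [0.40, 0.55, 0.62, 0.65],
    }
    return plot_learning_curves(curves, title="accuracy per epoch")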
# function that plots loss functions:
def plot_losses(
losses: Union[nn.Module, List[nn.Module]],
visdom_server: Optional["visdom.Visdom"] = None,
env: Optional[str] = None,
win: Optional[str] = None,
title: str = "",
) -> Any:
"""Constructs a plot of specified losses as function of y * f(x). The losses
are a list of nn.Module losses. Optionally, the environment, window handle,
and title for the visdom plot can be specified.
"""
if visdom_server is None and visdom_connected():
visdom_server = vis[-1]
# return if we are not connected to visdom server:
if not visdom_server or not visdom_server.check_connection():
print("WARNING: Not connected to visdom. Skipping plotting.")
return
# assertions:
if isinstance(losses, nn.Module):
losses = [losses]
assert type(losses) == list
assert all(isinstance(loss, nn.Module) for loss in losses)
if any(isinstance(loss, UNSUPPORTED_LOSSES) for loss in losses):
raise NotImplementedError("loss function not supported")
# loop over all loss functions:
for idx, loss in enumerate(losses):
# construct scores and targets:
score = torch.arange(-5.0, 5.0, 0.005)
if idx == 0:
loss_val = torch.FloatTensor(score.size(0), len(losses))
if isinstance(loss, REGRESSION_LOSSES):
target = torch.FloatTensor(score.size()).fill_(0.0)
else:
target = torch.LongTensor(score.size()).fill_(1)
# compute loss values:
for n in range(0, score.nelement()):
loss_val[n][idx] = loss(
score.narrow(0, n, 1), target.narrow(0, n, 1)
).item()
# show plot:
title = str(loss) if title == "" else title
legend = [str(loss) for loss in losses]
opts = {"title": title, "xlabel": "Score", "ylabel": "Loss", "legend": legend}
win = visdom_server.line(loss_val, score, env=env, win=win, opts=opts)
return win
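# Editor's sketch (not part of the original source): comparing two of the supported
# regression losses over the same score range; assumes a live visdom connection.
def _example_plot_regression_losses() -> Any:
    return plot_losses([nn.L1Loss(), nn.MSELoss()], title="regression losses")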
def plot_model(
model: ClassyModel,
size: Tuple[int, ...] = (3, 224, 224),
input_key: Optional[Union[str, List[str]]] = None,
writer: Optional["SummaryWriter"] = None,
folder: str = "",
train: bool = False,
) -> None:
"""Visualizes a model in TensorBoard.
The TensorBoard writer can either be specified directly via `writer` or via a
`folder` in which a new writer is created.
The model can be run in training or evaluation mode via the `train` argument.
Example usage on devserver:
- Install TensorBoard using: `sudo feature install tensorboard`
- Start TensorBoard using: `tensorboard --port=8098 --logdir <folder>`
"""
assert (
writer is not None or folder != ""
), "must specify SummaryWriter or folder to create SummaryWriter in"
input = get_model_dummy_input(model, size, input_key)
if writer is None:
writer = SummaryWriter(log_dir=folder, comment="Model graph")
with torch.no_grad():
orig_train = model.training
model.train(train)
writer.add_graph(model, input_to_model=(input,))
model.train(orig_train)
writer.flush()
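# Editor's sketch (not part of the original source): exporting a model graph to a
# hypothetical TensorBoard log directory; the input size matches the default dummy shape.
def _example_plot_model(model: ClassyModel) -> None:
    plot_model(model, size=(3, 224, 224), folder="/tmp/classy_model_graph")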
# function that produces an image map:
def image_map(
mapcoord: Union[np.ndarray, torch.Tensor],
dataset: Union[
torch.utils.data.dataloader.DataLoader, torch.utils.data.dataset.Dataset
],
mapsize: int = 5000,
imsize: int = 32,
unnormalize: Optional[Callable] = None,
snap_to_grid: bool = False,
) -> torch.ByteTensor:
"""Constructs a 2D map of images.
The 2D coordinates for each of the images are specified in `mapcoord`, the
corresponding images are in `dataset`. Optional arguments set the size of
the map images, the size of the images themselves, the unnormalization
transform, and whether or not to snap images to a grid.
"""
# assertions:
if type(mapcoord) == np.ndarray:
mapcoord = torch.from_numpy(mapcoord)
assert torch.is_tensor(mapcoord)
if isinstance(dataset, torch.utils.data.dataloader.DataLoader):
dataset = dataset.dataset
assert isinstance(dataset, torch.utils.data.dataset.Dataset)
assert is_pos_int(mapsize)
assert is_pos_int(imsize)
if unnormalize is not None:
assert callable(unnormalize)
# initialize some variables:
import torchvision.transforms.functional as F
background = 255
mapim = torch.ByteTensor(3, mapsize, mapsize).fill_(background)
# normalize map coordinates:
mapc = mapcoord.add(-(mapcoord.min()))
mapc.div_(mapc.max())
# loop over images:
for idx in range(len(dataset)):
# compute grid location:
if snap_to_grid:
y = 1 + int(math.floor(mapc[idx][0] * (mapsize - imsize - 2)))
x = 1 + int(math.floor(mapc[idx][1] * (mapsize - imsize - 2)))
else:
y = 1 + int(math.floor(mapc[idx][0] * (math.floor(mapsize - imsize) - 2)))
x = 1 + int(math.floor(mapc[idx][1] * (math.floor(mapsize - imsize) - 2)))
# check whether we can overwrite this location:
overwrite = not snap_to_grid
if not overwrite:
segment = mapim.narrow(1, y, imsize).narrow(2, x, imsize)
overwrite = segment.eq(background).all()
# draw image:
if overwrite:
# load, unnormalize, and resize image:
image = dataset[idx][0]
if unnormalize is not None:
image = unnormalize(image)
resized_im = F.to_tensor(
F.resize(F.to_pil_image(image), imsize, Image.BILINEAR)
)
# place image:
segment = mapim.narrow(1, y, imsize).narrow(2, x, imsize)
segment.copy_(resized_im.mul_(255.0).byte())
# return map:
return mapim
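# Editor's sketch (not part of the original source): laying out dataset images on a
# 2D map from hypothetical 2-D embeddings (e.g. produced by t-SNE), one row per image.
def _example_image_map(
    dataset: torch.utils.data.dataset.Dataset, embeddings_2d: np.ndarray
) -> torch.ByteTensor:
    return image_map(embeddings_2d, dataset, mapsize=2000, imsize=32, snap_to_grid=True)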
| ClassyVision-main | classy_vision/generic/visualize.py |