# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core code for sequence length warmup."""
import logging
import textwrap
from typing import Dict, Mapping, Optional
import torch
import torch.utils.data
from composer.core import Algorithm, Batch, Event, State, TimeUnit, get_precision_context
from composer.loggers import Logger
from composer.models import HuggingFaceModel
from composer.utils import dist, ensure_tuple
log = logging.getLogger(__name__)
__all__ = ['SeqLengthWarmup', 'set_batch_sequence_length']
def set_batch_sequence_length(
batch: Dict[str, torch.Tensor],
curr_seq_len: int,
truncate: bool = True,
preserve_end_of_sequence: bool = False,
) -> Batch:
"""Set the sequence length of a batch.
Changes the sequence length of all tensors in the provided dictionary
to ``curr_seq_len`` by either truncating the tensors (``truncate=True``)
or reshaping the tensors to create new examples from the extra tokens
(``truncate=False``).
.. note::
The schedule for ``curr_seq_len`` over training time should be managed
outside of this function.
.. note::
Variable input lengths can create CUDA OOM errors. To avoid this,
we follow the `PyTorch notes <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#pre-allocate-memory-in-case-of-variable-input-length>`_
and pre-allocate the memory with a blank forward and backward pass.
Args:
batch (Dict[str, Tensor]): The input batch to the model, must be a dictionary.
        curr_seq_len (int): The desired sequence length to apply.
truncate (bool, optional): Truncate sequences early, or reshape tensors to create
new examples out of the extra tokens. Default: ``True``.
preserve_end_of_sequence (bool, optional): Preserve the end-of-sequence of the batch when
truncating. Useful when input formats include a unique end-of-sequence token.
Ignored if ``truncate=False``. Default: ``False``.
E.g., if ``batch["input_ids"]`` is ``[[10, 11, 12, 13, 14, 15]]``
and ``curr_seq_length=3``, ``"input_ids"`` in the returned batch would be
``[[10, 11, 12]]`` with ``preserve_end_of_sequence=False`` and would be
``[[10, 11, 15]]`` with ``preserve_end_of_sequence=True``. This behavior applies to any
batch tensor with 2 or more dimensions.
Returns:
Dict[str, Tensor]: a Mapping of input tensors to the model,
where all tensors have curr_seq_len in the second dimension.
Example:
.. code-block::
import composer.functional as cf
for epoch in range(num_epochs):
for X, y in train_loader:
X = cf.set_batch_sequence_length(X, sequence_length)
y_hat = model(X)
loss = loss_fn(y_hat, y)
"""
assert isinstance(batch, Mapping)
# This should act like a no-op if curr_seq_len isn't shorter than the batch's sequence length
batch_seq_len = 0
# The batch sequence length is assumed to be the shape[1] dimension of any non-1D batch tensor
for batch_tensor in batch.values():
tensor_shape = batch_tensor.shape
if len(tensor_shape) > 1:
batch_seq_len = tensor_shape[1]
break
if curr_seq_len >= batch_seq_len:
return batch
if truncate:
# Truncate, but preserve end-of-sequence tokens
if preserve_end_of_sequence:
if 'attention_mask' not in batch:
raise ValueError(
'Sequence Length Warmup requires that the batch has "attention_mask" when using ``preserve_end_of_sequence=True``.'
)
r_idx = torch.arange(batch['attention_mask'].shape[0])
# eos_idx should point to the final token index for each batch sample
eos_idx = batch['attention_mask'].sum(dim=1).long() - 1
# eos_idx_truncated is the same thing, after truncation is applied
eos_idx_truncated = eos_idx.clamp(max=curr_seq_len - 1)
for k in batch.keys():
if batch[k].ndim < 2:
raise ValueError(
f'Sequence Length Warmup requires that all tensors are sequence-shaped when ``truncate=True``. '
f'Tensor "{k}" has shape {batch[k].shape}.')
eos_value = batch[k][r_idx, eos_idx]
batch[k] = batch[k][:, :curr_seq_len].contiguous()
batch[k][r_idx, eos_idx_truncated] = eos_value
else:
for k in batch.keys():
if batch[k].ndim < 2:
raise ValueError(
f'Sequence Length Warmup requires that all tensors are sequence-shaped when ``truncate=True``. '
f'Tensor "{k}" has shape {batch[k].shape}.')
batch[k] = batch[k][:, :curr_seq_len].contiguous()
else:
if 'input_ids' not in batch:
raise ValueError(
'Sequence Length Warmup requires that the batch has "input_ids" when using ``truncate=False``.')
input_ids_shape = batch['input_ids'].shape
# ensure new tensor shape is divisible by curr_seq_len
input_ids = batch['input_ids'].view(-1)
tensor_len = (input_ids.shape[0] // curr_seq_len) * curr_seq_len
input_ids = input_ids[:tensor_len]
input_ids = input_ids.view(-1, curr_seq_len)
batch['input_ids'] = input_ids
for k, v in batch.items():
if k == 'input_ids':
continue
if v.shape != input_ids_shape:
raise ValueError(
f'When using ``truncate=False``, Sequence Length Warmup only supports batches where all tensors have the same shape. '
f'Tensor "{k}" has shape {v.shape} but should have shape {input_ids_shape}.')
v = v.view(-1)
v = v[:tensor_len]
batch[k] = v.view(-1, curr_seq_len)
return batch
class SeqLengthWarmup(Algorithm):
"""Progressively increases the sequence length during training.
Changes the sequence length of all tensors in the input batch. The
sequence length increases from ``min_seq_length`` to ``max_seq_length``
in steps of ``step_size`` during the first ``duration`` fraction of
training.
The sequence length is then kept at ``max_seq_length``
for the rest of training.
Tensors are either truncated (``truncate=True``) or reshaped to
create new examples from the extra tokens (``truncate=False``).
This algorithm runs on :attr:`.Event.AFTER_DATALOADER` to modify
the sequence length of a batch of data after the model and data have been moved to
accelerators.
.. note::
``step_size`` should be a `multiple of eight <https://developer.nvidia.com/blog/optimizing-gpu-performance-tensor-cores/>`_ for
optimal throughput on NVIDIA GPUs.
.. note::
Variable input lengths can create CUDA OOM errors. To avoid this,
we follow the `PyTorch notes <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#pre-allocate-memory-in-case-of-variable-input-length>`_
and pre-allocate the memory with a blank forward and backward pass.
See the :doc:`Method Card </method_cards/seq_length_warmup>` for more details.
Example:
.. code-block::
from composer.algorithms import SeqLengthWarmup
from composer import Trainer
seq_length_warmup = SeqLengthWarmup(duration=0.5,
min_seq_length=8,
max_seq_length=1024,
step_size=8,
truncate=True,
preserve_end_of_sequence=False)
trainer = Trainer(model=model,
train_dataloader=train_dataloader,
max_duration="1ep",
algorithms=[seq_length_warmup])
Args:
        duration (float, optional): Fraction of total training over which the sequence
            length warmup occurs. Default = ``0.3``.
min_seq_length (int, optional): Minimum sequence length to start the warmup.
Default = ``8``.
max_seq_length (int, optional): Maximum sequence length to stop the warmup.
Default = ``1024``.
step_size (int, optional): Step size of sequence length. Default = ``8``.
truncate (bool, optional): Truncate sequences early, or reshape tensors to create
new examples out of the extra tokens. Default: ``True``.
preserve_end_of_sequence (bool, optional): Preserve the end-of-sequence of the batch when
truncating. Useful when input formats include a unique end-of-sequence token.
Ignored if ``truncate=False``. Default: ``False``.
E.g., if ``batch["input_ids"]`` is ``[[10, 11, 12, 13, 14, 15]]``
and ``curr_seq_length=3``, ``"input_ids"`` in the returned batch would be
``[[10, 11, 12]]`` with ``preserve_end_of_sequence=False`` and would be
``[[10, 11, 15]]`` with ``preserve_end_of_sequence=True``. This behavior applies to any
batch tensor with 2 or more dimensions.
"""
def __init__(
self,
duration: float = 0.3,
min_seq_length: int = 8,
max_seq_length: int = 1024,
step_size: int = 8,
truncate: bool = True,
preserve_end_of_sequence: bool = False,
):
self.duration = duration
self.min_seq_length = min_seq_length
self.max_seq_length = max_seq_length
self.step_size = step_size
self.truncate = truncate
self.preserve_end_of_sequence = preserve_end_of_sequence
if self.duration < 0 or self.duration > 1:
raise ValueError(f'Duration must be between 0 and 1, got: {self.duration}')
if self.max_seq_length < self.min_seq_length:
            raise ValueError(f'max_seq_length={self.max_seq_length} must be '
                             f'greater than or equal to min_seq_length={self.min_seq_length}')
self._activated = False
self._original_model = None
def _activate_model(self, state: State, logger: Logger) -> None:
"""Does a forward and a backward pass on a dummy input.
        The purpose of activating the model is to prevent OOMs. This happens in two ways.
First, this prevents GPU memory from being reallocated when the sequence
length increases.
Second, it detects if the batch*max_sequence_length size will cause an OOM and
decreases state.device_train_microbatch_size accordingly. This logic mirrors the
``device_train_microbatch_size='auto'`` logic in :class:`.Trainer`.
"""
assert self._original_model is not None, 'original model should be set on Event.INIT'
try:
# Both PyTorch and FFCV dataloaders define a `batch_size` attribute
# This exception would mainly be raised if the user is passing in a custom
# iterable
per_gpu_macrobatch = getattr(state.dataloader, 'batch_size')
except AttributeError as e:
raise AttributeError(
'Sequence Length Warmup requires the `state.dataloader` to have a `batch_size` attribute.') from e
if per_gpu_macrobatch is None:
raise RuntimeError('Sequence Length Warmup algorithm requires constant batch size.')
# truncate all sequence-shaped tensors to the max sequence length
batch_clone = {k: torch.clone(v) for k, v in state.batch.items()}
for k, v in batch_clone.items():
if v.ndim < 2:
raise ValueError(f'Sequence Length Warmup requires that all tensors are sequence-shaped. '
f'Tensor "{k}" has shape {v.shape}.')
batch_clone[k] = v[:, :self.max_seq_length].contiguous()
# In-line to avoid circular dependency
from composer.trainer.trainer import _adjust_device_train_microbatch_size, _is_cuda_oom
# This loop tries to do a forward/backward pass using the current microbatch size.
# If it hits an OOM error, it halves `state.device_train_microbatch_size` and tries again
# until it succeeds.
while True:
model_inputs = {k: v[:state.device_train_microbatch_size] for k, v in batch_clone.items()}
found_cuda_oom = 0 # int since bool BOR not supported on all torch.distributed backends
try:
# start by running a forward and backward pass
# of the maximum sequence length to allocate cache.
with get_precision_context(state.precision):
outputs = state.model.forward(model_inputs)
loss = self._original_model.loss(outputs, model_inputs)
# since use_grad_scaling is in the Trainer, and we
# don't care about the loss values, skip scaling
for loss_item in ensure_tuple(loss):
loss_item.backward()
# Zero any gradients created by the backward pass
for optimizer in state.optimizers:
optimizer.zero_grad()
# This error/state.device_train_microbatch_size handling mimics the logic in trainer._train_batch().
except RuntimeError as e:
if state.auto_microbatching and _is_cuda_oom(e):
log.debug((f"Rank {dist.get_global_rank()} OOM'd."))
found_cuda_oom = 1
else:
raise
if state.auto_microbatching:
# Propagate across all ranks if any rank hit CUDA OOM
found_cuda_oom = state.device.tensor_to_device(torch.tensor([found_cuda_oom], dtype=torch.uint8))
dist.all_reduce(found_cuda_oom, reduce_operation='MAX')
if found_cuda_oom.item() == 1:
_adjust_device_train_microbatch_size(state)
# Skip return and rerun after handling oom
continue
# Activate and return if we've completed without OOMing.
self._activated = True
return
def match(self, event: Event, state: State) -> bool:
return (event == Event.INIT and self._original_model is None) or event == Event.AFTER_DATALOADER
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
if event == Event.INIT:
if not isinstance(state.model, HuggingFaceModel):
raise RuntimeError(
textwrap.dedent(f"""\
{type(self).__name__} requires state.model to be of type {HuggingFaceModel.__name__}, not of type {type(state.model)}"""
))
self._original_model = state.model
return
assert state.dataloader is not None, 'dataloader should be set on AFTER_DATALOADER'
assert state.max_duration is not None, 'max_duration should be set on AFTER_DATALOADER'
# in order to avoid OOMs, we do a forward and a backward pass on a dummy input.
if not self._activated:
self._activate_model(state, logger)
if state.max_duration.unit == TimeUnit.EPOCH:
if state.dataloader_len is None:
                raise RuntimeError('Sequence Length Warmup requires the dataloader to be sized.')
num_optimization_steps = int(state.dataloader_len) * state.max_duration.value
elif state.max_duration.unit == TimeUnit.BATCH:
num_optimization_steps = state.max_duration.value
else:
raise NotImplementedError(
textwrap.dedent("""\
                    To use sequence length warmup, the max_duration must be in epochs or batches.
                    Specifying the `max_duration` in tokens or samples for use with sequence
                    length warmup will be supported in a future Composer release. See
https://github.com/mosaicml/composer/issues/226."""))
num_warmup_steps = int(num_optimization_steps * self.duration) # in batches
# assume the full sequence length is the unaltered sequence length
num_update_steps = (self.max_seq_length - self.min_seq_length) // self.step_size
update_every_n_steps = num_warmup_steps // num_update_steps
curr_seq_len = self.step_size * (int(state.timestamp.batch) // update_every_n_steps) + self.min_seq_length
curr_seq_len = max(curr_seq_len, self.min_seq_length)
curr_seq_len = min(curr_seq_len, self.max_seq_length)
state.batch = set_batch_sequence_length(state.batch, curr_seq_len, self.truncate, self.preserve_end_of_sequence)
batch_size = state.batch['input_ids'].shape[0]
logger.log_metrics({
'seq_length_warmup/curr_seq_len': curr_seq_len,
'seq_length_warmup/curr_bs': batch_size,
})
| composer-dev | composer/algorithms/seq_length_warmup/seq_length_warmup.py |
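A minimal usage sketch of the functional API defined above: it builds a toy batch and shows both the ``truncate=True`` and ``truncate=False`` behaviors of ``set_batch_sequence_length``. The tensor shapes and values are illustrative only.

# Sketch: truncating vs. reshaping a toy batch with set_batch_sequence_length.
import torch
from composer.functional import set_batch_sequence_length

batch = {
    'input_ids': torch.arange(16).reshape(2, 8),            # 2 samples, sequence length 8
    'attention_mask': torch.ones(2, 8, dtype=torch.long),   # every position is a real token
}

# Truncate every tensor in the batch down to a sequence length of 4.
short_batch = set_batch_sequence_length(batch, curr_seq_len=4, truncate=True)
print(short_batch['input_ids'].shape)  # torch.Size([2, 4])

# With truncate=False, the extra tokens become new examples instead:
# 2 samples x 8 tokens -> 4 samples x 4 tokens.
reshaped = set_batch_sequence_length({'input_ids': torch.arange(16).reshape(2, 8)},
                                     curr_seq_len=4, truncate=False)
print(reshaped['input_ids'].shape)  # torch.Size([4, 4])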
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Sequence length warmup progressively increases the sequence length during training of NLP models.
See the :doc:`Method Card </method_cards/seq_length_warmup>` for more details.
"""
from composer.algorithms.seq_length_warmup.seq_length_warmup import SeqLengthWarmup, set_batch_sequence_length
__all__ = ['SeqLengthWarmup', 'set_batch_sequence_length']
| composer-dev | composer/algorithms/seq_length_warmup/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import logging
from typing import Optional, Sequence, Union
import torch
from torch.optim import Optimizer
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.utils import module_surgery
log = logging.getLogger(__name__)
def apply_squeeze_excite(
model: torch.nn.Module,
latent_channels: float = 64,
min_channels: int = 128,
optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None,
) -> None:
"""Adds Squeeze-and-Excitation blocks (`Hu et al, 2019 <https://arxiv.org/abs/1709.01507>`_) after
:class:`torch.nn.Conv2d` layers.
A Squeeze-and-Excitation block applies global average pooling to the input,
feeds the resulting vector to a single-hidden-layer fully-connected
network (MLP), and uses the outputs of this MLP as attention coefficients
to rescale the input. This allows the network to take into account global
information about each input, as opposed to only local receptive fields
like in a convolutional layer.
Args:
model (torch.nn.Module): The module to apply squeeze excite replacement to.
latent_channels (float, optional): Dimensionality of the hidden layer within the added
MLP. If less than 1, interpreted as a fraction of the number of
output channels in the :class:`torch.nn.Conv2d` immediately
preceding each Squeeze-and-Excitation block. Default: ``64``.
        min_channels (int, optional): An SE block is added after a :class:`torch.nn.Conv2d`
            module ``conv`` only if ``min(conv.in_channels, conv.out_channels) >= min_channels``.
            Default: ``128``.
optimizers (torch.optim.Optimizer | Sequence[torch.optim.Optimizer], optional):
Existing optimizer(s) bound to ``model.parameters()``. All optimizers that have already been
constructed with ``model.parameters()`` must be specified here so that
they will optimize the correct parameters.
If the optimizer(s) are constructed *after* calling this function,
then it is safe to omit this parameter. These optimizers will see the correct
model parameters.
Example:
.. testcode::
import composer.functional as cf
from torchvision import models
model = models.resnet50()
            cf.apply_squeeze_excite(model)
"""
def convert_module(module: torch.nn.Module, module_index: int):
assert isinstance(module, torch.nn.Conv2d), 'should only be called with conv2d'
already_squeeze_excited = hasattr(module, '_already_squeeze_excited') and module._already_squeeze_excited
if min(module.in_channels, module.out_channels) >= min_channels and not already_squeeze_excited:
return SqueezeExciteConv2d.from_conv2d(module, module_index, latent_channels=latent_channels)
module_surgery.replace_module_classes(model, optimizers=optimizers, policies={torch.nn.Conv2d: convert_module})
class SqueezeExcite2d(torch.nn.Module):
"""Squeeze-and-Excitation block from (`Hu et al, 2019 <https://arxiv.org/abs/1709.01507>`_)
This block applies global average pooling to the input, feeds the resulting
vector to a single-hidden-layer fully-connected network (MLP), and uses the
outputs of this MLP as attention coefficients to rescale the input. This
allows the network to take into account global information about each input,
as opposed to only local receptive fields like in a convolutional layer.
Args:
num_features (int): Number of features or channels in the input.
latent_channels (float, optional): Dimensionality of the hidden layer within the added
MLP. If less than 1, interpreted as a fraction of ``num_features``. Default: ``0.125``.
"""
def __init__(self, num_features: int, latent_channels: float = .125):
super().__init__()
self.latent_channels = int(latent_channels if latent_channels >= 1 else latent_channels * num_features)
flattened_dims = num_features
self.pool_and_mlp = torch.nn.Sequential(torch.nn.AdaptiveAvgPool2d(1), torch.nn.Flatten(),
torch.nn.Linear(flattened_dims, self.latent_channels, bias=False),
torch.nn.ReLU(),
torch.nn.Linear(self.latent_channels, num_features, bias=False),
torch.nn.Sigmoid())
def forward(self, input: torch.Tensor) -> torch.Tensor:
n, c, _, _ = input.shape
attention_coeffs = self.pool_and_mlp(input)
return input * attention_coeffs.reshape(n, c, 1, 1)
class SqueezeExciteConv2d(torch.nn.Module):
"""Helper class used to add a :class:`.SqueezeExcite2d` module after a :class:`torch.nn.Conv2d`."""
def __init__(self, *args, latent_channels: float = 0.125, conv: Optional[torch.nn.Conv2d] = None, **kwargs):
super().__init__()
self.conv = torch.nn.Conv2d(*args, **kwargs) if conv is None else conv
self.conv._already_squeeze_excited = True # Mark to avoid rewrapping on duplicate calls
self.se = SqueezeExcite2d(num_features=self.conv.out_channels, latent_channels=latent_channels)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return self.se(self.conv(input))
@staticmethod
def from_conv2d(module: torch.nn.Conv2d, module_index: int, latent_channels: float):
return SqueezeExciteConv2d(conv=module, latent_channels=latent_channels)
class SqueezeExcite(Algorithm):
"""Adds Squeeze-and-Excitation blocks (`Hu et al, 2019 <https://arxiv.org/abs/1709.01507>`_) after the
:class:`torch.nn.Conv2d` modules in a neural network.
Runs on :attr:`.Event.INIT`. See :class:`SqueezeExcite2d` for more information.
Args:
latent_channels (float, optional): Dimensionality of the hidden layer within the added
MLP. If less than 1, interpreted as a fraction of the number of
output channels in the :class:`torch.nn.Conv2d` immediately
preceding each Squeeze-and-Excitation block. Default: ``64``.
min_channels (int, optional): An SE block is added after a :class:`torch.nn.Conv2d`
module ``conv`` only if ``min(conv.in_channels, conv.out_channels) >= min_channels``.
For models that reduce spatial size and increase channel count
deeper in the network, this parameter can be used to only
add SE blocks deeper in the network. This may be desirable
because SE blocks add less overhead when their inputs have
smaller spatial size. Default: ``128``.
"""
def __init__(
self,
latent_channels: float = 64,
min_channels: int = 128,
):
self.latent_channels = latent_channels
self.min_channels = min_channels
def __repr__(self) -> str:
return f'{self.__class__.__name__}(latent_channels={self.latent_channels},min_channels={self.min_channels})'
@staticmethod
def required_on_load() -> bool:
return True
def match(self, event: Event, state: State) -> bool:
return event == Event.INIT
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
apply_squeeze_excite(state.model,
optimizers=state.optimizers,
latent_channels=self.latent_channels,
min_channels=self.min_channels)
layer_count = module_surgery.count_module_instances(state.model, SqueezeExciteConv2d)
log.info(f'Applied SqueezeExcite to model {state.model.__class__.__name__} '
f'with latent_channels={self.latent_channels}, '
f'min_channels={self.min_channels}. '
f'Model now has {layer_count} SqueezeExcite layers.')
logger.log_hyperparameters({
'squeeze_excite/num_squeeze_excite_layers': layer_count,
})
| composer-dev | composer/algorithms/squeeze_excite/squeeze_excite.py |
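For reference, a short sketch of applying the surgery above to a torchvision ResNet-50 with the functional API; the ``latent_channels`` and ``min_channels`` values simply restate the defaults.

# Sketch: add Squeeze-and-Excitation blocks to a torchvision ResNet-50.
import torch
from torchvision import models

import composer.functional as cf
from composer.algorithms.squeeze_excite import SqueezeExciteConv2d
from composer.utils import module_surgery

model = models.resnet50()

# Wrap each Conv2d with min(in_channels, out_channels) >= 128 in an SE block.
cf.apply_squeeze_excite(model, latent_channels=64, min_channels=128)
print(module_surgery.count_module_instances(model, SqueezeExciteConv2d))

# The modified network remains a drop-in replacement for the original.
out = model(torch.randn(2, 3, 224, 224))
print(out.shape)  # torch.Size([2, 1000])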
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Adds Squeeze-and-Excitation blocks (`Hu et al, 2019 <https://arxiv.org/abs/1709.01507>`_) after the
:class:`~torch.nn.Conv2d` modules in a neural network.
See :class:`~composer.algorithms.SqueezeExcite` or the :doc:`Method Card </method_cards/squeeze_excite>` for details.
"""
from composer.algorithms.squeeze_excite.squeeze_excite import SqueezeExcite as SqueezeExcite
from composer.algorithms.squeeze_excite.squeeze_excite import SqueezeExcite2d as SqueezeExcite2d
from composer.algorithms.squeeze_excite.squeeze_excite import SqueezeExciteConv2d as SqueezeExciteConv2d
from composer.algorithms.squeeze_excite.squeeze_excite import apply_squeeze_excite as apply_squeeze_excite
__all__ = ['SqueezeExcite', 'SqueezeExcite2d', 'SqueezeExciteConv2d', 'apply_squeeze_excite']
| composer-dev | composer/algorithms/squeeze_excite/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Replaces all instances of :class:`torch.nn.LayerNorm` with a low precision :class:`torch.nn.LayerNorm` (either float16 or bfloat16).
By default, torch.autocast always runs torch.nn.LayerNorm in float32, so this surgery forces a lower precision.
"""
from composer.algorithms.low_precision_layernorm.low_precision_layernorm import (LowPrecisionLayerNorm,
apply_low_precision_layernorm)
__all__ = ['LowPrecisionLayerNorm', 'apply_low_precision_layernorm']
| composer-dev | composer/algorithms/low_precision_layernorm/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Low Precision LayerNorm."""
from __future__ import annotations
import logging
import warnings
from typing import Dict, Optional, Sequence, Type, Union
import torch
import torch.nn.functional as F
from packaging import version
from torch.optim import Optimizer
from composer.algorithms.warnings import NoEffectWarning
from composer.core import Algorithm, Event, Precision, State
from composer.loggers import Logger
from composer.utils import module_surgery
log = logging.getLogger(__name__)
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as APEXFusedLayerNorm
APEX_INSTALLED = True
except ImportError as e:
APEX_INSTALLED = False
def apply_low_precision_layernorm(model,
precision: Optional[Precision] = None,
optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None):
if (precision != Precision.AMP_FP16 and precision != Precision.AMP_BF16):
warnings.warn(NoEffectWarning('Low Precision LayerNorm only applies to AMP_FP16 and AMP_BF16 precisions.'))
return model
policy: Dict[Type[torch.nn.Module], module_surgery.ReplacementFunction] = {torch.nn.LayerNorm: _to_LPLayerNorm}
# Prior to v1.13, torch.nn.LayerNorm is slow in bf16 precision.
# We use FusedLayerNorm as a fallback.
if version.parse(torch.__version__) < version.parse('1.13') and precision == Precision.AMP_BF16:
check_if_apex_installed()
policy: Dict[Type[torch.nn.Module], module_surgery.ReplacementFunction] = {
torch.nn.LayerNorm: _to_FusedLayerNorm
}
replaced_instances = module_surgery.replace_module_classes(module=model, optimizers=optimizers, policies=policy)
if len(replaced_instances) == 0:
warnings.warn(NoEffectWarning('No instances of torch.nn.LayerNorm found.'))
log.info(f'Successfully replaced {len(replaced_instances)} instances of LayerNorm with LowPrecisionLayerNorm')
class LowPrecisionLayerNorm(Algorithm):
"""
    Replaces all instances of :class:`torch.nn.LayerNorm` with :class:`.LPLayerNorm`.
LPLayerNorm is a thin wrapper around :class:`torch.nn.LayerNorm` which forces the layer to run
in lower precision (torch.float16 or torch.bfloat16) if autocast is enabled. This algorithm has
no effect in FP32 or DeepSpeed FP16 mode, where autocast is disabled.
This algorithm is intended to be used instead of Fused LayerNorm. They have similar behavior and performance.
Args:
apply_at (Event): Event where algorithm is applied.
"""
def __init__(self, apply_at: Event = Event.INIT):
self.apply_at = apply_at
if self.apply_at not in {Event.INIT, Event.AFTER_LOAD}:
raise ValueError('LowPrecisionLayerNorm only supports application on Event.INIT and Event.AFTER_LOAD.')
def __repr__(self) -> str:
return f'{self.__class__.__name__}(apply_at={self.apply_at})'
@staticmethod
def required_on_load() -> bool:
return True
def match(self, event: Event, state: State) -> bool:
del state # unused
return event == self.apply_at
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
del event, logger # unused
apply_low_precision_layernorm(model=state.model, optimizers=state.optimizers, precision=state._precision)
class LPLayerNorm(torch.nn.LayerNorm):
def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True, device=None, dtype=None):
super().__init__(
normalized_shape=normalized_shape,
eps=eps,
elementwise_affine=elementwise_affine,
device=device,
dtype=dtype,
)
def forward(self, x):
module_device = x.device
downcast_x = _cast_if_autocast_enabled(x)
downcast_weight = _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight
downcast_bias = _cast_if_autocast_enabled(self.bias) if self.bias is not None else self.bias
with torch.autocast(enabled=False, device_type=module_device.type):
return F.layer_norm(downcast_x, self.normalized_shape, downcast_weight, downcast_bias, self.eps)
def _cast_if_autocast_enabled(tensor):
if torch.is_autocast_enabled():
if tensor.device.type == 'cuda':
dtype = torch.get_autocast_gpu_dtype()
elif tensor.device.type == 'cpu':
dtype = torch.get_autocast_cpu_dtype()
else:
raise NotImplementedError()
return tensor.to(dtype=dtype)
return tensor
def check_if_apex_installed():
if not APEX_INSTALLED:
raise ImportError(
'https://github.com/NVIDIA/apex is not installed. The Low Precision LayerNorm algorithm cannot be applied on PyTorch <1.13 without Apex. The MosaicML Docker Images (https://hub.docker.com/r/mosaicml/pytorch) contain a copy of APEX for easy use.'
)
def _to_LPLayerNorm(layer: torch.nn.Module, module_index: int) -> LPLayerNorm:
"""Defines a replacement policy from a `torch.nn.LayerNorm` to a `LPLayerNorm`"""
if not isinstance(layer, torch.nn.LayerNorm):
raise TypeError(f'Expected torch.nn.LayerNorm, got {type(layer)}')
lp_layernorm = LPLayerNorm(layer.normalized_shape, layer.eps, layer.elementwise_affine)
with torch.no_grad():
if layer.weight is None:
lp_layernorm.register_parameter('weight', None)
else:
lp_layernorm.weight.copy_(layer.weight) # type: ignore
if layer.bias is None:
lp_layernorm.register_parameter('bias', None)
else:
lp_layernorm.bias.copy_(layer.bias) # type: ignore
return lp_layernorm
def _to_FusedLayerNorm(layer: torch.nn.Module, module_index: int) -> APEXFusedLayerNorm:
"""Defines a replacement policy from a `torch.nn.LayerNorm` to a `apex.normalization.fused_layer_norm`"""
if not isinstance(layer, torch.nn.LayerNorm):
raise TypeError(f'Expected torch.nn.LayerNorm, got {type(layer)}')
fused_layernorm = APEXFusedLayerNorm(normalized_shape=layer.normalized_shape, eps=layer.eps)
with torch.no_grad():
if layer.weight is None:
fused_layernorm.weight = None
else:
fused_layernorm.weight.copy_(layer.weight)
if layer.bias is None:
fused_layernorm.bias = None
else:
fused_layernorm.bias.copy_(layer.bias)
return fused_layernorm
| composer-dev | composer/algorithms/low_precision_layernorm/low_precision_layernorm.py |
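A small sketch of the behavior ``LPLayerNorm`` implements: under autocast, a stock ``torch.nn.LayerNorm`` stays in float32, while the low precision version runs in the autocast dtype. This assumes a CUDA device with float16 autocast.

# Sketch: compare output dtypes of torch.nn.LayerNorm and LPLayerNorm under autocast.
import torch
from composer.algorithms.low_precision_layernorm.low_precision_layernorm import LPLayerNorm

x = torch.randn(4, 16, 128, device='cuda')
vanilla_ln = torch.nn.LayerNorm(128).cuda()
lp_ln = LPLayerNorm(128).cuda()

with torch.autocast(device_type='cuda', dtype=torch.float16):
    print(vanilla_ln(x).dtype)  # torch.float32 -- autocast keeps LayerNorm in fp32
    print(lp_ln(x).dtype)       # torch.float16 -- inputs and weights are downcast first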
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Replaces all instances of `torch.nn.Dropout` with a `GyroDropout`.
Instead of sampling a new dropout mask every iteration, GyroDropout reuses a fixed pool of pre-selected masks, which usually improves accuracy.
"""
from composer.algorithms.gyro_dropout.gyro_dropout import GyroDropout, GyroDropoutLayer, apply_gyro_dropout
__all__ = ['GyroDropoutLayer', 'GyroDropout', 'apply_gyro_dropout']
| composer-dev | composer/algorithms/gyro_dropout/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
# Written by Gihyun Park, Junyeol Lee, and Jiwon Seo
import logging
import warnings
from typing import Dict, Optional, Type
import numpy as np
import torch
from composer.algorithms.warnings import NoEffectWarning
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.utils import module_surgery
log = logging.getLogger(__name__)
class GyroDropoutLayer(torch.nn.Module):
def __init__(self, iters_per_epoch: int, max_epoch: int, p: float, sigma: int, tau: int):
super(GyroDropoutLayer, self).__init__()
self.iters_per_epoch = iters_per_epoch
self.max_epoch = max_epoch
self.p = p
self.sigma = sigma
self.tau = tau
self.preselect_masks = torch.empty(0, 0)
self.dropout_mask = torch.empty(0, 0)
self.selected_masks = torch.empty(0, 0)
self.training_step = 0
self.iter_num = 0
def forward(self, x):
if self.training:
if self.training_step == 0:
is_cuda_tensor = x.is_cuda
if is_cuda_tensor:
self.preselect_masks = (torch.rand(self.sigma, x.shape[1]) > self.p).float().to('cuda')
else:
self.preselect_masks = (torch.rand(self.sigma, x.shape[1]) > self.p).float()
# Below simplified from: (iters_per_epoch*max_epoch*batch_size/sigma) / (batch_size/self.tau)
self.iter_num = int(self.iters_per_epoch * self.max_epoch / self.sigma) * self.tau
if self.training_step % self.iter_num == 0:
pick_idx = np.random.choice(self.sigma, self.tau)
self.selected_masks = self.preselect_masks[pick_idx]
self.dropout_mask = torch.repeat_interleave(self.selected_masks, x.shape[0] // self.tau, dim=0)
self.training_step += 1
return x * self.dropout_mask * (1 / (1 - self.p))
else:
return x
def from_Dropout(iters_per_epoch: int, epoch: int, p: float, sigma: int, tau: int, layer: torch.nn.Module,
module_index: int):
"""Defines a replacement policy from a `torch.nn.Dropout` to a 'GyroDropout`"""
return GyroDropoutLayer(iters_per_epoch, epoch, p, sigma, tau)
def apply_gyro_dropout(model: torch.nn.Module, iters_per_epoch: int, max_epoch: int, p: float, sigma: int,
tau: int) -> None:
"""Replaces all instances of `torch.nn.Dropout` with a `GyroDropout`.
    Instead of sampling a new dropout mask every iteration, GyroDropout reuses a fixed pool of pre-selected masks, which usually improves accuracy.
"""
# prepare the replacement policy and perform replacement
from functools import partial
policy: Dict[Type[torch.nn.Module], module_surgery.ReplacementFunction] = {
torch.nn.Dropout: partial(from_Dropout, iters_per_epoch, max_epoch, p, sigma, tau)
}
replaced_instances = module_surgery.replace_module_classes(module=model, policies=policy)
if len(replaced_instances) == 0:
warnings.warn(
NoEffectWarning(
'No instances of `torch.nn.Dropout` were found, and therefore, there were no modules to replace.'))
log.info(f'Successfully replaced {len(replaced_instances)} of dropout with a Gyro dropout.')
class GyroDropout(Algorithm):
"""Replaces all instances of `torch.nn.Dropout` with a `GyroDropout`.
    Instead of sampling a new dropout mask every iteration, GyroDropout reuses a fixed pool of pre-selected masks, which usually improves accuracy.
Args:
        p (float, optional): Probability that an element is zeroed out (the dropout rate).
            Default: ``0.5``.
        sigma (int, optional): Total number of dropout masks (subnetworks) pre-selected at the
            start of training. Default: ``256``.
        tau (int, optional): Number of concurrently scheduled subnetworks in an iteration.
            Default: ``16``.
Example:
.. testcode::
from composer.algorithms import GyroDropout
algorithm = GyroDropout(p=0.5, sigma=256, tau=16)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
max_duration="100ep",
algorithms=[algorithm],
optimizers=[optimizer]
)
"""
def __init__(self, p: float = 0.5, sigma: int = 256, tau: int = 16):
self.p = p
self.sigma = sigma
self.tau = tau
warnings.warn(
'GyroDropout is not implemented in a way that allows correct resumption from checkpoint, which may lead to incorrect behavior.'
)
def __repr__(self) -> str:
return f'{self.__class__.__name__}()'
@staticmethod
def required_on_load() -> bool:
return True
def match(self, event: Event, state: State) -> bool:
del state
return event == Event.FIT_START
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
del event, logger
assert state.dataloader_len is not None
assert state.max_duration is not None
apply_gyro_dropout(
model=state.model,
iters_per_epoch=state.dataloader_len.value,
max_epoch=state.max_duration.value,
p=self.p,
sigma=self.sigma,
tau=self.tau,
)
| composer-dev | composer/algorithms/gyro_dropout/gyro_dropout.py |
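A rough sketch of the surgery above on a toy MLP; the layer sizes and GyroDropout hyperparameters are illustrative, not recommendations. Note that the batch size fed to the model should be divisible by ``tau``.

# Sketch: replace the Dropout module in a toy MLP with a GyroDropoutLayer.
import torch
from composer.algorithms.gyro_dropout import apply_gyro_dropout

model = torch.nn.Sequential(
    torch.nn.Linear(64, 256),
    torch.nn.ReLU(),
    torch.nn.Dropout(p=0.5),
    torch.nn.Linear(256, 10),
)

# iters_per_epoch and max_epoch control how long each pool of pre-selected masks is reused.
apply_gyro_dropout(model, iters_per_epoch=100, max_epoch=10, p=0.5, sigma=256, tau=16)
print(model[2].__class__.__name__)  # GyroDropoutLayer

model.train()
out = model(torch.randn(32, 64))  # 32 is divisible by tau=16
print(out.shape)  # torch.Size([32, 10])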
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Replaces the Linear layers in the feed-forward network with `Gated Linear Units <https://arxiv.org/abs/2002.05202>`_.
This leads to improved convergence with a slight drop in throughput. Using no bias terms in the GLU is highly recommended.
See the :doc:`Method Card </method_cards/gated_linear_units>` for more details.
"""
from composer.algorithms.gated_linear_units.gated_linear_units import GatedLinearUnits, apply_gated_linear_units
__all__ = ['GatedLinearUnits', 'apply_gated_linear_units']
| composer-dev | composer/algorithms/gated_linear_units/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import Callable
import torch
class BERTGatedFFOutput(torch.nn.Module):
"""
Defines a single feed-forward block that uses `Gated Linear Units <https://arxiv.org/abs/2002.05202>`_.
Args:
d_embed (int): The input dimension for the feed-forward network.
d_ff (int): The hidden dimension for the feed-forward network.
        dropout_rate (float): The dropout rate to use between the two projection matrices in the feed-forward block.
        act_fn (Callable[[torch.Tensor], torch.Tensor]): The activation function to use in the feed-forward network.
        layernorm_eps (float): The epsilon term to use in the LayerNorm operator. Useful for when the variance is small.
        gated_layer_bias (bool): Whether to use a bias term in the gated projection matrix.
        non_gated_layer_bias (bool): Whether to use a bias term in the non-gated projection matrix.
"""
def __init__(self,
d_embed: int,
d_ff: int,
dropout_rate: float,
act_fn: Callable[[torch.Tensor], torch.Tensor],
layernorm_eps: float,
gated_layer_bias: bool = False,
non_gated_layer_bias: bool = False):
super().__init__()
self.gated_layer = torch.nn.Linear(d_embed, d_ff, bias=gated_layer_bias)
self.non_gated_layer = torch.nn.Linear(d_embed, d_ff, bias=non_gated_layer_bias)
self.wo = torch.nn.Linear(d_ff, d_embed)
self.dropout = torch.nn.Dropout(dropout_rate)
self.act = act_fn
self.layernorm = torch.nn.LayerNorm(d_embed, eps=layernorm_eps)
def forward(self, hidden_states: torch.Tensor, residual_connection: torch.Tensor):
"""
Args:
hidden_states (torch.Tensor): The hidden states from the attention matrix.
residual_connection (torch.Tensor): The residual connection to add before the LayerNorm operator.
"""
# compute the activation
hidden_states = self.act(self.gated_layer(hidden_states)) * self.non_gated_layer(hidden_states)
hidden_states = self.dropout(hidden_states)
# multiply by the second matrix
hidden_states = self.wo(hidden_states)
# add the residual connection and post-LN
hidden_states = self.layernorm(hidden_states + residual_connection)
return hidden_states
| composer-dev | composer/algorithms/gated_linear_units/gated_linear_unit_layers.py |
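A standalone sketch exercising ``BERTGatedFFOutput`` directly; the dimensions loosely follow BERT-base and are only for illustration.

# Sketch: forward pass through a single gated feed-forward block.
import torch
from composer.algorithms.gated_linear_units.gated_linear_unit_layers import BERTGatedFFOutput

block = BERTGatedFFOutput(
    d_embed=768,       # transformer hidden size
    d_ff=3072,         # feed-forward (intermediate) size
    dropout_rate=0.1,
    act_fn=torch.nn.functional.gelu,
    layernorm_eps=1e-12,
)

hidden_states = torch.randn(2, 128, 768)   # (batch, seq_len, d_embed)
residual = hidden_states.clone()           # residual added before the final LayerNorm
out = block(hidden_states, residual)
print(out.shape)  # torch.Size([2, 128, 768])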
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 MosaicML. All Rights Reserved.
from __future__ import annotations
import logging
import warnings
from typing import Callable, Dict, Optional, Sequence, Type, Union
import torch
from composer.models.huggingface import HuggingFaceModel
try:
from transformers import BertPreTrainedModel
from transformers.models.bert.modeling_bert import BertIntermediate, BertOutput
IS_TRANSFORMERS_INSTALLED = True
except ImportError as e:
IS_TRANSFORMERS_INSTALLED = False
from composer.algorithms.gated_linear_units.gated_linear_unit_layers import BERTGatedFFOutput
from composer.algorithms.warnings import NoEffectWarning
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.utils import MissingConditionalImportError, module_surgery
log = logging.getLogger(__name__)
def from_BertOutput(layer: torch.nn.Module,
module_index: int,
act_fn: Callable[[torch.Tensor], torch.Tensor],
gated_layer_bias: bool = False,
non_gated_layer_bias: bool = False) -> BERTGatedFFOutput:
"""Defines a replacement policy from a :class:`transformers.models.bert.modeling_bert.BertOutput` to a :class:`composer.algorithms.gated_linear_units.gated_linear_unit_layers.BERTGatedFFOutput`"""
assert isinstance(
layer, BertOutput
), 'The replacement policy requires an instance of transformers.models.bert.modeling_bert.BertOutput for the necessary fields to be defined.'
return BERTGatedFFOutput(
d_embed=layer.dense.out_features, #type: ignore dense.out_features member of BertOutput
d_ff=layer.dense.in_features, #type: ignore dense.in_features member of BertOutput
dropout_rate=layer.dropout.p, #type: ignore dropout.p member of BertOutput
act_fn=act_fn,
layernorm_eps=layer.LayerNorm.eps, #type: ignore LayerNorm.eps member of BertOutput
gated_layer_bias=gated_layer_bias,
non_gated_layer_bias=non_gated_layer_bias)
def from_BertIntermediate(layer: torch.nn.Module, module_index: int) -> torch.nn.Identity:
"""
Defines a replacement policy from a :class:`transformers.models.bert.modeling_bert.BertIntermediate` to a :class:`torch.nn.Identity`
    The identity effectively acts as a no-op.
"""
return torch.nn.Identity()
def apply_gated_linear_units(model: torch.nn.Module,
optimizers: Union[torch.optim.Optimizer, Sequence[torch.optim.Optimizer]],
act_fn: Optional[Callable[[torch.Tensor], torch.Tensor]] = None,
gated_layer_bias: bool = False,
non_gated_layer_bias: bool = False) -> None:
"""
Replaces the Linear layers in the feed-forward network with `Gated Linear Units <https://arxiv.org/abs/2002.05202>`_.
Args:
model (`torch.nn.Module`): The model to modify in-place.
optimizers (`torch.optim.Optimizer` | Sequence[`torch.optim.Optimizer`], optional):
Existing optimizers bound to ``model.parameters()``. All optimizers that have already been
constructed with ``model.parameters()`` must be specified here so that
they will optimize the correct parameters.
If the optimizer(s) are constructed after calling this function,
then it is safe to omit this parameter. These optimizers will see the correct
model parameters.
        act_fn (Callable[[torch.Tensor], torch.Tensor], optional): Optionally, the activation function to use. If ``None``, the algorithm will
            use the existing activation function in the model.
        gated_layer_bias (bool, optional): Whether to use a bias term in the gated linear layer within the GLU. Default: ``False``.
        non_gated_layer_bias (bool, optional): Whether to use a bias term in the non-gated linear layer within the GLU. Default: ``False``.
"""
if not IS_TRANSFORMERS_INSTALLED:
raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='transformers')
unwrapped_model = model.model if isinstance(model, HuggingFaceModel) else model
# ensure that the model is an instance of a Hugging Face BertPreTrainedModel class, since our replacement policy is only defined for BERTs
if not isinstance(unwrapped_model, BertPreTrainedModel):
raise TypeError(
'Gated Linear Units only has a surgery policy defined for subclasses of transformers.BertPreTrainedModel')
if act_fn is None:
intermediate_modules = {module for module in model.modules() if isinstance(module, BertIntermediate)}
if len(intermediate_modules) == 0:
warnings.warn(
NoEffectWarning('No instances of BertIntermediate were found so Gated Linear Units will be skipped '
'as no modules can be replaced. This is likely because Gated Linear Units has already '
'been applied to this model.'))
# get the activation functions used
act_fns = {module.intermediate_act_fn for module in intermediate_modules}
num_act_fns = len({type(act_fn) for act_fn in act_fns})
if num_act_fns == 0:
raise ValueError('Tried to get the activation function from the model, but none were found. '
'Please specify `act_fn` manually to use Gated Linear Units.')
elif num_act_fns > 1:
raise ValueError('Tried to get the activation function from the model, but multiple different '
'functions are used. This is currently unsupported with Gated Linear Units. '
'Please either use one activation function in BertIntermediate modules or '
'specify `act_fn` to manually override activation functions.')
# since our set is of 1, let's extract the activation function
act_fn = next(iter(act_fns)) # type: ignore will fail below if None
if act_fn is None:
raise ValueError(
'Found activation function was None. If this is an error, please manually specify `act_fn`.')
# now that we know the act fn, bind a few parameters of the replacement function
def from_bound_BertOutput(layer: torch.nn.Module, module_index: int) -> BERTGatedFFOutput:
return from_BertOutput(layer=layer,
module_index=module_index,
act_fn=act_fn,
gated_layer_bias=gated_layer_bias,
non_gated_layer_bias=non_gated_layer_bias)
# prepare the replacement policy and perform replacement
policy: Dict[Type[torch.nn.Module], module_surgery.ReplacementFunction] = {
BertIntermediate: from_BertIntermediate,
BertOutput: from_bound_BertOutput
}
replaced_instances = module_surgery.replace_module_classes(module=model, optimizers=optimizers, policies=policy)
if len(replaced_instances) == 0:
warnings.warn(
NoEffectWarning('No instances of BertIntermediate and BertOutput were found so no modules were replaced.'))
log.info(
f'Successfully replaced {len(replaced_instances)} of BertIntermediate and BertOutput with a GatedLinearUnit.')
class GatedLinearUnits(Algorithm):
"""Replaces all instances of Linear layers in the feed-forward subnetwork with a `Gated Linear Unit <https://arxiv.org/abs/2002.05202>`_.
    Gated Linear Units provide a more expressive form for the same number of parameters, at the cost of a slight degradation in throughput.
Runs on :attr:`.Event.INIT`, so it can swap the Linear layers in the FFN for GLUs before the model is DDP wrapped.
Args:
act_fn (Callable[[torch.Tensor], torch.Tensor], optional): Optionally, the activation function to use. If ``None``, the algorithm will
use the existing activation function in the model.
        gated_layer_bias (bool, optional): Whether to use a bias term in the gated linear layer within the GLU. Default: ``False``.
        non_gated_layer_bias (bool, optional): Whether to use a bias term in the non-gated linear layer within the GLU. Default: ``False``.
Example:
.. testsetup::
from tests.common.models import configure_tiny_bert_hf_model
from tests.common.datasets import dummy_bert_lm_dataloader
model, train_dataloader = configure_tiny_bert_hf_model(), dummy_bert_lm_dataloader()
.. testcode::
from composer.algorithms import GatedLinearUnits
algorithm = GatedLinearUnits()
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
max_duration="1ep",
algorithms=[algorithm],
optimizers=[optimizer]
)
"""
def __init__(self,
act_fn: Optional[Callable[[torch.Tensor], torch.Tensor]] = None,
gated_layer_bias: bool = False,
non_gated_layer_bias: bool = False):
if not IS_TRANSFORMERS_INSTALLED:
raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='transformers')
self.act_fn = act_fn
self.gated_layer_bias = gated_layer_bias
self.non_gated_layer_bias = non_gated_layer_bias
def __repr__(self) -> str:
act_fn = 'act_fn' if self.act_fn else None
return f'{self.__class__.__name__}(act_fn={act_fn},gated_layer_bias={self.gated_layer_bias},non_gated_layer_bias={self.non_gated_layer_bias})'
@staticmethod
def required_on_load() -> bool:
return True
def match(self, event: Event, state: State) -> bool:
del state # unused
return event == Event.INIT
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
del event, logger # unused
apply_gated_linear_units(model=state.model,
optimizers=state.optimizers,
act_fn=self.act_fn,
gated_layer_bias=self.gated_layer_bias,
non_gated_layer_bias=self.non_gated_layer_bias)
| composer-dev | composer/algorithms/gated_linear_units/gated_linear_units.py |
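A hedged sketch of the surgery above on a small, randomly initialized Hugging Face BERT (requires ``transformers``); the tiny config exists only to keep the example cheap.

# Sketch: swap the feed-forward Linear layers of a small BERT for Gated Linear Units.
from transformers import BertConfig, BertForMaskedLM

import composer.functional as cf

config = BertConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=2, intermediate_size=128)
model = BertForMaskedLM(config)

# No optimizers exist yet, so it is safe to pass optimizers=None here.
cf.apply_gated_linear_units(model, optimizers=None)

# Each BertOutput is now a BERTGatedFFOutput and each BertIntermediate an Identity.
print(model.bert.encoder.layer[0].output.__class__.__name__)  # BERTGatedFFOutput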
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core code for Stochastic Weight Averaging."""
from __future__ import annotations
import logging
import warnings
from typing import Any, Dict, List, Optional
import torch
from torch.optim.swa_utils import SWALR, AveragedModel
from composer.core import Algorithm, Event, PyTorchScheduler, State, Time, TimeUnit
from composer.loggers import Logger
log = logging.getLogger(__name__)
__all__ = ['SWA']
def _assert_valid_duration(time: Time):
if time.unit == TimeUnit.DURATION and (time < 0 or time > 1):
raise ValueError(f'time in duration units must be [0, 1], got {time}')
class SWA(Algorithm):
"""Applies Stochastic Weight Averaging (`Izmailov et al, 2018 <https://arxiv.org/abs/1803.05407>`_).
Stochastic Weight Averaging (SWA) averages model weights sampled at
different times near the end of training. This leads to better
generalization than just using the final trained weights.
Because this algorithm needs to maintain both the current value of the
weights and the average of all of the sampled weights, it doubles the
model's memory consumption. Note that this does not mean that the total
memory required doubles, however, since stored activations and the
optimizer state are not doubled.
.. note::
The AveragedModel is currently stored on the CPU device, which may
cause slow training if the model weights are large.
    Uses PyTorch's `torch.optim.swa_utils
<https://pytorch.org/docs/stable/optim.html#stochastic-weight-averaging>`_
under the hood.
See the :doc:`Method Card </method_cards/swa>` for more details.
Example:
.. testcode::
from composer.algorithms import SWA
from composer.trainer import Trainer
swa_algorithm = SWA(
swa_start="6ep",
swa_end="8ep"
)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="10ep",
algorithms=[swa_algorithm],
optimizers=[optimizer]
)
Args:
swa_start (str, optional): The time string denoting the amount of training
completed before stochastic weight averaging begins. Currently only units of
duration ('dur') and epoch ('ep') are supported. Default: ``'0.7dur'``.
swa_end (str, optional): The time string denoting the amount of training
completed before the baseline (non-averaged) model is replaced with the
stochastic weight averaged model. It's important to have at least one epoch
of training after the baseline model is replaced by the SWA model so that the
SWA model can have its buffers (most importantly its batch norm statistics)
updated. If ``swa_end`` occurs during the final epoch of training (e.g.
``swa_end = 0.9dur`` and ``max_duration = "5ep"``, or ``swa_end = 1.0dur``),
the SWA model will not have its buffers updated, which can negatively impact
accuracy, so ensure ``swa_end`` < :math:`\\frac{N_{epochs}-1}{N_{epochs}}`.
Currently only units of duration ('dur') and epoch ('ep') are supported.
Default: ``'0.97dur'``.
update_interval (str, optional): Time string denoting how often the averaged
model is updated. For example, ``'1ep'`` means the averaged model will be
updated once per epoch and ``'5ba'`` means the averaged model will be updated
every 5 batches. Note that for single-epoch training runs (e.g. many NLP
training runs), ``update_interval`` must be specified in units of ``'ba'``,
otherwise SWA won't happen. Also note that very small update intervals (e.g.
``"1ba"``) can substantially slow down training. Default: ``'1ep'``.
schedule_swa_lr (bool, optional): Flag to determine whether to apply an
SWA-specific LR schedule during the period in which SWA is active. Default:
``False``.
anneal_strategy (str, optional): SWA learning rate annealing schedule strategy.
``"linear"`` for linear annealing, ``"cos"`` for cosine annealing. Default:
``"linear"``.
anneal_steps (int, optional): Number of SWA model updates over which to
anneal SWA learning rate. Note that updates are determined by the
``update_interval`` argument. For example, if ``anneal_steps = 10`` and
``update_interval = '1ep'``, then the SWA LR will be annealed once per epoch
for 10 epochs; if ``anneal_steps = 20`` and ``update_interval = '8ba'``, then
the SWA LR will be annealed once every 8 batches over the course of 160
batches (20 steps * 8 batches/step). Default: ``10``.
swa_lr (float, optional): The final learning rate to anneal towards with the SWA
LR scheduler. Set to ``None`` for no annealing. Default: ``None``.
"""
def __init__(self,
swa_start: str = '0.7dur',
swa_end: str = '0.97dur',
update_interval: str = '1ep',
schedule_swa_lr: bool = False,
anneal_strategy: str = 'linear',
anneal_steps: int = 10,
swa_lr: Optional[float] = None):
warnings.warn(
'SWA has known issues when resuming from a checkpoint on multiple GPUs, which will cause an error when resuming without `load_weights_only=True`.'
)
self.schedule_swa_lr = schedule_swa_lr
self.anneal_strategy = anneal_strategy
self.anneal_steps = anneal_steps
self.swa_lr = swa_lr
self.swa_model: Optional[torch.nn.Module] = None
self.swa_completed = False
self.swa_started = False
# Check timestrings are parsable and convert into time objects
self.swa_start = Time.from_timestring(swa_start)
self.swa_end = Time.from_timestring(swa_end)
self.update_interval = Time.from_timestring(update_interval)
self._validate_time()
if anneal_steps <= 0:
raise ValueError('anneal_steps must be greater than 0')
# Check annealing_strategy string
if self.anneal_strategy.lower() in ['linear', 'lin']:
self.anneal_strategy = 'linear'
elif self.anneal_strategy.lower() in ['cos', 'cosine']:
self.anneal_strategy = 'cos'
else:
raise ValueError("anneal_strategy must be one of {'linear', 'cos'}.")
self.swa_scheduler = None
self.swa_model = None
# Keeps track of # steps so that we can know when to update averaged model
self.step_counter = 0
# Check units for update_interval and set match event accordingly
if self.update_interval.unit == TimeUnit.BATCH:
self.match_event = Event.BATCH_END
elif self.update_interval.unit == TimeUnit.EPOCH:
self.match_event = Event.EPOCH_END
def _validate_time(self):
# validate time units
if self.swa_start.unit != self.swa_end.unit:
raise ValueError(f'swa_start and swa_end must have same units, got {self.swa_start} and {self.swa_end}')
if self.swa_start.unit not in [TimeUnit.DURATION, TimeUnit.EPOCH]:
raise ValueError(f'swa_start must be DURATION or EPOCH, got {self.swa_start.unit}')
if self.update_interval.unit not in [TimeUnit.BATCH, TimeUnit.EPOCH]:
            raise ValueError(f'update_interval must be BATCH or EPOCH, got {self.update_interval.unit}')
# validate time
if self.swa_start >= self.swa_end:
raise ValueError('swa_end must be > swa_start.')
if self.swa_end.unit == TimeUnit.DURATION and self.swa_end == 1:
log.warning("'swa_end' = '1dur'. Batch norm statistics of averaged model "
'will not be updated. This will negatively impact accuracy. '
'See the documentation for the `swa_end` parameter for details.')
_assert_valid_duration(self.swa_start)
_assert_valid_duration(self.swa_end)
def _get_time(self, state: State):
"""helper function to retrieve either the epoch or the duration depending on the units"""
unit = self.swa_start.unit
if unit == TimeUnit.EPOCH:
return state.timestamp.epoch
elif unit == TimeUnit.DURATION:
time_elapsed = state.get_elapsed_duration()
assert time_elapsed is not None, 'Time should have been set on BATCH_END or EPOCH_END.'
return time_elapsed
else:
raise ValueError('units must be in epoch or duration.')
def _get_last_lr(self, schedulers: List[PyTorchScheduler]):
""" retrieves the last lr from current schedulers. """
if len(schedulers) == 0:
return 1.0
if len(schedulers) != 1:
raise RuntimeError(f'SWA supports only one scheduler, got {len(schedulers)}')
scheduler = schedulers[0]
last_lr = scheduler.get_last_lr()
if len(last_lr) != 1:
raise RuntimeError(f'SWA supports only one LR; instead found {len(last_lr)}')
return last_lr[0]
def match(self, event: Event, state: State) -> bool:
if event == Event.INIT:
return True
# only match on BATCH_END or EPOCH_END, depending on the setting
if event != self.match_event or self.swa_completed:
return False
return self._get_time(state) >= self.swa_start
def _initialize_swa(self, state: State) -> None:
if self.schedule_swa_lr:
self.swa_lr = self._get_last_lr(state.schedulers)
if len(state.optimizers) != 1:
raise RuntimeError('SWA supports only one optimizer')
self.swa_scheduler = SWALR(
state.optimizers[0],
swa_lr=self.swa_lr,
anneal_epochs=self.anneal_steps,
anneal_strategy=self.anneal_strategy,
)
self.swa_model = AveragedModel(state.model, device=torch.device('cpu'))
def apply(self, event: Event, state: State, logger: Logger) -> None:
if event == event.INIT:
# on trainer init, we create the schedulers and models
# so that the checkpoints can be loaded
self._initialize_swa(state)
return
if not self.swa_started:
# re-initialize swa once time > swa_start
self._initialize_swa(state)
self.swa_started = True
if self.step_counter % self.update_interval.value == 0:
assert self.swa_model is not None
self.swa_model.update_parameters(state.model) # type: ignore
if self.schedule_swa_lr:
assert self.swa_scheduler is not None
self.swa_scheduler.step()
self.step_counter += 1
# Determine whether it's time to end SWA
if self._get_time(state) >= self.swa_end:
self.swa_completed = True
if state.get_elapsed_duration() == 1:
log.warning(('The baseline model was replaced with the SWA model after the end of '
'training. This means that SWA model will not have its batch norm '
'statistics updated. This will negatively impact accuracy. See the '
'documentation for the `swa_end` parameter for details.'))
state.model.load_state_dict(self.swa_model.module.state_dict()) # type: ignore
log.info('Set model to the averaged model')
def state_dict(self) -> Dict[str, Any]:
state_dict = super().state_dict()
# we pop the anneal_func from the SWALR state
# since it is set in the SWALR __init__
swa_scheduler_state = None
if self.swa_scheduler:
swa_scheduler_state = self.swa_scheduler.state_dict()
swa_scheduler_state.pop('anneal_func')
state_dict = {
'swa_model': self.swa_model.state_dict() if self.swa_model else None,
'swa_completed': self.swa_completed,
'swa_started': self.swa_started,
'swa_scheduler': swa_scheduler_state,
'step_counter': self.step_counter,
**state_dict,
}
return state_dict
def load_state_dict(self, state: Dict[str, Any]) -> None:
self.swa_completed = state['swa_completed']
self.step_counter = state['step_counter']
self.swa_started = state['swa_started']
if self.swa_scheduler and state['swa_scheduler']:
self.swa_scheduler.load_state_dict(state['swa_scheduler'])
if self.swa_model and state['swa_model']:
self.swa_model.load_state_dict(state['swa_model'])
| composer-dev | composer/algorithms/swa/swa.py |
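The algorithm above is essentially an orchestration layer over ``torch.optim.swa_utils``; the sketch below reproduces the core calls it makes, on a toy model with illustrative hyperparameters.

# Sketch: the swa_utils machinery that the SWA algorithm drives during training.
import torch
from torch.optim.swa_utils import SWALR, AveragedModel

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# Mirrors SWA._initialize_swa: an averaged copy on CPU plus an SWA LR schedule.
swa_model = AveragedModel(model, device=torch.device('cpu'))
swa_scheduler = SWALR(optimizer, swa_lr=0.05, anneal_epochs=10, anneal_strategy='linear')

for step in range(20):
    loss = model(torch.randn(8, 10)).sum()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    # Mirrors SWA.apply at each update_interval boundary.
    swa_model.update_parameters(model)
    swa_scheduler.step()

# Mirrors the end of SWA: copy the averaged weights back into the live model.
model.load_state_dict(swa_model.module.state_dict())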
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Stochastic Weight Averaging (SWA; `Izmailov et al, 2018 <https://arxiv.org/abs/1803.05407>`_) averages model weights
sampled at different times near the end of training.
This leads to better generalization than just using the final trained weights. See the :doc:`Method Card
</method_cards/swa>` for more details.
"""
from composer.algorithms.swa.swa import SWA
__all__ = ['SWA']
| composer-dev | composer/algorithms/swa/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core AugMix classes and functions."""
import functools
import textwrap
import weakref
from typing import List, TypeVar
import numpy as np
import torch
import torch.utils.data
from PIL import Image
from PIL.Image import Image as PillowImage
from torchvision.datasets import VisionDataset
from composer.algorithms.utils import augmentation_sets
from composer.algorithms.utils.augmentation_common import map_pillow_function
from composer.core import Algorithm, Event, State
from composer.datasets.utils import add_vision_dataset_transform
from composer.loggers import Logger
__all__ = ['AugMix', 'AugmentAndMixTransform', 'augmix_image']
ImgT = TypeVar('ImgT', torch.Tensor, PillowImage)
def augmix_image(img: ImgT,
severity: int = 3,
depth: int = -1,
width: int = 3,
alpha: float = 1.0,
augmentation_set: List = augmentation_sets['all']) -> ImgT:
r"""Applies the AugMix (`Hendrycks et al, 2020 <http://arxiv.org/abs/1912.02781>`_) data augmentation.
This function works on a single image or batch of images. See :class:`.AugMix` and
the :doc:`Method Card </method_cards/augmix>` for details. This function only acts on a
single image (or batch) per call and is unlikely to be used in a training loop.
Use :class:`.AugmentAndMixTransform` to use AugMix as
part of a :class:`torchvision.datasets.VisionDataset`\'s ``transform``.
Example:
.. testcode::
import composer.functional as cf
from composer.algorithms.utils import augmentation_sets
augmixed_image = cf.augmix_image(
img=image,
severity=3,
width=3,
depth=-1,
alpha=1.0,
augmentation_set=augmentation_sets["all"]
)
Args:
img (PIL.Image.Image | torch.Tensor): Image or batch of images to be AugMix'd.
severity (int, optional): See :class:`.AugMix`.
depth (int, optional): See :class:`.AugMix`.
width (int, optional): See :class:`.AugMix`.
alpha (float, optional): See :class:`.AugMix`.
augmentation_set (str, optional): See
:class:`.AugMix`.
Returns:
PIL.Image: AugMix'd image.
"""
def _augmix_pil_image(img_pil: PillowImage, severity: int, depth: int, width: int, alpha: float,
augmentation_set: List) -> PillowImage:
chain_weights = np.random.dirichlet([alpha] * width).astype(np.float32)
mixing_weight = np.float32(np.random.beta(alpha, alpha))
augmented_combination = np.zeros_like(img_pil, dtype=np.float32)
# Iterate over image chains
for chain_i in range(width):
augmented_image = img_pil.copy()
# Determine depth of current augmentation chain
if depth > 0:
d = depth
else:
d = np.random.randint(1, 4)
# Iterate through chain depth
for _ in range(d):
aug = np.random.choice(augmentation_set)
augmented_image = aug(augmented_image, severity)
augmented_combination += chain_weights[chain_i] * np.asarray(augmented_image)
mixed = (1 - mixing_weight) * np.asarray(img_pil) + mixing_weight * augmented_combination
mixed = Image.fromarray(np.uint8(mixed))
return mixed
f_pil = functools.partial(_augmix_pil_image,
severity=severity,
depth=depth,
width=width,
alpha=alpha,
augmentation_set=augmentation_set)
return map_pillow_function(f_pil, img)
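# Illustrative sketch (plain numpy, not composer API) of the mixing arithmetic
# used above, with noisy copies of the image as hypothetical stand-ins for the
# augmented chains: `width` chains are combined with Dirichlet weights, then
# mixed with the original image using a Beta-sampled weight, mirroring
# _augmix_pil_image above.
import numpy as np

width, alpha = 3, 1.0
img = np.random.rand(32, 32, 3).astype(np.float32)  # hypothetical input image
chains = [np.clip(img + 0.1 * np.random.randn(*img.shape).astype(np.float32), 0, 1)
          for _ in range(width)]  # stand-ins for the augmented chains
chain_weights = np.random.dirichlet([alpha] * width).astype(np.float32)
mixing_weight = np.float32(np.random.beta(alpha, alpha))
combined = sum(w * c for w, c in zip(chain_weights, chains))
mixed = (1 - mixing_weight) * img + mixing_weight * combined  # convex combination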
class AugmentAndMixTransform(torch.nn.Module):
"""Wrapper module for :func:`.augmix_image` that can
be passed to :class:`torchvision.transforms.Compose`. See
:class:`.AugMix` and the :doc:`Method Card
</method_cards/augmix>` for details.
Example:
.. testcode::
import torchvision.transforms as transforms
from composer.algorithms.augmix import AugmentAndMixTransform
augmix_transform = AugmentAndMixTransform(
severity=3,
width=3,
depth=-1,
alpha=1.0,
augmentation_set="all"
)
composed = transforms.Compose([
augmix_transform,
transforms.RandomHorizontalFlip()
])
transformed_image = composed(image)
Args:
severity (int, optional): See :class:`.AugMix`.
depth (int, optional): See :class:`.AugMix`.
width (int, optional): See :class:`.AugMix`.
alpha (float, optional): See :class:`.AugMix`.
augmentation_set (str, optional): See
:class:`.AugMix`.
"""
def __init__(self,
severity: int = 3,
depth: int = -1,
width: int = 3,
alpha: float = 1.0,
augmentation_set: str = 'all'):
super().__init__()
if severity < 0 or severity > 10:
raise ValueError('AugMix severity value must satisfy 0 ≤ severity ≤ 10')
if width < 1:
raise ValueError('AugMix width must be ≥ 1')
if augmentation_set not in augmentation_sets.keys():
raise KeyError(f'AugMix augmentation_set is not one of {augmentation_sets.keys()}')
self.severity = severity
self.depth = depth
self.width = width
self.alpha = alpha
self.augmentation_set = augmentation_sets[augmentation_set]
def forward(self, img: PillowImage) -> PillowImage:
return augmix_image(img=img,
severity=self.severity,
depth=self.depth,
width=self.width,
alpha=self.alpha,
augmentation_set=self.augmentation_set)
class AugMix(Algorithm):
r"""The AugMix data augmentation technique.
AugMix (`Hendrycks et al, 2020 <http://arxiv.org/abs/1912.02781>`_) creates ``width`` sequences of ``depth``
image augmentations, applies each sequence with random intensity, and returns a convex combination of the ``width``
augmented images and the original image. The coefficients for mixing the augmented images are drawn from a uniform
``Dirichlet(alpha, alpha, ...)`` distribution. The coefficient for mixing the combined augmented image and the
original image is drawn from a ``Beta(alpha, alpha)`` distribution, using the same ``alpha``.
This algorithm runs on :attr:`.Event.FIT_START` to insert a dataset transformation.
It is a no-op if this algorithm has already applied itself to the :attr:`State.train_dataloader.dataset`.
See the :doc:`Method Card </method_cards/augmix>` for more details.
Example:
.. testcode::
from composer.algorithms import AugMix
from composer.trainer import Trainer
augmix_algorithm = AugMix(
severity=3,
width=3,
depth=-1,
alpha=1.0,
augmentation_set="all"
)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[augmix_algorithm],
optimizers=[optimizer]
)
Args:
severity (int, optional): Severity of augmentations; ranges from 0
(no augmentation) to 10 (most severe). Default: ``3``.
depth (int, optional): Number of augmentations per sequence. -1 enables stochastic
depth sampled uniformly from ``[1, 3]``. Default: ``-1``.
width (int, optional): Number of augmentation sequences. Default: ``3``.
alpha (float, optional): Pseudocount for Beta and Dirichlet distributions. Must be
> 0. Higher values yield mixing coefficients closer to uniform weighting. As
the value approaches 0, the mixing coefficients approach using only one
version of each image. Default: ``1.0``.
augmentation_set (str, optional): Must be one of the following options as also described
in :attr:`~composer.algorithms.utils.augmentation_primitives.augmentation_sets`:
* ``"all"``
Uses all augmentations from the paper.
* ``"safe"``
Like ``"all"``, but excludes transforms that are part of
the ImageNet-C/CIFAR10-C test sets.
* ``"original"``
Like ``"all"``, but some of the implementations
are identical to the original Github repository, which contains
implementation specificities for the augmentations
``"color"``, ``"contrast"``, ``"sharpness"``, and ``"brightness"``. The
original implementations have an intensity sampling scheme that samples a
value bounded by 0.118 at a minimum, and a maximum value of
:math:`intensity \times 0.18 + .1`, which ranges from 0.28 (intensity = 1)
to 1.9 (intensity 10). These augmentations have different effects
depending on whether they are < 0 or > 0 (or < 1 or > 1).
``"all"`` uses implementations of ``"color"``, ``"contrast"``,
``"sharpness"``, and ``"brightness"`` that account for diverging effects around 0
(or 1).
Default: ``"all"``.
"""
# TODO document each value of augmentation_set in more detail; i.e.,
# which augmentations are actually used
def __init__(self,
severity: int = 3,
depth: int = -1,
width: int = 3,
alpha: float = 1.0,
augmentation_set: str = 'all'):
if severity < 0 or severity > 10:
raise ValueError('AugMix severity value must satisfy 0 ≤ severity ≤ 10')
if width < 1:
raise ValueError('AugMix width must be ≥ 1')
if augmentation_set not in augmentation_sets.keys():
raise KeyError(f'AugMix augmentation_set is not one of {augmentation_sets.keys()}')
self.severity = severity
self.depth = depth
self.width = width
self.alpha = alpha
self.augmentation_set = augmentation_set
self._transformed_datasets = weakref.WeakSet()
def match(self, event: Event, state: State) -> bool:
if event != Event.FIT_START:
return False
assert state.dataloader is not None, 'dataloader should be defined on fit start'
if not isinstance(state.dataloader, torch.utils.data.DataLoader):
raise TypeError(f'{type(self).__name__} requires a PyTorch dataloader.')
return state.dataloader.dataset not in self._transformed_datasets
def apply(self, event: Event, state: State, logger: Logger) -> None:
am = AugmentAndMixTransform(severity=self.severity,
depth=self.depth,
width=self.width,
alpha=self.alpha,
augmentation_set=self.augmentation_set)
assert isinstance(state.dataloader, torch.utils.data.DataLoader), 'dataloader type checked on match()'
dataset = state.dataloader.dataset
if not isinstance(dataset, VisionDataset):
raise TypeError(
textwrap.dedent(f"""\
To use {type(self).__name__}, the dataset must be a
{VisionDataset.__qualname__}, not {type(dataset).__name__}"""))
add_vision_dataset_transform(dataset, am, is_tensor_transform=False)
self._transformed_datasets.add(dataset)
| composer-dev | composer/algorithms/augmix/augmix.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""AugMix (`Hendrycks et al, 2020 <http://arxiv.org/abs/1912.02781>`_) creates multiple independent realizations of
sequences of image augmentations, applies each sequence with random intensity, and returns a convex combination of the
augmented images and the original image.
See the :doc:`Method Card </method_cards/augmix>` for more details.
"""
from composer.algorithms.augmix.augmix import AugmentAndMixTransform, AugMix, augmix_image
__all__ = ['AugmentAndMixTransform', 'AugMix', 'augmix_image']
| composer-dev | composer/algorithms/augmix/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
# type: ignore
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.common_types import _size_2_t
from torch.nn.modules.utils import _pair
def _default_2d_filter():
default_filter = torch.tensor([[[
[1, 2, 1],
[2, 4, 2],
[1, 2, 1],
]]]) * 1 / 16.0
return default_filter
def _padding_for_filt_2d_same(filt: torch.Tensor):
_, _, h, w = filt.shape
if h % 2 == 0:
raise IndexError(f'Filter must have odd height; got {h}')
if w % 2 == 0:
raise IndexError(f'Filter must have odd width; got {w}')
return int(torch.div(h, 2)), int(torch.div(w, 2))
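# Quick sanity-check sketch (illustrative only): for the default 3x3 filter,
# "same" padding works out to (1, 1), so a stride-1 cross-correlation with
# that padding preserves the spatial size of the input.
_filt = _default_2d_filter()
assert _padding_for_filt_2d_same(_filt) == (1, 1)
assert F.conv2d(torch.randn(1, 1, 8, 8), _filt, padding=1).shape == (1, 1, 8, 8)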
def blur_2d(input: torch.Tensor,
channels: int = -1,
stride: _size_2_t = 1,
filter: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Applies a spatial low-pass filter.
Args:
input (torch.Tensor): A 4d tensor of shape NCHW
channels (int, optional): The number of channels in the input tensor.
If non-positive, then dynamic control flow is used to determine the number of channels.
If positive, then static control flow is used and the filter dimensions should be appropriate for
the input size (note: this condition is always satisfied for the default filter and non-zero input size).
stride (int | tuple, optional): Stride(s) along H and W axes. If a single value is passed, this
value is used for both dimensions.
filter (torch.Tensor, optional): A 2d or 4d tensor to be cross-correlated with the input tensor
at each spatial position, within each channel. If 4d, the structure
is required to be ``(C, 1, kH, kW)`` where ``C`` is the number of
channels in the input tensor and ``kH`` and ``kW`` are the spatial
sizes of the filter.
By default, the filter used is:
.. code-block:: python
[1 2 1]
[2 4 2] * 1/16
[1 2 1]
Returns:
The blurred input
"""
if filter is None:
filter = _default_2d_filter()
# The dynamic control flow branch below does not affect the padding as only h and w are used.
padding = _padding_for_filt_2d_same(filter)
if channels < 1: # Use Dynamic Control Flow
_, channels, h, w = input.shape
if (filter.shape[0] == 1) and (channels > 1):
# assume filt is already a rank 4 tensor
filter = filter.repeat((channels, 1, 1, 1))
_, _, filter_h, filter_w = filter.shape
if h + 2 * padding[0] < filter_h:
return input
if w + 2 * padding[1] < filter_w:
return input
# Call F.conv2d without using keyword arguments as that triggers a bug in fx tracing quantization.
return F.conv2d(input, filter, None, _pair(stride), _pair(padding), _pair(1), channels)
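# Minimal usage sketch (illustrative only): blur_2d applies the low-pass
# filter depthwise (groups == channels), so the channel count is preserved;
# a stride of 2 additionally halves the spatial dimensions.
_x = torch.randn(2, 3, 16, 16)
assert blur_2d(_x).shape == (2, 3, 16, 16)          # stride 1: size preserved
assert blur_2d(_x, stride=2).shape == (2, 3, 8, 8)  # stride 2: downsampled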
def blurmax_pool2d(input: torch.Tensor,
kernel_size: Optional[_size_2_t] = None,
stride: _size_2_t = 2,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
ceil_mode: bool = False,
filter: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Max-pooling with anti-aliasing.
This is a nearly drop-in replacement for PyTorch's :func:`torch.nn.functional.max_pool2d`.
The only API difference is that the parameter ``return_indices`` is not
available, because it is ill-defined when using anti-aliasing.
See the associated `paper <http://proceedings.mlr.press/v97/zhang19a.html>`_
for more details, experimental results, etc.
This function can be understood as decoupling the max from the pooling, and
inserting a low-pass filtering step between the two. Concretely, this
function computes the max within spatial neighborhoods of shape
``kernel_size``, then applies an anti-aliasing filter to smooth the maxes,
and only then pools according to ``stride``.
See also: :func:`.blur_2d`.
Args:
input (torch.Tensor): A 4d tensor of shape NCHW
kernel_size (int | tuple, optional): Size(s) of the spatial neighborhoods over which to pool.
This is mostly commonly 2x2. If only a scalar ``s`` is provided, the
neighborhood is of size ``(s, s)``. Default: ``(2, 2)``.
stride (int | tuple, optional): Stride(s) along H and W axes. If a single value is passed, this
value is used for both dimensions. Default: 2.
padding (int | tuple, optional): implicit zero-padding to use. For the default 3x3 low-pass
filter, ``padding=1`` returns output of the same size
as the input. Default: 0.
dilation (int | tuple, optional): Amount by which to "stretch" the pooling region for a given
total size. See :class:`torch.nn.MaxPool2d`
for our favorite explanation of how this works. Default: 1.
ceil_mode (bool): When True, will use ceil instead of floor to compute the output shape. Default: ``False``.
filter (torch.Tensor, optional): A 2d or 4d tensor to be cross-correlated with the input tensor
at each spatial position, within each channel. If 4d, the structure
is required to be ``(C, 1, kH, kW)`` where ``C`` is the number of
channels in the input tensor and ``kH`` and ``kW`` are the spatial
sizes of the filter.
By default, the filter used is:
.. code-block:: python
[1 2 1]
[2 4 2] * 1/16
[1 2 1]
Returns:
The blurred and max-pooled input
"""
if kernel_size is None:
kernel_size = (2, 2)
maxs = F.max_pool2d(input,
kernel_size=kernel_size,
stride=1,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode)
return blur_2d(maxs, channels=-1, stride=stride, filter=filter)
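# Illustrative sketch: blurmax_pool2d decouples the max from the pooling,
# taking a dense (stride-1) max first and then blurring and striding. With the
# defaults (2x2 kernel, stride 2, padding 0) a 16x16 input comes out as 8x8.
_x = torch.randn(1, 3, 16, 16)
assert blurmax_pool2d(_x).shape == (1, 3, 8, 8)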
class BlurMaxPool2d(nn.Module):
"""This module is a (nearly) drop-in replacement for :class:`torch.nn.MaxPool2d`, but with an anti-aliasing filter.
The only API difference is that the parameter ``return_indices`` is not
available, because it is ill-defined when using anti-aliasing.
See the associated `paper <http://proceedings.mlr.press/v97/zhang19a.html>`_
for more details, experimental results, etc.
See :func:`.blurmax_pool2d` for details.
"""
# based on https://pytorch.org/docs/stable/_modules/torch/nn/modules/pooling.html#MaxPool2d # noqa
def __init__(self,
kernel_size: _size_2_t,
stride: Optional[_size_2_t] = None,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
ceil_mode: bool = False):
super(BlurMaxPool2d, self).__init__()
self.kernel_size = kernel_size
self.stride = stride if (stride is not None) else kernel_size
self.padding = padding
self.dilation = dilation
self.ceil_mode = ceil_mode
# we don't need this as part of state_dict, but making it a buffer
# ensures that module.cuda(), module.to(), etc work out of the box
self.register_buffer('filt2d', _default_2d_filter())
def extra_repr(self) -> str:
return 'kernel_size={kernel_size}, stride={stride}, padding={padding}' \
', dilation={dilation}, ceil_mode={ceil_mode}'.format(
**self.__dict__)
def forward(self, input: torch.Tensor):
return blurmax_pool2d(input,
kernel_size=self.kernel_size,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
ceil_mode=self.ceil_mode,
filter=self.filt2d)
@staticmethod
def from_maxpool2d(module: torch.nn.MaxPool2d, module_index: int) -> 'BlurMaxPool2d':
return BlurMaxPool2d(kernel_size=module.kernel_size,
stride=module.stride,
padding=module.padding,
dilation=module.dilation,
ceil_mode=module.ceil_mode)
class BlurConv2d(nn.Module):
"""This module is a drop-in replacement for :class:`torch.nn.Conv2d`, but with an anti-aliasing filter.
The one new parameter is ``blur_first``. When set to ``True``, the
anti-aliasing filter is applied before the underlying convolution and
vice-versa when set to ``False``. This mostly makes a difference when the
stride is greater than one. In the former case, the only overhead is the
cost of doing the anti-aliasing operation. In the latter case, the ``Conv2d``
is applied with a stride of one to the input, and then the
anti-aliasing is applied with the provided stride to the result. Setting
the stride of the convolution to ``1`` can greatly increase the computational
cost. E.g., replacing a stride of ``(2, 2)`` with a stride of ``1`` increases
the number of operations by a factor of ``(2/1) * (2/1) = 4``. However,
this approach most closely matches the behavior specified in the paper.
This module should only be used to replace strided convolutions.
See the associated `paper <http://proceedings.mlr.press/v97/zhang19a.html>`_
for more details, experimental results, etc.
See also: :func:`.blur_2d`.
"""
# based partially on https://pytorch.org/docs/stable/_modules/torch/nn/modules/conv.html#Conv2d
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: Optional[_size_2_t] = None,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
blur_first: bool = True,
):
super(BlurConv2d, self).__init__()
self.blur_first = blur_first
if self.blur_first:
assert stride is not None
conv_stride = stride
self.blur_stride = 1
self.blur_nchannels = in_channels
else:
conv_stride = 1
self.blur_stride = kernel_size if (stride is None) else stride
self.blur_nchannels = out_channels
self.conv = torch.nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=conv_stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
)
self.conv._already_blurpooled = True # Mark to avoid rewrapping on duplicate calls
# this is the full 4d tensor we want; materialize it once, instead
# of just-in-time during forward; we can do this in this class but
# not the others because we know in_channels during __init__
filt = _default_2d_filter().repeat(self.blur_nchannels, 1, 1, 1)
self.register_buffer('blur_filter', filt)
def forward(self, input: torch.Tensor):
if self.blur_first:
# blur in place, then apply (probably strided) conv
# this is roughly the same number of flops as just applying
# the original conv (though has some memory bandwidth cost)
blurred = blur_2d(input, channels=self.blur_nchannels, filter=self.blur_filter, stride=self.blur_stride)
return self.conv.forward(blurred)
else:
# apply conv with stride of 1, then blur and (probably) downsample;
# this is much more costly than a strided conv, at least in the
# compute-bound regime
activations = self.conv.forward(input)
return blur_2d(activations, channels=self.blur_nchannels, filter=self.blur_filter, stride=self.blur_stride)
@staticmethod
def from_conv2d(module: torch.nn.Conv2d, module_index: int = -1, blur_first: bool = True):
has_bias = module.bias is not None and module.bias is not False
blurconv = BlurConv2d(
in_channels=module.in_channels,
out_channels=module.out_channels,
kernel_size=module.kernel_size,
stride=module.stride,
padding=module.padding,
dilation=module.dilation,
groups=module.groups,
bias=has_bias,
blur_first=blur_first,
)
with torch.no_grad():
blurconv.conv.weight.copy_(module.weight)
if has_bias:
blurconv.conv.bias.copy_(module.bias)
return blurconv
class BlurPool2d(nn.Module):
"""This module just calls :func:`.blur_2d` in ``forward`` using the provided arguments."""
def __init__(self, channels: int = 0, stride: _size_2_t = 2, padding: _size_2_t = 1) -> None:
super(BlurPool2d, self).__init__()
self.channels = channels
self.stride = stride
self.padding = padding
self.register_buffer('blur_filter', _default_2d_filter())
if self.channels > 0:
self.blur_filter = self.blur_filter.repeat(channels, 1, 1, 1)
def forward(self, input: torch.Tensor):
return blur_2d(input, channels=self.channels, stride=self.stride, filter=self.blur_filter)
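# Usage sketch (illustrative only): wrap an existing strided Conv2d with
# BlurConv2d.from_conv2d. With blur_first=True (the default) the output shape
# matches the original layer; only the anti-aliasing blur is added in front.
_conv = nn.Conv2d(3, 8, kernel_size=3, stride=2, padding=1)
_blurconv = BlurConv2d.from_conv2d(_conv, blur_first=True)
_x = torch.randn(1, 3, 32, 32)
assert _blurconv(_x).shape == _conv(_x).shape == (1, 8, 16, 16)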
| composer-dev | composer/algorithms/blurpool/blurpool_layers.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""`BlurPool <http://proceedings.mlr.press/v97/zhang19a.html>`_ adds anti-aliasing filters to convolutional layers to
increase accuracy and invariance to small shifts in the input.
See :class:`~composer.algorithms.BlurPool` or the :doc:`Method Card </method_cards/blurpool>` for details.
"""
from composer.algorithms.blurpool.blurpool import BlurPool as BlurPool
from composer.algorithms.blurpool.blurpool import apply_blurpool as apply_blurpool
from composer.algorithms.blurpool.blurpool_layers import BlurConv2d as BlurConv2d
from composer.algorithms.blurpool.blurpool_layers import BlurMaxPool2d as BlurMaxPool2d
from composer.algorithms.blurpool.blurpool_layers import BlurPool2d as BlurPool2d
from composer.algorithms.blurpool.blurpool_layers import blur_2d as blur_2d
from composer.algorithms.blurpool.blurpool_layers import blurmax_pool2d as blurmax_pool2d
__all__ = [
'BlurPool',
'apply_blurpool',
'BlurConv2d',
'BlurMaxPool2d',
'BlurPool2d',
'blur_2d',
'blurmax_pool2d',
]
| composer-dev | composer/algorithms/blurpool/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import functools
import logging
import warnings
from typing import Optional, Sequence, Union
import numpy as np
import torch
from torch.optim import Optimizer
from composer.algorithms.blurpool.blurpool_layers import BlurConv2d, BlurMaxPool2d
from composer.algorithms.warnings import NoEffectWarning
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.utils import module_surgery
log = logging.getLogger(__name__)
def apply_blurpool(model: torch.nn.Module,
replace_convs: bool = True,
replace_maxpools: bool = True,
blur_first: bool = True,
min_channels: int = 16,
optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None) -> None:
"""Add anti-aliasing filters to strided :class:`torch.nn.Conv2d` and/or :class:`torch.nn.MaxPool2d` modules.
These filters increase invariance to small spatial shifts in the input
(`Zhang 2019 <http://proceedings.mlr.press/v97/zhang19a.html>`_).
Args:
model (:class:`torch.nn.Module`): the model to modify in-place
replace_convs (bool, optional): replace strided :class:`torch.nn.Conv2d` modules with
:class:`.BlurConv2d` modules. Default: ``True``.
replace_maxpools (bool, optional): replace eligible :class:`torch.nn.MaxPool2d` modules
with :class:`.BlurMaxPool2d` modules. Default: ``True``.
blur_first (bool, optional): for ``replace_convs``, blur input before the associated
convolution. When set to ``False``, the convolution is applied with
a stride of 1 before the blurring, resulting in significant
overhead (though more closely matching
`the paper <http://proceedings.mlr.press/v97/zhang19a.html>`_).
See :class:`.BlurConv2d` for further discussion. Default: ``True``.
min_channels (int, optional): Skip replacing layers with in_channels < min_channels.
Commonly used to prevent the blurring of the first layer. Default: 16.
optimizers (torch.optim.Optimizer | Sequence[torch.optim.Optimizer], optional):
Existing optimizers bound to ``model.parameters()``. All optimizers that have already been
constructed with ``model.parameters()`` must be specified here so
they will optimize the correct parameters.
If the optimizer(s) are constructed *after* calling this function,
then it is safe to omit this parameter. These optimizers will see
the correct model parameters.
Example:
.. testcode::
import composer.functional as cf
from torchvision import models
model = models.resnet50()
cf.apply_blurpool(model)
"""
transforms = {}
if replace_maxpools:
transforms[torch.nn.MaxPool2d] = BlurMaxPool2d.from_maxpool2d
if replace_convs:
transforms[torch.nn.Conv2d] = functools.partial(
_maybe_replace_strided_conv2d,
blur_first=blur_first,
min_channels=min_channels,
)
module_surgery.replace_module_classes(model, optimizers=optimizers, policies=transforms)
_log_surgery_result(model)
class BlurPool(Algorithm):
"""`BlurPool <http://proceedings.mlr.press/v97/zhang19a.html>`_ adds anti-aliasing filters to convolutional layers.
This algorithm increases accuracy and invariance to small shifts in the input. It runs on
:attr:`.Event.INIT`.
Args:
replace_convs (bool): replace strided :class:`torch.nn.Conv2d` modules with
:class:`.BlurConv2d` modules. Default: ``True``.
replace_maxpools (bool): replace eligible :class:`torch.nn.MaxPool2d` modules
with :class:`.BlurMaxPool2d` modules. Default: ``True``.
blur_first (bool): when ``replace_convs`` is ``True``, blur input before the
associated convolution. When set to ``False``, the convolution is
applied with a stride of 1 before the blurring, resulting in
significant overhead (though more closely matching the paper).
See :class:`.BlurConv2d` for further discussion. Default: ``True``.
min_channels (int, optional): Skip replacing layers with in_channels < min_channels.
Commonly used to prevent the blurring of the first layer. Default: 16.
"""
def __init__(
self,
replace_convs: bool = True,
replace_maxpools: bool = True,
blur_first: bool = True,
min_channels: int = 16,
) -> None:
self.replace_convs = replace_convs
self.replace_maxpools = replace_maxpools
self.blur_first = blur_first
self.min_channels = min_channels
if self.replace_maxpools is False and self.replace_convs is False:
raise ValueError(
'Both replace_maxpools and replace_convs are set to False, so BlurPool will not modify the model.')
def __repr__(self) -> str:
return f'{self.__class__.__name__}(replace_convs={self.replace_convs},replace_maxpools={self.replace_maxpools},blur_first={self.blur_first},min_channels={self.min_channels})'
@staticmethod
def required_on_load() -> bool:
return True
def match(self, event: Event, state: State) -> bool:
return event == Event.INIT
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
assert state.model is not None
apply_blurpool(state.model,
optimizers=state.optimizers,
replace_convs=self.replace_convs,
replace_maxpools=self.replace_maxpools,
blur_first=self.blur_first,
min_channels=self.min_channels)
self._log_results(event, state, logger)
def _log_results(self, event: Event, state: State, logger: Logger) -> None:
"""Logs the result of BlurPool application, including the number of layers that have been replaced."""
assert state.model is not None
num_blurpool_layers = module_surgery.count_module_instances(state.model, BlurMaxPool2d)
num_blurconv_layers = module_surgery.count_module_instances(state.model, BlurConv2d)
# python logger
log.info(f'Applied BlurPool to model {state.model.__class__.__name__} '
f'with replace_maxpools={self.replace_maxpools}, '
f'replace_convs={self.replace_convs}. '
f'Model now has {num_blurpool_layers} BlurMaxPool2d '
f'and {num_blurconv_layers} BlurConv2d layers.')
logger.log_hyperparameters({
'blurpool/num_blurpool_layers': num_blurpool_layers,
'blurpool/num_blurconv_layers': num_blurconv_layers,
})
def _log_surgery_result(model: torch.nn.Module):
num_blurpool_layers = module_surgery.count_module_instances(model, BlurMaxPool2d)
num_blurconv_layers = module_surgery.count_module_instances(model, BlurConv2d)
if num_blurconv_layers == 0 and num_blurpool_layers == 0:
warnings.warn(
NoEffectWarning('Applying BlurPool did not change any layers. '
'No strided Conv2d or Pool2d layers were found.'))
log.info(f'Applied BlurPool to model {model.__class__.__name__}. '
f'Model now has {num_blurpool_layers} BlurMaxPool2d '
f'and {num_blurconv_layers} BlurConv2d layers.')
def _maybe_replace_strided_conv2d(
module: torch.nn.Conv2d,
module_index: int,
blur_first: bool,
min_channels: int = 16,
):
already_blurpooled = hasattr(module, '_already_blurpooled') and module._already_blurpooled
if np.max(module.stride) > 1 and module.in_channels >= min_channels and not already_blurpooled:
return BlurConv2d.from_conv2d(module, module_index, blur_first=blur_first)
return None
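# Minimal sketch (illustrative only) of the surgery performed above: a toy
# model with one strided conv (in_channels >= min_channels) and one max-pool
# is rewritten in place, and the replacements can be counted afterwards.
_toy = torch.nn.Sequential(
    torch.nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1),
    torch.nn.ReLU(),
    torch.nn.MaxPool2d(kernel_size=2),
)
apply_blurpool(_toy, min_channels=16)
assert module_surgery.count_module_instances(_toy, BlurConv2d) == 1
assert module_surgery.count_module_instances(_toy, BlurMaxPool2d) == 1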
| composer-dev | composer/algorithms/blurpool/blurpool.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core ColOut classes and functions."""
from __future__ import annotations
import logging
import textwrap
import weakref
from typing import Any, Callable, Tuple, TypeVar, Union
import torch
import torch.utils.data
from PIL.Image import Image as PillowImage
from torch import Tensor
from torchvision.datasets import VisionDataset
from composer.algorithms.utils.augmentation_common import image_as_type
from composer.core import Algorithm, Event, State
from composer.datasets.utils import add_vision_dataset_transform
from composer.loggers import Logger
from composer.utils import ensure_tuple
log = logging.getLogger(__name__)
ImgT = TypeVar('ImgT', torch.Tensor, PillowImage)
__all__ = ['ColOut', 'ColOutTransform', 'colout_batch']
def colout_batch(sample: Union[ImgT, Tuple[ImgT, ImgT]],
p_row: float = 0.15,
p_col: float = 0.15,
resize_target: Union[bool, str] = 'auto') -> Union[ImgT, Tuple[ImgT, ImgT]]:
"""Applies ColOut augmentation to a batch of images and (optionally) targets,
dropping the same random rows and columns from all images and targets in a batch.
See the :doc:`Method Card </method_cards/colout>` for more details.
Example:
.. testcode::
from composer.algorithms.colout import colout_batch
new_X = colout_batch(X_example, p_row=0.15, p_col=0.15)
Args:
sample (torch.Tensor | PIL.Image | Tuple[torch.Tensor, torch.Tensor] | Tuple[PIL.Image, PIL.Image]):
Either a single tensor or image or a 2-tuple of tensors or images. When tensor(s), the tensor must be of shape
``CHW`` for a single image or ``NCHW`` for a batch of images.
p_row (float, optional): Fraction of rows to drop (drop along H). Default: ``0.15``.
p_col (float, optional): Fraction of columns to drop (drop along W). Default: ``0.15``.
resize_target (bool | str, optional): If ``sample`` is a tuple, whether to resize both objects in the tuple.
If set to ``'auto'``, both objects will be resized if they have the same spatial dimensions.
Otherwise, only the first object is resized. Default: ``'auto'``.
Returns:
torch.Tensor | PIL.Image | Tuple[torch.Tensor, torch.Tensor] | Tuple[PIL.Image, PIL.Image]:
A smaller image or 2-tuple of images with random rows and columns dropped.
"""
sample = ensure_tuple(sample)
if len(sample) > 2:
raise ValueError('sample must either be a single object or a tuple with a max length of 2')
input = sample[0]
# Convert image to Tensor if needed
X_tensor = image_as_type(input, torch.Tensor)
# Get the dimensions of the image
row_size = X_tensor.shape[-2]
col_size = X_tensor.shape[-1]
# Determine how many rows and columns to keep
kept_row_size = int((1 - p_row) * row_size)
kept_col_size = int((1 - p_col) * col_size)
# Randomly choose indices to keep. Must be sorted for slicing
kept_row_idx = sorted(torch.randperm(row_size)[:kept_row_size].numpy())
kept_col_idx = sorted(torch.randperm(col_size)[:kept_col_size].numpy())
# Keep only the selected row and columns
X_colout = X_tensor[..., kept_row_idx, :]
X_colout = X_colout[..., :, kept_col_idx]
# convert back to same type as input, and strip added batch dim if needed;
# we can't just reshape to input shape because we've reduced the spatial size
if not isinstance(input, torch.Tensor) or (input.ndim < X_colout.ndim):
X_colout = X_colout.reshape(X_colout.shape[-3:])
X_colout = image_as_type(X_colout, type(input))
resize_target = _should_resize_target(sample, resize_target)
if resize_target:
target = sample[1]
Y_tensor = image_as_type(target, torch.Tensor)
Y_colout = Y_tensor[..., kept_row_idx, :]
Y_colout = Y_colout[..., :, kept_col_idx]
# convert back to same type as input, and strip added batch dim if needed;
# we can't just reshape to input shape because we've reduced the spatial size
if not isinstance(target, torch.Tensor) or (target.ndim < Y_colout.ndim):
Y_colout = Y_colout.reshape(Y_colout.shape[-3:])
Y_colout = image_as_type(Y_colout, type(target))
return X_colout, Y_colout
return X_colout
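# Standalone sketch (plain torch, illustrative only) of the core ColOut
# operation above: keep a sorted random subset of rows and columns, which
# shrinks the image while preserving its overall structure.
_x = torch.rand(3, 32, 32)  # hypothetical CHW image
_keep_rows = sorted(torch.randperm(32)[:int(0.85 * 32)].tolist())
_keep_cols = sorted(torch.randperm(32)[:int(0.85 * 32)].tolist())
_smaller = _x[..., _keep_rows, :][..., :, _keep_cols]
assert _smaller.shape == (3, 27, 27)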
class ColOutTransform:
"""Torchvision-like transform for performing the ColOut augmentation,
where random rows and columns are dropped from up to two Torch tensors
or two PIL images.
See the :doc:`Method Card </method_cards/colout>` for more details.
Example:
.. testcode::
from torchvision import datasets, transforms
from composer.algorithms.colout import ColOutTransform
colout_transform = ColOutTransform(p_row=0.15, p_col=0.15)
transforms = transforms.Compose([colout_transform, transforms.ToTensor()])
Args:
p_row (float, optional): Fraction of rows to drop (drop along H). Default: ``0.15``.
p_col (float, optional): Fraction of columns to drop (drop along W). Default: ``0.15``.
resize_target (bool | str, optional): Whether to resize the target in addition to the input.
If set to ``'auto'``, the target is resized only if it has the same spatial
dimensions as the input. Default: ``'auto'``.
"""
def __init__(self, p_row: float = 0.15, p_col: float = 0.15, resize_target: Union[bool, str] = 'auto'):
self.p_row = p_row
self.p_col = p_col
self.resize_target = resize_target
def __call__(self, sample: Union[ImgT, Tuple[ImgT, ImgT]]) -> Union[ImgT, Tuple[ImgT, ImgT]]:
"""Drops random rows and columns from up to two images.
Args:
sample (torch.Tensor | PIL.Image | Tuple[torch.Tensor, torch.Tensor] | Tuple[PIL.Image, PIL.Image]):
A single image or a 2-tuple of images as either :class:`torch.Tensor` or :class:`PIL.Image`.
Returns:
torch.Tensor | PIL.Image | Tuple[torch.Tensor, torch.Tensor] | Tuple[PIL.Image, PIL.Image]:
A smaller image or 2-tuple of images with random rows and columns dropped.
"""
sample = ensure_tuple(sample)
if len(sample) > 2:
raise ValueError(f'Colout transform does not support sample tuple of length {len(sample)} > 2')
return colout_batch(sample, p_row=self.p_row, p_col=self.p_col, resize_target=self.resize_target)
class ColOut(Algorithm):
"""Drops a fraction of the rows and columns of an input image and (optionally) a target image. If the fraction of
rows/columns dropped isn't too large, this does not significantly alter the content of the image, but reduces its
size and provides extra variability.
If ``batch`` is True (the default), then this algorithm runs on :attr:`.Event.AFTER_DATALOADER`
to modify the batch.
Otherwise, if ``batch=False``, this algorithm runs on :attr:`.Event.FIT_START` to insert
a dataset transformation. It is a no-op if this algorithm has already applied itself to the :attr:`State.train_dataloader.dataset`.
See the :doc:`Method Card </method_cards/colout>` for more details.
Example:
.. testcode::
from composer.algorithms import ColOut
from composer.trainer import Trainer
colout_algorithm = ColOut(p_row=0.15, p_col=0.15, batch=True)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[colout_algorithm],
optimizers=[optimizer]
)
Args:
p_row (float, optional): Fraction of rows to drop (drop along H). Default: ``0.15``.
p_col (float, optional): Fraction of columns to drop (drop along W). Default: ``0.15``.
batch (bool, optional): Run ColOut at the batch level. Default: ``True``.
resize_target (bool | str, optional): Whether to resize the target in addition to the input. If set to ``'auto'``,
the target is resized only if it has the same spatial dimensions as the input. Default: ``'auto'``.
input_key (str | int | Tuple[Callable, Callable] | Any, optional): A key that indexes to the input
from the batch. Can also be a pair of get and set functions, where the getter
is assumed to be first in the pair. The default is 0, which corresponds to any sequence, where the first element
is the input. Default: ``0``.
target_key (str | int | Tuple[Callable, Callable] | Any, optional): A key that indexes to the target
from the batch. Can also be a pair of get and set functions, where the getter
is assumed to be first in the pair. The default is 1, which corresponds to any sequence, where the second element
is the target. Default: ``1``.
"""
def __init__(
self,
p_row: float = 0.15,
p_col: float = 0.15,
batch: bool = True,
resize_target: Union[bool, str] = 'auto',
input_key: Union[str, int, Tuple[Callable, Callable], Any] = 0,
target_key: Union[str, int, Tuple[Callable, Callable], Any] = 1,
):
if not (0 <= p_col <= 1):
raise ValueError('p_col must be between 0 and 1')
if not (0 <= p_row <= 1):
raise ValueError('p_row must be between 0 and 1')
if (not isinstance(resize_target, bool)) and (isinstance(resize_target, str) and resize_target != 'auto'):
raise ValueError(f'resize_target must be a boolean or ``auto``. Received: {resize_target}')
if resize_target is True and batch is False:
raise NotImplementedError('Resizing targets is not currently supported with batch=False')
self.p_row = p_row
self.p_col = p_col
self.batch = batch
self.resize_target = resize_target
self._transformed_datasets = weakref.WeakSet()
self.input_key, self.target_key = input_key, target_key
def match(self, event: Event, state: State) -> bool:
if self.batch:
return event == Event.AFTER_DATALOADER
else:
if event != Event.FIT_START:
return False
assert state.dataloader is not None, 'dataloader should be defined on fit start'
if not isinstance(state.dataloader, torch.utils.data.DataLoader):
raise TypeError(f'{type(self).__name__} requires a PyTorch dataloader.')
return state.dataloader.dataset not in self._transformed_datasets
def _apply_sample(self, state: State) -> None:
"""Add the ColOut dataset transform to the dataloader."""
assert isinstance(state.dataloader, torch.utils.data.DataLoader), 'dataloader type checked on match()'
dataset = state.dataloader.dataset
transform = ColOutTransform(p_row=self.p_row, p_col=self.p_col, resize_target=self.resize_target)
if not isinstance(dataset, VisionDataset):
raise TypeError(
textwrap.dedent(f"""\
To use {type(self).__name__}, the dataset must be a
{VisionDataset.__qualname__}, not {type(dataset).__name__}"""))
add_vision_dataset_transform(dataset, transform, is_tensor_transform=False)
self._transformed_datasets.add(dataset)
def _apply_batch(self, state: State) -> None:
"""Transform a batch of images using the ColOut augmentation."""
inputs, target = state.batch_get_item(key=self.input_key), state.batch_get_item(key=self.target_key)
assert isinstance(inputs, Tensor) and isinstance(target, Tensor), \
'Inputs and target must be of type torch.Tensor for batch-wise ColOut'
sample = (inputs, target)
resize_target = _should_resize_target(sample, resize_target=self.resize_target)
colout_result = colout_batch(sample, p_row=self.p_row, p_col=self.p_col, resize_target=resize_target)
# colout_result will be a tuple if the targets are resized and a single object otherwise
if resize_target:
new_input, new_target = colout_result
state.batch_set_item(self.input_key, new_input)
state.batch_set_item(self.target_key, new_target)
else:
new_input = colout_result
state.batch_set_item(self.input_key, new_input)
def apply(self, event: Event, state: State, logger: Logger) -> None:
if self.batch:
self._apply_batch(state)
else:
self._apply_sample(state)
def _should_resize_target(sample: Union[ImgT, Tuple[ImgT, ImgT]], resize_target: Union[bool, str]) -> bool:
"""Helper function to determine if both objects in the tuple should be resized.
Decision is based on ``resize_target`` and if both objects in the tuple have the same spatial size.
"""
sample = ensure_tuple(sample)
if len(sample) > 2:
raise ValueError('sample must either be a single object or a tuple with a max length of 2')
input = sample[0]
if isinstance(resize_target, bool):
return resize_target
if len(sample) == 1:
return False
if isinstance(resize_target, str) and resize_target.lower() == 'auto':
input_size = input.shape[-2:] if isinstance(input, torch.Tensor) else input.size[::-1]
target = sample[1]
if isinstance(target, PillowImage):
return target.size[::-1] == input_size
else:
return target.ndim > 2 and target.shape[-2:] == input_size
raise ValueError("resize_target must either be a boolean or 'auto'")
| composer-dev | composer/algorithms/colout/colout.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Drops a fraction of the rows and columns of an input image. If the fraction of rows/columns dropped isn't too large,
this does not significantly alter the content of the image, but reduces its size and provides extra variability.
See the :doc:`Method Card </method_cards/colout>` for more details.
"""
from composer.algorithms.colout.colout import ColOut as ColOut
from composer.algorithms.colout.colout import ColOutTransform as ColOutTransform
from composer.algorithms.colout.colout import colout_batch as colout_batch
__all__ = ['ColOut', 'ColOutTransform', 'colout_batch']
| composer-dev | composer/algorithms/colout/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Optimizers and learning rate schedulers.
Composer is compatible with optimizers based off of PyTorch's native :class:`~torch.optim.Optimizer` API, including common
optimizers such as :class:`~torch.optim.SGD` and :class:`~torch.optim.AdamW`.
However, where applicable, it is recommended to use the optimizers provided in :mod:`~.decoupled_weight_decay` since
they improve off of their PyTorch equivalents.
PyTorch schedulers can be used with Composer, but this is explicitly discouraged. Instead, it is recommended to use
schedulers based off of Composer's :class:`~.scheduler.ComposerScheduler` API, which allows more flexibility and
configuration in writing schedulers.
"""
from composer.optim.decoupled_weight_decay import DecoupledAdamW, DecoupledSGDW
from composer.optim.scheduler import (ComposerScheduler, ConstantScheduler, ConstantWithWarmupScheduler,
CosineAnnealingScheduler, CosineAnnealingWarmRestartsScheduler,
CosineAnnealingWithWarmupScheduler, ExponentialScheduler, LinearScheduler,
LinearWithWarmupScheduler, MultiStepScheduler, MultiStepWithWarmupScheduler,
PolynomialScheduler, PolynomialWithWarmupScheduler, StepScheduler,
compile_composer_scheduler)
__all__ = [
'DecoupledAdamW',
'DecoupledSGDW',
'ComposerScheduler',
'ConstantScheduler',
'ConstantWithWarmupScheduler',
'CosineAnnealingScheduler',
'CosineAnnealingWarmRestartsScheduler',
'CosineAnnealingWithWarmupScheduler',
'ExponentialScheduler',
'LinearScheduler',
'LinearWithWarmupScheduler',
'MultiStepScheduler',
'MultiStepWithWarmupScheduler',
'PolynomialScheduler',
'PolynomialWithWarmupScheduler',
'StepScheduler',
'compile_composer_scheduler',
]
| composer-dev | composer/optim/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Optimizers with weight decay decoupled from the learning rate.
These optimizers are based off of `Decoupled Weight Decay Regularization <https://arxiv.org/abs/1711.05101>`_, which
proposes this decoupling. In general, it is recommended to use these optimizers over their native PyTorch equivalents.
"""
from __future__ import annotations
import logging
import math
from typing import Iterable, List, Tuple, Union
import torch
from torch.optim import SGD, AdamW
from torch.optim.optimizer import required # type: ignore
from composer.utils import dist
log = logging.getLogger(__name__)
__all__ = ['DecoupledSGDW', 'DecoupledAdamW']
class DecoupledSGDW(SGD):
"""SGD optimizer with the weight decay term decoupled from the learning rate.
NOTE: Since `weight_decay` is no longer scaled by `lr`, you will likely want to use much smaller values
for `weight_decay` than you would if using `torch.optim.SGD`. In this optimizer, the value `weight_decay` translates exactly to:
'On every optimizer update, every weight element will be multiplied by `(1.0 - weight_decay_t)`'.
The term `weight_decay_t` will follow the same schedule as `lr_t` but crucially will not be scaled by `lr`.
Argument defaults are copied from :class:`torch.optim.SGD`.
Why use this optimizer? The standard `SGD <https://pytorch.org/docs/stable/generated/torch.optim.SGD.html?highlight=sgd#torch.optim.SGD>`_
optimizer couples the weight decay term with the gradient calculation. This ties the optimal value
of :attr:`weight_decay` to :attr:`lr` and can also hurt generalization in practice. For more details
on why decoupling might be desirable, see `Decoupled Weight Decay Regularization <https://arxiv.org/abs/1711.05101>`_.
Args:
params (iterable): Iterable of parameters to optimize or dicts defining parameter groups.
lr (float): Learning rate.
momentum (float, optional): Momentum factor. Default: ``0``.
dampening (float, optional): Dampening factor applied to the momentum. Default: ``0``.
weight_decay (float, optional): Decoupled weight decay factor. Default: ``0``.
nesterov (bool, optional): Enables Nesterov momentum updates. Default: ``False``.
"""
def __init__(self,
params: Union[Iterable[torch.Tensor], Iterable[dict]],
lr: float = required,
momentum: float = 0,
dampening: float = 0,
weight_decay: float = 0,
nesterov: bool = False):
if weight_decay >= 1e-3:
log.warning(
f'You are using a high value of `weight_decay={weight_decay}` for the `DecoupledSGDW` optimizer. Are you sure you want to do this? '
f'Your model\'s weights will be multiplied by {1.0 - weight_decay} on every step!')
super().__init__(params=params,
lr=lr,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov)
for group in self.param_groups:
group['initial_lr'] = group['lr']
@staticmethod
def sgdw(params: List[torch.Tensor], d_p_list: List[torch.Tensor], momentum_buffer_list: List[torch.Tensor], *,
weight_decay: float, momentum: float, lr: float, initial_lr: float, dampening: float, nesterov: bool):
r"""Functional API that performs SGDW algorithm computation.
Args:
params (list): List of parameters to update
d_p_list (list): List of parameter gradients
momentum_buffer_list (list): List of momentum buffers
weight_decay (float): Decoupled weight decay factor
momentum (float): Momentum factor
lr (float): Learning rate
initial_lr (float): Initial learning rate
dampening (float): Dampening factor for momentum update
nesterov (bool): Enables Nesterov momentum updates
"""
for i, param in enumerate(params):
d_p = d_p_list[i]
if momentum != 0:
buf = momentum_buffer_list[i]
if buf is None:
buf = torch.clone(d_p).detach()
momentum_buffer_list[i] = buf
else:
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
if weight_decay != 0:
decay_factor = (lr / initial_lr) if initial_lr else 1.0
param.mul_(1 - decay_factor * weight_decay)
param.add_(d_p, alpha=-lr)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
d_p_list = []
momentum_buffer_list = []
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
lr = group['lr']
initial_lr = group['initial_lr']
for p in group['params']:
if p.grad is not None:
params_with_grad.append(p)
d_p_list.append(p.grad)
state = self.state[p]
if 'momentum_buffer' not in state:
momentum_buffer_list.append(None)
else:
momentum_buffer_list.append(state['momentum_buffer'])
self.sgdw(params_with_grad,
d_p_list,
momentum_buffer_list,
weight_decay=weight_decay,
momentum=momentum,
lr=lr,
initial_lr=initial_lr,
dampening=dampening,
nesterov=nesterov)
# update momentum_buffers in state
for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
state = self.state[p]
state['momentum_buffer'] = momentum_buffer
return loss
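# Numerical sketch (illustrative only) of the decoupling above, with lr equal
# to initial_lr so that decay_factor == 1. Coupled SGD weight decay adds
# wd * w to the gradient, so the shrinkage is scaled by lr; DecoupledSGDW
# multiplies the weight by (1 - wd) directly and only then applies the
# gradient step.
_w, _g, _lr, _wd = 1.0, 0.5, 0.1, 0.01
_coupled = _w - _lr * (_g + _wd * _w)    # torch.optim.SGD-style update -> 0.949
_decoupled = _w * (1 - _wd) - _lr * _g   # DecoupledSGDW-style update   -> 0.940
assert abs(_coupled - 0.949) < 1e-9 and abs(_decoupled - 0.940) < 1e-9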
class DecoupledAdamW(AdamW):
"""Adam optimizer with the weight decay term decoupled from the learning rate.
NOTE: Since `weight_decay` is no longer scaled by `lr`, you will likely want to use much smaller values
for `weight_decay` than you would if using `torch.optim.Adam` or `torch.optim.AdamW`. In this optimizer, the value `weight_decay` translates exactly to:
'On every optimizer update, every weight element will be multiplied by `(1.0 - weight_decay_t)`'.
The term `weight_decay_t` will follow the same schedule as `lr_t` but crucially will not be scaled by `lr`.
Argument defaults are similar to :class:`torch.optim.AdamW` but we make two changes:
* The default for ``weight_decay`` is changed from ``1e-2`` -> ``1e-5`` because in `DecoupledAdamW`, the weight decay is decoupled and no longer scaled by the `lr=1e-3`.
* The default for ``betas`` is changed from ``(0.9, 0.999)`` to ``(0.9, 0.95)`` to reflect community best-practices for the beta2 hyperparameter.
Why use this optimizer? The standard `AdamW <https://pytorch.org/docs/stable/generated/torch.optim.AdamW.html#torch.optim.AdamW>`_
optimizer explicitly couples the weight decay term with the learning rate. This ties the
optimal value of :attr:`weight_decay` to :attr:`lr` and can also hurt generalization in practice. For more details on
why decoupling might be desirable, see `Decoupled Weight Decay Regularization <https://arxiv.org/abs/1711.05101>`_.
Args:
params (iterable): Iterable of parameters to optimize or dicts defining parameter groups.
lr (float, optional): Learning rate. Default: ``1e-3``.
betas (tuple, optional): Coefficients used for computing running averages of gradient and its square
Default: ``(0.9, 0.95)``.
eps (float, optional): Term added to the denominator to improve numerical stability. Default: ``1e-8``.
weight_decay (float, optional): Decoupled weight decay factor. Default: ``1e-5``.
amsgrad (bool, optional): Enables the amsgrad variant of Adam. Default: ``False``.
"""
metric_functions = {
'l2_norm/moment':
lambda param, optim_state, step_tensor: torch.linalg.vector_norm(optim_state['exp_avg']),
'l2_norm/param':
lambda param, optim_state, step_tensor: torch.linalg.vector_norm(param.data),
'l2_norm/second_moment_sqrt':
lambda param, optim_state, step_tensor: torch.linalg.vector_norm(optim_state['exp_avg_sq']).sqrt(),
'l2_norm/update':
lambda param, optim_state, step_tensor: torch.linalg.vector_norm(step_tensor),
'l2_norm/grad':
lambda param, optim_state, step_tensor: torch.linalg.vector_norm(param.grad),
'cosine/update_grad':
lambda param, optim_state, step_tensor: torch.nn.functional.cosine_similarity(
param.grad.flatten(), step_tensor.flatten(), dim=0),
'cosine/moment_grad':
lambda param, optim_state, step_tensor: torch.nn.functional.cosine_similarity(
param.grad.flatten(), optim_state['exp_avg'].flatten(), dim=0)
}
def __init__(self,
params: Union[Iterable[torch.Tensor], Iterable[dict]],
lr: float = 1e-3,
betas: Tuple[float, float] = (0.9, 0.95),
eps: float = 1e-8,
weight_decay: float = 1e-5,
amsgrad: bool = False):
if weight_decay >= 1e-3:
log.warning(
f'You are using a high value of `weight_decay={weight_decay}` for the `DecoupledAdamW` optimizer. Are you sure you want to do this? '
f'Your model\'s weights will be multiplied by {1.0 - weight_decay} on every step!')
super().__init__(params=params, lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
for group in self.param_groups:
group['initial_lr'] = group['lr']
self.amsgrad = amsgrad
@staticmethod
def adamw(params: List[torch.Tensor], grads: List[torch.Tensor], exp_avgs: List[torch.Tensor],
exp_avg_sqs: List[torch.Tensor], max_exp_avg_sqs: List[torch.Tensor], state_steps: List[int], *,
amsgrad: bool, beta1: float, beta2: float, lr: float, initial_lr: float, weight_decay: float,
eps: float) -> None:
r"""Functional API that performs AdamW algorithm computation with decoupled weight decay.
Args:
params (list): List of parameters to update.
grads (list): List of parameter gradients.
exp_avgs (list): List of average gradients.
exp_avg_sqs (list): List of average squared gradients.
max_exp_avg_sqs (list): List of max average squared gradients for amsgrad updates.
state_steps (list): List of steps taken for all parameters.
amsgrad (bool): Enables amsgrad variant of Adam.
beta1 (float): Coefficient for computing the moving average of gradient values.
beta2 (float): Coefficient for computing the moving average of squared gradient values.
lr (float): Learning rate.
initial_lr (float): Initial learning rate.
weight_decay (float): Factor for decoupled weight decay
eps (float): Term added to the denominator to improve numerical stability.
"""
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step = state_steps[i]
# Perform stepweight decay
if weight_decay != 0:
decay_factor = (lr / initial_lr) if initial_lr else 1.0
param.mul_(1 - decay_factor * weight_decay)
bias_correction1 = 1 - beta1**step
bias_correction2 = 1 - beta2**step
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sqs[i].sqrt() / math.sqrt(bias_correction2)).add_(eps)
else:
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
step_size = lr / bias_correction1
param.addcdiv_(exp_avg, denom, value=-step_size)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
max_exp_avg_sqs = []
state_steps = []
amsgrad = group['amsgrad']
beta1, beta2 = group['betas']
eps = group['eps']
lr = group['lr']
if 'initial_lr' not in group:
group['initial_lr'] = lr
initial_lr = group['initial_lr']
weight_decay = group['weight_decay']
for p in group['params']:
if p.grad is None or not p.requires_grad:
continue
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError('AdamW does not support sparse gradients')
grads.append(p.grad)
state = self.state[p]
# State initialization
if 'step' not in state:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
if amsgrad:
max_exp_avg_sqs.append(state['max_exp_avg_sq'])
# Update the steps for each param group update
state['step'] += 1
# Record the step after step update
state_steps.append(state['step'])
self.adamw(params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=amsgrad,
beta1=beta1,
beta2=beta2,
lr=lr,
initial_lr=initial_lr,
weight_decay=weight_decay,
eps=eps)
return loss
def dist_reduce_metrics(self, optimizer_metrics):
for metric in optimizer_metrics:
if metric.startswith('l2_norm'):
reduced = optimizer_metrics[metric]
if dist.get_world_size() > 1:
dist.all_reduce(reduced, reduce_operation='SUM')
optimizer_metrics[metric] = math.sqrt(reduced)
elif metric.startswith('cosine'):
reduced = optimizer_metrics[metric]
if dist.get_world_size() > 1:
dist.all_reduce(reduced, reduce_operation='SUM')
_, vectors, layer = tuple(metric.split('/'))
A, B = tuple(vectors.split('_'))
A_reduced_norm = optimizer_metrics[f'l2_norm/{A}/{layer}']
B_reduced_norm = optimizer_metrics[f'l2_norm/{B}/{layer}']
optimizer_metrics[metric] = reduced / (A_reduced_norm * B_reduced_norm)
else:
reduced = optimizer_metrics[metric]
if dist.get_world_size() > 1:
dist.all_reduce(reduced, reduce_operation='SUM')
optimizer_metrics[metric] = reduced / dist.get_world_size()
return optimizer_metrics
def pre_reduce_metrics(self, optimizer_metrics):
"""Preprocess metrics to reduce across ranks correctly."""
# Sort L2 norms first so they are squared before other metrics, which depend on squared values
metrics = optimizer_metrics.keys()
metrics = sorted(metrics, key=lambda metric: 0 if 'l2_norm' in metric else 1)
for metric in metrics:
if metric.startswith('l2_norm'):
# L2 norms need to be squared, before they are reduced via summation
optimizer_metrics[metric] = optimizer_metrics[metric]**2
elif metric.startswith('cosine'):
_, vectors, layer = tuple(metric.split('/'))
A, B = tuple(vectors.split('_'))
# L2 norm would've been squared in previous branch
A_rank_subset_norm = math.sqrt(optimizer_metrics[f'l2_norm/{A}/{layer}'])
B_rank_subset_norm = math.sqrt(optimizer_metrics[f'l2_norm/{B}/{layer}'])
optimizer_metrics[metric] *= A_rank_subset_norm * B_rank_subset_norm
return optimizer_metrics
def report_per_parameter_metrics(self, param: torch.Tensor, name: str, optimizer_metrics: dict):
lr = self.param_groups[0]['lr']
eps = self.param_groups[0]['eps']
weight_decay = self.param_groups[0]['weight_decay']
initial_lr = self.param_groups[0]['initial_lr']
beta1, beta2 = self.param_groups[0]['betas']
if param in self.state:
param_optim_state = self.state[param]
step = param_optim_state['step']
bias_correction1 = 1 - beta1**step
bias_correction2 = 1 - beta2**step
denom = (param_optim_state['exp_avg_sq'].sqrt() / math.sqrt(bias_correction2)).add_(eps)
step_size = lr / bias_correction1
step_tensor = step_size * param_optim_state['exp_avg'].div(denom)
decay_factor = (lr / initial_lr) if initial_lr else 1.0
step_tensor.add_(param, alpha=-weight_decay * decay_factor)
for metric in self.metric_functions:
optimizer_metrics[f'{metric}/{name}'] = self.metric_functions[metric](param, param_optim_state,
step_tensor)
return optimizer_metrics
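# The following is a minimal, standalone sketch (not part of the library) of the decoupled
# AdamW update implemented in ``adamw`` above, applied to a single parameter tensor. The
# helper name and its default hyperparameters are hypothetical and exist only for
# illustration; the real optimizer keeps its per-parameter state in ``self.state``.
def _decoupled_adamw_step_sketch(param, grad, exp_avg, exp_avg_sq, step, lr=1e-3,
                                 initial_lr=1e-3, betas=(0.9, 0.95), eps=1e-8,
                                 weight_decay=1e-5):
    import math
    beta1, beta2 = betas
    # Weight decay is decoupled from the gradient and scaled by lr / initial_lr.
    decay_factor = (lr / initial_lr) if initial_lr else 1.0
    param.mul_(1 - decay_factor * weight_decay)
    # Standard Adam moment updates with bias correction.
    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
    bias_correction1 = 1 - beta1**step
    bias_correction2 = 1 - beta2**step
    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
    # The parameter moves against the bias-corrected first moment, normalized by ``denom``.
    param.addcdiv_(exp_avg, denom, value=-(lr / bias_correction1))
    return param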
| composer-dev | composer/optim/decoupled_weight_decay.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Stateless learning rate schedulers.
Stateless schedulers solve some of the problems associated with PyTorch's built-in schedulers provided in
:mod:`torch.optim.lr_scheduler`. The primary design goal of the schedulers provided in this module is to allow
schedulers to interface directly with Composer's :mod:`~composer.core.time` abstraction. This means that schedulers can
be configured using arbitrary but explicit time units.
See :class:`~.ComposerScheduler` for more information on stateless schedulers.
"""
import inspect
import logging
import math
import textwrap
import warnings
from typing import TYPE_CHECKING, List, Union
from torch.optim.lr_scheduler import LambdaLR
from composer.core import PyTorchScheduler, State, Time, TimeUnit
if TYPE_CHECKING:
from typing import Protocol
else:
# subclasses of Protocol cannot be instantiated in Python 3.8
Protocol = object
log = logging.getLogger(__name__)
__all__ = [
'ComposerScheduler', 'compile_composer_scheduler', 'StepScheduler', 'MultiStepScheduler', 'ConstantScheduler',
'LinearScheduler', 'ExponentialScheduler', 'CosineAnnealingScheduler', 'CosineAnnealingWarmRestartsScheduler',
'PolynomialScheduler', 'MultiStepWithWarmupScheduler', 'ConstantWithWarmupScheduler', 'LinearWithWarmupScheduler',
'CosineAnnealingWithWarmupScheduler', 'PolynomialWithWarmupScheduler'
]
class ComposerScheduler(Protocol):
r"""Specification for a stateless scheduler function.
While this specification is provided as a Python class, an ordinary function can implement this interface as long
as it matches the signature of this interface's :meth:`~.ComposerScheduler.__call__` method.
For example, a scheduler that halves the learning rate after 10 epochs could be written as:
.. code:: python
def ten_epoch_decay_scheduler(state: State) -> float:
if state.timestamp.epoch < 10:
return 1.0
return 0.5
# ten_epoch_decay_scheduler is a valid ComposerScheduler
trainer = Trainer(
schedulers=[ten_epoch_decay_scheduler],
...
)
In order to allow schedulers to be configured, schedulers may also be written as callable classes:
.. code:: python
class VariableEpochDecayScheduler(ComposerScheduler):
def __init__(self, num_epochs: int):
self.num_epochs = num_epochs
def __call__(self, state: State) -> float:
if state.timestamp.epoch < self.num_epochs:
return 1.0
return 0.5
ten_epoch_decay_scheduler = VariableEpochDecayScheduler(num_epochs=10)
# ten_epoch_decay_scheduler is also a valid ComposerScheduler
trainer = Trainer(
schedulers=[ten_epoch_decay_scheduler],
...
)
The constructions of ``ten_epoch_decay_scheduler`` in each of the examples above are equivalent. Note that neither
scheduler uses the ``scale_schedule_ratio`` parameter. As long as this parameter is not used when initializing
:class:`.Trainer`, it is not required that any schedulers implement that parameter.
.. automethod:: __call__
"""
def __call__(self, state: State, ssr: float = 1.0) -> float:
r"""Calculate the current learning rate multiplier :math:`\alpha`.
A scheduler function should be a pure function that returns a multiplier to apply to the optimizer's provided
learning rate, given the current trainer state, and optionally a "scale schedule ratio" (SSR). A typical
implementation will read ``state.timestamp``, and possibly other fields like ``state.max_duration``, to determine
the trainer's latest temporal progress.
.. note::
All instances of :class:`~.ComposerScheduler` output a `multiplier` for the learning rate, rather than the
learning rate directly. By convention, we use the symbol :math:`\alpha` to refer to this multiplier. This
means that the learning rate :math:`\eta` at time :math:`t` can be represented as
:math:`\eta(t) = \eta_i \times \alpha(t)`, where :math:`\eta_i` represents the learning rate used to
initialize the optimizer.
.. note::
It is possible to use multiple schedulers, in which case their effects will stack multiplicatively.
The ``ssr`` param indicates that the schedule should be "stretched" accordingly. In symbolic terms, where
:math:`\alpha_\sigma(t)` represents the scheduler output at time :math:`t` using scale schedule ratio
:math:`\sigma`:
.. math::
\alpha_{\sigma}(t) = \alpha(t / \sigma)
Args:
state (State): The current Composer Trainer state.
ssr (float): The scale schedule ratio. In general, the learning rate computed by this
scheduler at time :math:`t` with an SSR of 1.0 should be the same as that computed by
this scheduler at time :math:`t \times s` with an SSR of :math:`s`. Default = ``1.0``.
Returns:
alpha (float): A multiplier to apply to the optimizer's provided learning rate.
"""
raise NotImplementedError
def _convert_time(time: Union[str, Time[int], Time[float]], state: State, ssr: float = 1.0) -> Time[int]:
if isinstance(time, str):
time = Time.from_timestring(time)
assert state.max_duration is not None, 'max_duration should be set whenever schedulers are invoked'
if time.unit == TimeUnit.DURATION:
if state.max_duration.unit == TimeUnit.EPOCH:
if state.dataloader_len is None:
raise RuntimeError('Cannot convert time, as state.dataloader_len is None.')
return Time(int(time.value * int(state.dataloader_len) * state.max_duration.value), TimeUnit.BATCH)
return Time(int(time.value * state.max_duration.value), state.max_duration.unit)
elif time.unit == TimeUnit.EPOCH:
# Epochs do not provide sufficient granularity for SSR scaling
# e.g. if max_duration = 1ep, then any SSR would result in a new duration of 0.
# so, convert the time into batches
if state.dataloader_len is None:
raise RuntimeError('Cannot convert time, as state.dataloader_len is None.')
time = Time(value=time.value * int(state.dataloader_len), unit=TimeUnit.BATCH)
return Time(value=int(time.value * ssr), unit=time.unit)
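# A rough, standalone sketch (hypothetical helper, not part of this module) of the arithmetic
# ``_convert_time`` performs when a fractional '...dur' duration is converted into batches,
# assuming ``max_duration`` is expressed in epochs. The real function operates on ``Time``
# objects and the trainer ``State``; plain numbers are used here only for clarity.
def _duration_to_batches_sketch(duration_frac: float, max_duration_epochs: int,
                                dataloader_len: int, ssr: float = 1.0) -> int:
    # e.g. '0.5dur' with max_duration='10ep' and 100 batches per epoch -> 500 batches
    batches = int(duration_frac * dataloader_len * max_duration_epochs)
    # The scale schedule ratio then stretches (or shrinks) the resulting value.
    return int(batches * ssr)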
def compile_composer_scheduler(scheduler: ComposerScheduler, state: State, ssr: float = 1.0) -> PyTorchScheduler:
"""Converts a stateless scheduler into a PyTorch scheduler object.
While the resulting scheduler provides a ``.step()`` interface similar to other PyTorch schedulers, the scheduler is
also given a bound reference to the current :class:`~composer.core.State`. This means that any internal state updated
by ``.step()`` can be ignored, and the scheduler can instead simply use the bound state to recalculate the current
learning rate.
Args:
scheduler (ComposerScheduler): A stateless scheduler, provided as a :class:`~.ComposerScheduler` object.
state (State): The Composer Trainer's state.
Returns:
compiled_scheduler (PyTorchScheduler): The scheduler, in a form compatible with PyTorch scheduler interfaces.
"""
optimizers = state.optimizers
if len(optimizers) != 1:
raise NotImplementedError('Providing functional schedulers is unsupported with multiple optimizers.')
optimizer = optimizers[0]
scheduler_sig = inspect.signature(scheduler)
def scheduler_fn(epoch: int) -> float:
del epoch # unused. Provided by the pytorch LambdaLR
# if the ssr is 1.0, don't pass it to the scheduler. This allows users to pass in lambdas that only take
# one parameter -- the state
if len(scheduler_sig.parameters) == 1:
if ssr == 1.0:
return scheduler(state)
else:
raise ValueError(
textwrap.dedent(f"""\
Scheduler {scheduler} does not support `scale_schedule_ratio`.
To use `scale_schedule_ratio`, the scheduler must take two arguments (state, ssr)"""))
return scheduler(state, ssr)
lambda_scheduler = LambdaLR(optimizer, scheduler_fn)
return lambda_scheduler
class StepScheduler(ComposerScheduler):
r"""Decays the learning rate discretely at fixed intervals.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.StepLR` from PyTorch.
Decays the learning rate by a factor of ``gamma`` periodically, with a frequency determined by ``step_size``.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \gamma ^ {\text{floor}(t / \rho)}
Where :math:`\rho` represents the time between changes to the learning rate (the step size), and
:math:`\gamma` represents the multiplicative decay factor.
Args:
step_size (str | Time): Time between changes to the learning rate.
gamma (float): Multiplicative decay factor. Default = ``0.1``.
"""
def __init__(self, step_size: Union[str, Time], gamma: float = 0.1):
self.step_size = step_size
self.gamma = gamma
def __call__(self, state: State, ssr: float = 1.0):
step_size = _convert_time(self.step_size, state, ssr=ssr)
current_time = state.timestamp.get(step_size.unit)
steps = int(current_time / step_size)
return self.gamma**steps
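# A small sketch (illustration only; not used by the class above) of the multiplier
# ``StepScheduler`` computes, written directly on integer batch counts instead of
# Composer ``Time`` objects.
def _step_schedule_multiplier_sketch(current_batch: int, step_size_batches: int,
                                     gamma: float = 0.1) -> float:
    # alpha(t) = gamma ** floor(t / step_size)
    return gamma**(current_batch // step_size_batches)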
class MultiStepScheduler(ComposerScheduler):
r"""Decays the learning rate discretely at fixed milestones.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.MultiStepLR` from PyTorch.
Decays the learning rate by a factor of ``gamma`` whenever a time milestone in ``milestones`` is reached.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \gamma ^ x
Where :math:`x` represents the amount of milestones that have been reached, and :math:`\gamma` represents the
multiplicative decay factor.
Args:
milestones (List[str | Time]): Times at which the learning rate should change.
gamma (float): Multiplicative decay factor. Default = ``0.1``.
"""
def __init__(self, milestones: List[Union[str, Time]], gamma: float = 0.1):
self.milestones = milestones
self.gamma = gamma
def __call__(self, state: State, ssr: float = 1.0):
milestones = [_convert_time(milestone, state, ssr=ssr) for milestone in self.milestones]
factor = 1.0
for milestone in milestones:
if state.timestamp >= milestone:
factor *= self.gamma
return factor
class ConstantScheduler(ComposerScheduler):
r"""Maintains a fixed learning rate.
This scheduler is based on :class:`~torch.optim.lr_scheduler.ConstantLR` from PyTorch.
The default settings for this scheduler simply maintain a learning rate factor of 1 for the entire training
duration. However, both the factor and the duration of this scheduler can be configured.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases} \alpha, & \text{if } t < t_{max} \\ 1.0 & \text{otherwise} \end{cases}
Where :math:`\alpha` represents the learning rate multiplier to maintain while this scheduler is active, and
:math:`t_{max}` represents the duration of this scheduler.
Args:
alpha (float): Learning rate multiplier to maintain while this scheduler is active. Default = ``1.0``.
t_max (str | Time): Duration of this scheduler. Default = ``"1dur"``.
"""
def __init__(self, alpha: float = 1.0, t_max: Union[str, Time] = '1dur') -> None:
self.alpha = alpha
self.t_max = t_max
def __call__(self, state: State, ssr: float = 1.0) -> float:
t_max = _convert_time(self.t_max, state, ssr=ssr)
if state.timestamp < t_max:
return self.alpha
return 1.0
class LinearScheduler(ComposerScheduler):
r"""Adjusts the learning rate linearly.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.LinearLR` from PyTorch.
.. warning::
Note that the defaults for this scheduler differ from the defaults for
:class:`~torch.optim.lr_scheduler.LinearLR`. The PyTorch scheduler, by default, linearly increases the learning
rate multiplier from 1.0 / 3 to 1.0, whereas this implementation, by default, linearly decreases the multiplier
from 1.0 to 0.0.
Linearly adjusts the learning rate multiplier from ``alpha_i`` to ``alpha_f`` over ``t_{max}`` time.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \alpha_i + (\alpha_f - \alpha_i) \times \tau
Given :math:`\tau`, the fraction of time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau = t / t_{max}
Where :math:`\alpha_i` represents the initial learning rate multiplier, :math:`\alpha_f` represents
the learning rate multiplier to decay to, and :math:`t_{max}` represents the duration of this scheduler.
Args:
alpha_i (float): Initial learning rate multiplier. Default = ``1.0``.
alpha_f (float): Final learning rate multiplier. Default = ``0.0``.
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
"""
def __init__(self, alpha_i: float = 1.0, alpha_f: float = 0.0, t_max: Union[str, Time] = '1dur'):
self.alpha_i = alpha_i
self.alpha_f = alpha_f
self.t_max = Time.from_timestring(t_max) if isinstance(t_max, str) else t_max
def __call__(self, state: State, ssr: float = 1.0):
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_max.unit)
frac_of_total = min(1.0, (current_time / t_max).value)
current_factor = self.alpha_i + frac_of_total * (self.alpha_f - self.alpha_i)
return current_factor
class ExponentialScheduler(ComposerScheduler):
r"""Decays the learning rate exponentially.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.ExponentialLR` from PyTorch.
Exponentially decays the learning rate such that it decays by a factor of ``gamma`` every ``decay_period`` time.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \gamma ^ {t / \rho}
Where :math:`\rho` represents the decay period, and :math:`\gamma` represents the multiplicative decay factor.
Args:
decay_period (str | Time): Decay period. Default = ``"1ep"``.
gamma (float): Multiplicative decay factor.
"""
def __init__(self, gamma: float, decay_period: Union[str, Time] = '1ep'):
self.gamma = gamma
self.decay_period = decay_period
def __call__(self, state: State, ssr: float = 1.0):
decay_period = _convert_time(self.decay_period, state, ssr)
current_time_in_decay_units = state.timestamp.get(decay_period.unit)
return self.gamma**float(current_time_in_decay_units / decay_period)
def _cosine_anneal(x: float, min_y: float = 0.0, max_y: float = 1.0) -> float:
"""Implements a cosine decay curve.
Curve is cos(x) on domain [0, pi], stretched to the domain [0, 1] and range [min_y, max_y]. Additionally, param x is
clipped to the interval [0, 1]
"""
x = min(max(x, 0.0), 1.0)
return min_y + (max_y - min_y) * (1 + math.cos(x * math.pi)) / 2
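# A quick self-contained check (illustration only) of the shape of ``_cosine_anneal``: the
# multiplier starts at ``max_y``, ends at ``min_y``, sits halfway in between at the midpoint
# of the schedule, and clips inputs outside [0, 1].
def _cosine_anneal_sketch() -> None:
    assert _cosine_anneal(0.0) == 1.0
    assert abs(_cosine_anneal(0.5) - 0.5) < 1e-12
    assert _cosine_anneal(1.0) == 0.0
    # Clipping: x > 1 behaves like x == 1.
    assert _cosine_anneal(2.0) == _cosine_anneal(1.0)
    # With a nonzero floor, the curve anneals to ``min_y`` instead of 0.
    assert _cosine_anneal(1.0, min_y=0.1) == 0.1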
class CosineAnnealingScheduler(ComposerScheduler):
r"""Decays the learning rate according to the decreasing part of a cosine curve.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.CosineAnnealingLR` from PyTorch.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \alpha_f + (1 - \alpha_f) \times \frac{1}{2} (1 + \cos(\pi \times \tau))
Given :math:`\tau`, the fraction of time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau = t / t_{max}
Where :math:`t_{max}`
represents the duration of this scheduler, and :math:`\alpha_f` represents the learning rate multiplier to decay to.
Args:
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
alpha_f (float): Learning rate multiplier to decay to. Default = ``0.0``.
"""
def __init__(self, t_max: Union[str, Time] = '1dur', alpha_f: float = 0.0):
self.t_max = t_max
self.alpha_f = alpha_f
def __call__(self, state: State, ssr: float = 1.0):
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_max.unit)
frac_of_total = (current_time / t_max).value
return _cosine_anneal(x=frac_of_total, min_y=self.alpha_f)
class CosineAnnealingWarmRestartsScheduler(ComposerScheduler):
r"""Cyclically decays the learning rate according to the decreasing part of a cosine curve.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.CosineAnnealingWarmRestarts` from PyTorch.
This scheduler resembles a regular cosine annealing curve, as seen in :class:`~.CosineAnnealingScheduler`, except
that after the curve first completes ``t_0`` time, the curve resets to the start. The durations of subsequent cycles
are each multiplied by ``t_mult``.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \alpha_f + (1 - \alpha_f) \times \frac{1}{2}(1 + \cos(\pi \times \tau_i))
Given :math:`\tau_i`, the fraction of time elapsed through the :math:`i^\text{th}` cycle, as:
.. math::
\tau_i = (t - \sum_{j=0}^{i-1} t_0 t_{mult}^j) / (t_0 t_{mult}^i)
Where :math:`t_0`
represents the period of the first cycle, :math:`t_{mult}` represents the multiplier for the duration of successive
cycles, and :math:`\alpha_f` represents the learning rate multiplier to decay to.
Args:
t_0 (str | Time): The period of the first cycle.
t_mult (float): The multiplier for the duration of successive cycles. Default = ``1.0``.
alpha_f (float): Learning rate multiplier to decay to. Default = ``0.0``.
"""
def __init__(self, t_0: Union[str, Time], t_mult: float = 1.0, alpha_f: float = 0.0):
self.t_0 = t_0
self.t_mult = t_mult
self.alpha_f = alpha_f
def __call__(self, state: State, ssr: float = 1.0):
t_0 = _convert_time(self.t_0, state, ssr=ssr)
current_interval_len = t_0
current_interval_end = t_0
while current_interval_end <= state.timestamp.get(current_interval_end.unit):
if current_interval_len.value == 0:
raise ValueError(
'Interval between restarts for cosine annealing/warm restarts scheduler has decayed to 0.')
current_interval_len = Time(value=int(self.t_mult * current_interval_len.value),
unit=current_interval_len.unit)
current_interval_end += current_interval_len
current_interval_start = current_interval_end - current_interval_len
frac_of_current_interval = ((state.timestamp.get(t_0.unit) - current_interval_start) /
current_interval_len).value
return _cosine_anneal(x=frac_of_current_interval, min_y=self.alpha_f)
class PolynomialScheduler(ComposerScheduler):
r"""Sets the learning rate to be proportional to a power of the fraction of training time left.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \alpha_f + (1 - \alpha_f) \times (1 - \tau) ^ {\kappa}
Given :math:`\tau`, the fraction of time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau = t / t_{max}
Where :math:`\kappa`
represents the exponent to be used for the proportionality relationship, :math:`t_{max}` represents the duration of
this scheduler, and :math:`\alpha_f` represents the learning rate multiplier to decay to.
Args:
power (float): The exponent to be used for the proportionality relationship.
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
alpha_f (float): Learning rate multiplier to decay to. Default = ``0.0``.
"""
def __init__(self, power: float, t_max: Union[str, Time] = '1dur', alpha_f: float = 0.0):
self.t_max = t_max
self.power = power
self.alpha_f = alpha_f
def __call__(self, state: State, ssr: float = 1.0):
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_max.unit)
frac_of_total = (current_time / t_max).value
coeff = (1 - frac_of_total)**self.power
current_factor = self.alpha_f + coeff * (1.0 - self.alpha_f)
return current_factor
class MultiStepWithWarmupScheduler(ComposerScheduler):
r"""Decays the learning rate discretely at fixed milestones, with an initial warmup.
.. seealso::
This scheduler is based on :class:`~.MultiStepScheduler`, with an added warmup.
Starts with a linear warmup over ``t_warmup`` time, then decays the learning rate by a factor of ``gamma``
whenever a time milestone in ``milestones`` is reached.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases}
t / t_{warmup}, & \text{if } t < t_{warmup} \\
\gamma ^ x & \text{otherwise}
\end{cases}
Where :math:`t_{warmup}` represents the warmup time, :math:`x` represents the amount of milestones that have been
reached, and :math:`\gamma` represents the multiplicative decay factor.
.. warning::
All milestones should be greater than ``t_warmup``; otherwise, they will have no effect on the computed learning
rate multiplier until the warmup has completed.
.. warning::
By default, initial warmup time is **not** scaled according to any provided scale schedule ratio.
To change this behavior, set ``scale_warmup=True``.
Args:
t_warmup (str | Time): Warmup time.
milestones (List[str | Time]): Times at which the learning rate should change.
gamma (float): Multiplicative decay factor. Default = ``0.1``.
scale_warmup (bool): If ``True``, the scale schedule ratio (SSR) also scales the warmup period. Default = ``False``.
"""
def __init__(self,
t_warmup: Union[str, Time],
milestones: List[Union[str, Time]],
gamma: float = 0.1,
scale_warmup: bool = False):
self.t_warmup = t_warmup
self.milestones = milestones
self.gamma = gamma
self.scale_warmup = scale_warmup
self.warmup_scheduler = LinearScheduler(alpha_i=0.0, alpha_f=1.0, t_max=t_warmup)
self.step_scheduler = MultiStepScheduler(milestones=milestones, gamma=gamma)
def __call__(self, state: State, ssr: float = 1.0):
t_warmup = _convert_time(self.t_warmup, state)
if t_warmup.value == 0:
warnings.warn(
textwrap.dedent("""\
The warmup duration is 0. If you specified warmup as a fraction of total
training duration, take note that the warmup duration is calculated in the
same unit as the trainer's max_duration parameter."""))
if state.timestamp < t_warmup:
if self.scale_warmup:
return self.warmup_scheduler(state, ssr)
return self.warmup_scheduler(state)
return self.step_scheduler(state, ssr)
class ConstantWithWarmupScheduler(ComposerScheduler):
r"""Maintains a fixed learning rate, with an initial warmup.
This scheduler is based on :class:`~torch.optim.lr_scheduler.ConstantLR` from PyTorch, with an added warmup.
Starts with a linear warmup over ``t_warmup`` time, then maintains a fixed learning rate factor for the remainder of
this scheduler's duration. Both the factor and the duration can be configured.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases}
t / t_{warmup}, & \text{if } t < t_{warmup} \\
\alpha, & \text{if } t < t_{max} \\
1.0 & \text{otherwise} \end{cases}
Where :math:`\alpha` represents the learning rate multiplier to maintain while this scheduler is active, and
:math:`t_{max}` represents the duration of this scheduler.
.. warning::
By default, initial warmup time is **not** scaled according to any provided scale schedule ratio.
To change this behavior, set ``scale_warmup=True``.
Args:
t_warmup (str | Time): Warmup time.
alpha (float): Learning rate multiplier to maintain while this scheduler is active. Default = ``1.0``.
t_max (str | Time): Duration of this scheduler. Default = ``"1dur"``.
scale_warmup (bool): If ``True``, the scale schedule ratio (SSR) also scales the warmup period. Default = ``False``.
"""
def __init__(self,
t_warmup: Union[str, Time],
alpha: float = 1.0,
t_max: Union[str, Time] = '1dur',
scale_warmup: bool = False) -> None:
self.t_warmup = t_warmup
self.alpha = alpha
self.t_max = t_max
self.scale_warmup = scale_warmup
self.scheduler = LinearWithWarmupScheduler(t_warmup=t_warmup,
alpha_i=alpha,
alpha_f=alpha,
t_max=t_max,
scale_warmup=scale_warmup)
def __call__(self, state: State, ssr: float = 1.0) -> float:
return self.scheduler(state, ssr)
class LinearWithWarmupScheduler(ComposerScheduler):
r"""Adjusts the learning rate linearly, with an initial warmup.
.. seealso::
This scheduler is based on :class:`~.LinearScheduler`, with an added warmup.
Linearly adjusts the learning rate multiplier from ``alpha_i`` to ``alpha_f`` over ``t_{max}`` time.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases}
t / t_{warmup}, & \text{if } t < t_{warmup} \\
\alpha_i + (\alpha_f - \alpha_i) \times \tau_w & \text{otherwise}
\end{cases}
Given :math:`\tau_w`, the fraction of post-warmup time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau_w = (t - t_{warmup}) / (t_{max} - t_{warmup})
Where :math:`t_{warmup}` represents the warmup time, :math:`\alpha_i` represents the initial learning rate multiplier,
and :math:`\alpha_f` represents the learning rate multiplier to decay to, and :math:`t_{max}` represents the duration
of this scheduler.
.. warning::
By default, the initial warmup time is **not** scaled according to any provided scale schedule ratio, but the duration
of the scheduler is still scaled accordingly. As a result, after warmup, the scheduler's "slope" will be
slightly distorted from what would otherwise be expected. To scale the entire schedule, set ``scale_warmup=True``.
Args:
t_warmup (str | Time): Warmup time.
alpha_i (float): Initial learning rate multiplier. Default = ``1.0``.
alpha_f (float): Final learning rate multiplier. Default = ``0.0``.
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
scale_warmup (bool): If ``True``, the scale schedule ratio (SSR) also scales the warmup period. Default = ``False``.
"""
def __init__(self,
t_warmup: Union[str, Time],
alpha_i: float = 1.0,
alpha_f: float = 0.0,
t_max: Union[str, Time] = '1dur',
scale_warmup: bool = False):
self.t_warmup = t_warmup
self.alpha_i = alpha_i
self.alpha_f = alpha_f
self.t_max = t_max
self.scale_warmup = scale_warmup
self.warmup_scheduler = LinearScheduler(alpha_i=0.0, alpha_f=alpha_i, t_max=t_warmup)
def __call__(self, state: State, ssr: float = 1.0):
t_warmup = _convert_time(self.t_warmup, state)
if t_warmup.value == 0:
warnings.warn(
textwrap.dedent("""\
The warmup duration is 0. If you specified warmup as a fraction of total
training duration, take note that the warmup duration is calculated in the
same unit as the trainer's max_duration parameter."""))
if state.timestamp < t_warmup:
if self.scale_warmup:
return self.warmup_scheduler(state, ssr)
return self.warmup_scheduler(state)
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_warmup.unit)
frac_of_total = ((current_time - t_warmup) / (t_max - t_warmup)).value if (t_max > t_warmup) else 0.0
frac_of_total = min(1.0, frac_of_total)
current_factor = self.alpha_i + frac_of_total * (self.alpha_f - self.alpha_i)
return current_factor
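# A compact sketch (illustrative only, not the library implementation) of the
# warmup-then-linear-decay multiplier computed above, using plain floats in place of
# Composer ``Time`` objects; all argument names are hypothetical.
def _linear_with_warmup_sketch(t: float, t_warmup: float, t_max: float,
                               alpha_i: float = 1.0, alpha_f: float = 0.0) -> float:
    if t < t_warmup:
        # Linear ramp from 0 up to alpha_i during warmup.
        return alpha_i * (t / t_warmup) if t_warmup > 0 else alpha_i
    # Linear interpolation from alpha_i to alpha_f over the post-warmup window.
    frac = (t - t_warmup) / (t_max - t_warmup) if t_max > t_warmup else 0.0
    frac = min(1.0, frac)
    return alpha_i + frac * (alpha_f - alpha_i)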
class CosineAnnealingWithWarmupScheduler(ComposerScheduler):
r"""Decays the learning rate according to the decreasing part of a cosine curve, with an initial warmup.
.. seealso::
This scheduler is based on :class:`~.CosineAnnealingScheduler`, with an added warmup.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases}
t / t_{warmup}, & \text{if } t < t_{warmup} \\
\alpha_f + (1 - \alpha_f) \times \frac{1}{2} (1 + \cos(\pi \times \tau_w)) & \text{otherwise}
\end{cases}
Given :math:`\tau_w`, the fraction of post-warmup time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau_w = (t - t_{warmup}) / (t_{max} - t_{warmup})
Where :math:`t_{warmup}` represents the warmup time, :math:`t_{max}` represents the duration of this scheduler, and
:math:`\alpha_f` represents the learning rate multiplier to decay to.
.. warning::
By default, initial warmup time is **not** scaled according to any provided scale schedule ratio.
To change this behavior, set ``scale_warmup=True``.
Args:
t_warmup (str | Time): Warmup time.
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
alpha_f (float): Learning rate multiplier to decay to. Default = ``0.0``.
scale_warmup (bool): If ``True``, the scale schedule ratio (SSR) also scales the warmup period. Default = ``False``.
"""
def __init__(self,
t_warmup: Union[str, Time],
t_max: Union[str, Time] = '1dur',
alpha_f: float = 0.0,
scale_warmup: bool = False):
self.t_warmup = t_warmup
self.t_max = t_max
self.alpha_f = alpha_f
self.scale_warmup = scale_warmup
self.warmup_scheduler = LinearScheduler(alpha_i=0.0, alpha_f=1.0, t_max=t_warmup)
def __call__(self, state: State, ssr: float = 1.0):
t_warmup = _convert_time(self.t_warmup, state)
if t_warmup.value == 0:
warnings.warn(
textwrap.dedent("""\
The warmup duration is 0. If you specified warmup as a fraction of total
training duration, take note that the warmup duration is calculated in the
same unit as the trainer's max_duration parameter."""))
if state.timestamp < t_warmup:
if self.scale_warmup:
return self.warmup_scheduler(state, ssr)
return self.warmup_scheduler(state)
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_warmup.unit)
frac_of_total = ((current_time - t_warmup) / (t_max - t_warmup)).value if (t_max > t_warmup) else 0.0
frac_of_total = min(1.0, frac_of_total)
return _cosine_anneal(x=frac_of_total, min_y=self.alpha_f)
class PolynomialWithWarmupScheduler(ComposerScheduler):
r"""Decays the learning rate according to a power of the fraction of training time left, with an initial warmup.
.. seealso::
This scheduler is based on :class:`~.PolynomialScheduler`, with an added warmup.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases}
t / t_{warmup}, & \text{if } t < t_{warmup} \\
\alpha_f + (1 - \alpha_f) \times (1 - \tau_w) ^ {\kappa} & \text{otherwise}
\end{cases}
Given :math:`\tau_w`, the fraction of post-warmup time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau_w = (t - t_{warmup}) / (t_{max} - t_{warmup})
Where :math:`\kappa` represents the exponent to be used for the proportionality relationship,
:math:`t_{warmup}` represents the warmup time, :math:`t_{max}` represents the duration of this scheduler, and
:math:`\alpha_f` represents the learning rate multiplier to decay to.
.. warning::
By default, initial warmup time is **not** scaled according to any provided scale schedule ratio.
To change this behavior, set ``scale_warmup=True``.
Args:
t_warmup (str | Time): Warmup time.
power (float): The exponent to be used for the proportionality relationship. Default = ``2.0``.
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
alpha_f (float): Learning rate multiplier to decay to. Default = ``0.0``.
scale_warmup (bool): If ``True``, the scale schedule ratio (SSR) also scales the warmup period. Default = ``False``.
"""
def __init__(self,
t_warmup: Union[str, Time],
power: float = 2.0,
t_max: Union[str, Time] = '1dur',
alpha_f: float = 0.0,
scale_warmup: bool = False):
self.t_warmup = t_warmup
self.power = power
self.t_max = t_max
self.alpha_f = alpha_f
self.scale_warmup = scale_warmup
self.warmup_scheduler = LinearScheduler(alpha_i=0.0, alpha_f=1.0, t_max=t_warmup)
def __call__(self, state: State, ssr: float = 1.0):
t_warmup = _convert_time(self.t_warmup, state)
if t_warmup.value == 0:
warnings.warn(
textwrap.dedent("""\
The warmup duration is 0. If you specified warmup as a fraction of total
training duration, take note that the warmup duration is calculated in the
same unit as the trainer's max_duration parameter."""))
if state.timestamp < t_warmup:
if self.scale_warmup:
return self.warmup_scheduler(state, ssr)
return self.warmup_scheduler(state)
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_warmup.unit)
frac_of_total = ((current_time - t_warmup) / (t_max - t_warmup)).value if (t_max > t_warmup) else 0.0
frac_of_total = min(1.0, frac_of_total)
coeff = (1 - frac_of_total)**self.power
current_factor = self.alpha_f + coeff * (1.0 - self.alpha_f)
return current_factor
| composer-dev | composer/optim/scheduler.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Retry helper."""
from __future__ import annotations
import collections.abc
import functools
import random
import time
from typing import Any, Callable, Sequence, Type, TypeVar, Union, cast, overload
TCallable = TypeVar('TCallable', bound=Callable)
__all__ = ['retry']
@overload
def retry(
exc_class: Union[Type[Exception], Sequence[Type[Exception]]] = ...,
num_attempts: int = ...,
initial_backoff: float = ...,
max_jitter: float = ...,
) -> Callable[[TCallable], TCallable]:
...
@overload
def retry(exc_class: TCallable) -> TCallable:
# Use the decorator without parenthesis
...
# error: Type "(TCallable@retry) -> TCallable@retry" cannot be assigned to type "(func: Never) -> Never"
def retry( # type: ignore
exc_class: Union[TCallable, Type[Exception], Sequence[Type[Exception]]] = Exception,
num_attempts: int = 3,
initial_backoff: float = 1.0,
max_jitter: float = 0.5,
):
"""Decorator to retry a function with backoff and jitter.
Retries are spaced out by ``initial_backoff * 2**attempt + random.random() * max_jitter`` seconds, where ``attempt`` is the zero-indexed number of failed attempts so far.
Example:
.. testcode::
from composer.utils import retry
num_tries = 0
@retry(RuntimeError, num_attempts=3, initial_backoff=0.1)
def flaky_function():
global num_tries
if num_tries < 2:
num_tries += 1
raise RuntimeError("Called too soon!")
return "Third time's a charm."
print(flaky_function())
.. testoutput::
Third time's a charm.
Args:
exc_class (Type[Exception] | Sequence[Type[Exception]], optional): The exception class or classes to retry.
Defaults to Exception.
num_attempts (int, optional): The total number of attempts to make. Defaults to 3.
initial_backoff (float, optional): The initial backoff, in seconds. Defaults to 1.0.
max_jitter (float, optional): The maximum amount of random jitter to add. Defaults to 0.5.
Increasing the ``max_jitter`` can help prevent overloading a resource when multiple processes in parallel
are calling the same underlying function.
"""
if num_attempts < 1:
raise ValueError('num_attempts must be at least 1')
def wrapped_func(func: TCallable) -> TCallable:
@functools.wraps(func)
def new_func(*args: Any, **kwargs: Any):
i = 0
while True:
try:
return func(*args, **kwargs)
except exc_class as e:
if i + 1 == num_attempts:
raise e
else:
time.sleep(initial_backoff * 2**i + random.random() * max_jitter)
i += 1
return cast(TCallable, new_func)
if not isinstance(exc_class, collections.abc.Sequence) and not (isinstance(exc_class, type) and
issubclass(exc_class, Exception)):
# Using the decorator without (), like @retry_with_backoff
func = cast(TCallable, exc_class)
exc_class = Exception
return wrapped_func(func)
return wrapped_func
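# A small sketch (for illustration; these names are not part of the module) of the sleep
# schedule produced by ``retry``: after the ``i``-th failed attempt the decorator waits
# ``initial_backoff * 2**i`` seconds plus up to ``max_jitter`` seconds of random jitter.
def _expected_backoffs_sketch(num_attempts: int = 3, initial_backoff: float = 1.0) -> list:
    # Jitter excluded; only the deterministic exponential component is listed.
    return [initial_backoff * 2**i for i in range(num_attempts - 1)]
# e.g. _expected_backoffs_sketch(4, 0.1) == [0.1, 0.2, 0.4]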
| composer-dev | composer/utils/retrying.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Miscellaneous Helpers."""
import socket
from contextlib import contextmanager
from typing import Type
import torch
from packaging import version
from torch.nn.parallel import DistributedDataParallel
__all__ = [
'is_model_deepspeed', 'is_model_fsdp', 'is_notebook', 'warning_on_one_line', 'get_free_tcp_port', 'model_eval_mode'
]
def is_model_deepspeed(model: torch.nn.Module) -> bool:
"""Whether ``model`` is an instance of a :class:`~deepspeed.DeepSpeedEngine`."""
try:
import deepspeed
except ImportError:
return False
else:
return isinstance(model, deepspeed.DeepSpeedEngine)
def is_model_ddp(model: torch.nn.Module) -> bool:
"""Whether ``model`` is an instance of a :class:`.DistributedDataParallel`."""
return isinstance(model, DistributedDataParallel)
def is_model_fsdp(model: torch.nn.Module) -> bool:
"""Whether ``model`` is an instance of a :class:`.FullyShardedDataParallel`."""
try:
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
is_fsdp = False
# Check if model is wrapped with FSDP
for _, obj in model.named_children():
if isinstance(obj, FSDP):
is_fsdp = True
return is_fsdp
except ImportError:
return False
def is_notebook():
"""Whether Composer is running in a IPython/Jupyter Notebook."""
try:
__IPYTHON__ #type: ignore
return True
except NameError:
return False
def warning_on_one_line(message: str, category: Type[Warning], filename: str, lineno: int, file=None, line=None):
"""Force Python warnings to consolidate into one line."""
# From https://stackoverflow.com/questions/26430861/make-pythons-warnings-warn-not-mention-itself
return f'{category.__name__}: {message} (source: {filename}:{lineno})\n'
def get_free_tcp_port() -> int:
"""Get free socket port to use as MASTER_PORT."""
# from https://www.programcreek.com/python/?CodeExample=get+free+port
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.bind(('', 0))
_, port = tcp.getsockname()
tcp.close()
return port
@contextmanager
def model_eval_mode(model: torch.nn.Module):
"""Set model.eval() for context duration, restoring model status at end."""
is_training = model.training
try:
model.eval()
yield
finally:
model.train(mode=is_training)
def using_torch_2_0():
is_torch_2_0 = False
if version.parse(torch.__version__) >= version.parse('2.0.0'):
is_torch_2_0 = True
return is_torch_2_0
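# A short usage sketch (not part of this module) showing how ``model_eval_mode`` restores
# the training flag when the context exits, using a throwaway linear layer.
def _model_eval_mode_sketch() -> None:
    model = torch.nn.Linear(4, 2)
    model.train()
    with model_eval_mode(model):
        assert not model.training  # eval-mode behavior (e.g. dropout disabled) applies here
    assert model.training  # the original training state is restored on exit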
| composer-dev | composer/utils/misc.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
# To keep the typing organized for this file, see iter_helpers.pyi
# All typing annotations are in there
# All methods signatures must be defined in there.
"""Utilities for iterating over collections."""
import collections.abc
import io
def map_collection(collection, map_fn):
"""Applies ``map_fn`` on each element in ``collection``.
* If ``collection`` is a tuple or list of elements, ``map_fn`` is applied on each element,
and a tuple or list, respectively, containing mapped values is returned.
* If ``collection`` is a dictionary, ``map_fn`` is applied on each value, and a dictionary
containing the mapped values is returned.
* If ``collection`` is ``None``, ``None`` is returned.
* If ``collection`` is a single element, the result of applying ``map_fn`` on it is returned.
Args:
collection: The element, or a tuple of elements.
map_fn: A function to invoke on each element.
Returns:
Collection: The result of applying ``map_fn`` on each element of ``collection``.
The type of ``collection`` is preserved.
"""
if collection is None:
return None
if isinstance(collection, (tuple, list)):
return type(collection)(map_fn(x) for x in collection)
if isinstance(collection, dict):
return {k: map_fn(v) for k, v in collection.items()}
return map_fn(collection)
def ensure_tuple(x):
"""Converts ``x`` into a tuple.
* If ``x`` is ``None``, then ``tuple()`` is returned.
* If ``x`` is a tuple, then ``x`` is returned as-is.
* If ``x`` is a list, then ``tuple(x)`` is returned.
* If ``x`` is a dict, then ``tuple(v for v in x.values())`` is returned.
Otherwise, a single element tuple of ``(x,)`` is returned.
Args:
x (Any): The input to convert into a tuple.
Returns:
tuple: A tuple of ``x``.
"""
if x is None:
return ()
if isinstance(x, (str, bytes, bytearray)):
return (x,)
if isinstance(x, collections.abc.Sequence):
return tuple(x)
if isinstance(x, dict):
return tuple(x.values())
return (x,)
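# A few concrete cases (illustration only) of the container handling documented above for
# ``map_collection`` and ``ensure_tuple``.
def _collection_helpers_sketch() -> None:
    assert ensure_tuple(None) == ()
    assert ensure_tuple('abc') == ('abc',)  # strings are treated as a single element
    assert ensure_tuple([1, 2]) == (1, 2)
    assert ensure_tuple({'a': 1, 'b': 2}) == (1, 2)  # dicts contribute their values
    assert map_collection([1, 2, 3], lambda x: x * 2) == [2, 4, 6]
    assert map_collection({'a': 1}, str) == {'a': '1'}
    assert map_collection(None, str) is None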
class IteratorFileStream(io.RawIOBase):
"""Class used to convert iterator of bytes into a file-like binary stream object.
Original implementation found `here <https://stackoverflow.com/questions/6657820/how-to-convert-an-iterable-to-a-stream/20260030#20260030>`_.
.. note::
A usage example ``f = io.BufferedReader(IteratorFileStream(iterator), buffer_size=buffer_size)``
Args:
iterator: An iterator over bytes objects
"""
def __init__(self, iterator):
self.leftover = None
self.iterator = iterator
def readinto(self, b):
try:
l = len(b) # max bytes to read
if self.leftover:
chunk = self.leftover
else:
chunk = next(self.iterator)
output, self.leftover = chunk[:l], chunk[l:]
b[:len(output)] = output
return len(output)
except StopIteration:
return 0 #EOF
def readable(self):
return True
def iterate_with_callback(iterator, total_len, callback=None):
"""Invoke ``callback`` after each chunk is yielded from ``iterator``.
Args:
iterator (Iterator): The iterator, which should yield chunks of data.
total_len (int): The total length of the iterator.
callback (Callable[[int, int], None], optional): The callback to invoke after
each chunk of data is yielded back to the caller. Defaults to None, for no callback.
It is called with the cumulative size of all chunks yielded thus far and the ``total_len``.
"""
current_len = 0
if callback is not None:
# Call the callback for any initialization
callback(current_len, total_len)
for chunk in iterator:
current_len += len(chunk)
yield chunk
if callback is not None:
callback(current_len, total_len)
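# A tiny usage sketch (not part of this module) of ``iterate_with_callback`` with a progress
# callback; the chunk contents and print format are arbitrary examples.
def _progress_iteration_sketch() -> None:
    chunks = [b'ab', b'cd', b'e']
    total = sum(len(c) for c in chunks)

    def report(done: int, total_len: int) -> None:
        print(f'{done}/{total_len} bytes')

    for _ in iterate_with_callback(iter(chunks), total, callback=report):
        pass  # prints 0/5, 2/5, 4/5, 5/5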
| composer-dev | composer/utils/iter_helpers.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Dynamically import a Python object (e.g. module, class, function, ...)."""
import importlib
from typing import Any, Optional
__all__ = ['MissingConditionalImportError', 'import_object']
class MissingConditionalImportError(ImportError):
"""Handles errors for external packages that might not be installed.
Args:
extra_deps_group (str): the pip package group, found in setup.py. For example, nlp for `mosaicml[nlp]`.
conda_package (str, optional): The package(s) to install if using conda.
conda_channel (str, optional): The conda channel to install packages from. Set to ``None`` if the
package is not published on conda and must be installed via pip.
"""
def __init__(self, extra_deps_group: str, conda_package: str, conda_channel: Optional[str] = 'conda-forge'):
if conda_channel:
conda_command = f'conda install -c {conda_channel} {conda_package}'
else:
# Install via pip, as these packages are not installed via conda.
conda_command = f'pip install {conda_package}'
super().__init__(
(f'Composer was installed without {extra_deps_group} support. To use {extra_deps_group} related '
f"packages, with Composer, run `pip install 'mosaicml[{extra_deps_group}]'` if using pip or "
f'`{conda_command}` if using Anaconda.'
''))
def import_object(name: str) -> Any:
"""Dynamically import a Python object (e.g. class, function, ...).
.. note::
To dynamically import a module, use :func:`importlib.import_module`.
Args:
name (str): The path to the Python object to import.
Separate the module name and class name with a ``':'`` (e.g. ``'path.to.module:function_name'``).
Example:
>>> from composer.utils import import_object
>>> import_object('functools:partial')
<class 'functools.partial'>
.. note::
The module name must be discoverable on the Python path, as determined by :attr:`sys.path`.
Returns:
Any: The imported object.
"""
module_name, object_name = name.split(':')
module = importlib.import_module(module_name)
return getattr(module, object_name)
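# A hedged sketch of the typical pattern for raising ``MissingConditionalImportError`` when
# an optional dependency is absent. The package name and extras group below are placeholders
# for illustration, not a statement about Composer's actual extras.
def _require_optional_package_sketch():
    try:
        import some_optional_package  # hypothetical optional dependency
    except ImportError as e:
        raise MissingConditionalImportError(extra_deps_group='example',
                                            conda_package='some_optional_package') from e
    return some_optional_package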
| composer-dev | composer/utils/import_helpers.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Utilities for working with training checkpoints."""
from __future__ import annotations
import contextlib
import fnmatch
import logging
import os
import shutil
import tarfile
import tempfile
import textwrap
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from composer.utils import dist, reproducibility
from composer.utils.file_helpers import (FORMAT_NAME_WITH_DIST_AND_TIME_TABLE, format_name_with_dist,
format_name_with_dist_and_time, get_file, is_tar)
from composer.utils.misc import is_model_deepspeed
from composer.utils.object_store import ObjectStore
if TYPE_CHECKING:
from composer.core.passes import AlgorithmPass
from composer.core.state import State
from composer.loggers import Logger, LoggerDestination
log = logging.getLogger(__name__)
__all__ = ['load_checkpoint', 'save_checkpoint', 'download_checkpoint']
_COMPOSER_STATES_FILENAME = 'composer_states.pt'
_DEEPSPEED_TAG = 'deepspeed' # always tag with the same, deterministic name. We'll rename the tarball to the appropriate name.
def _format_path_with_rank_zero(path: str) -> str:
"""Formats ``path`` with the rank zero values."""
return path.format(
rank=0,
local_rank=0,
node_rank=0,
)
def _format_path_with_current_rank(path: str) -> str:
"""Formats ``path`` formatted with the current rank values."""
return path.format(
rank=dist.get_global_rank(),
local_rank=dist.get_local_rank(),
node_rank=dist.get_node_rank(),
)
def _get_write_mode(name: str) -> str:
"""Get the write mode to use with :func:`tarfile.open`."""
if name.endswith('.tar'):
return 'w'
if name.endswith('.tar.gz') or name.endswith('.tgz'):
return 'w:gz'
if name.endswith('.tar.bz2'):
return 'w:bz2'
if name.endswith('.tar.lzma'):
return 'w:xz'
raise ValueError(f'{name} does not end with a valid tarfile extension.')
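# A couple of concrete cases (illustration only) of the extension-to-mode mapping implemented
# by ``_get_write_mode`` above.
def _write_mode_examples_sketch() -> None:
    assert _get_write_mode('ckpt.tar') == 'w'
    assert _get_write_mode('ckpt.tar.gz') == 'w:gz'
    assert _get_write_mode('ckpt.tar.bz2') == 'w:bz2'
    # Any name without a recognized tar extension raises a ValueError.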
class PartialFilePath:
def __init__(self, filename: str, folder: Optional[str] = None):
self.folder = folder
self.filename = filename
def format(self, state: State, is_deepspeed: bool = False) -> str:
# if filename already has a suffix (e.g. file.pt), this would append to be file.pt.tar
extra_suffix = '.tar' if is_deepspeed and not is_tar(self.filename) else ''
if self.folder:
return os.path.join(
format_name_with_dist(self.folder, state.run_name),
format_name_with_dist_and_time(self.filename, state.run_name, state.timestamp),
) + extra_suffix
else:
return format_name_with_dist_and_time(
self.filename,
state.run_name,
state.timestamp,
) + extra_suffix
def load_checkpoint(
path: str,
state: State,
logger: Logger,
object_store: Optional[Union[ObjectStore, LoggerDestination]] = None,
load_weights_only: bool = False,
strict_model_weights: bool = False,
progress_bar: bool = True,
ignore_keys: Optional[Union[List[str], Callable[[Dict], None]]] = None,
exclude_algorithms: Optional[List[str]] = None,
algorithm_passes: Optional[List[AlgorithmPass]] = None,
):
"""Load a checkpoint from a local file, URI, or cloud object store into ``state``.
Args:
path (str): The path format string to an existing checkpoint file.
It can be a path to a file on the local disk, a URL, or if ``object_store`` is set, the object name
for a checkpoint in a cloud bucket.
When using `Deepspeed ZeRO <https://www.deepspeed.ai/tutorials/zero/>`_, checkpoints are sharded by rank.
Instead of hard-coding the rank in the ``path``, use the following format variables:
+------------------------+-------------------------------------------------------+
| Variable | Description |
+========================+=======================================================+
| ``{rank}`` | The global rank, as returned by |
| | :func:`~.dist.get_global_rank`. |
+------------------------+-------------------------------------------------------+
| ``{local_rank}`` | The local rank of the process, as returned by |
| | :func:`~.dist.get_local_rank`. |
+------------------------+-------------------------------------------------------+
| ``{node_rank}`` | The node rank, as returned by |
| | :func:`~.dist.get_node_rank`. |
+------------------------+-------------------------------------------------------+
For example, suppose that checkpoints are stored in the following structure:
.. code-block::
my_model/ep1-rank0.tar
my_model/ep1-rank1.tar
my_model/ep1-rank2.tar
...
Then, ``path`` should be set to ``my_model/ep1-rank{rank}.tar``, and all ranks will load the
correct state.
state (State): The :class:`~composer.core.State` to load the checkpoint into.
logger (Logger): The :class:`~composer.logger.Logger` to log any information.
object_store (Union[ObjectStore, LoggerDestination], optional): If the ``path`` is in an object store
(i.e. AWS S3 or Google Cloud Storage), an instance of
:class:`~.ObjectStore` or :class:`~.LoggerDestination` which will be used
to retrieve the checkpoint. Otherwise, if the checkpoint is a local filepath, set to ``None``.
(default: ``None``)
load_weights_only (bool, optional): Whether or not to only restore the model weights from the checkpoint without
restoring the associated state. (default: ``False``)
strict_model_weights (bool, optional): Whether or not to force that the checkpointed weights must exactly
match the model weights. (default: ``False``)
progress_bar (bool, optional): Whether or not to show a progress bar when downloading checkpoints.
Ignored if the checkpoint is a local file path. (default: ``True``)
ignore_keys (List[str] | (Dict) -> None, optional): A list of paths for the ``state_dict`` of the checkpoint,
which, when provided, will be ignored from the state_dict before a checkpoint is loaded. Each path is a
string of keys joined together with `/` as a separator (since PyTorch uses `.` in parameter names) that
indexes into ``state_dict``. If a prefix is provided, all children are also ignored (see Example 2).
See :mod:`composer.core.state` for the structure of state_dict.
Example 1: ``ignore_keys = ["state/model/layer1.weights", "state/model/layer1.bias"]`` would ignore
layer 1 weights and bias.
Example 2: ``ignore_keys = ["state/model/*"]`` would ignore the entire model, which would have the same
effect as the previous example if there was only 1 layer.
Example 3: ``ignore_keys = ["state/model/layer*.weights"]`` would ignore all weights in the model.
Example 4: ``ignore_keys = ["state/rank_zero_seed", "rng"]`` would reset all randomness when
loading the checkpoint.
If a callable, it should take one argument which is the state_dict. The callable is free to arbitrarily modify
the state_dict before it is loaded.
(default: ``None``)
exclude_algorithms (List[str], optional): A list of algorithm names to exclude from loading.
By default, algorithms with `required_on_load=True` which were enabled when training the loaded
checkpoint are automatically applied unless they conflict with a user specified algorithm. These
algorithms often change the model, and not applying them could result in certain layers not having
weights loaded.
Example 1: ``exclude_algorithms = ["BlurPool"]`` would exclude BlurPool from loading.
Example 2: ``exclude_algorithms = ["FusedLayerNorm", "Alibi"]`` would exclude FusedLayerNorm and Alibi from loading.
(default: ``None``)
algorithm_passes (List[AlgorithmPass], optional): A list of algorithm passes to apply to autoloaded algorithms
to sort them into the correct order. (default: ``None``)
Returns:
Optional[List[Dict[str, Any]]]: The RNG state dicts, indexed by global rank, if
:attr:`load_weights_only` is not None. Otherwise, None.
"""
# download the checkpoint to the node-local folder
log.debug('Loading checkpoint at %s', path)
# Each node gets one unique folder to store checkpoints that is shared amongst all local ranks in that node.
# If fsdp sharded state_dicts is enabled then EVERY rank gets a unique checkpoint folder.
tempdir_ctx = (tempfile.TemporaryDirectory() if (state.fsdp_sharded_state_dict_enabled or
dist.get_local_rank() == 0) else contextlib.nullcontext(None))
with tempdir_ctx as tempdir:
try:
# Get the path to the proper checkpoint folder corresponding to the current rank's node.
# If fsdp_sharded_state_dict_enabled then just use that rank's unique tempdir.
node_checkpoint_folder = (tempdir if state.fsdp_sharded_state_dict_enabled else
_get_node_checkpoint_download_folder(tempdir))
assert node_checkpoint_folder is not None
composer_states_filepath, extracted_checkpoint_folder, extracted_rank_n = download_checkpoint(
path=path,
node_checkpoint_folder=node_checkpoint_folder,
object_store=object_store,
progress_bar=progress_bar,
fsdp_sharded_state_dict_enabled=state.fsdp_sharded_state_dict_enabled,
deepspeed_sharded_checkpoint=is_model_deepspeed(state.model),
)
rng_state_dicts = _restore_checkpoint(
state,
logger,
composer_states_filepath,
extracted_rank_n,
extracted_checkpoint_folder,
load_weights_only=load_weights_only,
strict_model_weights=strict_model_weights,
ignore_keys=ignore_keys,
exclude_algorithms=exclude_algorithms,
algorithm_passes=algorithm_passes,
)
finally:
# Wait for all ranks to finish restoring the checkpoint before releasing the tempdir, since tempdir can
# be a shared resource between nodes.
dist.barrier()
log.info('%s loaded from %s', 'Model weights' if load_weights_only else 'Trainer checkpoint', path)
return rng_state_dicts
def _get_node_checkpoint_download_folder(path: Optional[str]) -> str:
"""Broadcasts the ``path`` from the LOCAL rank zero to all LOCAL ranks."""
local_rank_zero = dist.get_local_world_size() * dist.get_node_rank()
paths = dist.all_gather_object(path)
local_rank_zero_path = paths[local_rank_zero]
assert local_rank_zero_path is not None, 'local rank zero provides the path'
return local_rank_zero_path
def download_checkpoint(path: str,
node_checkpoint_folder: str,
object_store: Optional[Union[ObjectStore, LoggerDestination]],
progress_bar: bool,
fsdp_sharded_state_dict_enabled: bool = False,
deepspeed_sharded_checkpoint: bool = False) -> Tuple[str, Optional[str], bool]:
"""Download the checkpoint stored at ``path``, potentially in ``object_store``, to ``node_checkpoint_folder``.
Returns a tuple of (``composer_states_filepath``, ``extracted_checkpoint_folder``, ``extracted_rank_n``).
* The ``composer_states_filepath`` is the path to the composer states, which can be passed into
:meth:`torch.load`.
* The ``extracted_checkpoint_folder`` is the path to the checkpoint folder, which can be passed into
:meth:`deepspeed.DeepSpeedEngine.load_checkpoint`.
* The ``extracted_rank_n`` is a boolean flag indicating whether a tarball was extracted on global
rank greater than 0.
"""
log.debug('Downloading checkpoint to folder %s', node_checkpoint_folder)
rank_zero_checkpoint_filepath = os.path.join(node_checkpoint_folder, 'rank0_checkpoint')
rank_n_checkpoint_filepath = os.path.join(node_checkpoint_folder, f'rank{dist.get_global_rank()}_checkpoint')
extracted_checkpoint_folder = None
extracted_rank_n = False
if is_tar(path):
extracted_checkpoint_folder = os.path.join(node_checkpoint_folder, 'checkpoint')
composer_states_filepath = os.path.join(extracted_checkpoint_folder, _COMPOSER_STATES_FILENAME)
else:
# it's not an archive; it's just the composer state dict
# and only rank zero has this file, unless fsdp_sharded_state_dict_enabled is set, in which case
# every rank has its own file.
extracted_checkpoint_folder = None
composer_states_filepath = (rank_n_checkpoint_filepath
if fsdp_sharded_state_dict_enabled else rank_zero_checkpoint_filepath)
checkpoint_is_sharded = fsdp_sharded_state_dict_enabled or deepspeed_sharded_checkpoint
try:
if not checkpoint_is_sharded and dist.get_local_rank() == 0:
# if the checkpoint is not sharded, then local rank 0 on each node needs to download the
# global rank 0 checkpoint
path = _format_path_with_rank_zero(path)
get_file(destination=rank_zero_checkpoint_filepath,
path=path,
object_store=object_store,
progress_bar=progress_bar)
if extracted_checkpoint_folder is not None:
try:
with tarfile.open(rank_zero_checkpoint_filepath) as tarball:
tarball.extractall(extracted_checkpoint_folder)
except FileNotFoundError:
# Not re-raising the file-not-found error as that is irrelevant;
# the underlying issue is that the checkpoint file does not exist on the disk
# or could not be downloaded
raise RuntimeError(f'Checkpoint {path} does not exist')
elif checkpoint_is_sharded:
# if the checkpoint is sharded, then every rank needs to download its own checkpoint
try:
get_file(destination=rank_n_checkpoint_filepath,
path=_format_path_with_current_rank(path),
object_store=object_store,
progress_bar=progress_bar)
except FileNotFoundError as e:
raise FileNotFoundError(
(f'Checkpoint {_format_path_with_current_rank(path)} does not exist, '
f'but is required for sharded checkpointing on rank {dist.get_global_rank()}. '
'Please ensure that the checkpoint exists and your load_path was specified as a format string '
'with the {rank} argument.')) from e
if extracted_checkpoint_folder is not None:
try:
# it's an archive and needs to be extracted
with tarfile.open(rank_n_checkpoint_filepath) as tarball:
tarball.extractall(extracted_checkpoint_folder)
extracted_rank_n = True
except FileNotFoundError:
# this will happen most of the time (i.e. whenever deepspeed
# is not being used) so not logging anything
pass
finally:
# Wait for all checkpoints on the node to finish downloading
# First we wait for the local rank 0 to finish its download. This prevents timeouts
# in cases where the local rank 0 is downloading a monolithic checkpoint, and so takes
# much longer than the other ranks, which have nothing to download
# Putting the barrier in a finally so the rank will always block on the barrier,
# even if it has an exception.
# Any exception will be re-raised after the barrier passes. The launcher script
# will detect the process crash and terminate the other ranks
signal_file_path = os.path.join(node_checkpoint_folder, '.local_rank0_completed')
if dist.get_local_rank() == 0:
with open(signal_file_path, 'wb') as f:
f.write(b'local_rank0_completed')
dist.local_rank_zero_download_and_wait(signal_file_path)
if dist.get_local_rank() == 0:
os.remove(signal_file_path)
dist.barrier()
return composer_states_filepath, extracted_checkpoint_folder, extracted_rank_n
def _flatten_keys(obj: Any, paths: List[str], existing_path: str):
"""Recursively flatten the keys of a dictionary or list into a set of paths."""
# Store path when we reach end, which is either non-Dict or empty Dict
if isinstance(obj, list) and len(obj) > 0:
for i, elm in enumerate(obj):
_flatten_keys(elm, paths, f'{existing_path}/{i}')
elif isinstance(obj, dict) and len(obj) > 0:
for k, v in obj.items():
_flatten_keys(v, paths, f'{existing_path}/{k}')
# Remove leading /
paths.append(existing_path.lstrip('/'))
def _remove_paths(obj: Union[list, Dict[str, Any]], exclude_paths: List[List[str]]):
# First determine the keys which will be recursed on and which will be removed entirely
# Group the `exclude_paths` by the key
keys_to_recurse = {}
keys_to_remove = []
for exclude_path_parts in exclude_paths:
key = exclude_path_parts[0]
if isinstance(obj, list):
key = int(key)
if len(exclude_path_parts) == 1:
keys_to_remove.append(key)
else:
if key not in keys_to_recurse:
keys_to_recurse[key] = []
keys_to_recurse[key].append(exclude_path_parts[1:])
# Recurse first, so in the case of a list, the indexing is consistent
for key, paths_to_recurse in keys_to_recurse.items():
_remove_paths(obj[key], paths_to_recurse)
# Sort the keys in reverse order, so in the case of a list, the indexing is consistent
keys_to_remove.sort(reverse=True)
# Remove the keys
for key in keys_to_remove:
del obj[key]
def glob_filter(exclude_globs: List[str]) -> Callable[[Dict], None]:
"""Provides a function which deletes all subparts of a dictionary based on a list of paths."""
def filter_func(state_dict: Dict) -> None:
# Flatten dictionary into paths
paths = []
_flatten_keys(state_dict, paths, '/')
filtered_paths = []
for exclude_glob in exclude_globs:
filtered_paths_from_glob = fnmatch.filter(paths, exclude_glob)
if len(filtered_paths_from_glob) == 0:
                warnings.warn(
                    f'No parts of the loaded checkpoint state_dict were ignored by load_ignore_keys glob {exclude_glob}')
filtered_paths.extend(filtered_paths_from_glob)
filtered_paths = list(set(filtered_paths))
filtered_paths_str = ', '.join(filtered_paths)
if filtered_paths:
log.info(f'Ignoring the following paths from the loaded checkpoint state_dict: {filtered_paths_str}')
# Loop through all paths to exclude
paths_to_remove = [path.split('/') for path in filtered_paths]
_remove_paths(state_dict, paths_to_remove)
return filter_func
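# Minimal usage sketch (illustrative only, not exercised by the library): how ``glob_filter``
# can drop matching sub-trees from a loaded checkpoint state_dict. The state_dict layout below
# is a toy stand-in for a real Composer checkpoint.
def _example_glob_filter_usage():
    state_dict = {'state': {'model': {'weight': 1}, 'train_metrics': {'accuracy': 0.9}}}
    filter_fn = glob_filter(['state/train_metrics/*'])
    filter_fn(state_dict)  # removes the matching paths in place
    assert state_dict['state']['train_metrics'] == {}
    assert state_dict['state']['model'] == {'weight': 1}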
def safe_torch_load(composer_states_filepath: Union[Path, str], map_location: str = 'cpu'):
"""Load a torch checkpoint, catching errors due to backwards compatibility issues.
Args:
composer_states_filepath: The path to the checkpoint file.
map_location: The location to load the checkpoint to.
"""
try:
state_dict = torch.load(composer_states_filepath, map_location=map_location)
return state_dict
except TypeError as e:
if 'Accuracy.__new__() missing 1 required positional argument' in str(e):
raise Exception('As of v0.10.0, torchmetrics introduces a new required argument to Accuracy which '
'breaks backwards compatibility. Unfortunately, this means that older checkpoints '
'cannot be loaded with the metrics. In order to successfully load this model, please '
'pass `load_ignore_keys = ["state/train_metrics/*", "state/eval_metrics/*"]`.') from e
raise e
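# Minimal usage sketch (illustrative only): round-tripping a toy state dict through
# ``torch.save`` and ``safe_torch_load``. The file name and dict contents are placeholders.
def _example_safe_torch_load():
    with tempfile.TemporaryDirectory() as tmpdir:
        filepath = os.path.join(tmpdir, 'composer_states.pt')
        torch.save({'state': {}, 'rng': []}, filepath)
        state_dict = safe_torch_load(filepath, map_location='cpu')
        assert set(state_dict.keys()) == {'state', 'rng'}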
def _restore_checkpoint(
state: State,
logger: Logger,
composer_states_filepath: str,
extracted_rank_n: bool,
extracted_checkpoint_folder: Optional[str],
load_weights_only: bool,
strict_model_weights: bool,
ignore_keys: Optional[Union[List[str], Callable[[Dict], None]]],
exclude_algorithms: Optional[List[str]],
algorithm_passes: Optional[List[AlgorithmPass]],
) -> Optional[List[Dict[str, Any]]]:
"""Restore a checkpoint into ``state`` and returns the rng state dicts (if ``load_weights_only`` is False)."""
# Now, all ranks load the checkpoint that local rank zero downloaded
state_dict = safe_torch_load(composer_states_filepath)
if ignore_keys:
# Filter provided list of key paths
if not callable(ignore_keys):
ignore_keys = glob_filter(ignore_keys)
# Call function to modify state_dict
ignore_keys(state_dict)
log.debug(f"Loaded checkpoint with keys {state_dict.keys()} and state keys {state_dict['state'].keys()}")
if is_model_deepspeed(state.model):
if extracted_checkpoint_folder is None:
raise RuntimeError('Deepspeed checkpoints require a tarball, not a weights file.')
global_rank = dist.get_global_rank()
if global_rank > 0 and not extracted_rank_n:
raise RuntimeError(f'Deepspeed checkpoint missing for rank {global_rank}')
load_path, _ = state.deepspeed_model.load_checkpoint(
extracted_checkpoint_folder,
tag=_DEEPSPEED_TAG,
load_module_only=load_weights_only,
load_module_strict=strict_model_weights,
)
if load_path is None:
raise RuntimeError(f'Failed to load DeepSpeed checkpoint')
elif load_weights_only:
state.load_model_state(
state_dict['state'],
logger,
strict=strict_model_weights,
exclude_algorithms=exclude_algorithms,
algorithm_passes=algorithm_passes,
)
if not load_weights_only:
state.load_state_dict(
state_dict['state'],
logger,
exclude_algorithms=exclude_algorithms,
algorithm_passes=algorithm_passes,
)
return state_dict['rng']
def save_checkpoint(
state: State,
filename: str = 'ep{epoch}-ba{batch}-rank{rank}',
*,
weights_only: bool = False,
) -> Union[str, None]: # noqa: D103
log.debug('Saving checkpoint to %s', filename)
is_deepspeed = is_model_deepspeed(state.model)
state_dict = {
'state': state.state_dict(),
'rng': reproducibility.get_rng_state(),
}
if weights_only and not is_deepspeed:
state_dict['state'] = {'model': state_dict['state']['model']}
save_filename = PartialFilePath(filename).format(state, is_deepspeed)
dirname = os.path.dirname(save_filename)
if dirname:
os.makedirs(dirname, exist_ok=True)
# only rank 0 saves the state_dict unless state.fsdp_sharded_state_dict_enabled=True.
if dist.get_global_rank() == 0 or state.fsdp_sharded_state_dict_enabled:
with open(save_filename, 'wb') as f:
torch.save(state_dict, f)
if is_tar(save_filename):
_compress_file(save_filename, basename=_COMPOSER_STATES_FILENAME)
# all ranks save for deepspeed
if is_deepspeed:
_save_deepspeed_model(state.deepspeed_model, save_filename)
dist.barrier() # ensure all ranks saved their files
if dist.get_global_rank() == 0 or is_deepspeed or state.fsdp_sharded_state_dict_enabled:
assert os.path.exists(save_filename), 'Expected file to have been saved.'
return save_filename
else:
# no file saved
return None
def _compress_file(filename: str, basename: str):
"""Replace a file with its compressed version.
The contents will be called ``basename`` inside
the compressed archive.
"""
write_mode = _get_write_mode(filename)
with tempfile.TemporaryDirectory() as tmpdir:
shutil.move(filename, os.path.join(tmpdir, basename))
with tarfile.open(filename, write_mode) as tarball:
tarball.add(tmpdir, arcname='')
def _save_deepspeed_model(model, filename: str):
"""Save Deepspeed model and tarball the files."""
write_mode = _get_write_mode(filename)
read_mode = 'r' + write_mode[1:]
with tempfile.TemporaryDirectory() as tmpdir:
model.save_checkpoint(tmpdir, _DEEPSPEED_TAG)
if os.path.exists(filename):
# extract to tmpdir to append below
# not all compression formats support direct append
with tarfile.open(filename, read_mode) as tar:
tar.extractall(tmpdir)
with tarfile.open(filename, write_mode) as tar:
tar.add(tmpdir, arcname='')
save_checkpoint.__doc__ = f"""Checkpoint the training ``state``.
Args:
state (State): The training state.
logger (Logger): The logger.
filename (str): A format string describing how to name checkpoints.
(default: ``'ep{{epoch}}-ba{{batch}}-rank{{rank}}'``)
The following format variables are available:
{textwrap.indent(FORMAT_NAME_WITH_DIST_AND_TIME_TABLE, prefix=' ')}
.. note::
* By default, only the rank zero process will save a checkpoint file.
* When using DeepSpeed, each rank will save a checkpoint file in tarball format. DeepSpeed
requires tarball format, as it saves model and optimizer states in separate files.
Ensure that ``'{{rank}}'`` appears within the ``filename``. Otherwise, multiple ranks
may attempt to write to the same file(s), leading to corrupted checkpoints. If no tarball file
extension is specified, ``.tar`` will be used.
* To use compression (regardless of whether DeepSpeed is enabled), set the file extension
to ``'.tar.gz'``, ``'.tgz'``, ``'.tar.bzip'``, or ``'.tar.lzma'`` (depending on the desired
compression algorithm).
.. warning::
Using compression will block the training loop while checkpoints are being compressed. As such, we
recommend saving checkpoints without compression.
Consider the following scenario, where:
* The default ``name='ep{{epoch}}-ba{{batch}}-rank{{rank}}'`` is used.
* The current epoch count is ``1``.
* The current batch count is ``42``.
When DeepSpeed is not being used, the rank zero process will save the checkpoint to ``'ep1-ba42-rank0'``.
When DeepSpeed is being used, each rank (process) will save checkpoints to::
ep1-ba42-rank0.tar
ep1-ba42-rank1.tar
ep1-ba42-rank2.tar
...
weights_only (bool, optional): If ``True``, save only the model weights instead of the entire training state.
(default: ``False``)
.. note::
When using DeepSpeed, this parameter must be ``False``. Weights-only checkpointing is not currently
            compatible with DeepSpeed.
Returns:
    str or None: The path to the saved checkpoint file, or ``None`` if this process did not save a file.
    .. note::
        When using DeepSpeed, each process (rank) saves its own checkpoint file, so every rank returns a path.
        When doing multi-node training, the filepaths are valid only on each process's node;
        Composer does not move checkpoint files between nodes.
        Otherwise, when not using DeepSpeed, only the rank zero process saves a checkpoint and returns a path;
        all other ranks return ``None``.
"""
| composer-dev | composer/utils/checkpoint.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Device-related helper methods and utilities."""
from typing import TYPE_CHECKING, Optional, Union
import torch.cuda
if TYPE_CHECKING:
from composer.devices import Device
__all__ = ['get_device', 'is_tpu_installed']
def get_device(device: Optional[Union[str, 'Device']]) -> 'Device':
"""Takes string or Device and returns the corresponding :class:`~composer.devices.Device`.
Args:
device (str | Device, optional): A string corresponding to a device (one of
``'cpu'``, ``'gpu'``, ``'mps'``, or ``'tpu'``) or a :class:`.Device`.
Returns:
Device: Device corresponding to the passed string or
Device. If no argument is passed, returns :class:`.DeviceGPU` if available,
or :class:`.DeviceCPU` if no GPU is available.
"""
from composer.devices import DeviceCPU, DeviceGPU, DeviceMPS, DeviceTPU
if not device:
device = DeviceGPU() if torch.cuda.is_available() else DeviceCPU()
elif isinstance(device, str):
if device.lower() == 'cpu':
device = DeviceCPU()
elif device.lower() == 'gpu':
device = DeviceGPU()
elif device.lower() == 'mps':
device = DeviceMPS()
elif device.lower() == 'tpu':
if not is_tpu_installed():
raise ImportError(
'Unable to import torch_xla. Please follow installation instructions at https://github.com/pytorch/xla'
)
device = DeviceTPU()
else:
raise ValueError(f'device ({device}) must be one of (cpu, gpu, mps, tpu).')
return device
def is_tpu_installed() -> bool:
"""Determines whether the module needed for training on TPUs—torch_xla—is installed.
Returns:
bool: Whether torch_xla is installed.
"""
try:
import torch_xla
del torch_xla
return True
except ModuleNotFoundError:
return False
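# Minimal usage sketch (illustrative only): resolving device strings to Device instances.
def _example_get_device():
    from composer.devices import DeviceCPU
    device = get_device('cpu')
    assert isinstance(device, DeviceCPU)
    # Passing None selects DeviceGPU when CUDA is available, otherwise DeviceCPU.
    assert get_device(None) is not None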
| composer-dev | composer/utils/device.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helpers to get items and set items in a batch."""
from operator import attrgetter, itemgetter
from typing import Any, Callable, Sequence, Union, cast
__all__ = ['batch_get', 'batch_set']
def batch_get(batch: Any, key: Union[str, int, Callable, Any]):
"""Indexes into the batch given the key.
>>> from composer.utils.batch_helpers import batch_get
>>> batch_get([1,2,3], 1)
2
>>> batch_get({'a':1, 'b':7}, 'b')
7
>>> batch_get([{'a':1, 'b':7},{'c':5}], lambda x: x[1]['c'])
5
>>> batch_get([{'a':1, 'b':7},{'c':5}], (lambda x: x[1]['c'], lambda x: 10))
5
Args:
batch (Any): An object that contains the input and label of the items in the batch.
            Can be any arbitrary type that the user creates, but we assume some sort of
sequence (list, tuple, tensor, array), mapping (dictionary),
or attribute store (object with data members, namedtuple).
key (str | int | Tuple[Callable, Callable] | Any, optional): A key to index into the batch or a
user-specified function to do the extracting. A pair of callables is also
supported for cases where a get and set function pair are both passed
(like in Algorithms). The getter is assumed to be the first of the pair.
Returns:
The part of the batch specified by the key. This could be any type
depending on what the batch is composed of.
"""
# Case 1: key is a tuple of (getter, setter).
if (isinstance(key, Sequence) and not isinstance(key, str) and _is_key_get_and_set_fn_pair(key)):
get_fn, _ = key
return get_fn(batch)
# Case 2: key is a getter Callable.
if isinstance(key, Callable):
return key(batch)
    # Case 3: key is some sort of index or key used to extract directly from the batch.
try:
return itemgetter(key)(batch)
except (IndexError, TypeError):
try:
return itemgetter(*key)(batch)
except TypeError:
try:
return attrgetter(cast(str, key))(batch)
except TypeError:
return attrgetter(*key)(batch)
def batch_set(batch: Any, key: Union[str, int, Callable, Any], value: Any) -> Any:
"""Indexes into the batch given the key and sets the element at that index to value.
This is not an in-place operation for batches of type tuple as tuples are not mutable.
>>> from composer.utils.batch_helpers import batch_set
>>> batch_set([1,2,3], key=1, value=8)
[1, 8, 3]
>>> batch_set({'a':1, 'b':7}, key='b', value=11)
{'a': 1, 'b': 11}
>>> def setter(batch, value):
... batch[1]['d'] = value
... return batch
...
>>> batch_set([{'a':1, 'b':7},{'d':3}], key=setter, value=20)
[{'a': 1, 'b': 7}, {'d': 20}]
>>> batch_set([{'a':1, 'b':7},{'d':3}], key=(lambda x: x[0]['b'], setter), value=20)
[{'a': 1, 'b': 7}, {'d': 20}]
Args:
batch (Any): An object that contains the input and label of the items in the batch.
            Can be any arbitrary type that the user creates, but we assume some sort of
sequence (list, tuple, tensor, array), mapping (dictionary),
or attribute store (object with data members, namedtuple).
key (str | int | Tuple[Callable, Callable] | Any, optional): A key to index into the batch or a user-specified function
to do the setting. A pair of callables is also supported for cases where a get
and set function pair are both passed (like in Algorithms). The setter is
assumed to be the second of the pair.
value (Any): The value that batch[key] or batch.key gets set to.
Returns:
batch (Any): updated batch with value set at key.
"""
# Case 1: key is a tuple of (getter, setter) callables.
if (isinstance(key, Sequence) and not isinstance(key, str) and _is_key_get_and_set_fn_pair(key)):
_, set_fn = key
return set_fn(batch, value)
# Case 2: key is a callable.
if isinstance(key, Callable):
return key(batch, value)
    # Case 3: key is a sequence of sub-keys.
if isinstance(key, Sequence) and not isinstance(key, str):
return _batch_set_multiple(batch, key, value)
    # Case 4: key is a single object, like a string or an int.
else:
return _batch_set(batch, key, value)
def _batch_set(batch: Any, key: Any, value: Any) -> Any:
"""Sets a key value pair in a non-tuple batch."""
if isinstance(batch, tuple):
return _batch_set_tuple(batch, key, value)
try:
# Check if one can do a __getitem__ before doing a __setitem__ because dicts can
# do __setitem__ for elements not in the dict and we do not want that.
        # Note that for defaultdict and Counter objects, just calling batch[key]
        # with a new key will create a new key-value pair in the object.
batch[key]
batch[key] = value
# The only acceptable TypeErrors are for an object that doesn't have a __setitem__ or a __getitem__,
# which is TypeError("... object does not support item assignment") and TypeError('.. object is not subscriptable')
except TypeError as e:
if 'object does not support item assignment' in str(e) or 'object is not subscriptable' in str(e):
pass
else: # Other type errors should be raised.
raise e
else:
return batch
try:
# Make sure batch has key before setting it.
getattr(batch, key)
setattr(batch, key, value)
    # If getattr/setattr also fail after __getitem__/__setitem__ failed, raise a RuntimeError with guidance.
except (AttributeError, TypeError) as e:
raise RuntimeError(
f'Unable to set key {key} to value {value} on batch {batch}. Please specify a custom set_fn, if necessary.')
else:
return batch
def _batch_set_multiple(batch: Any, key: Any, value: Any) -> Any:
"""Sets multiple key value pairs in a non-tuple batch."""
# Numpy arrays and Torch tensors can take tuples and lists as keys, so try to do a normal
    # __getitem__ call before falling back to setting each key individually.
try:
# Check if one can do a __getitem__ before doing a __setitem__ because dicts can
# do __setitem__ for elements not in the dict and we do not want that.
batch[key]
batch[key] = value
return batch
# Indexing a list with a sequence results in TypeError
# Indexing an array/tensor with a sequence that is longer than the rank of the array
# results in an IndexError.
except (IndexError, TypeError, KeyError):
pass
if not hasattr(value, '__len__') or isinstance(value, str):
        raise ValueError(f'value must be a sequence, array, or tensor, not {type(value)}')
if len(key) != len(value):
raise ValueError(f'value must be the same length as key ({len(key)}), but it is {len(value)} instead')
for single_key, single_value in zip(key, value):
batch = _batch_set(batch, single_key, single_value)
return batch
def _batch_set_tuple(batch: Any, key: Union[int, str], value: Any) -> Any:
"""Sets key value pairs in tuples and NamedTuples."""
if hasattr(batch, '_fields'): # NamedTuple
if isinstance(key, str):
batch = batch._replace(**{key: value})
else:
batch_list = list(batch)
batch_list[key] = value
batch = batch._make(batch_list)
else: # Normal tuple.
batch = list(batch)
batch[key] = value
batch = tuple(batch)
return batch
def _is_key_get_and_set_fn_pair(key):
if all([callable(key_element) for key_element in key]):
if len(key) == 2:
return True
else:
            raise ValueError(f'If key is a sequence of Callables, it should be of length 2, not {len(key)}')
return False
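# Minimal usage sketch (illustrative only): setting a field on a NamedTuple batch.
# NamedTuples are immutable, so ``batch_set`` returns a new batch rather than mutating in place.
def _example_batch_set_namedtuple():
    from collections import namedtuple
    Sample = namedtuple('Sample', ['inputs', 'label'])
    batch = Sample(inputs=[1, 2, 3], label=0)
    new_batch = batch_set(batch, key='label', value=7)
    assert new_batch.label == 7
    assert batch.label == 0  # the original batch is unchanged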
| composer-dev | composer/utils/batch_helpers.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helpers to gather system information for debugging and bug reporting.
Leverages PyTorch's :mod:`torch.utils.collect_env` package to gather pertinent system information.
The following information is additionally collected to facilitate Composer-specific debugging:
* Composer version
* Number of nodes
* Host processor model name
* Host processor physical core count
* Number of accelerators per node
* Accelerator model name
This package can be invoked as a standalone console script or can be invoked from within
an application to gather and generate a system environment report.
The module can be invoked by using the entrypoint alias:
.. code-block::
$ composer_collect_env
Or manually as a standalone script:
.. code-block::
$ python composer/utils/collect_env.py
To generate a system report from within a user application see :func:`print_env`.
A custom excepthook wrapper is also provided which extends the original :func:`sys.excepthook`
to automatically collect system information when an exception is raised.
To override the original :func:`sys.excepthook` see :func:`configure_excepthook`.
By default, the Composer custom ``excepthook`` automatically generates the environment report.
To disable automatic environment report generation, use the :func:`disable_env_report` helper
function. Report generation can be re-enabled by using the :func:`enable_env_report` function.
"""
import functools
import json
import sys
import time
from typing import NamedTuple, Optional, TextIO
import cpuinfo
import importlib_metadata
import psutil
from composer.utils.misc import is_notebook
__all__ = ['configure_excepthook', 'disable_env_report', 'enable_env_report', 'print_env', 'get_composer_env_dict']
# Check if PyTorch is installed
try:
import torch.utils.collect_env as torchenv
from torch.cuda import device_count as cuda_device_count
from torch.cuda import get_device_name as accel_device_name
from torch.cuda import is_available as cuda_available
TORCH_AVAILABLE = True
except (ImportError,):
TORCH_AVAILABLE = False
# Check if Composer is installed
try:
import composer
from composer.utils import dist
COMPOSER_AVAILABLE = True
except (ImportError,):
COMPOSER_AVAILABLE = False
# Check if we're running in a notebook
IPYTHON_AVAILABLE = is_notebook()
if IPYTHON_AVAILABLE:
from composer.utils.import_helpers import import_object
get_ipython = import_object('IPython:get_ipython')
nb = get_ipython()
# Place to keep track of the original excepthook
_orig_excepthook = None
# Track if excepthook was previously registered, needed for idempotency
_EXCEPTHOOK_REGISTERED = False
# Track if environment report generation on exception is enabled, enabled by default
_ENV_EXCEPTION_REPORT = True
# Same convention as Torch collect_env, create a namedtuple to track collected fields
class ComposerEnv(NamedTuple):
composer_version: str
composer_commit_hash: Optional[str]
node_world_size: int
host_processor_model_name: str
host_processor_core_count: int
local_world_size: int
accelerator_model_name: str
cuda_device_count: int
def get_composer_commit_hash() -> Optional[str]:
# Use PEP-610 to get the commit hash
# See https://packaging.python.org/en/latest/specifications/direct-url/
# Try both package names that Composer is released under
try:
files = importlib_metadata.files('mosaicml')
except importlib_metadata.PackageNotFoundError:
try:
files = importlib_metadata.files('composer')
except importlib_metadata.PackageNotFoundError:
return
if files is None:
return
files = [f for f in files if str(f).endswith('direct_url.json')]
if len(files) == 0:
return
f = files[0]
direct_url = json.loads(f.read_text())
vcs_info = direct_url.get('vcs_info', {})
commit_id = vcs_info.get('commit_id')
return commit_id
# Helper functions to get Composer environment information
def get_composer_version() -> str:
"""Query the Composer version."""
return str(composer.__version__)
@functools.lru_cache(maxsize=1)
def get_host_processor_name() -> str:
"""Query the host processor name."""
cpu_info = cpuinfo.get_cpu_info()
return str(cpu_info.get('brand_raw', 'CPU'))
def get_host_processor_cores() -> int:
"""Determines the number of physical host processor cores."""
return psutil.cpu_count(logical=False)
def get_node_world_size() -> int:
"""Query the number of nodes."""
return int(dist.get_world_size() / dist.get_local_world_size())
def get_accel_model_name() -> str:
"""Query the accelerator name."""
return accel_device_name(None) if cuda_available() else 'N/A'
def get_local_world_size() -> int:
"""Determines the number of accelerators per node."""
return dist.get_local_world_size() if cuda_available() else 0
def get_cuda_device_count() -> int:
"""Get the number of CUDA devices on the system."""
return cuda_device_count() if TORCH_AVAILABLE else 0
# Exception message and environment report
COMPOSER_OPEN_ISSUE_URL = 'https://github.com/mosaicml/composer/issues/new/choose'
def _exc_report(exc_type) -> None:
"""Produces exception report (exception message + environment report).
Args:
exc_type (Exception): Type of exception.
"""
EXCEPTION_MSG = f'Bugs can be reported at: {COMPOSER_OPEN_ISSUE_URL}\n'
# Don't print exception report for KeyboardInterrupt
if not issubclass(exc_type, KeyboardInterrupt):
if issubclass(exc_type, AssertionError):
EXCEPTION_SEV_MSG = f'This is a Composer bug. Please submit a bug report.\n'
elif issubclass(exc_type, RuntimeError):
EXCEPTION_SEV_MSG = f'This could be due to user error but is most likely a Composer bug.\n'
elif issubclass(exc_type, ValueError) or issubclass(exc_type, TypeError):
EXCEPTION_SEV_MSG = f'This was most likely due to user error but please submit a bug report if you suspect a Composer issue.\n'
else:
EXCEPTION_SEV_MSG = f'If you would like support debugging, submit a bug report or reach out to us on our community channels.\n'
print('\n-------------------\n' + EXCEPTION_SEV_MSG + EXCEPTION_MSG, file=sys.stderr)
# Only print environment report if enabled
global _ENV_EXCEPTION_REPORT
if _ENV_EXCEPTION_REPORT:
print('Please include details on how to reproduce the issue and attach the following environment report:\n')
print_env(sys.stderr)
else:
print(
"Please run the \'composer_collect_env\' utility and include your environment information with the bug report\n"
)
def enable_env_report() -> None:
"""Enable environment report generation on exception."""
global _ENV_EXCEPTION_REPORT
_ENV_EXCEPTION_REPORT = True
def disable_env_report() -> None:
"""Disable environment report generation on exception."""
global _ENV_EXCEPTION_REPORT
_ENV_EXCEPTION_REPORT = False
# Excepthook wrapper, wraps default excepthook and prints env info
def _custom_exception_handler(type, value, tb) -> None:
"""Custom exception wrapper for sys.excepthook."""
_exc_report(exc_type=type)
assert _orig_excepthook
_orig_excepthook(type, value, tb)
# Custom exception handler for IPython notebooks
def _nb_custom_exception_handler(self, type, value, tb, tb_offset=None):
"""Custom exception handler for IPython."""
_exc_report(exc_type=type)
self.showtraceback((type, value, tb), tb_offset=tb_offset) # standard IPython's printout
# Public function to register the excepthook wrapper
def configure_excepthook() -> None:
"""Collect and print system information when :func:`sys.excepthook` is called.
The custom exception handler causes an exception message to be printed when :func:`sys.excepthook`
is called. The exception message provides the user with information on the nature of the exception
and directs the user to file GitHub issues as appropriate.
By default, the custom exception handler also generates an environment report users can attach to
bug reports. Environment report generation can be optionally enabled/disabled by using the
:func:`enable_env_report` and :func:`disable_env_report` helper functions, respectively.
    Additionally, the custom excepthook checks if the user is running from an IPython session and
sets up the custom exception handler accordingly.
To override the default :func:`sys.excepthook` with the custom except hook:
.. testsetup:: composer.utils.collect_env.configure_excepthook
import sys
from composer.utils.collect_env import configure_excepthook
import composer.utils.collect_env as ce
sys.excepthook = sys.__excepthook__
ce._EXCEPTHOOK_REGISTERED = False
.. doctest:: composer.utils.collect_env.configure_excepthook
>>> configure_excepthook()
>>> sys.excepthook
<function _custom_exception_handler at ...>
"""
global _EXCEPTHOOK_REGISTERED
    # Needs to be idempotent across multiple trainers, don't register if we've already registered
if not _EXCEPTHOOK_REGISTERED:
        # Custom exceptions work differently in notebooks
if IPYTHON_AVAILABLE:
# Set custom handler on Exception base class to apply to all exceptions
nb.set_custom_exc((Exception,), _nb_custom_exception_handler)
else:
# Save original excepthook and override
global _orig_excepthook
_orig_excepthook = sys.excepthook
sys.excepthook = _custom_exception_handler
_EXCEPTHOOK_REGISTERED = True
# Get Torch environment info
def get_torch_env() -> str:
"""Query Torch system environment via :mod:`torch.utils.collect_env`."""
return torchenv.get_pretty_env_info()
# Composer environment information string output format
_COMPOSER_ENV_INFO_FORMAT = """
Composer version: {composer_version}
Composer commit hash: {composer_commit_hash}
Host processor model name: {host_processor_model_name}
Host processor core count: {host_processor_core_count}
Number of nodes: {node_world_size}
Accelerator model name: {accelerator_model_name}
Accelerators per node: {local_world_size}
CUDA Device Count: {cuda_device_count}
""".strip()
# Get composer environment info as a dictionary
def get_composer_env_dict() -> dict:
"""Query Composer pertinent system information as a dict."""
mutable_dict = ComposerEnv(
composer_version=get_composer_version(),
composer_commit_hash=get_composer_commit_hash(),
host_processor_model_name=get_host_processor_name(),
host_processor_core_count=get_host_processor_cores(),
node_world_size=get_node_world_size(),
accelerator_model_name=get_accel_model_name(),
local_world_size=get_local_world_size(),
cuda_device_count=get_cuda_device_count(),
)._asdict()
return mutable_dict
# Get Composer environment info
def get_composer_env() -> str:
"""Query Composer pertinent system information."""
mutable_dict = get_composer_env_dict()
return _COMPOSER_ENV_INFO_FORMAT.format(**mutable_dict)
# Generate and print environment report
def print_env(file: Optional[TextIO] = None) -> None:
"""Generate system information report.
Example:
.. code-block:: python
from composer.utils.collect_env import print_env
print_env()
Sample Report:
.. code-block:: text
---------------------------------
System Environment Report
Created: 2022-04-27 00:25:33 UTC
---------------------------------
PyTorch information
-------------------
            PyTorch version: 1.9.1+cu111
            Is debug build: False
            CUDA used to build PyTorch: 11.1
ROCM used to build PyTorch: N/A
OS: Ubuntu 18.04.6 LTS (x86_64)
GCC version: (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0
Clang version: Could not collect
CMake version: version 3.10.2
Libc version: glibc-2.27
Python version: 3.8 (64-bit runtime)
Python platform: Linux-5.8.0-63-generic-x86_64-with-glibc2.27
Is CUDA available: True
CUDA runtime version: 11.1.105
GPU models and configuration:
GPU 0: NVIDIA GeForce RTX 3080
GPU 1: NVIDIA GeForce RTX 3080
GPU 2: NVIDIA GeForce RTX 3080
GPU 3: NVIDIA GeForce RTX 3080
Nvidia driver version: 470.57.02
cuDNN version: Probably one of the following:
/usr/lib/x86_64-linux-gnu/libcudnn.so.8.0.5
/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.0.5
/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.0.5
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.0.5
/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.0.5
/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.0.5
/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.0.5
HIP runtime version: N/A
MIOpen runtime version: N/A
Versions of relevant libraries:
[pip3] numpy==1.22.3
[pip3] pytorch-ranger==0.1.1
[pip3] torch==1.9.1+cu111
[pip3] torch-optimizer==0.1.0
[pip3] torchmetrics==0.7.3
[pip3] torchvision==0.10.1+cu111
[pip3] vit-pytorch==0.27.0
[conda] Could not collect
Composer information
--------------------
Composer version: 0.8.2
Composer commit hash: 9e14a47562def0baa414242c36954eb3083dcd46
Host processor model name: AMD EPYC 7502 32-Core Processor
Host processor core count: 64
Number of nodes: 1
Accelerator model name: NVIDIA GeForce RTX 3080
Accelerators per node: 1
CUDA Device Count: 4
Args:
file (TextIO, optional): File handle, `sys.stdout` or `sys.stderr`. Defaults to `sys.stdout`.
"""
# Set stdout during runtime if no output file is specified
if file is None:
file = sys.stdout
# Creation timestamp for report
creation_time = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(time.time()))
report_hdr = f'---------------------------------\n' + \
f'System Environment Report \n' + \
f'Created: {creation_time}\n' + \
f'---------------------------------\n'
# Torch section
print(report_hdr, file=file)
print('PyTorch information', file=file)
print('-------------------', file=file)
if TORCH_AVAILABLE:
# Only print Torch system info if installed
print(get_torch_env() + '\n\n', file=file)
else:
print('Torch not installed', file=file)
print('Composer information', file=file)
print('--------------------', file=file)
if COMPOSER_AVAILABLE:
# Only print Composer system info if installed
print(get_composer_env() + '\n\n', file=file)
else:
print('Composer not installed', file=file)
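# Minimal usage sketch (illustrative only): ``print_env`` accepts any TextIO, so the report
# can be captured as a string, e.g. to attach to a bug report programmatically.
def _example_capture_env_report() -> str:
    import io
    buffer = io.StringIO()
    print_env(buffer)
    return buffer.getvalue()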
# Invoke as standalone CLI script
def main() -> None:
print('Collecting system information...')
print_env()
if __name__ == '__main__':
sys.exit(main())
| composer-dev | composer/utils/collect_env.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helper utilities for configuring deterministic training to ensure reproducibility.
.. note::
For deterministic model initialization, :func:`~.seed_all` and/or
:func:`~.configure_deterministic_mode` should be
invoked before creating and initializing a model, before creating the :class:`~.Trainer`.
For example:
.. testsetup::
import functools
import torch.nn
import warnings
warnings.filterwarnings(action="ignore", message="Deterministic mode is activated.")
MyModel = Model
.. doctest::
>>> import torch.nn
>>> from composer.utils import reproducibility
>>> reproducibility.configure_deterministic_mode()
>>> reproducibility.seed_all(42)
>>> model = MyModel()
>>> def init_weights(m):
... if isinstance(m, torch.nn.Linear):
... torch.nn.init.xavier_uniform(m.weight)
>>> # model will now be deterministically initialized, since the seed is set.
>>> init_weights(model)
>>> trainer = Trainer(model=model, seed=42)
Note that the seed must also be passed to the Trainer, otherwise the Trainer
would generate a random seed based on the timestamp (see :func:`~.get_random_seed`).
.. testcleanup::
warnings.resetwarnings()
Attributes:
MAX_SEED (int): The maximum allowed seed, which is :math:`2^{32} - 1`.
"""
from __future__ import annotations
import logging
import os
import random
import textwrap
import time
import warnings
from typing import Any, Dict, List
import numpy as np
import torch
import torch.backends.cudnn
from composer.utils import dist
__all__ = [
'configure_deterministic_mode',
'get_random_seed',
'seed_all',
'get_rng_state',
'load_rng_state',
'MAX_SEED',
]
log = logging.getLogger(__name__)
# seeds must be 32-bit unsigned integers
MAX_SEED = 2**32 - 1
def configure_deterministic_mode():
"""Configure PyTorch deterministic mode.
.. note::
When using the :class:`.Trainer`, you can use the ``deterministic_mode`` flag
instead of invoking this function directly.
For example:
.. testsetup::
import warnings
warnings.filterwarnings(action="ignore", message="Deterministic mode is activated.")
.. doctest::
>>> trainer = Trainer(deterministic_mode=True)
.. testcleanup::
warnings.resetwarnings()
However, to configure deterministic mode for operations before the trainer is initialized, manually invoke this
function at the beginning of your training script.
.. note::
When training on a GPU, this function must be invoked before any CUDA operations.
.. note::
Deterministic mode degrades performance. Do not use outside of testing and debugging.
"""
torch.use_deterministic_algorithms(True)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
# See https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html
# and https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
warnings.warn('Deterministic mode is activated. This will negatively impact performance.', category=UserWarning)
def get_random_seed() -> int:
"""Get a randomly created seed to use for seeding rng objects.
.. warning::
This random seed is NOT cryptographically secure.
Returns:
int: A random seed.
"""
    rng = random.Random(int(time.time_ns()))  # use a new RNG that does not respect the current global seed
seed = rng.randint(0, MAX_SEED)
    assert seed >= 0 and seed <= MAX_SEED, 'seed should be in this range'
return seed
def seed_all(seed: int):
"""Seed all rng objects.
.. note::
When using the :class:`.Trainer`, you can use the ``seed`` parameter
instead of invoking this function directly.
For example:
.. doctest::
>>> trainer = Trainer(seed=42)
However, to configure the random seed for operations before the trainer is initialized, manually invoke this
function at the beginning of your training script.
Args:
seed (int): The random seed
"""
if seed < 0 or seed > MAX_SEED:
        raise ValueError(f'Seed {seed} is invalid. It must be in [0, 2^32 - 1]')
log.info('Setting seed to %d', seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
    # torch.manual_seed may call manual_seed_all, but we call it again here
    # to make sure it gets called at least once
torch.cuda.manual_seed_all(seed)
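# Minimal usage sketch (illustrative only): re-seeding reproduces the same random draws.
def _example_seed_all_determinism():
    seed_all(42)
    first = torch.randn(3)
    seed_all(42)
    second = torch.randn(3)
    assert torch.equal(first, second)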
def get_rng_state() -> List[Dict[str, Any]]:
"""The state of the RNG objects.
Returns:
List[Dict[str, Any]]: A list of RNG State Dicts, indexed by global rank.
"""
rng_state = {
'python': random.getstate(),
'numpy': np.random.get_state(),
'torch': torch.random.get_rng_state(),
}
if torch.cuda.is_available() and torch.cuda.is_initialized():
# This will not be compatible with model parallelism
rng_state['cuda'] = torch.cuda.get_rng_state()
return dist.all_gather_object(rng_state)
def load_rng_state(rng_state_dicts: List[Dict[str, Any]]):
"""Restore the RNG state.
Args:
rng_state_dicts (List[Dict[str, Any]]): The list of RNG state dicts to restore,
as returned by :func:`get_rng_state`.
"""
if dist.get_world_size() > len(rng_state_dicts):
warnings.warn(
textwrap.dedent(f"""\
                The current world size ({dist.get_world_size()}) is greater than the number of RNG state(s) serialized
({len(rng_state_dicts)}). Only the first {len(rng_state_dicts)} rank(s) will have their RNG restored.
"""))
if dist.get_world_size() < len(rng_state_dicts):
warnings.warn(
textwrap.dedent(f"""\
                The current world size ({dist.get_world_size()}) is less than the number of RNG state(s) serialized
({len(rng_state_dicts)}). Only the first {dist.get_world_size()} RNG state(s) will be consumed;
the remaining will be ignored."""))
if dist.get_global_rank() < len(rng_state_dicts):
rng_state_dict = rng_state_dicts[dist.get_global_rank()]
torch.set_rng_state(rng_state_dict['torch'])
random.setstate(rng_state_dict['python'])
np.random.set_state(rng_state_dict['numpy'])
is_cuda_available = torch.cuda.is_available() and torch.cuda.is_initialized()
has_cuda_rng_state = 'cuda' in rng_state_dict
log.debug('Restoring the RNG state')
if is_cuda_available and has_cuda_rng_state:
torch.cuda.set_rng_state(rng_state_dict['cuda'])
if is_cuda_available and not has_cuda_rng_state:
warnings.warn(
textwrap.dedent(f"""\
The checkpoint did not include the CUDA RNG state. The CUDA RNG will have a
non-deterministic state."""))
if not is_cuda_available and has_cuda_rng_state:
warnings.warn(
textwrap.dedent(f"""\
The checkpoint included CUDA RNG state, but CUDA is not being used.
As such, the CUDA RNG state will be ignored."""))
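# Minimal usage sketch (illustrative only, assuming a single-process run where Composer's dist
# helpers fall back to local behavior): capturing and restoring RNG state rewinds the generators.
def _example_rng_state_round_trip():
    seed_all(7)
    rng_state = get_rng_state()  # captured before drawing any numbers
    first = torch.randn(2)
    load_rng_state(rng_state)  # rewind the RNGs to the captured state
    second = torch.randn(2)
    assert torch.equal(first, second)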
| composer-dev | composer/utils/reproducibility.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helper utilities."""
from composer.utils.auto_log_hparams import (convert_flat_dict_to_nested_dict, convert_nested_dict_to_flat_dict,
extract_hparams)
from composer.utils.batch_helpers import batch_get, batch_set
from composer.utils.checkpoint import PartialFilePath, load_checkpoint, safe_torch_load, save_checkpoint
from composer.utils.collect_env import (configure_excepthook, disable_env_report, enable_env_report,
get_composer_env_dict, print_env)
from composer.utils.device import get_device, is_tpu_installed
from composer.utils.file_helpers import (FORMAT_NAME_WITH_DIST_AND_TIME_TABLE, FORMAT_NAME_WITH_DIST_TABLE,
create_symlink_file, ensure_folder_has_no_conflicting_files,
ensure_folder_is_empty, format_name_with_dist, format_name_with_dist_and_time,
get_file, is_tar, maybe_create_object_store_from_uri,
maybe_create_remote_uploader_downloader_from_uri, parse_uri)
from composer.utils.import_helpers import MissingConditionalImportError, import_object
from composer.utils.inference import ExportFormat, Transform, export_for_inference, export_with_logger, quantize_dynamic
from composer.utils.iter_helpers import IteratorFileStream, ensure_tuple, map_collection
from composer.utils.misc import (get_free_tcp_port, is_model_deepspeed, is_model_fsdp, is_notebook, model_eval_mode,
using_torch_2_0)
from composer.utils.object_store import (LibcloudObjectStore, ObjectStore, ObjectStoreTransientError, OCIObjectStore,
S3ObjectStore, SFTPObjectStore)
from composer.utils.retrying import retry
from composer.utils.string_enum import StringEnum
__all__ = [
'ensure_tuple',
'get_free_tcp_port',
'map_collection',
'IteratorFileStream',
'FORMAT_NAME_WITH_DIST_AND_TIME_TABLE',
'FORMAT_NAME_WITH_DIST_TABLE',
'get_file',
'PartialFilePath',
'create_symlink_file',
'ObjectStore',
'ObjectStoreTransientError',
'LibcloudObjectStore',
'S3ObjectStore',
'SFTPObjectStore',
'OCIObjectStore',
'MissingConditionalImportError',
'import_object',
'is_model_deepspeed',
'is_model_fsdp',
'is_notebook',
'StringEnum',
'load_checkpoint',
'save_checkpoint',
'safe_torch_load',
'ensure_folder_is_empty',
'ensure_folder_has_no_conflicting_files',
'export_for_inference',
'export_with_logger',
'quantize_dynamic',
'format_name_with_dist',
'format_name_with_dist_and_time',
'is_tar',
'maybe_create_object_store_from_uri',
'maybe_create_remote_uploader_downloader_from_uri',
'parse_uri',
'batch_get',
'batch_set',
'configure_excepthook',
'disable_env_report',
'enable_env_report',
'print_env',
'get_composer_env_dict',
'retry',
'model_eval_mode',
'get_device',
'is_tpu_installed',
'ExportFormat',
'Transform',
'export_with_logger',
'extract_hparams',
'convert_nested_dict_to_flat_dict',
'convert_flat_dict_to_nested_dict',
'using_torch_2_0',
]
| composer-dev | composer/utils/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Contains helper functions for auto-logging hparams."""
from typing import Any, Dict, List, Tuple
__all__ = ['extract_hparams', 'convert_nested_dict_to_flat_dict', 'convert_flat_dict_to_nested_dict']
def extract_hparams(locals_dict: Dict[str, Any]) -> Dict[str, Any]:
    """Takes in a local symbol table and recursively grabs any hyperparameters.
Args:
locals_dict (Dict[str, Any]): The local symbol table returned when calling locals(),
which maps any free local variables' names to their values.
Returns:
Dict[str, Any]: A nested dictionary with every element of locals_dict mapped to its
value or to another sub_dict.
"""
hparams = {}
for k, v in locals_dict.items():
if k.startswith('_') or k == 'self':
continue
hparams_to_add = _grab_hparams(v)
hparams[k] = hparams_to_add
return hparams
def _grab_hparams(obj) -> Any:
    """Helper function that parses objects for their hyperparameters, going only one level deep."""
# If the object has already grabbed its hyperparameters (it calls extract_hparams inside __init__)
# then parse hparams attribute (which is a dict) and name those sub-hyperparameters
if hasattr(obj, 'local_hparams'):
return {obj.__class__.__name__: obj.local_hparams}
elif isinstance(obj, List) or isinstance(obj, Tuple):
return [_get_obj_repr(sub_obj) for sub_obj in obj]
elif isinstance(obj, Dict):
return {k: _get_obj_repr(sub_obj) for k, sub_obj in obj.items()}
else:
return _get_obj_repr(obj)
def _get_obj_repr(obj: Any):
"""Returns best representation of object.
Args:
obj (Any): the object.
Returns:
        obj if obj is None or it is an int, float, str, or bool. Otherwise
        returns obj.__class__.__name__.
"""
if any([isinstance(obj, type_) for type_ in [int, float, str, bool]]) or obj is None:
return obj
else:
return obj.__class__.__name__
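# Minimal usage sketch (illustrative only): extracting hyperparameters from local variables.
# Names prefixed with an underscore (and ``self``) are skipped.
def _example_extract_hparams():
    learning_rate = 0.1
    optimizer_name = 'sgd'
    _private_note = 'ignored'
    hparams = extract_hparams(locals())
    assert hparams == {'learning_rate': 0.1, 'optimizer_name': 'sgd'}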
def convert_nested_dict_to_flat_dict(nested_dict: Dict, prefix='') -> Dict:
    """Takes in a nested dict and converts it to a flat dict with keys separated by slashes.
Args:
nested_dict (Dict): A dictionary containing at least one other dictionary.
        prefix (str, optional): A prefix to prepend to the keys in the dictionary.
            Defaults to ''.
Returns:
Dict: A flat dictionary representation of the nested one (contains no other
dictionaries inside of it)
"""
flat_dict = {}
for k, v in nested_dict.items():
key = prefix + '/' + k if prefix != '' else k
# Recursively crawl sub-dictionary.
if isinstance(v, dict):
sub_flat_dict = convert_nested_dict_to_flat_dict(prefix=key, nested_dict=v)
flat_dict.update(sub_flat_dict)
else:
flat_dict[key] = v
return flat_dict
def convert_flat_dict_to_nested_dict(flat_dict: Dict) -> Dict:
"""Converts flat dictionary separated by slashes to nested dictionary.
Args:
        flat_dict (Dict): A flat dictionary (containing no sub-dictionaries), with keys
            separated by slashes, e.g. {'a':1, 'b/c':2}
Returns:
Dict: a nested dict.
"""
nested_dict = {}
for k, v in flat_dict.items():
# Initially sub_dict is the main nested_dict, but we will continually update it to be the
# sub-dictionary of sub_dict.
sub_dict = nested_dict
sub_keys = k.split('/')
for sub_key in sub_keys[:-1]:
if sub_key not in sub_dict:
# Create a new sub-dictionary inside of sub_dict.
sub_dict[sub_key] = {}
# Change the sub_dict reference to be the sub-dictionary of sub_dict (i.e. go one level deeper).
sub_dict = sub_dict[sub_key]
# The last key in sub_keys does not map to a dict. It just maps to v.
sub_dict[sub_keys[-1]] = v
# Changes to sub_dict will be reflected in nested_dict, so we can just return nested_dict.
return nested_dict
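# Minimal usage sketch (illustrative only): the flat and nested representations round-trip.
def _example_flat_nested_round_trip():
    nested = {'optimizer': {'lr': 0.1, 'name': 'sgd'}, 'seed': 42}
    flat = convert_nested_dict_to_flat_dict(nested)
    assert flat == {'optimizer/lr': 0.1, 'optimizer/name': 'sgd', 'seed': 42}
    assert convert_flat_dict_to_nested_dict(flat) == nested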
| composer-dev | composer/utils/auto_log_hparams.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""FX-based model transformation and optimization.
Provides utilities to do FX-based model transformations.
"""
import logging
import operator
import re
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
import torch
import torch.nn as nn
from torch.fx import GraphModule, Node
from torch.fx.passes.split_utils import split_by_tags
from composer.algorithms.stochastic_depth.stochastic_layers import BlockStochasticModule
from composer.utils import ensure_tuple
log = logging.getLogger(__name__)
__all__ = ['count_op_instances', 'replace_op', 'fuse_parallel_linears', 'apply_stochastic_residual']
def count_op_instances(gm: GraphModule, ops: Union[Callable, str, List[Union[Callable, str]]]) -> int:
"""Counts the number of instances of ``op`` in ``gm``.
.. rubric:: Example
.. testsetup::
import operator
import torch
from torch.fx import symbolic_trace
from composer.utils.fx_utils import count_op_instances
.. doctest::
>>> class M(torch.nn.Module):
... def forward(self, x, y):
... return x + y, torch.add(x, y), x.add(y)
>>> module = M()
>>> traced = symbolic_trace(module)
>>> count_op_instances(traced, torch.add)
1
>>> count_op_instances(traced, [operator.add, torch.add, "add"])
3
Arguments:
        gm (GraphModule): The source FX-traced graph.
        ops (Union[Callable, str, List[Union[Callable, str]]]):
            The operations to count.
Returns:
int: The number of instances of ``ops`` in ``gm``
"""
ops = list(ensure_tuple(ops))
all_modules = dict(gm.named_modules())
count = 0
for n in gm.graph.nodes:
for op in ops:
if n.target == op:
count += 1
elif n.op == 'call_module' and isinstance(op, type) and isinstance(all_modules[n.target], op):
count += 1
return count
def replace_op(gm: GraphModule, src_ops: Union[Callable, str, List[Union[Callable, str]]],
tgt_op: Callable[..., Any]) -> GraphModule:
"""Replace a single operator, torch method or function with another.
.. rubric:: Example
.. testsetup::
import operator
import torch
from torch.fx import symbolic_trace
from composer.utils.fx_utils import replace_op, count_op_instances
.. doctest::
>>> class M(torch.nn.Module):
... def forward(self, x, y):
... return x + y, torch.add(x, y), x.add(y)
>>> module = M()
>>> traced = symbolic_trace(module)
>>> traced = replace_op(traced, [operator.add, torch.add, "add"], torch.mul)
>>> count_op_instances(traced, torch.mul)
3
Arguments:
        gm (GraphModule): The source FX-traced graph.
        src_ops (Union[Callable, str, List[Union[Callable, str]]]):
            The operations to replace.
        tgt_op (Callable): The replacement for the operations.
Returns:
GraphModule: Modified GraphModule with each instance of an op in ``src_ops`` replaced with
``tgt_op``. Returns the input if no instances are found.
"""
src_ops = list(ensure_tuple(src_ops))
for n in gm.graph.nodes:
if any(n.target == op for op in src_ops):
with gm.graph.inserting_after(n):
new_node = gm.graph.call_function(tgt_op, n.args, n.kwargs)
n.replace_all_uses_with(new_node)
gm.graph.erase_node(n)
gm.recompile()
return gm
def _get_ancestors(node: Node) -> List[Node]:
ancestorNodes = []
while node.op != 'placeholder':
ancestorNodes.append(node)
node = node.all_input_nodes[0]
return ancestorNodes
def _get_residual_block_nodes(nodeLHS: Node, nodeRHS: Node) -> Tuple[List[Node], List[Node]]:
    """Walk backwards from nodeLHS and nodeRHS to the root and construct lists of their parents.
Arguments:
nodeLHS (Node): left-hand side node for a binary operator
nodeRHS (Node): right-hand side node for a binary operator
Returns:
(lhsAncestors, rhsAncestors): Two lists of nodes containing ancestors for ``nodeLHS`` and ``nodeRHS`` with
their common ancestors removed.
"""
lhsAncestors = _get_ancestors(nodeLHS)
rhsAncestors = _get_ancestors(nodeRHS)
# Iterate from back and eliminate common nodes
while lhsAncestors and rhsAncestors and lhsAncestors[-1] == rhsAncestors[-1]:
lhsAncestors.pop()
rhsAncestors.pop()
lhsAncestors.reverse()
rhsAncestors.reverse()
return lhsAncestors, rhsAncestors
def _attach_tag(nodes: List[Node], tag: str):
"""Attach tag to the given nodes for the splitter."""
for node in nodes:
node.tag = tag # type: ignore[attr-defined]
def _tag_residual_nodes(gm: GraphModule) -> Tuple[List[str], int]:
"""Tag nodes for splitting."""
# all nodes that are not a part of the residual blocks are tagged with "mainN_{count}".
# a tag is required for all nodes by split_by_tags
# Also an earlier tag can be repeated for later nodes.
count = 0
all_tags = []
# In this pass over all nodes, we just tag them
for node in gm.graph.nodes:
default_tag = f'mainN_{count}'
node.tag = default_tag
if default_tag not in all_tags:
all_tags.append(default_tag)
if node.op == 'call_function' and node.target in [torch.add, operator.add]:
assert len(node.all_input_nodes) == 2
node0, node1 = node.all_input_nodes[0], node.all_input_nodes[1]
lhs_nodes, rhs_nodes = _get_residual_block_nodes(node0, node1)
if lhs_nodes or rhs_nodes:
if len(lhs_nodes):
_attach_tag(lhs_nodes, f'non_res_{count}')
all_tags.append(f'non_res_{count}')
if len(rhs_nodes):
_attach_tag(rhs_nodes, f'residual_{count}')
all_tags.append(f'residual_{count}')
add_tag = f'addN_{count}'
if add_tag not in all_tags:
all_tags.append(add_tag)
node.tag = add_tag
count += 1
return all_tags, count
def _get_residual_modules(gm: GraphModule, node: Node) -> Tuple[Optional[GraphModule], Optional[GraphModule], int]:
"""Returns GraphModules for the main and residual branches.
node.op is assumed to be a call_module
"""
pattern = re.compile(r'non_res_(\d+)|residual_(\d+)')
matches = pattern.match(str(node.target))
if matches:
idx = int(matches[1]) if matches[1] else int(matches[2])
main_submod = getattr(gm, f'non_res_{idx}')
residual_submod = getattr(gm, f'residual_{idx}', None)
return main_submod, residual_submod, idx
else:
return None, None, 0
def _replace_residual_pattern(gm: GraphModule,
original_node: Node,
replacement_module: str,
has_residual_ops: bool = False) -> None:
"""Replaces main, residual and add_node with the ``replacement_module``.
``replacement_module`` is already added to the gm.
"""
insert_node = original_node.prev
add_node = original_node.next
if has_residual_ops:
add_node = original_node.next.next
with gm.graph.inserting_after(insert_node):
new_node = gm.graph.call_module(replacement_module, args=(insert_node,)) # type: ignore
add_node.replace_all_uses_with(new_node)
gm.graph.erase_node(add_node)
if has_residual_ops:
gm.graph.erase_node(original_node.next)
gm.graph.erase_node(original_node)
gm.graph.lint()
def apply_stochastic_residual(gm: GraphModule, drop_rate: float = 0.2) -> Tuple[GraphModule, int]:
    """Detect residual patterns and replace them with their stochastic equivalents.
    Arguments:
        gm (GraphModule): The source FX-traced graph. It can be the whole model symbolically traced.
        drop_rate (float, optional): The base drop rate for the stochastic residual blocks. Default: ``0.2``.
    Returns:
        Tuple[GraphModule, int]: The modified GraphModule with stochastic residual connections, and the
            number of residual patterns detected.
"""
if not isinstance(gm, GraphModule):
raise ValueError(
f'Input to apply_stochastic_residual should be an instance of GraphModule. Received {type(gm)}')
all_tags, count = _tag_residual_nodes(gm)
split_gm = split_by_tags(gm, all_tags)
for node in split_gm.graph.nodes:
if node.op != 'call_module':
continue
main_submod, residual_submod, idx = _get_residual_modules(split_gm, node)
if main_submod:
residual_st_instance = BlockStochasticModule(main_submod, residual_submod, drop_rate)
split_gm.add_submodule(f'resi_st_{idx}', residual_st_instance) # type: ignore
_replace_residual_pattern(split_gm, node, f'resi_st_{idx}', residual_submod is not None)
split_gm.recompile()
return split_gm, count
def _can_linears_be_fused(linear_nodes: List[Node], all_modules: Mapping[str, nn.Module]) -> bool:
    """Check that the linear layers either all have a bias or all lack one."""
# Forcing node.target to str is fine here as we are dealing with nn.Modules
# and their target is a str.
bias = all_modules[str(linear_nodes[0].target)].bias is None
return all(bias == (all_modules[str(node.target)].bias is None) for node in linear_nodes)
def _create_fused_linear(linear_nodes: List[Node],
all_modules: Mapping[str, nn.Module],
keep_weights: bool = False) -> Tuple[nn.Module, List[int]]:
"""Check if the linears can be fused.
If the linears can be fused, create a fused nn.Linear instance and return it.
"""
if keep_weights:
raise NotImplementedError('This feature is currently not implemented.')
assert len(linear_nodes) > 1, 'There should be at least 2 linears for fusion'
out_features = []
in_features = all_modules[str(linear_nodes[0].target)].in_features
bias = all_modules[str(linear_nodes[0].target)].bias is not None
for node in linear_nodes:
out_features.append(all_modules[str(node.target)].out_features)
assert in_features == all_modules[str(node.target)].in_features, 'mismatch in number of input features'
assert bias == (all_modules[str(node.target)].bias is not None), 'mismatch in bias'
return nn.Linear(in_features, sum(out_features), bias=bias), out_features # type: ignore
def fuse_parallel_linears(gm: GraphModule, keep_weights: bool = False) -> GraphModule:
"""If there are parallel linears in the model, fuse them together.
.. rubric:: Example
.. testsetup::
import torch
import torch.nn as nn
from torch.fx import symbolic_trace
from composer.utils.fx_utils import count_op_instances, fuse_parallel_linears
.. doctest::
>>> class M(nn.Module):
... def __init__(self):
... super().__init__()
... self.fc1 = nn.Linear(64, 64)
... self.fc2 = nn.Linear(64, 64)
... def forward(self, x):
... y = self.fc1(x)
... z = self.fc2(x)
... return y + z
>>> module = M()
>>> traced = symbolic_trace(module)
>>> count_op_instances(traced, nn.Linear)
2
>>> gm = fuse_parallel_linears(traced)
>>> count_op_instances(traced, nn.Linear)
1
Arguments:
gm (GraphModule): The source FX-traced graph.
Returns:
GraphModule: Modified GraphModule with parallel linears fused.
"""
all_modules: Dict[str, nn.Module] = dict(gm.named_modules())
fused_count = 0
for node in gm.graph.nodes:
# There could be more than two parallel linears
linears_to_fuse = []
# Check all the users of current node and collect all linear layers
for user in list(node.users):
if user.op == 'call_module' and isinstance(all_modules[user.target], nn.Linear):
linears_to_fuse.append(user)
        # Fuse if there is more than one parallel linear layer
if len(linears_to_fuse) > 1 and _can_linears_be_fused(linears_to_fuse, all_modules):
lin, out_features = _create_fused_linear(linears_to_fuse, all_modules, keep_weights)
gm.add_submodule(f'fused_linear_{fused_count}', lin) # type: ignore
with gm.graph.inserting_after(node):
fused_node = gm.graph.call_module(f'fused_linear_{fused_count}', args=(node,))
# insert the split node
with gm.graph.inserting_after(fused_node):
kwargs = {'split_size_or_sections': out_features, 'dim': -1}
split_node = gm.graph.call_function(torch.split, args=(fused_node,), kwargs=kwargs)
insert_point = split_node
for idx, lin_node in enumerate(linears_to_fuse):
with gm.graph.inserting_after(insert_point):
split_item = gm.graph.call_function(operator.getitem, (split_node, idx), {})
lin_node.replace_all_uses_with(split_item)
insert_point = split_item
gm.graph.erase_node(lin_node)
fused_count += 1
gm.graph.lint()
if fused_count > 0:
gm.recompile()
return gm
| composer-dev | composer/utils/fx_utils.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Inference-related utility functions for model export and optimizations.
Used for exporting models into various formats such as ONNX and TorchScript, and for applying optimizations such as fusion.
"""
from __future__ import annotations
import contextlib
import copy
import functools
import logging
import os
import tempfile
from typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Tuple, Union
import torch
import torch.nn as nn
from composer.utils import dist
from composer.utils.checkpoint import download_checkpoint, safe_torch_load
from composer.utils.device import get_device
from composer.utils.iter_helpers import ensure_tuple
from composer.utils.misc import is_model_ddp, is_model_deepspeed, is_model_fsdp, model_eval_mode
from composer.utils.object_store import ObjectStore
from composer.utils.string_enum import StringEnum
if TYPE_CHECKING:
from composer.devices import Device
from composer.loggers import Logger
log = logging.getLogger(__name__)
__all__ = ['export_for_inference', 'ExportFormat', 'export_with_logger', 'quantize_dynamic']
Transform = Callable[[nn.Module], nn.Module]
# This is the most common way to use dynamic quantization.
# Example:
# from composer.utils import quantize_dynamic
# export_for_inference(
# ...
# transforms = [quantize_dynamic],
# ...
# )
# A user can always redefine it with extra options. This also serves as an example of what to pass to transforms.
quantize_dynamic = functools.partial(torch.quantization.quantize_dynamic, qconfig_spec={torch.nn.Linear})
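# A hedged sketch of customizing the default transform above: the same
# functools.partial pattern, but also quantizing LSTM modules and making the
# target dtype explicit. Both keyword arguments are standard options of
# torch.quantization.quantize_dynamic; the name below is only an example.
_example_quantize_dynamic_int8 = functools.partial(
    torch.quantization.quantize_dynamic,
    qconfig_spec={torch.nn.Linear, torch.nn.LSTM},
    dtype=torch.qint8,
)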
class ExportFormat(StringEnum):
"""Enum class for the supported export formats.
Attributes:
torchscript: Export in "torchscript" format.
onnx: Export in "onnx" format.
"""
TORCHSCRIPT = 'torchscript'
ONNX = 'onnx'
def _move_sample_input_to_device(sample_input: Optional[Union[torch.Tensor, dict, list, Tuple]],
device: Device) -> Optional[Union[torch.Tensor, dict, list, Tuple]]:
"""Handle moving sample_input of various types to a device. If possible, avoids creating copies of the input."""
output = None
if isinstance(sample_input, torch.Tensor):
output = device.tensor_to_device(sample_input)
elif isinstance(sample_input, dict):
for key, value in sample_input.items():
sample_input[key] = _move_sample_input_to_device(value, device)
output = sample_input
elif isinstance(sample_input, list):
for i in range(len(sample_input)):
sample_input[i] = _move_sample_input_to_device(sample_input[i], device)
output = sample_input
elif isinstance(sample_input, tuple):
new_tuple = []
for tuple_item in sample_input:
new_tuple.append(_move_sample_input_to_device(tuple_item, device))
output = tuple(new_tuple)
return output
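# A small sketch of the helper above, assuming a CPU device so it runs anywhere:
# nested dicts, lists, and tuples of tensors are moved element-wise.
def _example_move_sample_input_to_device() -> None:
    cpu = get_device('cpu')
    sample = {'input_ids': torch.ones(2, 4, dtype=torch.long), 'extras': [torch.zeros(1), (torch.ones(3),)]}
    moved = _move_sample_input_to_device(sample, cpu)
    assert moved is not None
    assert moved['input_ids'].device.type == 'cpu'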
def export_for_inference(
model: nn.Module,
save_format: Union[str, ExportFormat],
save_path: str,
save_object_store: Optional[ObjectStore] = None,
sample_input: Optional[Any] = None,
dynamic_axes: Optional[Any] = None,
surgery_algs: Optional[Union[Callable[[nn.Module], nn.Module], Sequence[Callable[[nn.Module], nn.Module]]]] = None,
transforms: Optional[Sequence[Transform]] = None,
load_path: Optional[str] = None,
load_object_store: Optional[ObjectStore] = None,
load_strict: bool = False,
) -> None:
"""Export a model for inference.
Args:
model (nn.Module): An instance of nn.Module. Please note that model is not modified inplace.
Instead, export-related transformations are applied to a copy of the model.
save_format (Union[str, ExportFormat]): Format to export to. Either ``"torchscript"`` or ``"onnx"``.
save_path (str): The path for storing the exported model. It can be a path to a file on the local disk,
a URL, or if ``save_object_store`` is set, the object name
in a cloud bucket. For example, ``my_run/exported_model``.
save_object_store (ObjectStore, optional): If the ``save_path`` is in an object name in a cloud bucket
(i.e. AWS S3 or Google Cloud Storage), an instance of
:class:`~.ObjectStore` which will be used
to store the exported model. Set this to ``None`` if ``save_path`` is a local filepath.
(default: ``None``)
sample_input (Any, optional): Example model inputs used for tracing. This is needed for "onnx" export.
The ``sample_input`` need not match the batch size you intend to use for inference. However, the model
should accept the ``sample_input`` as is. (default: ``None``)
dynamic_axes (Any, optional): Dictionary specifying the axes of input/output tensors as dynamic. May be required
for exporting models using older versions of PyTorch when types cannot be inferred.
surgery_algs (Union[Callable, Sequence[Callable]], optional): Algorithms that should be applied to the model
before loading a checkpoint. Each should be a callable that takes a model and returns ``None``.
``surgery_algs`` are applied before ``transforms``. (default: ``None``)
transforms (Sequence[Transform], optional): transformations (usually optimizations) that should
be applied to the model. Each Transform should be a callable that takes a model and returns a modified model.
``transforms`` are applied after ``surgery_algs``. (default: ``None``)
load_path (str): The path to an existing checkpoint file.
It can be a path to a file on the local disk, a URL, or if ``load_object_store`` is set, the object name
for a checkpoint in a cloud bucket. For example, run_name/checkpoints/ep0-ba4-rank0. (default: ``None``)
load_object_store (ObjectStore, optional): If the ``load_path`` is in an object name in a cloud bucket
(i.e. AWS S3 or Google Cloud Storage), an instance of
:class:`~.ObjectStore` which will be used to retrieve the checkpoint.
Otherwise, if the checkpoint is a local filepath, set to ``None``. (default: ``None``)
load_strict (bool): Whether the keys (i.e., model parameter names) in the model state dict should
perfectly match the keys in the model instance. (default: ``False``)
Returns:
None
"""
save_format = ExportFormat(save_format)
if is_model_deepspeed(model):
raise ValueError('Exporting DeepSpeed models is currently not supported.')
if is_model_ddp(model):
raise ValueError(
'Directly exporting a DistributedDataParallel model is not supported. Export the module instead.')
if is_model_fsdp(model):
raise ValueError(
'Directly exporting a FSDP wrapped module is not supported as the model is deepcopied to avoid '
'side-effects, and FSDP does not support deepcopying. To export the model, load it without FSDP '
'wrapping.')
# Only rank0 exports the model
if dist.get_global_rank() != 0:
return
# Make a copy of the model so that we don't modify the original model
model = copy.deepcopy(model)
# Make a copy of the sample input so that we don't modify the original sample input
sample_input = copy.deepcopy(sample_input)
# Move model and sample input to CPU for export
cpu = get_device('cpu')
cpu.module_to_device(model)
if sample_input is not None:
sample_input = ensure_tuple(sample_input)
sample_input = _move_sample_input_to_device(sample_input, cpu)
# Apply surgery algorithms in the given order
for alg in ensure_tuple(surgery_algs):
alg(model)
if load_path is not None:
# download checkpoint and load weights only
log.debug('Loading checkpoint at %s', load_path)
with tempfile.TemporaryDirectory() as tempdir:
composer_states_filepath, _, _ = download_checkpoint(path=load_path,
node_checkpoint_folder=tempdir,
object_store=load_object_store,
progress_bar=True)
state_dict = safe_torch_load(composer_states_filepath)
missing_keys, unexpected_keys = model.load_state_dict(state_dict['state']['model'], strict=load_strict)
if len(missing_keys) > 0:
log.warning(f"Found these missing keys in the checkpoint: {', '.join(missing_keys)}")
if len(unexpected_keys) > 0:
log.warning(f"Found these unexpected keys in the checkpoint: {', '.join(unexpected_keys)}")
with model_eval_mode(model):
# Apply transformations (i.e., inference optimizations) in the given order
for transform in ensure_tuple(transforms):
model = transform(model)
is_remote_store = save_object_store is not None
tempdir_ctx = tempfile.TemporaryDirectory() if is_remote_store else contextlib.nullcontext(None)
with tempdir_ctx as tempdir:
if is_remote_store:
local_save_path = os.path.join(str(tempdir), 'model.export')
else:
local_save_path = save_path
if save_format == ExportFormat.TORCHSCRIPT:
export_model = None
try:
export_model = torch.jit.script(model)
except Exception:
if sample_input is not None:
log.warning('Scripting with torch.jit.script failed. Trying torch.jit.trace!',)
export_model = torch.jit.trace(model, sample_input)
else:
log.warning(
'Scripting with torch.jit.script failed and sample inputs are not provided for tracing '
'with torch.jit.trace',
exc_info=True)
if export_model is not None:
torch.jit.save(export_model, local_save_path)
else:
raise RuntimeError('Scripting and tracing failed! No model is being exported.')
if save_format == ExportFormat.ONNX:
if sample_input is None:
raise ValueError('sample_input argument is required for ONNX export.')
input_names = []
# assert statement for pyright error: Cannot access member "keys" for type "Tensor"
assert isinstance(sample_input, tuple)
# Extract input names from sample_input if it contains dicts
for i in range(len(sample_input)):
if isinstance(sample_input[i], dict):
input_names += list(sample_input[i].keys())
# Default input name if no dict present
if input_names == []:
input_names = ['input']
torch.onnx.export(
model,
sample_input,
local_save_path,
input_names=input_names,
output_names=['output'],
dynamic_axes=dynamic_axes,
opset_version=13,
)
# upload if required.
if is_remote_store:
save_object_store.upload_object(save_path, local_save_path)
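# A minimal sketch of exporting a toy model to TorchScript with the function
# above. The save path is a hypothetical example; transforms, surgery_algs, and
# checkpoints work the same way but are omitted to keep the sketch small.
def _example_export_for_inference_torchscript(save_path: str = '/tmp/example_model.pt') -> torch.Tensor:
    model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
    export_for_inference(model=model, save_format='torchscript', save_path=save_path)
    scripted = torch.jit.load(save_path)
    return scripted(torch.randn(4, 8))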
def export_with_logger(
model: nn.Module,
save_format: Union[str, ExportFormat],
save_path: str,
logger: Logger,
save_object_store: Optional[ObjectStore] = None,
sample_input: Optional[Any] = None,
transforms: Optional[Sequence[Transform]] = None,
) -> None:
"""Helper method for exporting a model for inference.
Exports the model to:
1) save_object_store, if one is provided,
2) logger.upload_file(save_path), if (1) does not apply and the logger has a destination that supports file uploading,
3) locally, if (1) and (2) do not apply.
Args:
model (nn.Module): An instance of nn.Module. Please note that model is not modified inplace.
Instead, export-related transformations are applied to a copy of the model.
save_format (Union[str, ExportFormat]): Format to export to. Either ``"torchscript"`` or ``"onnx"``.
save_path (str): The path for storing the exported model. It can be a path to a file on the local disk,
a URL, or if ``save_object_store`` is set, the object name
in a cloud bucket. For example, ``my_run/exported_model``.
logger (Logger): If this logger has a destination that supports file uploading, and save_object_store
is not provided, this logger is used to export the model.
save_object_store (ObjectStore, optional): If the ``save_path`` is in an object name in a cloud bucket
(i.e. AWS S3 or Google Cloud Storage), an instance of
:class:`~.ObjectStore` which will be used
to store the exported model. Set this to ``None`` if the logger should be used to export the model or
if ``save_path`` is a local filepath.
(default: ``None``)
sample_input (Any, optional): Example model inputs used for tracing. This is needed for "onnx" export.
The ``sample_input`` need not match the batch size you intend to use for inference. However, the model
should accept the ``sample_input`` as is. (default: ``None``)
transforms (Sequence[Transform], optional): transformations (usually optimizations) that should
be applied to the model. Each Transform should be a callable that takes a model and returns a modified model.
(default: ``None``)
Returns:
None
"""
if save_object_store is None and logger.has_file_upload_destination():
with tempfile.TemporaryDirectory() as tmpdir:
temp_local_save_path = os.path.join(str(tmpdir), 'model')
export_for_inference(model=model,
save_format=save_format,
save_path=temp_local_save_path,
sample_input=sample_input,
transforms=transforms)
logger.upload_file(remote_file_name=save_path, file_path=temp_local_save_path)
else:
export_for_inference(model=model,
save_format=save_format,
save_path=save_path,
save_object_store=save_object_store,
sample_input=sample_input,
transforms=transforms)
| composer-dev | composer/utils/inference.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Base class for Enums containing string values."""
from __future__ import annotations
import textwrap
import warnings
from enum import Enum
class StringEnum(Enum):
"""Base class for Enums containing string values.
This class enforces that all keys are uppercase and all values are lowercase. It also offers
the following convenience features:
* ``StringEnum(value)`` will perform a case-insensitive match on both the keys and value,
and is a no-op if given an existing instance of the class.
.. testsetup::
import warnings
warnings.filterwarnings(action="ignore", message="Detected comparision between a string")
.. doctest::
>>> from composer.utils import StringEnum
>>> class MyStringEnum(StringEnum):
... KEY = "value"
>>> MyStringEnum("KeY") # case-insensitive match on the key
<MyStringEnum.KEY: 'value'>
>>> MyStringEnum("VaLuE") # case-insensitive match on the value
<MyStringEnum.KEY: 'value'>
>>> MyStringEnum(MyStringEnum.KEY) # no-op if given an existing instance
<MyStringEnum.KEY: 'value'>
.. testcleanup::
warnings.resetwarnings()
* Equality checks support case-insensitive comparisons against strings:
.. testsetup::
import warnings
warnings.filterwarnings(action="ignore", message="Detected comparision between a string")
.. doctest::
>>> from composer.utils import StringEnum
>>> class MyStringEnum(StringEnum):
... KEY = "value"
>>> MyStringEnum.KEY == "KeY" # case-insensitive match on the key
True
>>> MyStringEnum.KEY == "VaLuE" # case-insensitive match on the value
True
>>> MyStringEnum.KEY == "something else"
False
.. testcleanup::
warnings.resetwarnings()
"""
__hash__ = Enum.__hash__
def __eq__(self, other: object) -> bool:
if isinstance(other, str):
cls_name = self.__class__.__name__
warnings.warn(
f"Detected comparision between a string and {cls_name}. Please use {cls_name}('{other}') "
f'to convert both types to {cls_name} before comparing.',
category=UserWarning)
try:
o_enum = type(self)(other)
except ValueError: # `other` is not a valid enum option
return NotImplemented
return super().__eq__(o_enum)
return super().__eq__(other)
def __init__(self, *args: object) -> None:
if self.name.upper() != self.name:
raise ValueError(
textwrap.dedent(f"""\
{self.__class__.__name__}.{self.name} is invalid.
All keys in {self.__class__.__name__} must be uppercase.
To fix, rename to '{self.name.upper()}'."""))
if self.value.lower() != self.value:
raise ValueError(
textwrap.dedent(f"""\
The value for {self.__class__.__name__}.{self.name}={self.value} is invalid.
All values in {self.__class__.__name__} must be lowercase.
To fix, rename to '{self.value.lower()}'."""))
@classmethod
def _missing_(cls, value: object) -> StringEnum:
# Override _missing_ so both lowercase and uppercase names are supported,
# as well as passing an instance through
if isinstance(value, cls):
return value
if isinstance(value, str):
try:
return cls[value.upper()]
except KeyError:
if value.lower() != value:
return cls(value.lower())
raise ValueError(f'Value {value} not found in {cls.__name__}')
raise TypeError(f'Unable to convert value({value}) of type {type(value)} into {cls.__name__}')
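# A small sketch of defining and using a StringEnum subclass; the class name and
# members below are placeholders. Keys must be uppercase and values lowercase
# (enforced in __init__ above), and lookups are case-insensitive.
def _example_string_enum() -> None:
    class Color(StringEnum):
        RED = 'red'
        BLUE = 'blue'

    assert Color('ReD') is Color.RED        # case-insensitive key lookup
    assert Color('BLUE') is Color.BLUE      # uppercase value also resolves
    assert Color(Color.RED) is Color.RED    # passing an instance is a no-op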
| composer-dev | composer/utils/string_enum.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Modify model architectures.
Algorithms, such as :class:`~composer.algorithms.blurpool.BlurPool`, replace model parameters in-place.
This module contains helper functions to replace parameters in :class:`~torch.nn.Module` and
:class:`~torch.optim.Optimizer` instances.
Attributes:
ReplacementFunction ((torch.nn.Module, int) -> Optional[torch.nn.Module]): Surgery replacement function protocol.
The function is provided with a :class:`torch.nn.Module` and a counter for the number of
instances of the module type that have been seen so far. The function should return a replacement
:class:`torch.nn.Module` if the module type should be replaced, or ``None`` otherwise.
Args:
module (torch.nn.Module): Source module
module_index (int): The i-th instance of module class.
Returns: Optional[torch.nn.Module]: The replacement module, or ``None`` to indicate no modification.
"""
import collections
import itertools
import logging
import textwrap
from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, OrderedDict, Sequence, Tuple, Type, Union
import torch
import torch.distributed
from torch.optim import Optimizer
from composer.utils.iter_helpers import ensure_tuple
log = logging.getLogger(__name__)
__all__ = [
'ReplacementFunction',
'replace_module_classes',
'count_module_instances',
'update_params_in_optimizer',
]
ReplacementFunction = Callable[[torch.nn.Module, int], Optional[torch.nn.Module]]
def _add_children_recursive(
module: torch.nn.Module,
children_to_parents_and_names: OrderedDict[torch.nn.Module, List[Tuple[torch.nn.Module, str]]],
) -> None:
# recursively build up children_to_parents_and_names so it maps a module to the list of
# (parent_module, attribute name)
for name, child in module.named_children():
if child not in children_to_parents_and_names:
children_to_parents_and_names[child] = []
_add_children_recursive(child, children_to_parents_and_names)
children_to_parents_and_names[child].append((module, name))
# adapted from https://github.com/microsoft/DeepSpeed/blob/b8ff4825aae4bced15a29a4298cb3e59098df999/deepspeed/module_inject/replace_module.py#L699
def replace_module_classes(
module: torch.nn.Module,
policies: Mapping[Type[torch.nn.Module], ReplacementFunction],
optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None,
recurse_on_replacements: bool = False,
indices: Optional[Dict[Any, int]] = None,
) -> Dict[torch.nn.Module, torch.nn.Module]:
"""Modify model in-place by recursively applying replacement policies.
.. rubric:: Example
The following example replaces all convolution layers with linear layers, and replaces any linear layer that
has 16 input features. Recursion occurs on replacement.
* The first replacement policy replaces the ``nn.Conv2d(1, 32, 3, 1)`` layer with a ``nn.Linear(16, 32)`` layer.
* The second replacement policy recurses on this replaced layer. Because ``in_features == 16``, this policy
replaces the layer with a ``nn.Linear(32, 64)``.
* This policy is invoked again on this new layer. However, since ``in_features == 32``,
no replacement occurs and this policy returns ``None``.
* Since no remaining layer matches a policy, or the matching policies return ``None``, surgery is finished.
* All replacements, including intermediate replacements, are returned.
.. testsetup::
from composer.utils.module_surgery import replace_module_classes
.. doctest::
>>> from torch import nn
>>> module = nn.Sequential(
... nn.Conv2d(1, 32, 3, 1),
... nn.ReLU(),
... nn.MaxPool2d(2),
... nn.Flatten(),
... nn.Linear(5408, 128),
... nn.ReLU(),
... nn.LogSoftmax(dim=1),
... )
>>> policies = {
... nn.Conv2d: lambda x, idx: nn.Linear(16, 32),
... nn.Linear: lambda x, idx: nn.Linear(32, 64) if x.in_features == 16 else None
... }
>>> replace_module_classes(module, policies, recurse_on_replacements=True)
{Conv2d(1, 32, kernel_size=(3, 3), stride=(1, 1)): Linear(in_features=16, out_features=32, bias=True), Linear(in_features=16, out_features=32, bias=True): Linear(in_features=32, out_features=64, bias=True)}
.. warning::
When a module is replaced, any tensor values within the module are not copied over
to the new module even when the shape is identical. For example, if model weights
are initialized prior to calling this function, the initialized weights will not
be preserved in any replacements.
Arguments:
module (torch.nn.Module): Model to modify.
policies (Mapping[torch.nn.Module, ReplacementFunction]): Mapping of source module class to
a replacement function. Matching policies are applied in the iteration order of the dictionary, so
if order is important, an :class:`OrderedDict` should be used. The replacement function may
return either another :class:`~torch.nn.Module` or ``None``. If the latter, the source module
is not replaced.
recurse_on_replacements (bool): If true, policies will be applied to any module returned
by another policy. For example, if one policy replaces a :class:`~torch.nn.Conv2d`
with a module containing another :class:`~torch.nn.Conv2d`, the replacement function will
be invoked with this new child :class:`~torch.nn.Conv2d` instance. If the replacement policies
are not conditioned on module properties that change during replacement, infinite recursion is
possible.
indices (Dict[Any, int], optional): A dictionary mapping module types to the number of times
they've occurred so far in the recursive traversal of
``module`` and its child modules. The value is provided to replacement functions, so they
may switch behaviors depending on the number of replacements that occurred for a given module type.
.. note::
These indices may not correspond to the order in which modules get called in the forward pass.
optimizers (torch.optim.Optimizer | Sequence[torch.optim.Optimizer], optional): One or more
:class:`~torch.optim.Optimizer` objects. If provided,
this function will attempt to remove parameters in replaced modules
from these optimizers, and add parameters from the newly-created
modules. See :func:`update_params_in_optimizer` for more information.
Returns:
Dict[torch.nn.Module, torch.nn.Module]:
A dictionary of ``{original_module: replacement_module}``
reflecting the replacements applied to ``module`` and its children.
"""
if isinstance(module, torch.nn.parallel.DistributedDataParallel):
raise TypeError(
textwrap.dedent("""\
Surgery is not supported after a module is wrapped with
`torch.nn.parallel.DistributedDataParallel`. Instead, please perform surgery on the underlying
`module.module` and re-wrap the `module.module` with `torch.nn.parallel.DistributedDataParallel`"""))
try:
import deepspeed
except ImportError:
pass
else:
if isinstance(module, deepspeed.DeepSpeedEngine):
raise TypeError(
textwrap.dedent("""\
Surgery is not supported after a module is wrapped with
`deepspeed.DeepSpeedEngine`. Instead, please perform surgery on the underlying module,
and re-wrap it with `deepspeed.DeepSpeedEngine`"""))
replaced_pairs = {}
children_to_parents_and_names: OrderedDict[torch.nn.Module, List[Tuple[torch.nn.Module,
str]]] = collections.OrderedDict()
_add_children_recursive(module, children_to_parents_and_names)
indices = indices if indices is not None else {c: 0 for c in policies}
default_device = _infer_device(module)
while len(children_to_parents_and_names) > 0:
child, parents = children_to_parents_and_names.popitem(last=False)
for policy_class, replacement_fn in policies.items():
if not isinstance(child, policy_class):
continue
module_index = indices[policy_class]
replacement = replacement_fn(
child,
module_index,
)
indices[policy_class] += 1
if replacement is not None:
assert child not in replaced_pairs
# if no device inferred (child has no parameters, e.g. Pool2d),
# use the default device inferred from the entire module.
device = _infer_device(child)
if device is None:
device = default_device
if device:
replacement = replacement.to(device)
replaced_pairs[child] = replacement
for parent, name in parents:
# update each parent with the replaced child
setattr(parent, name, replacement)
# recurse on new child object
if recurse_on_replacements:
children_to_parents_and_names[replacement] = list(parents) # copy the parents list
_add_children_recursive(replacement, children_to_parents_and_names)
if optimizers:
for old_module, new_module in replaced_pairs.items():
update_params_in_optimizer(old_params=old_module.parameters(),
new_params=new_module.parameters(),
optimizers=optimizers)
elif len(replaced_pairs) > 0:
log.info(
textwrap.dedent("""\
optimizers was not provided. Be sure to either create the optimizer after
invoking this method, or manually add new parameters to the existing optimizer."""))
return replaced_pairs
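# A minimal sketch of the function above combined with an optimizer, assuming a
# single-parameter-group SGD: every Linear is swapped for a fresh Linear of the
# same shape and the optimizer's parameter list is patched accordingly.
def _example_replace_module_classes_with_optimizer() -> None:
    model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    policies = {
        torch.nn.Linear: lambda module, index: torch.nn.Linear(module.in_features, module.out_features),
    }
    replaced = replace_module_classes(model, policies, optimizers=opt)
    assert len(replaced) == 1
    # The optimizer now tracks the new layer's weight and bias.
    assert len(opt.param_groups[0]['params']) == 2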
def _infer_device(module: torch.nn.Module) -> Optional[torch.device]:
"""Attempt to infer a module's device by inspecting its parameters and buffers."""
try:
p = next(itertools.chain(module.parameters(), module.buffers()))
except StopIteration:
return None
else:
return p.device
def count_module_instances(module: torch.nn.Module, module_class: Union[Type[torch.nn.Module],
Tuple[Type[torch.nn.Module], ...]]) -> int:
"""Counts the number of instances of ``module_class`` in ``module``, recursively.
.. rubric:: Example
.. testsetup::
from composer.utils.module_surgery import count_module_instances
.. doctest::
>>> from torch import nn
>>> module = nn.Sequential(nn.Linear(16, 32), nn.Linear(32, 64), nn.ReLU())
>>> count_module_instances(module, nn.Linear)
2
>>> count_module_instances(module, (nn.Linear, nn.ReLU))
3
Args:
module (torch.nn.Module): The source module.
module_class (Type[torch.nn.Module] | Tuple[Type[torch.nn.Module], ...]):
The module type (or tuple of module types) to count.
Returns:
int: The number of instances of ``module_class`` in ``module``
"""
found_instances = set()
_recur_count_module_instances(module, module_class, found_instances)
return len(found_instances)
def _recur_count_module_instances(
module: torch.nn.Module,
module_class: Union[Type[torch.nn.Module], Tuple[Type[torch.nn.Module], ...]],
found_instances: set,
):
"""Counts instances of ``module_class`` in ``module``, recursively, using a set to deduplicate.
A set of found modules is used to deduplicate, since a model might contain
multiple references to the same module instance.
"""
for _, child in module.named_children():
if isinstance(child, module_class):
found_instances.add(child)
_recur_count_module_instances(child, module_class, found_instances)
def _tensor_in(tensor: torch.Tensor, iterable: Iterable[torch.Tensor]):
"""Returns whether ``tensor is element`` for any element in ``iterable``.
This function is necessary because ``tensor in iterable`` does not work reliably for :class:`.Tensor` objects.
See https://discuss.pytorch.org/t/how-to-judge-a-tensor-is-in-a-list/15998/4
for further discussion.
"""
return any(tensor is elem for elem in iterable)
def _find_param_in_optimizer(param: torch.nn.parameter.Parameter, optimizer: Optimizer) -> int:
"""Returns the index of the optimizer ``param_group`` containing ``param``.
Optimizers store their parameters within an iterable of ``dict``s called
:attr:`~torch.optim.Optimizer.param_groups`.
By default, there is only one group in :attr:`~torch.optim.Optimizer.param_groups`
that contains all the parameters, but there can be more than one. This
function is a simple utility to identify which parameter group in
:attr:`~torch.optim.Optimizer.param_groups` contains a given parameter, if any. The information
might be desirable to, e.g., inspect the optimizer settings being used
for a given parameter, or to remove unused parameter tensors from
the optimizer.
Args:
param (torch.nn.parameter.Parameter): The parameter to search for.
optimizer (torch.optim.Optimizer): The optimizer to search within.
Returns:
int: The index within `opt.param_groups` of the first group containing ``param``,
or ``-1`` if ``param`` is not in ``opt``.
"""
for i, group in enumerate(optimizer.param_groups):
param_list: List[torch.nn.parameter.Parameter] = group['params']
if _tensor_in(param, param_list):
return i
return -1
def _ordered_diff(first: List, second: List) -> List:
"""Returns first - second while maintaining the order in first."""
second_list = set(second)
return [item for item in first if item not in second_list]
def update_params_in_optimizer(old_params: Iterable[torch.nn.parameter.Parameter],
new_params: Iterable[torch.nn.parameter.Parameter],
optimizers: Union[Optimizer, Sequence[Optimizer]]) -> None:
r"""Remove ``old_params`` from the ``optimizers`` and insert ``new_params``.
Newly added parameters will be added to the same :attr:`~torch.optim.Optimizer.param_group` as the removed
parameters. A :class:`RuntimeError` will be raised if ``old_params`` is split across multiple parameter groups.
This function differs from :meth:`replace_params_in_optimizer` in that ``len(old_params)`` need not equal
``len(new_params)``. However, this function does not support replacing parameters across multiple optimizer
groups.
.. warning::
Dynamically removing parameters from a :class:`~torch.optim.Optimizer` and adding parameters
to an existing :attr:`~torch.optim.Optimizer.param_group`\s are not officially supported, so this
function may fail when PyTorch is updated. The
`recommended practice <https://github.com/pytorch/pytorch/issues/1489#issuecomment-355301737>`_ is
to instead recreate the optimizer when the parameter set changes.
To simply add new parameters without replacing existing ones, use
:meth:`~torch.optim.Optimizer.add_param_group`.
Args:
old_params (Iterable[torch.nn.parameter.Parameter]):
Parameters in this iterable should be removed if they are not present in ``new_params``.
new_params: Parameters in this iterable should be added if they are
not present in ``old_params``.
optimizers (torch.optim.Optimizer | Sequence[torch.optim.Optimizer]): One or more
:class:`~torch.optim.Optimizer` objects
Raises:
NotImplementedError: If ``optimizers`` contains more than one optimizer.
RuntimeError: If not all removed parameters are found in the
same parameter group, or if any of them are not found at all.
"""
if len(ensure_tuple(optimizers)) > 1:
raise NotImplementedError('Surgery with multiple optimizers is not yet supported.')
opt = ensure_tuple(optimizers)[0]
# diff the two collection of parameters to find what needs to be removed or added
# We need to maintain the order of parameters here for training resumption
# with optimizers that store state so do not use set.
old_values = list(old_params)
new_values = list(new_params)
removed_params = _ordered_diff(old_values, new_values)
added_params = _ordered_diff(new_values, old_values)
if len(removed_params) == 0 and len(added_params) == 0:
return # nothing to do
# rip out the removed_params' states from the optimizer
for p in removed_params:
if _tensor_in(p, opt.state): # only true after training starts
opt.state.pop(p)
if len(opt.param_groups) == 1:
group_idx = 0
else:
# if there is more than one group, use the ripped out parameters to infer the group
# to add the new parameters into
old_group_idxs = [_find_param_in_optimizer(p, opt) for p in removed_params]
if len(old_group_idxs) == 0:
raise RuntimeError('No parameters were removed, so unable to infer the group into which to add parameters.')
missing_param_groups = [x for x in old_group_idxs if x < 0]
if len(missing_param_groups) > 0:
raise RuntimeError(f'Parameter groups {missing_param_groups} are not in the optimizer')
if min(old_group_idxs) != max(old_group_idxs) and len(added_params):
raise RuntimeError(
textwrap.dedent("""\
Not all removed parameters are in the same parameter group.
This makes it unclear where to add the new parameters."""))
group_idx = old_group_idxs[0]
param_group = opt.param_groups[group_idx]
new_param_list = [p for p in param_group['params'] if not _tensor_in(p, removed_params)]
new_param_list += list(added_params)
log.debug(f'adding {len(added_params)} new parameters to parameter group #{group_idx}')
param_group['params'] = new_param_list
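# A minimal sketch of calling the helper above directly, assuming a plain SGD
# optimizer with a single parameter group: one layer's parameters are retired
# and another layer's parameters take their place in the same group.
def _example_update_params_in_optimizer() -> None:
    old_layer = torch.nn.Linear(4, 4)
    new_layer = torch.nn.Linear(4, 8)
    opt = torch.optim.SGD(old_layer.parameters(), lr=0.1)
    update_params_in_optimizer(old_layer.parameters(), new_layer.parameters(), opt)
    # The group now holds exactly the new layer's weight and bias.
    assert len(opt.param_groups[0]['params']) == 2
    assert all(any(p is q for q in new_layer.parameters()) for p in opt.param_groups[0]['params'])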
| composer-dev | composer/utils/module_surgery.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helper methods for :mod:`torch.distributed`.
To use :mod:`torch.distributed`, launch your training script with the
:ref:`composer launcher for distributed training <distributed-training>`. For example,
the following command launches an eight-process training run.
.. code-block::
composer -n 8 path/to/train.py
The composer launcher will automatically configure the following environment variables, which are
required for distributed training:
* ``RANK``: The global rank of the process, which should be on ``[0; WORLD_SIZE - 1]``.
* ``LOCAL_RANK``: The local rank for the process, which should be on ``[0; LOCAL_WORLD_SIZE - 1]``.
* ``NODE_RANK``: The rank of the node.
* ``WORLD_SIZE``: The total number of processes.
* ``LOCAL_WORLD_SIZE``: The number of processes on the current node.
* ``MASTER_ADDR``: The hostname for the rank-zero process.
* ``MASTER_PORT``: The port for the rank-zero process.
If none of these environment variables are set, this module will safely assume a single-rank configuration, where::
RANK=0
LOCAL_RANK=0
NODE_RANK=0
WORLD_SIZE=1
LOCAL_WORLD_SIZE=1
"""
from __future__ import annotations
import datetime
import logging
import os
import time
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, TypeVar, Union, cast
import torch
import torch.distributed as dist
import torch.utils.data
from composer.utils.device import get_device
if TYPE_CHECKING:
from composer.devices import Device
TObj = TypeVar('TObj')
__all__ = [
'all_gather',
'all_gather_object',
'all_reduce',
'barrier',
'broadcast',
'broadcast_object_list',
'get_global_rank',
'get_local_rank',
'get_local_world_size',
'get_node_rank',
'get_sampler',
'get_world_size',
'initialize_dist',
'is_available',
'is_initialized',
]
log = logging.getLogger(__name__)
class MissingEnvironmentError(Exception):
pass
def _get_distributed_config_var(
env_var: str,
human_name: str,
default: int,
fetch_fn_name: Optional[str] = None,
) -> int:
if not dist.is_available():
return default
if dist.is_initialized() and fetch_fn_name is not None:
dist_value = int(getattr(dist, fetch_fn_name)())
if env_var in os.environ:
env_value = int(os.environ[env_var])
if dist_value != env_value:
raise RuntimeError('Torch distributed has been initialized with a value of '
f'{dist_value} for {human_name}, but environment variable '
f'{env_var} has value {env_value}.')
return dist_value
if env_var in os.environ:
return int(os.environ[env_var])
if dist.is_initialized():
raise MissingEnvironmentError('Torch distributed is initialized but environment variable '
f'{env_var} is not set.')
return default
def get_world_size() -> int:
"""Returns the world size, which is the number of processes participating in this training run.
Returns:
int: The world size.
"""
return _get_distributed_config_var(env_var='WORLD_SIZE',
human_name='world size',
default=1,
fetch_fn_name='get_world_size')
def get_global_rank() -> int:
"""Returns the global rank of the current process, which is on ``[0; WORLD_SIZE - 1]``.
Returns:
int: The global rank.
"""
return _get_distributed_config_var(env_var='RANK', human_name='global rank', default=0, fetch_fn_name='get_rank')
def get_local_world_size() -> int:
"""Returns the local world size, which is the number of processes for the current node.
Returns:
int: The local world size.
"""
return _get_distributed_config_var(env_var='LOCAL_WORLD_SIZE', default=1, human_name='local world size')
def get_local_rank() -> int:
"""Returns the local rank for the current process, which is on ``[0; LOCAL_WORLD_SIZE - 1]``.
Returns:
int: The local rank.
"""
return _get_distributed_config_var(env_var='LOCAL_RANK', default=0, human_name='local rank')
def get_node_rank() -> int:
"""Returns the node rank.
For example, if there are 2 nodes, and 2 ranks per node, then global ranks 0-1 will have a
node rank of 0, and global ranks 2-3 will have a node rank of 1.
Returns:
int: The node rank, starting at 0.
"""
return _get_distributed_config_var(env_var='NODE_RANK', default=0, human_name='node rank')
def barrier() -> None:
"""Synchronizes all processes.
This function blocks until all processes reach this function.
.. seealso:: :func:`torch.distributed.barrier`
"""
if dist.is_available() and dist.is_initialized():
dist.barrier()
return
world_size = get_world_size()
if world_size == 1:
return
raise RuntimeError(f'The world_size({world_size}) > 1, but the distributed package is not '
'available or has not been initialized. Please check you have initialized '
'the distributed runtime and that PyTorch has been built with distributed '
'support. If calling this function outside Trainer, please ensure that '
'`composer.utils.dist.initialize_dist` has been called first.')
def all_reduce(
tensor: torch.Tensor,
reduce_operation: str = 'SUM',
) -> None:
"""Reduce a ``tensor`` by applying the ``reduce_operation``.
All ranks get the same, bitwise-identical result.
.. seealso:: :func:`torch.distributed.all_reduce`
Args:
tensor (torch.Tensor): Input and output of the collective. The function
operates in-place.
reduce_operation (str, optional): The reduction operation (default: ``SUM``).
Valid options are:
* ``SUM``
* ``PRODUCT``
* ``MIN``
* ``MAX``
* ``BAND``
* ``BOR``
* ``BXOR``
Returns:
None: ``tensor`` is modified in-place.
"""
if dist.is_available() and dist.is_initialized():
reduce_op = getattr(dist.ReduceOp, reduce_operation.upper())
dist.all_reduce(tensor, op=reduce_op)
return
world_size = get_world_size()
if world_size == 1:
return
raise RuntimeError(f'The world_size({world_size}) > 1, but the distributed package is not '
'available or has not been initialized. Please check you have initialized '
'the distributed runtime and that PyTorch has been built with distributed '
'support. If calling this function outside Trainer, please ensure that '
'`composer.utils.dist.initialize_dist` has been called first.')
def broadcast(tensor: torch.Tensor, src: int) -> None:
"""Broadcasts the tensor to the whole group.
``tensor`` must have the same number of elements in all processes participating in the collective.
See :func:`torch.distributed.broadcast`.
Args:
tensor (torch.Tensor): Data to be sent if ``src`` is the rank of current process,
and tensor to be used to save received data otherwise.
src (int): Source rank
"""
if dist.is_available() and dist.is_initialized():
dist.broadcast(tensor, src)
return
world_size = get_world_size()
if world_size == 1:
return
raise RuntimeError(f'The world_size({world_size}) > 1, but the distributed package is not '
'available or has not been initialized. Please check you have initialized '
'the distributed runtime and that PyTorch has been built with distributed '
'support. If calling this function outside Trainer, please ensure that '
'`composer.utils.dist.initialize_dist` has been called first.')
def broadcast_object_list(object_list: List[Any], src: int = 0) -> None:
"""Broadcasts picklable objects in ``object_list`` to the whole group.
Similar to :func:`broadcast`, but Python objects can be passed in.
Note that all objects in ``object_list`` must be picklable in order to be broadcasted.
.. seealso:: :func:`torch.distributed.broadcast`.
Args:
object_list (List[Any]): List of input objects to broadcast.
Each object must be picklable. Only objects on the ``src`` rank will be broadcast,
but each rank must provide lists of equal sizes.
src (int, optional): Source rank (default: ``0``)
Returns:
None: ``object_list`` will be modified in-place and set to values of ``object_list`` from the ``src`` rank.
"""
if dist.is_available() and dist.is_initialized():
dist.broadcast_object_list(object_list, src)
# torch.distributed overwrites the entries of object_list on non-src ranks with the objects broadcast from src
return
world_size = get_world_size()
if world_size == 1:
return
raise RuntimeError(f'The world_size({world_size}) > 1, but the distributed package is not '
'available or has not been initialized. Please check you have initialized '
'the distributed runtime and that PyTorch has been built with distributed '
'support. If calling this function outside Trainer, please ensure that '
'`composer.utils.dist.initialize_dist` has been called first.')
def all_gather(tensor: torch.Tensor) -> Sequence[torch.Tensor]:
"""Collects a :class:`~torch.Tensor` from each rank.
.. seealso:: :func:`torch.distributed.all_gather`
Args:
tensor (torch.Tensor): Tensor from each rank to be gathered.
Returns:
Sequence[Tensor]: A sequence of tensors indexed by rank.
"""
if dist.is_available() and dist.is_initialized():
obj_gather_list = [torch.zeros_like(tensor) for _ in range(get_world_size())]
dist.all_gather(obj_gather_list, tensor)
return obj_gather_list
world_size = get_world_size()
if world_size == 1:
return [tensor]
raise RuntimeError(f'The world_size({world_size}) > 1, but the distributed package is not '
'available or has not been initialized. Please check you have initialized '
'the distributed runtime and that PyTorch has been built with distributed '
'support. If calling this function outside Trainer, please ensure that '
'`composer.utils.dist.initialize_dist` has been called first.')
def all_gather_object(obj: TObj) -> List[TObj]:
"""Collect a pickleable object from each rank and return a list of these objects indexed by rank.
.. seealso:: :func:`torch.distributed.all_gather_object`
Args:
obj (TObj): Object to be gathered.
Returns:
List[TObj]: A list of objects indexed by rank.
"""
if dist.is_available() and dist.is_initialized():
obj_gather_list = [None for _ in range(get_world_size())]
dist.all_gather_object(obj_gather_list, obj)
# torch.distributed replaces the None placeholders in obj_gather_list with the gathered objects on every rank
return cast(List[TObj], obj_gather_list)
world_size = get_world_size()
if world_size == 1:
return [obj]
raise RuntimeError(f'The world_size({world_size}) > 1, but the distributed package is not '
'available or has not been initialized. Please check you have initialized '
'the distributed runtime and that PyTorch has been built with distributed '
'support. If calling this function outside Trainer, please ensure that '
'`composer.utils.dist.initialize_dist` has been called first.')
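# A small sketch of gathering a per-rank payload with the helper above. With a
# single process and no initialized process group this simply returns a
# one-element list, so it is safe to run outside a distributed launcher.
def _example_all_gather_object() -> List[dict]:
    payload = {'rank': get_global_rank(), 'loss': 0.25}
    return all_gather_object(payload)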
def is_available():
"""Returns whether PyTorch was built with distributed support.
.. seealso:: :func:`torch.distributed.is_available`
Returns:
bool: Whether PyTorch distributed support is available.
"""
return dist.is_available()
def is_initialized():
"""Returns whether PyTorch distributed is initialized.
.. seealso:: :func:`torch.distributed.is_initialized`
Returns:
bool: Whether PyTorch distributed is initialized.
"""
return dist.is_initialized()
def initialize_dist(device: Union[str, Device], timeout: float = 300.0):
"""Initialize the default PyTorch distributed process group.
This function assumes that the following environment variables are set:
* ``RANK``: The global rank of the process, which should be on ``[0; WORLD_SIZE - 1]``.
* ``LOCAL_RANK``: The local rank for the process, which should be on ``[0; LOCAL_WORLD_SIZE - 1]``.
* ``NODE_RANK``: The rank of the node.
* ``WORLD_SIZE``: The total number of processes.
* ``LOCAL_WORLD_SIZE``: The number of processes on the current node.
* ``MASTER_ADDR``: The hostname for the rank-zero process.
* ``MASTER_PORT``: The port for the rank-zero process.
If none of the environment variables are set, this function will assume a single-rank
configuration and initialize the default process group using a :class:`torch.distributed.HashStore` store.
.. seealso:: :func:`torch.distributed.init_process_group`
Args:
device (str | Device): The device from which the distributed backend is
interpreted. Either a string corresponding to a device (one of ``'cpu'``,
``'gpu'``, ``'mps'``, or ``'tpu'``) or a :class:`.Device`.
timeout (float, optional): The timeout for operations executed against the process
group, expressed in seconds. (default: ``300.0``).
"""
# If device is string, get corresponding composer.devices.Device object
device_obj = get_device(device)
timeout_timedelta = datetime.timedelta(seconds=timeout)
if get_world_size() > 1 and not dist.is_available():
raise RuntimeError('When the world size is > 1, ``torch.distributed`` must be used. However, it is '
'not available in your installation of PyTorch. Please install or build PyTorch '
'with distributed support.')
if dist.is_initialized():
if dist.get_backend() != device_obj.dist_backend.lower():
raise RuntimeError(f'The requested backend ({device_obj.dist_backend}) differs from the backend '
f'of the current process group ({dist.get_backend()}). If you '
'wish to change backends, please restart the python process.')
return
# If any of these variables are set, and they do not match the single rank defaults,
# then do not automatically configure distributed. There are no reasonable defaults to infer
# for the other variables. Instead, let torch.dist error on an incomplete configuration.
# If none of these variables are set, or some are set but they match the single rank defaults,
# then fill the rest in.
dist_env_var_defaults = {
'NODE_RANK': '0',
'WORLD_SIZE': '1',
'LOCAL_WORLD_SIZE': '1',
'RANK': '0',
'LOCAL_RANK': '0',
}
log.debug(
'Initializing torch.dist: global_rank=%d, local_rank=%d, world_size=%d, local_world_size=%d, node_rank=%d',
get_global_rank(),
get_local_rank(),
get_world_size(),
get_local_world_size(),
get_node_rank(),
)
dist_env_vars_match_defaults = all(os.environ.get(k, v) == v for (k, v) in dist_env_var_defaults.items())
if dist_env_vars_match_defaults:
# Fill in the remaining single-rank variables
os.environ.update(dist_env_var_defaults)
dist.init_process_group(device_obj.dist_backend, store=dist.HashStore(), world_size=1, rank=0)
else:
dist.init_process_group(device_obj.dist_backend, timeout=timeout_timedelta)
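# A minimal sketch of single-process initialization with the function above,
# assuming none of the distributed environment variables are set: a one-rank
# gloo process group backed by a HashStore is created, after which the
# collectives in this module become usable.
def _example_initialize_dist_single_process() -> None:
    initialize_dist('cpu')
    assert get_world_size() == 1
    barrier()  # trivially returns with a single rank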
def get_sampler(dataset: torch.utils.data.Dataset, *, drop_last: bool = False, shuffle: bool = False):
"""Constructs a :class:`~torch.utils.data.distributed.DistributedSampler` for a dataset.
The :class:`~torch.utils.data.distributed.DistributedSampler` assumes that each rank has a complete copy of the
dataset. It ensures that each rank sees a unique shard for each epoch containing
``len(dataset) / get_world_size()`` samples.
.. note::
If the ``dataset`` is already sharded by rank, use a :class:`~torch.utils.data.SequentialSampler`
or :class:`~torch.utils.data.RandomSampler`.
Args:
dataset (torch.utils.data.Dataset): The dataset.
drop_last (bool): Whether to drop the last batch.
shuffle (bool): Whether to shuffle the dataset.
Returns:
torch.utils.data.distributed.DistributedSampler: The sampler.
"""
return torch.utils.data.DistributedSampler[int](
dataset,
drop_last=drop_last,
shuffle=shuffle,
num_replicas=get_world_size(),
rank=get_global_rank(),
)
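# A small sketch of wiring the sampler above into a DataLoader, assuming a toy
# in-memory dataset. With no distributed environment variables set the world
# size defaults to 1, so every sample is seen each epoch.
def _example_get_sampler() -> None:
    dataset = torch.utils.data.TensorDataset(torch.arange(10, dtype=torch.float32).unsqueeze(-1))
    sampler = get_sampler(dataset, drop_last=False, shuffle=True)
    loader = torch.utils.data.DataLoader(dataset, batch_size=4, sampler=sampler)
    for epoch in range(2):
        sampler.set_epoch(epoch)  # reshuffle deterministically per epoch
        for (batch,) in loader:
            assert batch.shape[-1] == 1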
@contextmanager
def local_rank_zero_download_and_wait(expected_file_path: str):
"""Context manager to wait for a file to exist on all ranks except local rank zero.
It is expected that the file will be created by local rank zero. This function is useful
as an alternative to ``run_local_rank_zero_first`` when downloading a file, because it does
not require dist to be initialized. It only requires that the ``LOCAL_RANK`` environment variable
is set. If dist is initialized, you should use ``run_local_rank_zero_first`` instead to avoid busy waiting.
Args:
expected_file_path (str): The path of the file to wait for.
"""
local_rank = get_local_rank()
if local_rank != 0:
while not os.path.exists(expected_file_path):
time.sleep(0.1)
yield
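# A sketch of one plausible usage pattern for the context manager above; the
# path below is a hypothetical shared asset. Local rank zero creates the file
# inside the block, while every other local rank polls for it before entering
# the block.
def _example_local_rank_zero_download_and_wait(path: str = '/tmp/shared_asset.txt') -> None:
    with local_rank_zero_download_and_wait(path):
        if get_local_rank() == 0 and not os.path.exists(path):
            with open(path, 'w') as f:
                f.write('downloaded once per node')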
@contextmanager
def run_local_rank_zero_first():
"""Context manager to hold all non-zero ranks until rank zero completes.
The below example will let the local rank zero download
the dataset, and hold all non-rank zeros until the
download is complete.
.. code-block:: python
with run_local_rank_zero_first():
dataset = CIFAR10(
...,
download=True,
)
This prevents race conditions where multiple
ranks attempt to download the dataset to the
same location.
"""
if dist.is_available() and dist.is_initialized():
# hold non-zero ranks until rank zero done
if get_local_rank() != 0:
dist.barrier()
yield
else:
yield
dist.barrier()
return
world_size = get_world_size()
if world_size == 1:
yield
return
raise RuntimeError(f'The world_size({world_size}) > 1, but the distributed package is not '
'available or has not been initialized. Please check you have initialized '
'the distributed runtime and that PyTorch has been built with distributed '
'support. If calling this function outside Trainer, please ensure that '
'`composer.utils.dist.initialize_dist` has been called first.')
| composer-dev | composer/utils/dist.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helpers for working with files."""
from __future__ import annotations
import logging
import os
import pathlib
import re
import tempfile
import uuid
import warnings
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from urllib.parse import urlparse
import requests
import tqdm
from composer.utils import dist
from composer.utils.iter_helpers import iterate_with_callback
from composer.utils.object_store import LibcloudObjectStore, ObjectStore, OCIObjectStore, S3ObjectStore
if TYPE_CHECKING:
from composer.core import Timestamp
from composer.loggers import LoggerDestination, RemoteUploaderDownloader
log = logging.getLogger(__name__)
__all__ = [
'get_file', 'ensure_folder_is_empty', 'ensure_folder_has_no_conflicting_files', 'format_name_with_dist',
'format_name_with_dist_and_time', 'is_tar', 'create_symlink_file', 'maybe_create_object_store_from_uri',
'maybe_create_remote_uploader_downloader_from_uri', 'parse_uri'
]
def _get_dist_config(strict: bool = True) -> Dict[str, Any]:
"""Returns a dict of distributed settings (rank, world_size, etc.).
If ``strict=True``, will error if a setting is not available (e.g. the
environment variable is not set). Otherwise, will only return settings
that are available.
"""
settings = {
'rank': dist.get_global_rank,
'local_rank': dist.get_local_rank,
'world_size': dist.get_world_size,
'local_world_size': dist.get_local_world_size,
'node_rank': dist.get_node_rank,
}
dist_config = {}
for name, func in settings.items():
try:
value = func()
except dist.MissingEnvironmentError as e:
if strict:
raise e
else:
dist_config[name] = value
return dist_config
def is_tar(name: Union[str, pathlib.Path]) -> bool:
"""Returns whether ``name`` has a tar-like extension.
Args:
name (str | pathlib.Path): The name to check.
Returns:
bool: Whether ``name`` is a tarball.
"""
return any(str(name).endswith(x) for x in ('.tar', '.tgz', '.tar.gz', '.tar.bz2', '.tar.lzma'))
def ensure_folder_is_empty(folder_name: Union[str, pathlib.Path]):
"""Ensure that the given folder is empty.
Hidden files and folders (those beginning with ``.``) are ignored. Sub-folders are checked recursively.
Args:
folder_name (str | pathlib.Path): The folder to ensure is empty.
Raises:
FileExistsError: If ``folder_name`` contains any non-hidden files, recursively.
"""
for root, dirs, files in os.walk(folder_name, topdown=True):
# Filter out hidden folders
dirs[:] = (x for x in dirs if not x.startswith('.'))
for file in files:
if not file.startswith('.'):
raise FileExistsError(f'{folder_name} is not empty; {os.path.join(root, file)} exists.')
def ensure_folder_has_no_conflicting_files(folder_name: Union[str, pathlib.Path], filename: str, timestamp: Timestamp):
"""Ensure that the given folder does not have any files conflicting with the ``filename`` format string.
If any file in ``folder_name`` matches the ``filename`` template formatted with a timestamp whose epoch,
batch, sample, or token counts occur after ``timestamp``, a ``FileExistsError`` will be raised.
Args:
folder_name (str | pathlib.Path): The folder to inspect.
filename (str): The pattern string for potential files.
timestamp (Timestamp): Ignore any files that occur before the provided timestamp.
Raises:
FileExistsError: If ``folder_name`` contains any files matching the ``filename`` template with a timestamp after ``timestamp``.
"""
# Prepare regex pattern by replacing f-string formatting with regex.
pattern = f'^{filename}$'
# Format time vars for regex match
for unit in ['epoch', 'batch', 'sample', 'token', 'batch_in_epoch', 'sample_in_epoch', 'token_in_epoch']:
if unit in filename:
pattern = pattern.replace(f'{{{unit}}}', f'(?P<{unit}>\\d+)')
# Format rank information
pattern = pattern.format(**_get_dist_config(strict=False))
template = re.compile(pattern)
for file in os.listdir(folder_name):
match = template.match(file)
if match is not None:
match = match.groupdict()
for unit, value in match.items():
if unit.endswith('_in_epoch'):
if 'epoch' not in match:
raise ValueError(f'{filename} has {{{unit}}} but not {{epoch}}. Add {{epoch}} for uniqueness.')
if int(match['epoch']) != timestamp.epoch:
continue # only check _in_epoch if both files have same epoch count
if int(value) > int(getattr(timestamp, unit)):
raise FileExistsError(
f'{os.path.join(folder_name, file)} may conflict with a future checkpoint of the current run.'
' Please delete that file, change to a new folder, or set overwrite=True.')
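# A minimal sketch of the pattern construction above, assuming a hypothetical
# checkpoint template: the time units become named numeric groups and the rank
# placeholder is filled in from the (here hard-coded) distributed config.
def _example_checkpoint_filename_pattern() -> dict:
    filename = 'ep{epoch}-ba{batch}-rank{rank}.pt'
    pattern = f'^{filename}$'
    for unit in ('epoch', 'batch'):
        pattern = pattern.replace(f'{{{unit}}}', f'(?P<{unit}>\\d+)')
    pattern = pattern.format(rank=0)
    match = re.compile(pattern).match('ep2-ba10-rank0.pt')
    assert match is not None
    return match.groupdict()  # {'epoch': '2', 'batch': '10'}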
FORMAT_NAME_WITH_DIST_TABLE = """
+------------------------+-------------------------------------------------------+
| Variable | Description |
+========================+=======================================================+
| ``{run_name}`` | The name of the training run. See |
| | :attr:`.Logger.run_name`. |
+------------------------+-------------------------------------------------------+
| ``{rank}`` | The global rank, as returned by |
| | :func:`~composer.utils.dist.get_global_rank`. |
+------------------------+-------------------------------------------------------+
| ``{local_rank}`` | The local rank of the process, as returned by |
| | :func:`~composer.utils.dist.get_local_rank`. |
+------------------------+-------------------------------------------------------+
| ``{world_size}`` | The world size, as returned by |
| | :func:`~composer.utils.dist.get_world_size`. |
+------------------------+-------------------------------------------------------+
| ``{local_world_size}`` | The local world size, as returned by |
| | :func:`~composer.utils.dist.get_local_world_size`. |
+------------------------+-------------------------------------------------------+
| ``{node_rank}`` | The node rank, as returned by |
| | :func:`~composer.utils.dist.get_node_rank`. |
+------------------------+-------------------------------------------------------+
"""
def format_name_with_dist(format_str: str, run_name: str, **extra_format_kwargs: object): # noqa: D103
formatted_str = format_str.format(
run_name=run_name,
**_get_dist_config(strict=False),
**extra_format_kwargs,
)
return formatted_str
format_name_with_dist.__doc__ = f"""
Format ``format_str`` with the ``run_name``, distributed variables, and ``extra_format_kwargs``.
The following format variables are available:
{FORMAT_NAME_WITH_DIST_TABLE}
For example, assume that the rank is ``0``. Then:
>>> from composer.utils import format_name_with_dist
>>> format_str = '{{run_name}}/rank{{rank}}.{{extension}}'
>>> format_name_with_dist(
... format_str,
... run_name='awesome_training_run',
... extension='json',
... )
'awesome_training_run/rank0.json'
Args:
format_str (str): The format string for the checkpoint filename.
run_name (str): The value for the ``{{run_name}}`` format variable.
extra_format_kwargs (object): Any additional :meth:`~str.format` kwargs.
"""
FORMAT_NAME_WITH_DIST_AND_TIME_TABLE = """
+----------------------------+------------------------------------------------------------+
| Variable | Description |
+============================+============================================================+
| ``{run_name}`` | The name of the training run. See |
| | :attr:`.Logger.run_name`. |
+----------------------------+------------------------------------------------------------+
| ``{rank}`` | The global rank, as returned by |
| | :func:`~composer.utils.dist.get_global_rank`. |
+----------------------------+------------------------------------------------------------+
| ``{local_rank}`` | The local rank of the process, as returned by |
| | :func:`~composer.utils.dist.get_local_rank`. |
+----------------------------+------------------------------------------------------------+
| ``{world_size}`` | The world size, as returned by |
| | :func:`~composer.utils.dist.get_world_size`. |
+----------------------------+------------------------------------------------------------+
| ``{local_world_size}`` | The local world size, as returned by |
| | :func:`~composer.utils.dist.get_local_world_size`. |
+----------------------------+------------------------------------------------------------+
| ``{node_rank}`` | The node rank, as returned by |
| | :func:`~composer.utils.dist.get_node_rank`. |
+----------------------------+------------------------------------------------------------+
| ``{epoch}`` | The total epoch count, as returned by |
| | :meth:`~composer.core.time.Timestamp.epoch`. |
+----------------------------+------------------------------------------------------------+
| ``{batch}`` | The total batch count, as returned by |
| | :meth:`~composer.core.time.Timestamp.batch`. |
+----------------------------+------------------------------------------------------------+
| ``{batch_in_epoch}`` | The batch count in the current epoch, as returned by |
| | :meth:`~composer.core.time.Timestamp.batch_in_epoch`. |
+----------------------------+------------------------------------------------------------+
| ``{sample}`` | The total sample count, as returned by |
| | :meth:`~composer.core.time.Timestamp.sample`. |
+----------------------------+------------------------------------------------------------+
| ``{sample_in_epoch}`` | The sample count in the current epoch, as returned by |
| | :meth:`~composer.core.time.Timestamp.sample_in_epoch`. |
+----------------------------+------------------------------------------------------------+
| ``{token}`` | The total token count, as returned by |
| | :meth:`~composer.core.time.Timestamp.token`. |
+----------------------------+------------------------------------------------------------+
| ``{token_in_epoch}`` | The token count in the current epoch, as returned by |
| | :meth:`~composer.core.time.Timestamp.token_in_epoch`. |
+----------------------------+------------------------------------------------------------+
| ``{total_wct}`` | The total training duration in seconds, as returned by |
| | :meth:`~composer.core.time.Timestamp.total_wct`. |
+----------------------------+------------------------------------------------------------+
| ``{epoch_wct}`` | The epoch duration in seconds, as returned by |
| | :meth:`~composer.core.time.Timestamp.epoch_wct`. |
+----------------------------+------------------------------------------------------------+
| ``{batch_wct}`` | The batch duration in seconds, as returned by |
| | :meth:`~composer.core.time.Timestamp.batch_wct`. |
+----------------------------+------------------------------------------------------------+
"""
def format_name_with_dist_and_time(
format_str: str,
run_name: str,
timestamp: Timestamp,
**extra_format_kwargs: object,
): # noqa: D103
formatted_str = format_str.format(
run_name=run_name,
epoch=int(timestamp.epoch),
batch=int(timestamp.batch),
batch_in_epoch=int(timestamp.batch_in_epoch),
sample=int(timestamp.sample),
sample_in_epoch=int(timestamp.sample_in_epoch),
token=int(timestamp.token),
token_in_epoch=int(timestamp.token_in_epoch),
total_wct=timestamp.total_wct.total_seconds(),
epoch_wct=timestamp.epoch_wct.total_seconds(),
batch_wct=timestamp.batch_wct.total_seconds(),
**_get_dist_config(strict=False),
**extra_format_kwargs,
)
return formatted_str
format_name_with_dist_and_time.__doc__ = f"""\
Format ``format_str`` with the ``run_name``, distributed variables, ``timestamp``, and ``extra_format_kwargs``.
In addition to the variables specified via ``extra_format_kwargs``, the following format variables are available:
{FORMAT_NAME_WITH_DIST_AND_TIME_TABLE}
For example, assume that the current epoch is ``0``, batch is ``0``, and rank is ``0``. Then:
>>> from composer.utils import format_name_with_dist_and_time
>>> format_str = '{{run_name}}/ep{{epoch}}-ba{{batch}}-rank{{rank}}.{{extension}}'
>>> format_name_with_dist_and_time(
... format_str,
... run_name='awesome_training_run',
... timestamp=state.timestamp,
... extension='json',
... )
'awesome_training_run/ep0-ba0-rank0.json'
Args:
format_str (str): The format string for the checkpoint filename.
run_name (str): The value for the ``{{run_name}}`` format variable.
timestamp (Timestamp): The timestamp.
extra_format_kwargs (object): Any additional :meth:`~str.format` kwargs.
"""
def parse_uri(uri: str) -> Tuple[str, str, str]:
"""Uses :py:func:`urllib.parse.urlparse` to parse the provided URI.
Args:
uri (str): The provided URI string
Returns:
Tuple[str, str, str]: A tuple containing the backend (e.g. s3), bucket name, and path.
Backend and bucket name will be empty strings if the input is a local path.
"""
parse_result = urlparse(uri)
backend, net_loc, path = parse_result.scheme, parse_result.netloc, parse_result.path
bucket_name = net_loc if '@' not in net_loc else net_loc.split('@')[0]
if backend == '' and bucket_name == '':
return backend, bucket_name, path
else:
return backend, bucket_name, path.lstrip('/')
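# Example (a quick sketch with hypothetical URIs): parse_uri splits a URI into its
# backend, bucket, and path components, while plain local paths yield empty
# backend and bucket strings.
#
#   parse_uri('s3://my-bucket/checkpoints/ep1-ba100-rank0.pt')
#   # -> ('s3', 'my-bucket', 'checkpoints/ep1-ba100-rank0.pt')
#   parse_uri('/tmp/checkpoints/ep1-ba100-rank0.pt')
#   # -> ('', '', '/tmp/checkpoints/ep1-ba100-rank0.pt')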
def maybe_create_object_store_from_uri(uri: str) -> Optional[ObjectStore]:
"""Automatically creates an :class:`composer.utils.ObjectStore` from supported URI formats.
Currently supported backends are ``s3://``, ``gs://``, ``oci://``, and local paths (in which case ``None`` will be returned).
Args:
uri (str): The path to (maybe) create an :class:`composer.utils.ObjectStore` from
Raises:
NotImplementedError: Raises when the URI format is not supported.
Returns:
Optional[ObjectStore]: Returns an :class:`composer.utils.ObjectStore` if the URI is of a supported format, otherwise None
"""
backend, bucket_name, _ = parse_uri(uri)
if backend == '':
return None
if backend == 's3':
return S3ObjectStore(bucket=bucket_name)
elif backend == 'wandb':
raise NotImplementedError(f'There is no implementation for WandB load_object_store via URI. Please use '
'WandBLogger')
elif backend == 'gs':
if 'GCS_KEY' not in os.environ or 'GCS_SECRET' not in os.environ:
raise ValueError(
'You must set the GCS_KEY and GCS_SECRET env variables with your HMAC access id and secret, respectively')
return LibcloudObjectStore(
provider='google_storage',
container=bucket_name,
key_environ='GCS_KEY', # Name of env variable for HMAC access id.
secret_environ='GCS_SECRET', # Name of env variable for HMAC secret.
)
elif backend == 'oci':
return OCIObjectStore(bucket=bucket_name)
else:
raise NotImplementedError(f'There is no implementation for the cloud backend {backend} via URI. Please use '
's3 or one of the supported object stores')
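# Example (a sketch assuming a hypothetical bucket name and that the matching
# extras, e.g. boto3 for S3, are installed): the returned store, if any, can then
# be used to download the object named by the remainder of the URI.
#
#   object_store = maybe_create_object_store_from_uri('s3://my-bucket/checkpoints/latest-rank0.pt')
#   # isinstance(object_store, S3ObjectStore) -> True
#   maybe_create_object_store_from_uri('/tmp/checkpoints/latest-rank0.pt')
#   # -> None, since the path is local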
def maybe_create_remote_uploader_downloader_from_uri(
uri: str, loggers: List[LoggerDestination]) -> Optional['RemoteUploaderDownloader']:
"""Automatically creates a :class:`composer.loggers.RemoteUploaderDownloader` from supported URI formats.
Currently supported backends are ``s3://``, ``gs://``, ``oci://``, and local paths (in which case ``None`` will be returned).
Args:
uri (str): The path to (maybe) create a :class:`composer.loggers.RemoteUploaderDownloader` from
loggers (List[:class:`composer.loggers.LoggerDestination`]): List of the existing :class:`composer.loggers.LoggerDestination` s so as to not create a duplicate
Raises:
NotImplementedError: Raises when the URI format is not supported.
Returns:
Optional[RemoteUploaderDownloader]: Returns a :class:`composer.loggers.RemoteUploaderDownloader` if the URI is of a supported format, otherwise None
"""
from composer.loggers import RemoteUploaderDownloader
existing_remote_uds = [logger_dest for logger_dest in loggers if isinstance(logger_dest, RemoteUploaderDownloader)]
backend, bucket_name, _ = parse_uri(uri)
if backend == '':
return None
for existing_remote_ud in existing_remote_uds:
if ((existing_remote_ud.remote_backend_name == backend) and
(existing_remote_ud.remote_bucket_name == bucket_name)):
warnings.warn(
f'There already exists a RemoteUploaderDownloader object to handle the URI you specified: {uri}')
return None
if backend in ['s3', 'oci']:
return RemoteUploaderDownloader(bucket_uri=f'{backend}://{bucket_name}')
elif backend == 'gs':
if 'GCS_KEY' not in os.environ or 'GCS_SECRET' not in os.environ:
raise ValueError(
'You must set the GCS_KEY and GCS_SECRET env variables with your HMAC access id and secret, respectively')
return RemoteUploaderDownloader(
bucket_uri=f'libcloud://{bucket_name}',
backend_kwargs={
'provider': 'google_storage',
'container': bucket_name,
'key_environ': 'GCS_KEY', # Name of env variable for HMAC access id.
'secret_environ': 'GCS_SECRET', # Name of env variable for HMAC secret.
})
elif backend == 'wandb':
raise NotImplementedError(f'There is no implementation for WandB via URI. Please use '
'WandBLogger with log_artifacts set to True')
else:
raise NotImplementedError(f'There is no implementation for the cloud backend {backend} via URI. Please use '
's3 or one of the supported RemoteUploaderDownloader object stores')
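# Example (a sketch with a hypothetical bucket): existing loggers are passed in so
# that a duplicate RemoteUploaderDownloader is not created for the same bucket.
#
#   remote_ud = maybe_create_remote_uploader_downloader_from_uri('s3://my-bucket/checkpoints', loggers=[])
#   # remote_ud is a RemoteUploaderDownloader for 's3://my-bucket', or None if the
#   # URI is a local path or is already handled by a logger in `loggers`.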
def get_file(
path: str,
destination: str,
object_store: Optional[Union[ObjectStore, LoggerDestination]] = None,
overwrite: bool = False,
progress_bar: bool = True,
):
"""Get a file from a local folder, URL, or object store.
Args:
path (str): The path to the file to retrieve.
* If ``object_store`` is specified, then the ``path`` should be the object name for the file to get.
Do not include the cloud provider or bucket name.
* If ``object_store`` is not specified but the ``path`` begins with ``http://`` or ``https://``,
the object at this URL will be downloaded.
* If ``object_store`` is not specified, but the ``path`` begins with ``s3://``, or another backend
supported by :meth:`composer.utils.maybe_create_object_store_from_uri` an appropriate object store
will be created and used.
* Otherwise, ``path`` is presumed to be a local filepath.
destination (str): The destination filepath.
If ``path`` is a local filepath, then a symlink to ``path`` at ``destination`` will be created.
Otherwise, ``path`` will be downloaded to a file at ``destination``.
object_store (ObjectStore, optional): An :class:`~.ObjectStore`, if ``path`` is located inside
an object store (i.e. AWS S3 or Google Cloud Storage). (default: ``None``)
This :class:`~.ObjectStore` instance will be used to retrieve the file. The ``path`` parameter
should be set to the object name within the object store.
Set this parameter to ``None`` (the default) if ``path`` is a URL or a local file.
overwrite (bool): Whether to overwrite an existing file at ``destination``. (default: ``False``)
progress_bar (bool, optional): Whether to show a progress bar. Ignored if ``path`` is a local file.
(default: ``True``)
Raises:
FileNotFoundError: If the ``path`` does not exist.
"""
if object_store is None and not (path.lower().startswith('http://') or path.lower().startswith('https://')):
object_store = maybe_create_object_store_from_uri(path)
_, _, path = parse_uri(path)
if path.endswith('.symlink'):
with tempfile.TemporaryDirectory() as tmpdir:
symlink_file_name = os.path.join(tmpdir, 'file.symlink')
# Retrieve the symlink
_get_file(
path=path,
destination=symlink_file_name,
object_store=object_store,
overwrite=False,
progress_bar=progress_bar,
)
# Read object name in the symlink
with open(symlink_file_name, 'r') as f:
real_path = f.read()
log.debug(f'Read path {real_path} from symlink file.')
# Recurse
return get_file(
path=real_path,
destination=destination,
object_store=object_store,
overwrite=overwrite,
progress_bar=progress_bar,
)
try:
_get_file(
path=path,
destination=destination,
object_store=object_store,
overwrite=overwrite,
progress_bar=progress_bar,
)
except FileNotFoundError as e:
new_path = path + '.symlink'
try:
# Follow the symlink
return get_file(
path=new_path,
destination=destination,
object_store=object_store,
overwrite=overwrite,
progress_bar=progress_bar,
)
except FileNotFoundError as ee:
# Raise the original not found error first, which contains the path to the user-specified file
raise e from ee
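# Example (a sketch with hypothetical paths): get_file infers the source from the
# path itself, so an explicit `object_store` is only needed when the store cannot
# be created from the URI alone.
#
#   get_file(
#       path='s3://my-bucket/checkpoints/ep1-ba100-rank0.pt',
#       destination='/tmp/checkpoint.pt',
#       overwrite=True,
#   )
#   get_file(path='https://example.com/model.pt', destination='/tmp/model.pt')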
def _get_file(
path: str,
destination: str,
object_store: Optional[Union[ObjectStore, LoggerDestination]],
overwrite: bool,
progress_bar: bool,
):
# Underlying _get_file logic that does not deal with symlinks
if object_store is not None:
if isinstance(object_store, ObjectStore):
total_size_in_bytes = object_store.get_object_size(path)
object_store.download_object(
object_name=path,
filename=destination,
callback=_get_callback(f'Downloading {path}') if progress_bar else None,
overwrite=overwrite,
)
else:
# Type LoggerDestination
object_store.download_file(
remote_file_name=path,
destination=destination,
progress_bar=progress_bar,
overwrite=overwrite,
)
return
if path.lower().startswith('http://') or path.lower().startswith('https://'):
# it's a url
with requests.get(path, stream=True) as r:
try:
r.raise_for_status()
except requests.exceptions.HTTPError as e:
if r.status_code == 404:
raise FileNotFoundError(f'URL {path} not found') from e
raise e
total_size_in_bytes = r.headers.get('content-length')
if total_size_in_bytes is not None:
total_size_in_bytes = int(total_size_in_bytes)
else:
total_size_in_bytes = 0
tmp_path = destination + f'.{uuid.uuid4()}.tmp'
try:
with open(tmp_path, 'wb') as f:
for data in iterate_with_callback(
r.iter_content(2**20),
total_size_in_bytes,
callback=_get_callback(f'Downloading {path}') if progress_bar else None,
):
f.write(data)
except:
# The download failed for some reason. Make a best-effort attempt to remove the temporary file.
try:
os.remove(tmp_path)
except OSError:
pass
raise
else:
os.rename(tmp_path, destination)
return
# It's a local filepath
if not os.path.exists(path):
raise FileNotFoundError(f'Local path {path} does not exist')
os.symlink(os.path.abspath(path), destination)
def _get_callback(description: str):
if len(description) > 60:
description = description[:42] + '...' + description[-15:]
pbar = None
def callback(num_bytes: int, total_size: int):
nonlocal pbar
if num_bytes == 0 or pbar is None:
pbar = tqdm.tqdm(desc=description, total=total_size, unit='iB', unit_scale=True)
n = num_bytes - pbar.n
pbar.update(n)
if num_bytes == total_size:
pbar.close()
return callback
def create_symlink_file(
existing_path: str,
destination_filename: Union[str, pathlib.Path],
):
"""Create a symlink file, which can be followed by :func:`get_file`.
Unlike Unix symlinks, the symlink files created by this function are normal text files, so they can be
uploaded to object stores via :meth:`.ObjectStore.upload_object` or loggers via :meth:`.Logger.upload_file`
that otherwise would not support Unix-style symlinks.
Args:
existing_path (str): The name of existing object that the symlink file should point to.
destination_filename (str | pathlib.Path): The filename to which to write the symlink.
It must end in ``'.symlink'``.
"""
# Loggers might not natively support symlinks, so we emulate symlinks via text files ending with `.symlink`
# This text file contains the name of the object it is pointing to.
# Only symlink if we're uploading files to begin with
# Write remote file name into file to emulate symlink
# Add .symlink extension so we can identify as emulated symlink when downloading
destination_filename = str(destination_filename)
if not destination_filename.endswith('.symlink'):
raise ValueError('The symlink filename must end with .symlink.')
with open(destination_filename, 'x') as f:
f.write(existing_path)
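# Example (a sketch with hypothetical object names): a '<name>.symlink' text file
# containing the real object name can be uploaded next to the object, and get_file
# will follow it transparently when the base name itself is missing.
#
#   create_symlink_file(
#       existing_path='checkpoints/ep2-ba200-rank0.pt',
#       destination_filename='/tmp/latest-rank0.pt.symlink',
#   )
#   # After uploading the file as 'checkpoints/latest-rank0.pt.symlink', a call to
#   # get_file('s3://my-bucket/checkpoints/latest-rank0.pt', ...) resolves through it.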
| composer-dev | composer/utils/file_helpers.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""OCI-Compatible object store."""
from __future__ import annotations
import os
import pathlib
import uuid
from typing import Callable, Optional, Union
from composer.utils.import_helpers import MissingConditionalImportError
from composer.utils.object_store.object_store import ObjectStore
__all__ = ['OCIObjectStore']
def _reraise_oci_errors(uri: str, e: Exception):
try:
import oci
except ImportError as e:
raise MissingConditionalImportError(conda_package='oci', extra_deps_group='oci',
conda_channel='conda-forge') from e
# If it's an oci service error with code: ObjectNotFound or status 404
if isinstance(e, oci.exceptions.ServiceError):
if e.status == 404: # type: ignore
if e.code == 'ObjectNotFound': # type: ignore
raise FileNotFoundError(f'Object {uri} not found. {e.message}') from e # type: ignore
if e.code == 'BucketNotFound': # type: ignore
raise ValueError(f'Bucket specified in {uri} not found. {e.message}') from e # type: ignore
raise e
# Client errors
if isinstance(e, oci.exceptions.ClientError):
raise ValueError(f'Error with using your OCI config file for uri {uri}') from e
if isinstance(e, oci.exceptions.MultipartUploadError):
raise ValueError(f'Error when uploading {uri} using OCI parallelized uploading') from e
# Otherwise just raise the original error.
raise e
class OCIObjectStore(ObjectStore):
"""Utility for uploading to and downloading from an OCI bucket.
Args:
bucket (str): The bucket name.
prefix (str): A path prefix such as `folder/subfolder/` to prepend to object names. Defaults to ''.
"""
def __init__(
self,
bucket: str,
prefix: str = '',
) -> None:
try:
import oci
except ImportError as e:
raise MissingConditionalImportError(conda_package='oci',
extra_deps_group='oci',
conda_channel='conda-forge') from e
# Format paths
self.bucket = bucket.strip('/')
self.prefix = prefix.strip('/')
if self.prefix != '':
self.prefix += '/'
try:
if 'OCI_CONFIG_FILE' in os.environ:
config = oci.config.from_file(os.environ['OCI_CONFIG_FILE'])
else:
config = oci.config.from_file()
self.client = oci.object_storage.ObjectStorageClient(config=config,
retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
except Exception as e:
_reraise_oci_errors(self.get_uri(object_name=''), e)
self.namespace = self.client.get_namespace().data
self.upload_manager = oci.object_storage.UploadManager(self.client)
def get_uri(self, object_name: str) -> str:
return f'oci://{self.bucket}/{object_name}'
def get_object_size(self, object_name: str) -> int:
try:
response = self.client.get_object(
namespace_name=self.namespace,
bucket_name=self.bucket,
object_name=object_name,
)
except Exception as e:
_reraise_oci_errors(self.get_uri(object_name), e)
if response.status == 200:
return int(response.data.headers['Content-Length'])
else:
raise ValueError(f'OCI get_object was not successful with a {response.status} status code.')
def upload_object(
self,
object_name: str,
filename: Union[str, pathlib.Path],
callback: Optional[Callable[[int, int], None]] = None,
):
del callback
try:
self.upload_manager.upload_file(namespace_name=self.namespace,
bucket_name=self.bucket,
object_name=object_name,
file_path=filename)
except Exception as e:
_reraise_oci_errors(self.get_uri(object_name), e)
def download_object(
self,
object_name: str,
filename: Union[str, pathlib.Path],
overwrite: bool = False,
callback: Optional[Callable[[int, int], None]] = None,
):
del callback
if os.path.exists(filename) and not overwrite:
raise FileExistsError(f'The file at {filename} already exists and overwrite is set to False')
tmp_path = str(filename) + f'.{uuid.uuid4()}.tmp'
try:
response = self.client.get_object(
namespace_name=self.namespace,
bucket_name=self.bucket,
object_name=object_name,
)
except Exception as e:
_reraise_oci_errors(self.get_uri(object_name), e)
with open(tmp_path, 'wb') as f:
f.write(response.data.content)
if overwrite:
os.replace(tmp_path, filename)
else:
os.rename(tmp_path, filename)
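# Example (a sketch assuming the `oci` extra is installed, a configured OCI config
# file, and a hypothetical bucket name): OCIObjectStore follows the same
# ObjectStore interface used by the rest of composer.
#
#   store = OCIObjectStore(bucket='my-oci-bucket')
#   store.upload_object('checkpoints/ep1-ba100-rank0.pt', '/tmp/ep1-ba100-rank0.pt')
#   store.download_object('checkpoints/ep1-ba100-rank0.pt', '/tmp/restored.pt', overwrite=True)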
| composer-dev | composer/utils/object_store/oci_object_store.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Utility for uploading to and downloading from cloud object stores."""
import io
import os
import pathlib
import uuid
from typing import Any, Callable, Dict, Optional, Union
from requests.exceptions import ConnectionError
from urllib3.exceptions import ProtocolError
from composer.utils.import_helpers import MissingConditionalImportError
from composer.utils.iter_helpers import iterate_with_callback
from composer.utils.object_store.object_store import ObjectStore, ObjectStoreTransientError
__all__ = ['LibcloudObjectStore']
class LibcloudObjectStore(ObjectStore):
"""Utility for uploading to and downloading from object (blob) stores, such as Amazon S3.
.. rubric:: Example
Here's an example for an Amazon S3 bucket named ``MY_CONTAINER``:
>>> from composer.utils import LibcloudObjectStore
>>> object_store = LibcloudObjectStore(
... provider="s3",
... container="MY_CONTAINER",
... provider_kwargs={
... "key": "AKIA...",
... "secret": "*********",
... }
... )
>>> object_store
<composer.utils.object_store.libcloud_object_store.LibcloudObjectStore object at ...>
Args:
provider (str): Cloud provider to use. Valid options are:
* :mod:`~libcloud.storage.drivers.atmos`
* :mod:`~libcloud.storage.drivers.auroraobjects`
* :mod:`~libcloud.storage.drivers.azure_blobs`
* :mod:`~libcloud.storage.drivers.backblaze_b2`
* :mod:`~libcloud.storage.drivers.cloudfiles`
* :mod:`~libcloud.storage.drivers.digitalocean_spaces`
* :mod:`~libcloud.storage.drivers.google_storage`
* :mod:`~libcloud.storage.drivers.ktucloud`
* :mod:`~libcloud.storage.drivers.local`
* :mod:`~libcloud.storage.drivers.minio`
* :mod:`~libcloud.storage.drivers.nimbus`
* :mod:`~libcloud.storage.drivers.ninefold`
* :mod:`~libcloud.storage.drivers.oss`
* :mod:`~libcloud.storage.drivers.rgw`
* :mod:`~libcloud.storage.drivers.s3`
.. seealso:: :doc:`Full list of libcloud providers <libcloud:storage/supported_providers>`
container (str): The name of the container (i.e. bucket) to use.
provider_kwargs (Dict[str, Any], optional): Keyword arguments to pass into the constructor
for the specified provider. These arguments would usually include the cloud region
and credentials.
Common keys are:
* ``key`` (str): API key or username to be used (required).
* ``secret`` (str): Secret password to be used (required).
* ``secure`` (bool): Whether to use HTTPS or HTTP. Note: Some providers only support HTTPS, and it is on by default.
* ``host`` (str): Override hostname used for connections.
* ``port`` (int): Override port used for connections.
* ``api_version`` (str): Optional API version. Only used by drivers which support multiple API versions.
* ``region`` (str): Optional driver region. Only used by drivers which support multiple regions.
.. seealso:: :class:`libcloud.storage.base.StorageDriver`
key_environ (str, optional): Environment variable name for the API Key. Only used
if 'key' is not in ``provider_kwargs``. Default: None.
secret_environ (str, optional): Environment variable name for the secret password. Only
used if 'secret' is not in ``provider_kwargs``. Default: None.
chunk_size (int, optional): Chunk size, in bytes, used when streaming uploads and downloads. Default: ``1_024 * 1_024`` (1 MiB).
"""
def __init__(self,
provider: str,
container: str,
chunk_size: int = 1_024 * 1_024,
key_environ: Optional[str] = None,
secret_environ: Optional[str] = None,
provider_kwargs: Optional[Dict[str, Any]] = None) -> None:
try:
from libcloud.storage.providers import get_driver
except ImportError as e:
raise MissingConditionalImportError('libcloud', 'apache-libcloud') from e
provider_cls = get_driver(provider)
if provider_kwargs is None:
provider_kwargs = {}
if 'key' not in provider_kwargs and \
key_environ and key_environ in os.environ:
provider_kwargs['key'] = os.environ[key_environ]
if 'secret' not in provider_kwargs and \
secret_environ and secret_environ in os.environ:
provider_kwargs['secret'] = os.environ[secret_environ]
self.chunk_size = chunk_size
self._provider_name = provider
self._provider = provider_cls(**provider_kwargs)
self._container = self._provider.get_container(container)
def get_uri(self, object_name: str):
return f'{self._provider_name}://{self._container.name}/{object_name}'
def upload_object(
self,
object_name: str,
filename: Union[str, pathlib.Path],
callback: Optional[Callable[[int, int], None]] = None,
):
with open(filename, 'rb') as f:
stream = iterate_with_callback(_file_to_iterator(f, self.chunk_size),
os.fstat(f.fileno()).st_size, callback)
try:
self._provider.upload_object_via_stream(
stream,
container=self._container,
object_name=object_name,
)
except Exception as e:
self._ensure_transient_errors_are_wrapped(e)
def _ensure_transient_errors_are_wrapped(self, exc: Exception):
from libcloud.common.types import LibcloudError
if isinstance(exc, (LibcloudError, ProtocolError, TimeoutError, ConnectionError)):
if isinstance(exc, LibcloudError):
# The S3 driver does not encode the error code in an easy-to-parse manner
# So first checking if the error code is non-transient
is_transient_error = any(x in str(exc) for x in ('408', '409', '425', '429', '500', '503', '504'))
if not is_transient_error:
raise exc
raise ObjectStoreTransientError() from exc
raise exc
def _get_object(self, object_name: str):
"""Get object from object store.
Args:
object_name (str): The name of the object.
"""
from libcloud.storage.types import ObjectDoesNotExistError
try:
return self._provider.get_object(self._container.name, object_name)
except ObjectDoesNotExistError as e:
raise FileNotFoundError(f'Object not found: {self.get_uri(object_name)}') from e
except Exception as e:
self._ensure_transient_errors_are_wrapped(e)
def get_object_size(self, object_name: str) -> int:
return self._get_object(object_name).size
def download_object(
self,
object_name: str,
filename: Union[str, pathlib.Path],
overwrite: bool = False,
callback: Optional[Callable[[int, int], None]] = None,
):
if os.path.exists(filename) and not overwrite:
# If the file already exists, short-circuit and skip the download
raise FileExistsError(f'filename {filename} exists and overwrite was set to False.')
obj = self._get_object(object_name)
# Download first to a tempfile, and then rename, in case if the file gets corrupted in transit
tmp_filepath = str(filename) + f'.{uuid.uuid4()}.tmp'
try:
with open(tmp_filepath, 'wb+') as f:
stream = self._provider.download_object_as_stream(obj, chunk_size=self.chunk_size)
for chunk in iterate_with_callback(stream, obj.size, callback):
f.write(chunk)
except Exception as e:
# The download failed for some reason. Make a best-effort attempt to remove the temporary file.
try:
os.remove(tmp_filepath)
except OSError:
pass
self._ensure_transient_errors_are_wrapped(e)
# The download was successful.
if overwrite:
os.replace(tmp_filepath, filename)
else:
os.rename(tmp_filepath, filename)
def _file_to_iterator(f: io.IOBase, chunk_size: int):
while True:
byte = f.read(chunk_size)
if byte == b'':
break
yield byte
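# Example (a sketch with a hypothetical container; credentials are read from the
# named environment variables rather than being hard-coded):
#
#   store = LibcloudObjectStore(
#       provider='google_storage',
#       container='my-gcs-bucket',
#       key_environ='GCS_KEY',
#       secret_environ='GCS_SECRET',
#   )
#   store.upload_object('checkpoints/ep1-ba100-rank0.pt', '/tmp/ep1-ba100-rank0.pt')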
| composer-dev | composer/utils/object_store/libcloud_object_store.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""S3-Compatible object store."""
from __future__ import annotations
import os
import pathlib
import uuid
from typing import Any, Callable, Dict, Optional, Union
from composer.utils.import_helpers import MissingConditionalImportError
from composer.utils.object_store.object_store import ObjectStore
__all__ = ['S3ObjectStore']
_NOT_FOUND_CODES = ('403', '404', 'NoSuchKey')
def _ensure_not_found_errors_are_wrapped(uri: str, e: Exception):
import botocore.exceptions
if isinstance(e, botocore.exceptions.ClientError):
# error: Member "response" is unknown (reportGeneralTypeIssues)
if e.response['Error']['Code'] in _NOT_FOUND_CODES: # type: ignore
raise FileNotFoundError(f'Object {uri} not found') from e
raise e
class S3ObjectStore(ObjectStore):
"""Utility for uploading to and downloading from an S3-compatible bucket using :mod:`boto3`.
.. note::
Since :mod:`boto3` has automatic retry handling, transient errors are not
re-raised with :exc:`.ObjectStoreTransientError`. By default, the "Legacy retry mode" makes at most five
attempts for operations that failed due to transient errors. This setting can be customized via the
``retries`` key in ``client_config``.
See the :ref:`guide to retries <boto3:guide_retries>` for additional information.
.. warning::
For best security practices, it is recommended to set credentials via environment variables or config
files. **Never hard-code credentials** via the ``aws_access_key_id``, ``aws_secret_access_key``, or
``aws_session_token`` arguments.
See :ref:`guide to credentials <boto3:guide_credentials>` for more information.
Args:
bucket (str): The bucket name.
prefix (str): A path prefix such as `folder/subfolder/` to prepend to object names. Defaults to ''.
region_name (str, optional): The region name. Must be specified if not available in
a config file or environment variables. Defaults to None.
endpoint_url (str, optional): The URL to an S3-Compatible object store. Must be specified if using something
other than Amazon S3, like Google Cloud Storage. Defaults to None.
aws_access_key_id (str, optional): The access key id. Must be specified if not available in
a config file or environment variables. Defaults to None.
aws_secret_access_key (str, optional): The secret access key. Must be specified if not available in
a config file or environment variables. Defaults to None.
aws_session_token (str, optional): The session token. Must be specified if not available in
a config file or environment variables. Defaults to None.
client_config (dict, optional): Kwargs dictionary for :class:`botocore.config.Config`. Defaults to None.
transfer_config (dict, optional): Kwargs dictionary for :class:`boto3.s3.transfer.TransferConfig`. Defaults to None.
"""
def __init__(
self,
bucket: str,
prefix: str = '',
region_name: Optional[str] = None,
endpoint_url: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
client_config: Optional[Dict[Any, Any]] = None,
transfer_config: Optional[Dict[Any, Any]] = None,
) -> None:
try:
import boto3
from boto3.s3.transfer import TransferConfig
from botocore.config import Config
except ImportError as e:
raise MissingConditionalImportError('streaming', 'boto3') from e
# Format paths
self.bucket = bucket.strip('/')
self.prefix = prefix.strip('/')
if self.prefix:
self.prefix += '/'
if client_config is None:
client_config = {}
config = Config(**client_config)
if 'S3_ENDPOINT_URL' in os.environ and endpoint_url is None:
endpoint_url = os.environ['S3_ENDPOINT_URL']
self.client = boto3.Session().client(
's3',
config=config,
region_name=region_name,
endpoint_url=endpoint_url,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
)
if transfer_config is None:
transfer_config = {}
self.transfer_config = TransferConfig(**transfer_config)
def get_uri(self, object_name: str) -> str:
return f's3://{self.bucket}/{self.get_key(object_name)}'
def get_key(self, object_name: str) -> str:
return f'{self.prefix}{object_name}'
def get_object_size(self, object_name: str) -> int:
try:
obj = self.client.get_object(Bucket=self.bucket, Key=self.get_key(object_name))
except Exception as e:
_ensure_not_found_errors_are_wrapped(self.get_uri(object_name), e)
return obj['ContentLength']
def upload_object(
self,
object_name: str,
filename: Union[str, pathlib.Path],
callback: Optional[Callable[[int, int], None]] = None,
):
file_size = os.path.getsize(filename)
cb_wrapper = None if callback is None else lambda bytes_transferred: callback(bytes_transferred, file_size)
self.client.upload_file(Bucket=self.bucket,
Key=self.get_key(object_name),
Filename=filename,
Callback=cb_wrapper,
Config=self.transfer_config)
def download_object(
self,
object_name: str,
filename: Union[str, pathlib.Path],
overwrite: bool = False,
callback: Optional[Callable[[int, int], None]] = None,
):
if os.path.exists(filename) and not overwrite:
raise FileExistsError(f'The file at {filename} already exists and overwrite is set to False.')
tmp_path = str(filename) + f'.{uuid.uuid4()}.tmp'
if callback is None:
cb_wrapper = None
else:
file_size = self.get_object_size(object_name)
cb_wrapper = lambda bytes_transferred: callback(bytes_transferred, file_size)
try:
try:
self.client.download_file(Bucket=self.bucket,
Key=self.get_key(object_name),
Filename=tmp_path,
Callback=cb_wrapper,
Config=self.transfer_config)
except Exception as e:
_ensure_not_found_errors_are_wrapped(self.get_uri(object_name), e)
except:
# Make a best effort attempt to clean up the temporary file
try:
os.remove(tmp_path)
except OSError:
pass
raise
else:
if overwrite:
os.replace(tmp_path, filename)
else:
os.rename(tmp_path, filename)
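# Example (a sketch with a hypothetical bucket; credentials come from the usual
# boto3 sources such as environment variables or ~/.aws/credentials):
#
#   store = S3ObjectStore(bucket='my-bucket', prefix='runs/awesome_training_run')
#   store.upload_object('ep1-ba100-rank0.pt', '/tmp/ep1-ba100-rank0.pt')
#   # uploads to s3://my-bucket/runs/awesome_training_run/ep1-ba100-rank0.pt
#   store.download_object('ep1-ba100-rank0.pt', '/tmp/restored.pt', overwrite=True)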
| composer-dev | composer/utils/object_store/s3_object_store.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Object store base class and implementations."""
from composer.utils.object_store.libcloud_object_store import LibcloudObjectStore
from composer.utils.object_store.object_store import ObjectStore, ObjectStoreTransientError
from composer.utils.object_store.oci_object_store import OCIObjectStore
from composer.utils.object_store.s3_object_store import S3ObjectStore
from composer.utils.object_store.sftp_object_store import SFTPObjectStore
__all__ = [
'ObjectStore', 'ObjectStoreTransientError', 'LibcloudObjectStore', 'S3ObjectStore', 'SFTPObjectStore',
'OCIObjectStore'
]
| composer-dev | composer/utils/object_store/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Utility for uploading to and downloading from cloud object stores."""
from __future__ import annotations
import contextlib
import os
import pathlib
import urllib.parse
import uuid
from typing import Any, Callable, Dict, Optional, Union
from composer.utils.import_helpers import MissingConditionalImportError
from composer.utils.object_store.object_store import ObjectStore, ObjectStoreTransientError
__all__ = ['SFTPObjectStore']
try:
import paramiko.client
from paramiko import SSHClient
_PARAMIKO_AVAILABLE = True
except ImportError:
_PARAMIKO_AVAILABLE = False
def _set_kwarg(value: Any, kwargs: Dict[str, Any], arg_name: str, kwarg_name: str):
if kwarg_name in kwargs:
raise ValueError(f'The `{arg_name}` argument should not be specified directly if it is also included via `connect_kwargs`')
kwargs[kwarg_name] = value
class SFTPObjectStore(ObjectStore):
"""Utility for uploading to and downloading to a server via SFTP.
Args:
host (str): The server to connect to.
Also accepts a URI string in the form ``'sftp://username@host:port/./relative/path'``.
For an absolute path, use a double `//` -- e.g. ``'sftp://username@host:port//absolute/path'``.
port (int, optional): The server port to connect to.
username (str, optional): The username (if not specified in the SSH config) needed to authenticate.
Defaults to None.
password (str, optional): The password (if required) needed to authenticate. Defaults to None.
key_filename (pathlib.Path | str, optional): The filepath to a private key (if required) needed to
authenticate. Defaults to None. Any keys specified here will be tried *in addition* to any keys
specified in ``~/.ssh/`` or via a SSH agent.
key_filename_environ (str, optional): Environment variable defining path to an SSH keyfile.
Only used if key_filename is not provided. Default: ``COMPOSER_SFTP_KEY_FILE``.
known_hosts_filename (pathlib.Path | str, optional): The filename of the known hosts file. If not specified,
the default SSH known hosts will be used.
known_hosts_filename_environ (str, optional): Environment variable defining path to a known hosts file.
Only used if known_hosts_filename is not provided. Default: ``COMPOSER_SFTP_KNOWN_HOSTS_FILE``.
missing_host_key_policy (str | paramiko.client.MissingHostKeyPolicy, optional): The class name or instance of
:class:`paramiko.client.MissingHostKeyPolicy` to use for a missing host key. Defaults to ``'RejectPolicy'``.
Built-in options:
* ``'RejectPolicy'`` (the default), which will reject any host key not authorized in the ``known_hosts_filename``.
* ``'AutoAddPolicy'``, which will add any unknown host key.
* ``'WarningPolicy'``, which will warn on an unknown host key.
For custom logic, subclass :class:`paramiko.client.MissingHostKeyPolicy`, and provide an instance of this class.
cwd (str, optional): The directory to navigate to upon creating the SSH connection. If not present,
it will be created.
connect_kwargs (Dict[str, Any], optional): Any additional kwargs to pass through to :meth:`.SSHClient.connect`.
"""
def __init__(
self,
host: str,
port: int = 22,
username: Optional[str] = None,
password: Optional[str] = None,
known_hosts_filename: Optional[Union[pathlib.Path, str]] = None,
known_hosts_filename_environ: str = 'COMPOSER_SFTP_KNOWN_HOSTS_FILE',
key_filename: Optional[Union[pathlib.Path, str]] = None,
key_filename_environ: str = 'COMPOSER_SFTP_KEY_FILE',
missing_host_key_policy: Union[str, paramiko.client.MissingHostKeyPolicy] = 'RejectPolicy',
cwd: str = '',
connect_kwargs: Optional[Dict[str, Any]] = None,
):
if known_hosts_filename is None:
known_hosts_filename = os.environ.get(known_hosts_filename_environ, None)
self.known_hosts_filename = known_hosts_filename
if key_filename is None:
key_filename = os.environ.get(key_filename_environ, None)
self.key_filename = key_filename
if not _PARAMIKO_AVAILABLE:
raise MissingConditionalImportError(extra_deps_group='streaming', conda_package='paramiko')
url = urllib.parse.urlsplit(host)
if url.scheme != '':
if url.scheme.lower() != 'sftp':
raise ValueError('If specifying a URI, only the sftp scheme is supported.')
if not url.hostname:
raise ValueError('If specifying a URI, the URI must include the hostname.')
host = url.hostname
if url.username:
if username is not None:
raise ValueError(
'If specifying the username in the `host`, then the `username` argument must be blank.')
username = url.username
if url.password:
if password is not None:
raise ValueError(
'If specifying the password in the `host`, then the `password` argument must be blank.')
password = url.password
if url.port:
if port != 22:
raise ValueError('If specifying the port in the `host`, then the `port` argument must be blank.')
port = url.port
if url.path:
# strip the first left slash. Two slashes for absolute; 1 for relative
assert url.path.startswith('/'), 'The path should always start with a `/`'
cwd = url.path[1:]
if url.query or url.fragment:
raise ValueError('Query and fragment parameters are not supported as part of a URI.')
if connect_kwargs is None:
connect_kwargs = {}
if host:
_set_kwarg(host, connect_kwargs, arg_name='host', kwarg_name='hostname')
if port:
_set_kwarg(port, connect_kwargs, arg_name='port', kwarg_name='port')
if username:
_set_kwarg(username, connect_kwargs, arg_name='username', kwarg_name='username')
if password:
_set_kwarg(password, connect_kwargs, arg_name='password', kwarg_name='password')
if key_filename:
_set_kwarg(key_filename, connect_kwargs, arg_name='key_filename', kwarg_name='key_filename')
if cwd and not cwd.endswith('/'):
cwd += '/'
self.cwd = cwd
netloc = ''
if username:
netloc += f'{username}@'
if host:
netloc += host
if port:
netloc += f':{port}'
self._base_uri = urllib.parse.urlunsplit((
'sftp', # scheme
netloc, # netloc
'/' + cwd, # path
None, # query
None, # fragment
))
self.ssh_client = SSHClient()
if known_hosts_filename is not None:
known_hosts_filename = str(known_hosts_filename)
if isinstance(missing_host_key_policy, str):
try:
missing_host_key_policy = getattr(paramiko.client, missing_host_key_policy)()
assert isinstance(missing_host_key_policy, paramiko.client.MissingHostKeyPolicy)
except AttributeError:
raise ValueError(
"Invalid `missing_host_key_policy`. Must be 'AutoAddPolicy', 'RejectPolicy', or 'WarningPolicy'.")
self.ssh_client.set_missing_host_key_policy(missing_host_key_policy)
self.ssh_client.load_system_host_keys(known_hosts_filename)
self._connect_kwargs = connect_kwargs
self.ssh_client.connect(**connect_kwargs)
self.sftp_client = self.ssh_client.open_sftp()
def close(self):
self.sftp_client.close()
self.ssh_client.close()
def get_uri(self, object_name: str) -> str:
return self._base_uri + object_name
def get_object_size(self, object_name: str) -> int:
object_name = os.path.join(self.cwd, object_name)
with self._handle_transient_errors():
st_size = self.sftp_client.stat(object_name).st_size
if st_size is None:
raise RuntimeError('Cannot determine object size: stat(object_name).st_size is None')
return st_size
@contextlib.contextmanager
def _handle_transient_errors(self):
from paramiko import ChannelException, SSHException
try:
yield
except Exception as e:
if not self._is_cnx_alive():
# If the connection dropped, then it's a transient error. Create a new one, and raise the exception to try again.
self.close()
self.ssh_client.connect(**self._connect_kwargs)
self.sftp_client = self.ssh_client.open_sftp()
raise ObjectStoreTransientError from e
if isinstance(e, SSHException):
if 'Server connection dropped:' in str(e):
raise ObjectStoreTransientError from e
if isinstance(e, (TimeoutError, ConnectionError, EOFError, ChannelException)):
raise ObjectStoreTransientError from e
raise e
def _is_cnx_alive(self):
transport = self.ssh_client.get_transport()
assert transport is not None, 'transport should not be None'
if not transport.is_active() or not transport.is_alive():
return False
channel = self.sftp_client.get_channel()
assert channel is not None, 'channel should not be None if the transport is alive'
return channel.active and not channel.closed
def upload_object(
self,
object_name: str,
filename: Union[str, pathlib.Path],
callback: Optional[Callable[[int, int], None]] = None,
) -> None:
remote_object_name = os.path.join(self.cwd, object_name)
dirname = os.path.dirname(remote_object_name)
with self._handle_transient_errors():
if dirname:
self.ssh_client.exec_command(f'mkdir -p {dirname}')
self.sftp_client.put(str(filename), remote_object_name, callback=callback, confirm=False)
# Validating manually to raise ObjectStoreTransientErrors if the size mismatches
# This logic was adapted from the original source -- see
# https://github.com/paramiko/paramiko/blob/1824a27c644132e5d46f2294c1e2fa131c523559/paramiko/sftp_client.py#L719-L724
local_file_size = os.stat(filename).st_size
remote_file_size = self.get_object_size(object_name)
if local_file_size != remote_file_size:
raise ObjectStoreTransientError(
f'Size mismatch in put: local size ({local_file_size}) != remote size ({remote_file_size})')
def download_object(
self,
object_name: str,
filename: Union[str, pathlib.Path],
overwrite: bool = False,
callback: Optional[Callable[[int, int], None]] = None,
) -> None:
object_name = os.path.join(self.cwd, object_name)
dirname = os.path.dirname(filename)
if dirname:
os.makedirs(dirname, exist_ok=True)
if os.path.exists(filename) and not overwrite:
raise FileExistsError(f'The file at {filename} already exists')
tmp_path = str(filename) + f'.{uuid.uuid4()}.tmp'
try:
with self._handle_transient_errors():
self.sftp_client.get(remotepath=object_name, localpath=tmp_path, callback=callback)
except Exception:
# Make a best effort attempt to clean up the temporary file
try:
os.remove(tmp_path)
except OSError:
pass
raise
else:
if overwrite:
os.replace(tmp_path, filename)
else:
os.rename(tmp_path, filename)
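# Example (a sketch with a hypothetical host; authentication falls back to the SSH
# config, an SSH agent, or the COMPOSER_SFTP_KEY_FILE environment variable):
#
#   store = SFTPObjectStore(host='sftp://user@storage.example.com:22/./checkpoints')
#   store.upload_object('ep1-ba100-rank0.pt', '/tmp/ep1-ba100-rank0.pt')
#   store.close()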
| composer-dev | composer/utils/object_store/sftp_object_store.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Abstract class for utilities that upload to and download from object stores."""
import abc
import pathlib
from types import TracebackType
from typing import Callable, Optional, Type, Union
__all__ = ['ObjectStore', 'ObjectStoreTransientError']
class ObjectStoreTransientError(RuntimeError):
"""Custom exception class to signify transient errors.
Implementations of the :class:`.ObjectStore` should re-raise any transient exceptions
(e.g. too many requests, temporarily unavailable) with this class, so callers can easily
detect whether they should attempt to retry any operation.
For example, the :class:`.S3ObjectStore` does the following:
.. testcode::
from composer.utils import ObjectStore, ObjectStoreTransientError
import botocore.exceptions
class S3ObjectStore(ObjectStore):
def upload_object(self, file_path: str, object_name: str):
try:
...
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'LimitExceededException':
raise ObjectStoreTransientError(e.response['Error']['Code']) from e
raise e
Then, callers can automatically handle exceptions:
.. testcode::
import time
from composer.utils import ObjectStore, ObjectStoreTransientError
def upload_file(object_store: ObjectStore, max_num_attempts: int = 3):
for i in range(max_num_attempts):
try:
object_store.upload_object(...)
except ObjectStoreTransientError:
if i + 1 == max_num_attempts:
raise
else:
# Try again after exponential back-off
time.sleep(2**i)
else:
# upload successful
return
"""
pass
class ObjectStore(abc.ABC):
"""Abstract class for implementing object stores, such as LibcloudObjectStore and S3ObjectStore."""
def get_uri(self, object_name: str) -> str:
"""Returns the URI for ``object_name``.
.. note::
This function does not check that ``object_name`` is in the object store.
It computes the URI statically.
Args:
object_name (str): The object name.
Returns:
str: The URI for ``object_name`` in the object store.
"""
raise NotImplementedError(f'{type(self).__name__}.get_uri is not implemented')
def upload_object(
self,
object_name: str,
filename: Union[str, pathlib.Path],
callback: Optional[Callable[[int, int], None]] = None,
) -> None:
"""Upload an object currently located on a disk.
Args:
object_name (str): Object name (where object will be stored in the container)
filename (str | pathlib.Path): Path to the object on disk
callback ((int, int) -> None, optional): If specified, the callback is periodically called with the number of bytes
uploaded and the total size of the object being uploaded.
Raises:
ObjectStoreTransientError: If there was a transient connection issue with uploading the object.
"""
del object_name, filename, callback # unused
raise NotImplementedError(f'{type(self).__name__}.upload_object is not implemented')
def get_object_size(self, object_name: str) -> int:
"""Get the size of an object, in bytes.
Args:
object_name (str): The name of the object.
Returns:
int: The object size, in bytes.
Raises:
FileNotFoundError: If the file was not found in the object store.
ObjectStoreTransientError: If there was a transient connection issue with getting the object size.
"""
raise NotImplementedError(f'{type(self).__name__}.get_object_size is not implemented')
def download_object(
self,
object_name: str,
filename: Union[str, pathlib.Path],
overwrite: bool = False,
callback: Optional[Callable[[int, int], None]] = None,
) -> None:
"""Download an object to the specified destination path.
Args:
object_name (str): The name of the object to download.
filename (str | pathlib.Path): Full path to a file or a directory where the incoming file will be saved.
overwrite (bool, optional): Whether to overwrite an existing file at ``filename``, if it exists.
(default: ``False``)
callback ((int, int) -> None, optional): If specified, the callback is periodically called with the number of bytes already
downloaded and the total size of the object.
Raises:
FileNotFoundError: If the file was not found in the object store.
ObjectStoreTransientError: If there was a transient connection issue with downloading the object.
"""
del object_name, filename, overwrite, callback # unused
raise NotImplementedError(f'{type(self).__name__}.download_object is not implemented')
def close(self):
"""Close the object store."""
pass
def __enter__(self):
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc: Optional[BaseException],
traceback: Optional[TracebackType],
):
del exc_type, exc, traceback # unused
self.close()
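# Example (a sketch): because ObjectStore implements the context manager protocol,
# any concrete store can be used in a `with` block so that close() is always called,
# e.g. to release an SFTP connection.
#
#   with SFTPObjectStore(host='sftp://user@storage.example.com') as store:
#       store.download_object('checkpoints/latest-rank0.pt', '/tmp/latest-rank0.pt')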
| composer-dev | composer/utils/object_store/object_store.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The models module contains the :class:`.ComposerModel` base class along with reference
implementations of many common models. Additionally, it includes task-specific convenience
:class:`.ComposerModel`\\s that wrap existing PyTorch models with standard forward passes
and logging to enable quick interaction with the :class:`.Trainer`.
See :doc:`Composer Model </composer_model>` for more details.
"""
from composer.models.base import ComposerModel
from composer.models.bert import create_bert_classification, create_bert_mlm
from composer.models.classify_mnist import mnist_model
from composer.models.deeplabv3 import composer_deeplabv3
from composer.models.efficientnetb0 import composer_efficientnetb0
from composer.models.gpt2 import create_gpt2
from composer.models.huggingface import HuggingFaceModel, write_huggingface_pretrained_from_composer_checkpoint
from composer.models.initializers import Initializer
from composer.models.mmdetection import MMDetModel
from composer.models.resnet import composer_resnet
from composer.models.resnet_cifar import composer_resnet_cifar
from composer.models.tasks import ComposerClassifier
from composer.models.timm import composer_timm
from composer.models.unet import UNet
from composer.models.vit_small_patch16 import vit_small_patch16
__all__ = [
'ComposerModel',
'create_bert_classification',
'create_bert_mlm',
'mnist_model',
'composer_deeplabv3',
'composer_efficientnetb0',
'create_gpt2',
'HuggingFaceModel',
'write_huggingface_pretrained_from_composer_checkpoint',
'Initializer',
'MMDetModel',
'composer_resnet',
'composer_resnet_cifar',
'ComposerClassifier',
'composer_timm',
'UNet',
'vit_small_patch16',
]
| composer-dev | composer/models/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Module Initializers."""
from typing import Callable
import torch
from torch import nn as nn
from composer.utils import StringEnum
class Initializer(StringEnum):
"""Sets the initialization scheme for different layers of a PyTorch model."""
KAIMING_NORMAL = 'kaiming_normal'
KAIMING_UNIFORM = 'kaiming_uniform'
BN_UNIFORM = 'bn_uniform'
BN_ONES = 'bn_ones'
XAVIER_UNIFORM = 'xavier_uniform'
XAVIER_NORMAL = 'xavier_normal'
LINEAR_LOG_CONSTANT_BIAS = 'linear_log_constant_bias'
def get_initializer(self) -> Callable[[torch.nn.Module], None]:
"""Get the initializer function.
Returns:
(torch.nn.Module) -> None: The initializer function.
"""
def kaiming_normal(w: nn.Module):
if isinstance(w, torch.nn.Linear) or isinstance(w, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(w.weight)
def kaiming_uniform(w: nn.Module):
if isinstance(w, torch.nn.Linear) or isinstance(w, torch.nn.Conv2d):
torch.nn.init.kaiming_uniform_(w.weight)
def xavier_uniform(w: nn.Module):
if isinstance(w, torch.nn.Linear) or isinstance(w, torch.nn.Conv2d):
torch.nn.init.xavier_uniform_(w.weight)
def xavier_normal(w: nn.Module):
if isinstance(w, torch.nn.Linear) or isinstance(w, torch.nn.Conv2d):
torch.nn.init.xavier_normal_(w.weight)
def bn_ones(w: nn.Module):
if isinstance(w, torch.nn.BatchNorm2d):
w.weight.data = torch.ones_like(w.weight.data)
w.bias.data = torch.zeros_like(w.bias.data)
def bn_uniform(w: nn.Module):
if isinstance(w, torch.nn.BatchNorm2d):
w.weight.data = torch.rand(w.weight.data.shape)
w.bias.data = torch.zeros_like(w.bias.data)
def linear_log_constant_bias(w: nn.Module):
if isinstance(w, torch.nn.Linear):
w.bias.data = torch.ones(w.bias.shape) * -torch.log(torch.tensor(w.bias.shape[0]))
initializer_dict = {
'kaiming_normal': kaiming_normal,
'kaiming_uniform': kaiming_uniform,
'bn_uniform': bn_uniform,
'bn_ones': bn_ones,
'xavier_uniform': xavier_uniform,
'xavier_normal': xavier_normal,
'linear_log_constant_bias': linear_log_constant_bias
}
if self.value not in initializer_dict:
raise ValueError(f"Initializer '{self.value}' not found.")
return initializer_dict[self.value]
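# Example (a minimal sketch): an Initializer value produces a callable that can be
# applied to every submodule of a model via nn.Module.apply.
#
#   model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU())
#   model.apply(Initializer('kaiming_normal').get_initializer())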
| composer-dev | composer/models/initializers.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""A wrapper class that converts 🤗 Transformers models to composer models"""
from __future__ import annotations
import inspect
import json
import logging
import tempfile
import textwrap
from collections import UserDict
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, Union
import torch
from torchmetrics import Metric
from composer.metrics import InContextLearningMetric
from composer.models.base import ComposerModel
from composer.utils import MissingConditionalImportError, dist, get_file, import_object, safe_torch_load
if TYPE_CHECKING:
import transformers
from transformers import PretrainedConfig
from transformers.models.auto.auto_factory import _BaseAutoModelClass
log = logging.getLogger(__name__)
__all__ = ['HuggingFaceModel']
class HuggingFaceModel(ComposerModel):
"""
A wrapper class that converts 🤗 Transformers models to composer models.
Args:
model (transformers.PreTrainedModel): A 🤗 Transformers model.
tokenizer (transformers.PreTrainedTokenizer, optional): The tokenizer used to prepare the dataset. Default ``None``.
.. note:: If the tokenizer is provided, its config will be saved in the composer checkpoint, and it can be reloaded
using :meth:`HuggingFaceModel.hf_from_composer_checkpoint`. If the tokenizer is not provided here, it will not be saved in the composer checkpoint.
use_logits (bool, optional): If True, the model's output logits will be used to calculate validation metrics. Else, metrics will be inferred from the HuggingFaceModel directly. Default: ``False``
metrics (list[Metric], optional): list of torchmetrics to apply to the output of `eval_forward` during training. If ``eval_metrics`` is ``None``, these will also be used as ``eval_metrics``. Default: ``None``.
eval_metrics (list[Metric], optional): list of torchmetrics to compute on the eval_dataloader, or be accessible to :class:`Evaluator`s. Default: ``None``.
shift_labels (bool, optional): If True, the batch's labels will be shifted before being used to calculate metrics. This should be set to true for CausalLM models and false otherwise. If not specified, `shift_labels` will be set automatically based on the model class name. Default: ``None``.
allow_embedding_resizing (bool, optional): If True, the model's embeddings will be automatically resized when they are smaller than the tokenizer vocab size. Default: ``False``.
.. note:: To ensure correct behavior, set `shift_labels` manually if using a custom model (i.e., if `model` is not
an instance of a registered 🤗 Transformers class).
.. warning:: This wrapper is designed to work with 🤗 datasets that define a `labels` column.
Example:
.. testcode::
import transformers
from composer.models import HuggingFaceModel
hf_model = transformers.AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)
hf_tokenizer = transformers.AutoTokenizer.from_pretrained('bert-base-uncased')
model = HuggingFaceModel(hf_model, hf_tokenizer)
"""
def __init__(self,
model: transformers.PreTrainedModel,
tokenizer: Optional[Union[transformers.PreTrainedTokenizer,
transformers.PreTrainedTokenizerFast]] = None,
use_logits: Optional[bool] = False,
metrics: Optional[List[Metric]] = None,
eval_metrics: Optional[List[Metric]] = None,
shift_labels: Optional[bool] = None,
allow_embedding_resizing: bool = False) -> None:
try:
import transformers
del transformers # unused
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp',
conda_package='transformers',
conda_channel='conda-forge') from e
super().__init__()
self.model = model
self.config = model.config
self.model_forward_args = inspect.getfullargspec(self.model.forward).args
self.tokenizer = tokenizer
if self.tokenizer is None:
log.warning(
'The tokenizer was not provided. This means the tokenizer config will not be saved in the checkpoint.')
if tokenizer is not None and self.config.vocab_size < len(tokenizer):
if allow_embedding_resizing:
# when the embedding size is smaller than the tokenizer vocab size,
# the embeddings should get resized to match the tokenizer vocab size
log.warning(f'The number of tokens in the tokenizer is greater than the number of tokens in the model.'
f' This would cause an error during training.'
f' Resizing the model embeddings to {len(tokenizer)} from {self.config.vocab_size}.')
self.model.resize_token_embeddings(len(tokenizer))
else:
raise ValueError(
f'The number of tokens in the tokenizer is greater than the number of tokens in the model.'
f' This would cause an error during training.'
f' You can resize the model embeddings to {len(tokenizer)} from {self.config.vocab_size}'
f' by calling `model.resize_token_embeddings(len(tokenizer))` before calling the `HuggingFaceModel`'
f' constructor, or pass `allow_embedding_resizing=True` to have it done automatically.')
elif tokenizer is not None and self.config.vocab_size > len(tokenizer):
# when the embedding size is greater than the tokenizer vocab size,
# the embeddings do not _need_ to be resized to match the tokenizer vocab size,
# and should be done by the user if desired
log.warning(
f'The number of tokens in the tokenizer is less than the number of tokens in the model.'
f' You may want to resize the model embeddings to {len(tokenizer)} from {self.config.vocab_size}'
f' by calling `model.resize_token_embeddings(len(tokenizer))` before calling the `HuggingFaceModel`'
f' constructor.')
self.use_logits = use_logits
self.train_metrics: Optional[Dict] = None
self.val_metrics: Optional[Dict] = None
if eval_metrics is not None:
self.val_metrics = {metric.__class__.__name__: metric for metric in eval_metrics}
if metrics is not None:
self.train_metrics = {metric.__class__.__name__: metric for metric in metrics}
# if eval_metrics is None, use the same metrics as train_metrics
if eval_metrics is None:
self.val_metrics = {metric.__class__.__name__: metric for metric in metrics}
self.labels: Optional[torch.Tensor] = None # set in eval_forward() if exists
is_causal_lm = _is_registered_causal_lm(model)
self.shift_labels = is_causal_lm if shift_labels is None else shift_labels
if is_causal_lm and not self.shift_labels:
log.warning('The shift_labels argument was set to False but the model is an instance of a'
' HuggingFace Causal LM. This may lead to incorrect behavior.')
# Note: No warning if shift_labels and not is_causal_lm, since the model may simply be a custom class.
self.dummy_forward_called = False
@staticmethod
def hf_from_composer_checkpoint(
checkpoint_path: str,
model_instantiation_class: Optional[Union[Type[transformers.PreTrainedModel], Type['_BaseAutoModelClass'],
str]] = None,
model_config_kwargs: Optional[dict] = None,
local_checkpoint_save_location: Optional[Union[Path, str]] = None
) -> Tuple[transformers.PreTrainedModel, Optional[transformers.PreTrainedTokenizer]]:
"""Loads a HuggingFace model (and tokenizer if present) from a composer checkpoint.
.. note:: This function does not load the weights from the checkpoint. It just loads the correctly configured
model and tokenizer classes.
.. testsetup::
import torch
dataset = RandomTextClassificationDataset(size=16, use_keys=True)
train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=8)
eval_dataloader = torch.utils.data.DataLoader(dataset, batch_size=8)
import transformers
from composer.models import HuggingFaceModel
from composer.trainer import Trainer
hf_model = transformers.AutoModelForSequenceClassification.from_pretrained('prajjwal1/bert-tiny', num_labels=2)
hf_tokenizer = transformers.AutoTokenizer.from_pretrained('prajjwal1/bert-tiny')
composer_model = HuggingFaceModel(hf_model, tokenizer=hf_tokenizer, metrics=[], use_logits=True)
trainer = Trainer(model=composer_model,
train_dataloader=train_dataloader,
save_filename='composer-hf-checkpoint.pt',
max_duration='1ep',
save_folder='./')
trainer.fit()
trainer.close()
Example:
.. testcode::
hf_model, hf_tokenizer = HuggingFaceModel.hf_from_composer_checkpoint('composer-hf-checkpoint.pt')
# At this point, hf_model is randomly initialized
composer_model = HuggingFaceModel(hf_model, hf_tokenizer)
trainer = Trainer(model=composer_model,
train_dataloader=train_dataloader,
save_filename='composer-hf-checkpoint-2.pt',
max_duration='1ep',
save_folder='./',
load_path='composer-hf-checkpoint.pt')
# At this point, the weights have been loaded from the composer checkpoint into hf_model
Args:
checkpoint_path (str): Path to the composer checkpoint, can be a local path, or a remote path beginning with ``s3://``, or another backend
supported by :meth:`composer.utils.maybe_create_object_store_from_uri`.
model_instantiation_class (Union[Type[:class:`transformers.PreTrainedModel`], Type[:class:`transformers.AutoModel`], str], optional):
Class to use to create the HuggingFace model. Defaults to the model class used in the original checkpoint. If this argument is
a HuggingFace auto class (e.g. :class:`transformers.AutoModel` or :class:`transformers.AutoModelForSequenceClassification`), the ``from_config`` method will be used,
while if it is of type :class:`transformers.PreTrainedModel`, the constructor will be called. This argument can also be a string,
which will attempt to be imported as the class to use.
model_config_kwargs (Dict[str, Any], optional): Extra arguments to pass in for the model config creation (e.g. ``num_labels`` for creating a sequence classification model)
local_checkpoint_save_location (Optional[Union[Path, str]], optional): If specified, where to save the checkpoint file to locally.
If the input ``checkpoint_path`` is already a local path, this will be a symlink.
Defaults to None, which will use a temporary file.
Raises:
ValueError: If the ``model_instantiation_class``, or the model class saved in the checkpoint, cannot be imported
Returns:
Tuple[transformers.PreTrainedModel, Optional[transformers.PreTrainedTokenizer]]: The loaded HuggingFace model and (if present) tokenizer
"""
try:
import transformers
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp',
conda_package='transformers',
conda_channel='conda-forge') from e
# default local path to a tempfile if path is not provided
if local_checkpoint_save_location is None:
tmp_dir = tempfile.TemporaryDirectory()
local_checkpoint_save_location = Path(tmp_dir.name) / 'local-composer-checkpoint.pt'
if model_config_kwargs is None:
model_config_kwargs = {}
# download the checkpoint file
get_file(checkpoint_path, str(local_checkpoint_save_location))
# load the state dict in
loaded_state_dict = safe_torch_load(local_checkpoint_save_location)
hf_state = loaded_state_dict['state']['integrations']['huggingface']
hf_model_state = hf_state['model']
hf_tokenizer_state = hf_state['tokenizer']
loaded_config = get_hf_config_from_composer_state_dict(loaded_state_dict, config_overrides=model_config_kwargs)
if model_instantiation_class is not None:
# If the instantiation class is explicitly provided, use it
# If a string is provided, attempt to import the class it refers to
if isinstance(model_instantiation_class, str):
try:
model_instantiation_class = import_object(':'.join(model_instantiation_class.rsplit('.',
maxsplit=1)))
except (ModuleNotFoundError, AttributeError):
raise ValueError(
textwrap.dedent(
f'The provided model_instantiation_class string {model_instantiation_class} could not be imported. '
f'Please make sure {model_instantiation_class} is discoverable on the python path, or pass the class '
'in directly.'))
assert model_instantiation_class is not None # pyright
# The AutoModel* classes have `from_config`, while the PreTrainedModel classes do not
# pyright can't tell this isn't a string at this point
if issubclass(
model_instantiation_class, # type: ignore
transformers.models.auto.auto_factory._BaseAutoModelClass):
hf_model = model_instantiation_class.from_config(loaded_config) # type: ignore
else:
hf_model = model_instantiation_class(loaded_config) # type: ignore
else:
# If the instantiation class is not explicitly provided, attempt to import the saved class and use it
try:
saved_class = import_object(':'.join(hf_model_state['config']['class'].rsplit('.', maxsplit=1)))
except (ModuleNotFoundError, AttributeError):
raise ValueError(
textwrap.dedent(
f'The saved class {hf_model_state["config"]["class"]} could not be imported. '
'Please either pass in the class to use explicitly via the model_instantiation_class '
f'parameter, or make sure that {hf_model_state["config"]["class"]} is discoverable '
'on the python path.'))
hf_model = saved_class(loaded_config)
hf_tokenizer = None
if hf_tokenizer_state != {}:
with tempfile.TemporaryDirectory() as _tmp_dir:
for filename, saved_content in hf_tokenizer_state.items():
with open(Path(_tmp_dir) / f'{filename}{saved_content["file_extension"]}', 'w') as _tmp_file:
if saved_content['file_extension'] == '.json':
json.dump(saved_content['content'], _tmp_file)
elif saved_content['file_extension'] == '.txt':
for line in saved_content['content']:
_tmp_file.write(line)
_tmp_file.write('\n')
hf_tokenizer = transformers.AutoTokenizer.from_pretrained(_tmp_dir)
# we need to set the name_or_path back because otherwise it is the tmp dir we are loading from here
hf_tokenizer.name_or_path = hf_tokenizer_state['tokenizer_config']['content'].get('name_or_path', '')
hf_tokenizer.init_kwargs['name_or_path'] = hf_tokenizer.name_or_path
# for an unknown reason this key is missing when loading the saved tokenizer, but present with a value of None
# for the original tokenizer, so we default it to None
hf_tokenizer.init_kwargs['tokenizer_file'] = hf_tokenizer.init_kwargs.get('tokenizer_file', None)
return hf_model, hf_tokenizer
def forward(self, batch):
if isinstance(batch, dict) or isinstance(batch, UserDict):
# Further input validation is left to the huggingface forward call
batch = {k: v for k, v in batch.items() if k in self.model_forward_args}
output = self.model(**batch) # type: ignore (thirdparty)
else:
raise ValueError(
'Unexpected batch type. Expected a dictionary with keys corresponding to the inputs to the forward function of the Huggingface model'
)
return output
def loss(self, outputs, batch):
if self.config.use_return_dict:
return outputs['loss']
else:
# loss is at index 0 in the output tuple
return outputs[0]
def eval_forward(self, batch, outputs: Optional[Any] = None):
# If the batch mode is generate, we will generate a requested number of tokens using the underlying
# model's generate function. Extra generation kwargs can be passed in via the batch. Strings will
# be returned from eval_forward
if batch.get('mode', None) == 'generate':
if self.tokenizer is None:
raise ValueError(
'Generation eval cannot be used without providing a tokenizer to the model constructor.')
self.labels = batch.pop('labels')
generation = self.generate(batch['input_ids'],
attention_mask=batch['attention_mask'],
max_new_tokens=batch['generation_length'],
synced_gpus=dist.get_world_size() > 1,
**batch.get('generation_kwargs', {}))
return self.tokenizer.batch_decode(generation[:, -batch['generation_length']:])
if self.use_logits or batch.get('mode', None) == 'icl_task':
# pop labels first to avoid computing loss
self.labels = batch.pop('labels')
# HF encoder decoder models like T5 expect either decoder_input_ids or labels,
# so we add decoder_input_ids to the batch if it is missing
if self.model.config.is_encoder_decoder and 'decoder_input_ids' not in batch:
if hasattr(self.model, 'prepare_decoder_input_ids_from_labels'):
batch['decoder_input_ids'] = self.model.prepare_decoder_input_ids_from_labels(labels=self.labels)
else:
raise RuntimeError(
'Encoder decoder models require that either decoder_input_ids is present in the batch'
' or that the model has a prepare_decoder_input_ids_from_labels method.')
if self.shift_labels or batch.get('mode', None) == 'icl_task':
assert self.labels is not None
# HF CausalLM models internally shift labels before computing loss, so we do the same here
self.labels[:, :-1] = self.labels[:, 1:].clone()
self.labels[:, -1] = -100
output = outputs if outputs else self.forward(batch)
if self.config.use_return_dict:
output = output['logits']
else:
# if loss was computed (cached outputs from forward), loss is at index 0 and logits are at index 1
# if loss was not computed (no cached outputs during eval), loss is not present and logits are at index 0
output = output[1] if len(output[0].shape) == 0 else output[0]
# if we are in the single class case, then remove the classes dimension
if output.shape[1] == 1:
output = output.squeeze(dim=1)
else:
output = outputs if outputs else self.forward(batch)
return output
def get_metrics(self, is_train: bool = False) -> Dict[str, Metric]:
if is_train:
metrics = self.train_metrics
else:
metrics = self.val_metrics
return metrics if metrics else {}
def update_metric(self, batch: Any, outputs: Any, metric: Metric) -> None:
if isinstance(metric, InContextLearningMetric) and batch.get('mode', None) == 'icl_task':
assert self.labels is not None
metric.update(batch, outputs, self.labels)
else:
metric.update(outputs, self.labels) # pyright: ignore [reportGeneralTypeIssues]
def get_metadata(self):
model_output = {}
tokenizer_output = {}
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_dir = Path(tmp_dir)
model_dir = tmp_dir / 'model'
tokenizer_dir = tmp_dir / 'tokenizer'
self.model.config.save_pretrained(model_dir)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(tokenizer_dir)
with open(model_dir / 'config.json') as _config_file:
model_config = json.load(_config_file)
model_output['config'] = {
'file_extension': '.json',
'content': model_config,
'class': f'{self.model.__class__.__module__}.{self.model.__class__.__name__}'
}
if self.tokenizer is not None:
for tokenizer_file_name in tokenizer_dir.iterdir():
tokenizer_file_path = tokenizer_dir / tokenizer_file_name
with open(tokenizer_file_path) as _tokenizer_file:
tokenizer_file_extension = tokenizer_file_path.suffix
if tokenizer_file_extension == '.txt':
tokenizer_file_content = _tokenizer_file.read().split('\n')
elif tokenizer_file_extension == '.json':
tokenizer_file_content = json.load(_tokenizer_file)
else:
raise ValueError(
f'Unexpected file extension for {tokenizer_file_name} in the output of tokenizer.save_pretrained.')
tokenizer_output[tokenizer_file_path.stem] = {
'file_extension': tokenizer_file_extension,
'content': tokenizer_file_content
}
return {'model': model_output, 'tokenizer': tokenizer_output}
def generate(self, input_ids: torch.Tensor, **kwargs):
"""Generate from the underlying HuggingFace model.
Except for ``pad_token_id``, which is optionally read from ``self.tokenizer``, all args are passed along
to :meth:`transformers.GenerationMixin.generate` function.
Args:
input_ids (torch.Tensor): Input ids to generate from.
**kwargs: Additional arguments passed to :meth:`transformers.GenerationMixin.generate` function.
See :class:`transformers.GenerationConfig` for all available arguments.
"""
# We need to call forward once in order for FSDP + generate to work
# See https://github.com/huggingface/accelerate/issues/570, https://github.com/huggingface/accelerate/issues/947,
# and https://github.com/pytorch/pytorch/issues/82461 for more info
if not self.dummy_forward_called:
with torch.no_grad():
maybe_decoder_input_ids = {}
if self.model.config.is_encoder_decoder:
maybe_decoder_input_ids['decoder_input_ids'] = torch.tensor([[0]],
dtype=torch.long,
device=input_ids.device)
self.model(input_ids=torch.tensor([[0]], dtype=torch.long, device=input_ids.device),
**maybe_decoder_input_ids)
self.dummy_forward_called = True
pad_token_id = kwargs.pop('pad_token_id', self.tokenizer.pad_token_id if self.tokenizer is not None else None)
return self.model.generate(input_ids, pad_token_id=pad_token_id, **kwargs)
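# --- Illustrative sketch (hypothetical addition; not part of the original module) ---
# The 'generate' branch of ``eval_forward`` expects the batch to carry a 'mode' key plus
# 'input_ids', 'attention_mask', 'generation_length', and 'labels'. The helper below is a
# minimal, hypothetical example of assembling such a batch; the 'gpt2' checkpoint is a
# placeholder and the labels are dummies, since generation eval only decodes continuations.
def _example_generation_eval_batch():
    import torch
    import transformers

    tokenizer = transformers.AutoTokenizer.from_pretrained('gpt2')
    tokenizer.pad_token = tokenizer.eos_token  # gpt2 has no pad token by default
    model = transformers.AutoModelForCausalLM.from_pretrained('gpt2')
    composer_model = HuggingFaceModel(model, tokenizer=tokenizer)

    encoded = tokenizer(['Compose a haiku about'], return_tensors='pt')
    batch = {
        'mode': 'generate',
        'input_ids': encoded['input_ids'],
        'attention_mask': encoded['attention_mask'],
        'generation_length': 8,
        'labels': torch.full_like(encoded['input_ids'], -100),
        'generation_kwargs': {'do_sample': False},
    }
    # eval_forward pops 'labels', generates `generation_length` new tokens, and returns
    # the decoded continuations as a list of strings.
    return composer_model.eval_forward(batch)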
def _is_registered_causal_lm(model: transformers.PreTrainedModel) -> bool:
"""Return True if model class is either a registered 🤗 Causal LM or a subclass of one"""
try:
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp',
conda_package='transformers',
conda_channel='conda-forge') from e
causal_lm_classes = list(MODEL_FOR_CAUSAL_LM_MAPPING.values())
return any([isinstance(model, causal_lm_class) for causal_lm_class in causal_lm_classes])
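# --- Illustrative sketch (hypothetical addition; not part of the original module) ---
# ``shift_labels`` defaults to the result of ``_is_registered_causal_lm``. When it is True,
# ``eval_forward`` shifts the labels in place the same way HF causal LMs do internally:
# position i is supervised by token i + 1 and the final position is masked with -100.
# A minimal demonstration of that shift on a toy label tensor:
def _example_shift_labels():
    import torch

    labels = torch.tensor([[10, 11, 12, 13]])
    shifted = labels.clone()
    shifted[:, :-1] = shifted[:, 1:].clone()
    shifted[:, -1] = -100
    return shifted  # tensor([[11, 12, 13, -100]])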
def get_hf_config_from_composer_state_dict(state_dict: Dict[str, Any],
config_overrides: Optional[Dict[str, Any]] = None) -> 'PretrainedConfig':
"""Get a HuggingFace config from a composer state dict with overrides applied
Args:
state_dict (Dict[str, Any]): The state dict to get the config from
config_overrides (Dict[str, Any], optional): Any overrides to apply to the config
Returns:
transformers.PretrainedConfig: The HuggingFace config
"""
try:
import transformers
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp',
conda_package='transformers',
conda_channel='conda-forge') from e
if config_overrides is None:
config_overrides = {}
hf_config_dict = state_dict['state']['integrations']['huggingface']['model']['config']['content']
# Update the config with any extra args needed
hf_config_dict.update(config_overrides)
# JSON keys need to be converted back to ints, huggingface does not auto convert them along this code path
if 'id2label' in hf_config_dict:
hf_config_dict['id2label'] = {int(k): v for k, v in hf_config_dict['id2label'].items()}
return transformers.AutoConfig.from_pretrained(hf_config_dict['_name_or_path'], **hf_config_dict)
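# --- Illustrative sketch (hypothetical addition; not part of the original module) ---
# A hypothetical example of rebuilding a config from a saved composer checkpoint while
# overriding a field (here ``num_labels``); 'my-checkpoint.pt' is a placeholder path.
def _example_config_from_checkpoint():
    state_dict = safe_torch_load('my-checkpoint.pt')
    return get_hf_config_from_composer_state_dict(state_dict, config_overrides={'num_labels': 4})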
def write_huggingface_pretrained_from_composer_checkpoint(
checkpoint_path: Union[Path, str],
output_folder: Union[Path, str],
local_checkpoint_save_location: Optional[Union[Path, str]] = None) -> None:
"""Write a ``config.json`` and ``pytorch_model.bin``, like :meth:`transformers.PreTrainedModel.from_pretrained` expects, from a composer checkpoint
.. note:: This function will not work properly if you used surgery algorithms when you trained your model. In that case you will want to
load the model weights using the Composer :class:`~composer.Trainer` with the ``load_path`` argument.
.. testsetup::
import torch
dataset = RandomTextClassificationDataset(size=16, use_keys=True)
train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=8)
eval_dataloader = torch.utils.data.DataLoader(dataset, batch_size=8)
import transformers
from composer.models import HuggingFaceModel
from composer.trainer import Trainer
hf_model = transformers.AutoModelForSequenceClassification.from_pretrained('prajjwal1/bert-tiny', num_labels=2)
hf_tokenizer = transformers.AutoTokenizer.from_pretrained('prajjwal1/bert-tiny')
composer_model = HuggingFaceModel(hf_model, tokenizer=hf_tokenizer, metrics=[], use_logits=True)
trainer = Trainer(model=composer_model,
train_dataloader=train_dataloader,
save_filename='composer-hf-checkpoint.pt',
max_duration='1ep',
save_folder='./')
trainer.fit()
trainer.close()
Example:
.. testcode::
from composer.models import write_huggingface_pretrained_from_composer_checkpoint
write_huggingface_pretrained_from_composer_checkpoint('composer-hf-checkpoint.pt', './hf-save-pretrained-output')
loaded_model = transformers.AutoModelForSequenceClassification.from_pretrained('./hf-save-pretrained-output')
Args:
checkpoint_path (Union[Path, str]): Path to the composer checkpoint, can be a local path, or a remote path beginning with ``s3://``, or another backend
supported by :meth:`composer.utils.maybe_create_object_store_from_uri`.
output_folder (Union[Path, str]): Path to the folder to write the output to. Must be a local path.
local_checkpoint_save_location (Optional[Union[Path, str]], optional): If specified, where to save the checkpoint file to locally.
If the input ``checkpoint_path`` is already a local path, this will be a symlink.
Defaults to None, which will use a temporary file.
"""
try:
import transformers
del transformers
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp',
conda_package='transformers',
conda_channel='conda-forge') from e
# default local path to a tempfile if path is not provided
if local_checkpoint_save_location is None:
tmp_dir = tempfile.TemporaryDirectory()
local_checkpoint_save_location = Path(tmp_dir.name) / 'local-composer-checkpoint.pt'
# download the checkpoint file
get_file(str(checkpoint_path), str(local_checkpoint_save_location))
composer_state_dict = safe_torch_load(local_checkpoint_save_location)
config = get_hf_config_from_composer_state_dict(composer_state_dict)
config.save_pretrained(output_folder)
weights_state_dict = composer_state_dict['state']['model']
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(weights_state_dict, prefix='model.')
torch.save(weights_state_dict, Path(output_folder) / 'pytorch_model.bin')
| composer-dev | composer/models/huggingface.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""A wrapper class that converts mmdet detection models to composer models"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, List, Optional
import numpy as np
import torch
from torchmetrics import Metric
from torchmetrics.collections import MetricCollection
from composer.models import ComposerModel
if TYPE_CHECKING:
import mmdet
__all__ = ['MMDetModel']
class MMDetModel(ComposerModel):
"""A wrapper class that adapts mmdetection detectors to composer models.
Args:
model (mmdet.models.detectors.BaseDetector): An MMdetection Detector.
metrics (list[Metric], optional): list of torchmetrics to apply to the output of `eval_forward`. Default: ``None``.
.. warning:: This wrapper is designed to work with mmdet datasets.
Example:
.. code-block:: python
from mmdet.models import build_model
from mmcv import ConfigDict
from composer.models import MMDetModel
yolox_s_config = dict(
type='YOLOX',
input_size=(640, 640),
random_size_range=(15, 25),
random_size_interval=10,
backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5),
neck=dict(type='YOLOXPAFPN', in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1),
bbox_head=dict(type='YOLOXHead', num_classes=num_classes, in_channels=128, feat_channels=128),
train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)),
test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65)))
yolox = build_model(ConfigDict(yolox_s_config))
yolox.init_weights()
model = MMDetModel(yolox)
"""
def __init__(
self,
model: mmdet.models.detectors.BaseDetector, # type: ignore
metrics: Optional[List[Metric]] = None) -> None:
super().__init__()
self.model = model
self.train_metrics = None
self.val_metrics = None
if metrics:
metric_collection = MetricCollection(metrics)
self.train_metrics = metric_collection.clone(prefix='train_')
self.val_metrics = metric_collection.clone(prefix='val_')
def forward(self, batch):
# this will return a dictionary of losses in train mode and model outputs in test mode.
return self.model(**batch)
def loss(self, outputs, batch, **kwargs):
return outputs
def eval_forward(self, batch, outputs: Optional[Any] = None):
"""
Args:
batch (dict): An eval batch in the format:
``img`` (List[torch.Tensor]): List of image torch.Tensors of shape (batch, c, h, w).
``img_metas`` (List[Dict]): (1, batch_size) list of ``image_meta`` dicts.
Returns: model predictions: A batch_size-length list of dictionaries containing detection boxes in (x, y, x2, y2) format, class labels, and class probabilities.
"""
device = batch['img'][0].device
batch.pop('gt_labels')
batch.pop('gt_bboxes')
results = self.model(return_loss=False, rescale=True, **batch) # models behave differently in eval mode
# outputs are a list of bbox results (x, y, x2, y2, score)
# pack mmdet bounding boxes and labels into the format that torchmetrics' MeanAveragePrecision (MAP) expects
preds = []
for bbox_result in results:
boxes_scores = np.vstack(bbox_result)
boxes, scores = torch.from_numpy(boxes_scores[..., :-1]).to(device), torch.from_numpy(
boxes_scores[..., -1]).to(device)
labels = [np.full(result.shape[0], i, dtype=np.int32) for i, result in enumerate(bbox_result)]
pred = {
'labels': torch.from_numpy(np.concatenate(labels)).to(device).long(),
'boxes': boxes.float(),
'scores': scores.float()
}
preds.append(pred)
return preds
def get_metrics(self, is_train: bool = False):
if is_train:
metrics = self.train_metrics
else:
metrics = self.val_metrics
return metrics if metrics else {}
def update_metric(self, batch: Any, outputs: Any, metric: Metric):
targets_box = batch.pop('gt_bboxes')[0]
targets_cls = batch.pop('gt_labels')[0]
targets = []
for i in range(len(targets_box)):
t = {'boxes': targets_box[i], 'labels': targets_cls[i]}
targets.append(t)
metric.update(outputs, targets)
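# --- Illustrative sketch (hypothetical addition; not part of the original module) ---
# ``eval_forward`` packs mmdet outputs into the per-image dictionaries that torchmetrics'
# MeanAveragePrecision expects: float 'boxes' in (x1, y1, x2, y2) format, float 'scores',
# and integer 'labels'. A minimal example with hand-written boxes (assumes a torchmetrics
# version that ships ``MeanAveragePrecision`` and that pycocotools is installed):
def _example_map_update():
    import torch
    from torchmetrics.detection.mean_ap import MeanAveragePrecision

    preds = [{
        'boxes': torch.tensor([[10.0, 10.0, 50.0, 50.0]]),
        'scores': torch.tensor([0.9]),
        'labels': torch.tensor([0]),
    }]
    targets = [{
        'boxes': torch.tensor([[12.0, 11.0, 48.0, 52.0]]),
        'labels': torch.tensor([0]),
    }]
    metric = MeanAveragePrecision()
    metric.update(preds, targets)
    return metric.compute()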
| composer-dev | composer/models/mmdetection.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The ComposerModel base interface."""
from __future__ import annotations
import abc
import copy
import warnings
from typing import Any, Dict, Optional, Sequence, Union
import torch
from torch import Tensor
from torchmetrics import Metric
from composer.core import Batch
from composer.loggers import Logger
__all__ = ['ComposerModel']
class ComposerModel(torch.nn.Module, abc.ABC):
"""The interface needed to make a PyTorch model compatible with :class:`composer.Trainer`.
To create a :class:`.Trainer`\\-compatible model, subclass :class:`.ComposerModel` and
implement :meth:`forward` and :meth:`loss`. For full functionality (logging and validation), implement :meth:`get_metrics`
and :meth:`eval_forward`.
See the :doc:`Composer Model walk through </composer_model>` for more details.
Minimal Example:
.. code-block:: python
import torchvision
import torch.nn.functional as F
from composer.models import ComposerModel
class ResNet18(ComposerModel):
def __init__(self):
super().__init__()
self.model = torchvision.models.resnet18() # define PyTorch model in __init__.
def forward(self, batch): # batch is the output of the dataloader
# specify how batches are passed through the model
inputs, _ = batch
return self.model(inputs)
def loss(self, outputs, batch):
# pass batches and `forward` outputs to the loss
_, targets = batch
return F.cross_entropy(outputs, targets)
Attributes:
logger (Optional[Logger]): The training :class:`.Logger`.
The trainer sets the :class:`.Logger` on the :attr:`.Event.INIT` event.
"""
def __init__(self) -> None:
super().__init__()
self.logger: Optional[Logger] = None
def __deepcopy__(self, memo: dict):
# From https://stackoverflow.com/questions/1500718/how-to-override-the-copy-deepcopy-operations-for-a-python-object
# The `logger` should not be copied
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k == 'logger':
copied_v = v
else:
copied_v = copy.deepcopy(v, memo)
setattr(result, k, copied_v)
return result
def __copy__(self):
# From https://stackoverflow.com/questions/1500718/how-to-override-the-copy-deepcopy-operations-for-a-python-object
# Need to manually define `__copy__` so it does not rely on `__getstate__`, which would not copy the logger.
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
def __getstate__(self):
# Don't pickle the logger
state = self.__dict__.copy()
state['logger'] = None
return state
@abc.abstractmethod
def forward(self, batch: Batch) -> Union[Tensor, Sequence[Tensor]]:
"""Compute model output given a batch from the dataloader.
Args:
batch (~composer.core.types.Batch): The output batch from dataloader.
Returns:
Tensor | Sequence[Tensor]:
The result that is passed to :meth:`loss` as the parameter :attr:`outputs`.
.. warning:: This method is different from vanilla PyTorch ``model.forward(x)`` or ``model(x)`` as it takes a
batch of data that has to be unpacked.
Example:
.. code-block:: python
def forward(self, batch): # batch is the output of the dataloader
inputs, _ = batch
return self.model(inputs)
The outputs of :meth:`forward` are passed to :meth:`loss` by the trainer:
.. code-block:: python
for batch in train_dataloader:
optimizer.zero_grad()
outputs = model.forward(batch)
loss = model.loss(outputs, batch)
loss.backward()
"""
pass
@abc.abstractmethod
def loss(self, outputs: Any, batch: Batch, *args, **kwargs) -> Union[Tensor, Sequence[Tensor]]:
"""Compute the loss of the model given ``outputs`` from :meth:`forward` and a
:class:`~composer.core.types.Batch` of data from the dataloader. The :class:`.Trainer`
will call ``.backward()`` on the returned loss.
Args:
outputs (Any): The output of the forward pass.
batch (~composer.core.types.Batch): The output batch from dataloader.
Returns:
Tensor | Sequence[Tensor]: The loss as a :class:`torch.Tensor`.
Example:
.. code-block:: python
import torch.nn.functional as F
def loss(self, outputs, batch):
# pass batches and :meth:`forward` outputs to the loss
_, targets = batch # discard inputs from batch
return F.cross_entropy(outputs, targets)
The outputs of :meth:`forward` are passed to :meth:`loss` by the trainer:
.. code-block:: python
for batch in train_dataloader:
optimizer.zero_grad()
outputs = model.forward(batch)
loss = model.loss(outputs, batch)
loss.backward()
"""
pass
def metrics(self, train: bool = False) -> Dict[str, Metric]:
"""Get metrics for evaluating the model. Metrics should be instances of :class:`torchmetrics.Metric` defined in
:meth:`__init__`. This format enables accurate distributed logging. Metrics consume the outputs of
:meth:`validate`. To track multiple metrics, return a list of metrics in a :ref:`MetricCollection
</pages/overview.rst#metriccollection>`.
Args:
train (bool, optional): True to return metrics that should be computed
during training and False otherwise. This flag is set automatically by the
:class:`.Trainer`. Default: ``False``.
Returns:
Metric or MetricCollection: An instance of :class:`~torchmetrics.Metric` or :ref:`MetricCollection </pages/overview.rst#metriccollection>`.
.. warning:: Each metric keeps states which are updated with data seen so far.
As a result, different metric instances should be used for training
and validation. See:
https://torchmetrics.readthedocs.io/en/latest/pages/overview.html
for more details.
Example:
.. code-block:: python
from torchmetrics.classification import MulticlassAccuracy
from composer.models.loss import CrossEntropyLoss
def __init__(self, num_classes):
super().__init__()
self.train_acc = MulticlassAccuracy(num_classes=num_classes, average='micro') # torchmetric
self.val_acc = MulticlassAccuracy(num_classes=num_classes, average='micro')
self.val_loss = CrossEntropyLoss()
def metrics(self, train: bool = False):
return self.train_acc if train else MetricCollection([self.val_acc, self.val_loss])
"""
warnings.warn(
DeprecationWarning(
'Using ``metrics()`` is no longer supported and will be removed in a future version. Please use ``get_metrics()`` instead.'
))
return self.get_metrics(train)
def eval_forward(
self,
batch: Any,
outputs: Optional[Any] = None,
) -> Any:
"""Run the evaluation forward pass.
By default, it returns the ``outputs`` if they are not None. Otherwise,
``self(batch)`` is returned.
Override this method for models that require custom validation logic -- e.g. self-supervised learning.
Args:
batch: The dataloader batch.
outputs (Any, optional): If training, the outputs from the forward pass. Otherwise, None.
Returns:
Any: The evaluation outputs.
"""
return outputs if outputs is not None else self.forward(batch)
def update_metric(
self,
batch: Any,
outputs: Any,
metric: Metric,
) -> None:
"""Update the given metric.
Args:
batch: The dataloader batch
outputs: The output from :meth:`eval_forward`
metric (Metric): The metric to update.
"""
raise NotImplementedError()
def get_metrics(self, is_train: bool) -> Dict[str, Metric]:
"""Get the metrics.
This method will be called by the trainer immediately after :attr:`.Event.INIT`.
.. note::
Each item in the returned dictionary will be ``copy.deepcopy`` before it is used. This is to ensure that each dataloader (e.g. train, eval)
will be accumulating metrics separately.
To share a metric across all dataloaders, wrap it with ``MetricSpec(metric=metric, share=False)``.
Args:
is_train (bool): Whether the training metrics or evaluation metrics should be returned.
Returns:
Dict[str, Metric]: A mapping of the metric name to a Metric.
"""
return {}
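# --- Illustrative sketch (hypothetical addition; not part of the original module) ---
# The docstring's minimal example implements only ``forward`` and ``loss``. A hypothetical
# subclass that also wires up metrics looks like the class below: ``get_metrics`` returns a
# name -> Metric mapping and ``update_metric`` routes ``eval_forward`` outputs into each metric.
class _ExampleTinyClassifier(ComposerModel):
    """Hypothetical sketch for illustration only; not part of the Composer API."""

    def __init__(self, model: torch.nn.Module, num_classes: int) -> None:
        super().__init__()
        from torchmetrics.classification import MulticlassAccuracy
        self.model = model
        self.train_acc = MulticlassAccuracy(num_classes=num_classes, average='micro')
        self.val_acc = MulticlassAccuracy(num_classes=num_classes, average='micro')

    def forward(self, batch):
        inputs, _ = batch
        return self.model(inputs)

    def loss(self, outputs, batch):
        import torch.nn.functional as F
        _, targets = batch
        return F.cross_entropy(outputs, targets)

    def eval_forward(self, batch, outputs=None):
        return outputs if outputs is not None else self.forward(batch)

    def get_metrics(self, is_train: bool = False):
        return {'MulticlassAccuracy': self.train_acc if is_train else self.val_acc}

    def update_metric(self, batch, outputs, metric):
        _, targets = batch
        metric.update(outputs, targets)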
| composer-dev | composer/models/base.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""A convenience class that creates a :class:`.ComposerModel` for classification tasks from a vanilla PyTorch model.
:class:`.ComposerClassifier` requires batches in the form: (``input``, ``target``) and includes a basic
classification training loop with :func:`.soft_cross_entropy` loss and accuracy logging.
"""
import logging
import textwrap
import warnings
from typing import Any, Callable, Dict, Optional, Tuple, Union
import torch
from torch import Tensor
from torchmetrics import Metric, MetricCollection
from torchmetrics.classification import MulticlassAccuracy
from composer.loss import soft_cross_entropy
from composer.metrics import CrossEntropy
from composer.models import ComposerModel
__all__ = ['ComposerClassifier']
log = logging.getLogger(__name__)
class ComposerClassifier(ComposerModel):
"""A convenience class that creates a :class:`.ComposerModel` for classification tasks from a vanilla PyTorch model.
:class:`.ComposerClassifier` requires batches in the form: (``input``, ``target``) and includes a basic
classification training loop with a loss function `loss_fn` which takes in the model's outputs and the labels.
Args:
module (torch.nn.Module): A PyTorch neural network module.
num_classes (int, optional): The number of output classes. Required if self.module does not have a num_classes parameter.
train_metrics (Metric | MetricCollection, optional): A torchmetric or collection of torchmetrics to be
computed on the training set throughout training. (default: :class:`MulticlassAccuracy`)
val_metrics (Metric | MetricCollection, optional): A torchmetric or collection of torchmetrics to be
computed on the validation set throughout training.
(default: :class:`composer.metrics.CrossEntropy`, :class:`.MulticlassAccuracy`)
loss_fn (Callable, optional): Loss function to use. This loss function should have at least two arguments:
1) the output of the model and 2) ``target`` i.e. labels from the dataset.
Returns:
ComposerClassifier: An instance of :class:`.ComposerClassifier`.
Example:
.. testcode::
import torchvision
from composer.models import ComposerClassifier
pytorch_model = torchvision.models.resnet18(pretrained=False)
model = ComposerClassifier(pytorch_model, num_classes=1000)
"""
num_classes: Optional[int] = None
def __init__(self,
module: torch.nn.Module,
num_classes: Optional[int] = None,
train_metrics: Optional[Union[Metric, MetricCollection]] = None,
val_metrics: Optional[Union[Metric, MetricCollection]] = None,
loss_fn: Callable = soft_cross_entropy) -> None:
super().__init__()
self.module = module
self._loss_fn = loss_fn
self.num_classes = num_classes
if hasattr(self.module, 'num_classes'):
model_num_classes = getattr(self.module, 'num_classes')
if self.num_classes is not None and self.num_classes != model_num_classes:
warnings.warn(
textwrap.dedent(
f'Specified num_classes={self.num_classes} does not match model num_classes={model_num_classes}.'
' Using model num_classes.'))
self.num_classes = model_num_classes
if self.num_classes is None and (train_metrics is None or val_metrics is None):
raise ValueError(
textwrap.dedent('Please specify the number of output classes. Either: \n (1) pass '
'in num_classes to the ComposerClassifier, \n (2) pass in both '
'train_metrics and val_metrics to the ComposerClassifier, or \n (3) '
'specify a num_classes parameter in the PyTorch network module.'))
# Metrics for training
if train_metrics is None:
assert self.num_classes is not None
train_metrics = MulticlassAccuracy(num_classes=self.num_classes, average='micro')
self.train_metrics = train_metrics
# Metrics for validation
if val_metrics is None:
assert self.num_classes is not None
val_metrics = MetricCollection(
[CrossEntropy(), MulticlassAccuracy(num_classes=self.num_classes, average='micro')])
self.val_metrics = val_metrics
def loss(self, outputs: Tensor, batch: Tuple[Any, Tensor], *args, **kwargs) -> Tensor:
_, targets = batch
return self._loss_fn(outputs, targets, *args, **kwargs)
def get_metrics(self, is_train: bool = False) -> Dict[str, Metric]:
if is_train:
metrics = self.train_metrics
else:
metrics = self.val_metrics
if isinstance(metrics, Metric):
metrics_dict = {metrics.__class__.__name__: metrics}
else:
metrics_dict = {}
for name, metric in metrics.items():
assert isinstance(metric, Metric)
metrics_dict[name] = metric
return metrics_dict
def update_metric(self, batch: Any, outputs: Any, metric: Metric) -> None:
_, targets = batch
metric.update(outputs, targets)
def forward(self, batch: Tuple[Tensor, Any]) -> Tensor:
inputs, _ = batch
outputs = self.module(inputs)
return outputs
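# --- Illustrative sketch (hypothetical addition; not part of the original module) ---
# A hypothetical example of overriding the default metrics and loss function; the
# 10-class torchvision resnet18 is a placeholder chosen for brevity.
def _example_custom_classifier() -> ComposerClassifier:
    import torchvision

    module = torchvision.models.resnet18(num_classes=10)
    train_metrics = MulticlassAccuracy(num_classes=10, average='micro')
    val_metrics = MetricCollection(
        [CrossEntropy(), MulticlassAccuracy(num_classes=10, average='micro')])
    return ComposerClassifier(module,
                              num_classes=10,
                              train_metrics=train_metrics,
                              val_metrics=val_metrics,
                              loss_fn=soft_cross_entropy)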
| composer-dev | composer/models/tasks/classification.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Model tasks are ComposerModels with forward passes and logging built-in for many common deep learning tasks."""
from composer.models.tasks.classification import ComposerClassifier as ComposerClassifier
__all__ = ['ComposerClassifier']
| composer-dev | composer/models/tasks/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The ResNet model family is a set of convolutional neural networks described in `Deep Residual Learning for Image
Recognition <https://arxiv.org/abs/1512.03385>`_ (He et al, 2015). ResNets can be used as the base for a variety of
vision tasks. ImageNet ResNets are a subset of the ResNet family which were designed specifically for classification on
the ImageNet dataset.
See the :doc:`Model Card </model_cards/resnet>` for more details.
"""
from composer.models.resnet.model import composer_resnet
__all__ = ['composer_resnet']
_metadata = {
'resnet18': {
'_task': 'Image Classification',
'_dataset': 'ImageNet',
'_name': 'ResNet18',
'_quality': 'TBD',
'_metric': 'Top-1 Accuracy',
'_ttt': 'TBD',
'_hparams': 'resnet18.yaml'
},
'resnet34': {
'_task': 'Image Classification',
'_dataset': 'ImageNet',
'_name': 'ResNet34',
'_quality': 'TBD',
'_metric': 'Top-1 Accuracy',
'_ttt': 'TBD',
'_hparams': 'resnet34.yaml'
},
'resnet50': {
'_task': 'Image Classification',
'_dataset': 'ImageNet',
'_name': 'ResNet50',
'_quality': '76.51',
'_metric': 'Top-1 Accuracy',
'_ttt': '3h 33m',
'_hparams': 'resnet50.yaml'
},
'resnet101': {
'_task': 'Image Classification',
'_dataset': 'ImageNet',
'_name': 'ResNet101',
'_quality': '78.10',
'_metric': 'Top-1 Accuracy',
'_ttt': '8h 15m',
'_hparams': 'resnet101.yaml',
},
'resnet152': {
'_task': 'Image Classification',
'_dataset': 'ImageNet',
'_name': 'ResNet152',
'_quality': 'TBD',
'_metric': 'Top-1 Accuracy',
'_ttt': 'TBD',
'_hparams': 'resnet152.yaml'
}
}
| composer-dev | composer/models/resnet/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""A :class:`.ComposerClassifier` wrapper around the torchvision implementations of the ResNet model family."""
import logging
import textwrap
import warnings
from typing import List, Optional
import torchvision
from packaging import version
from torchmetrics import MetricCollection
from torchmetrics.classification import MulticlassAccuracy
from torchvision.models import resnet
from composer.loss import loss_registry
from composer.metrics import CrossEntropy
from composer.models.initializers import Initializer
from composer.models.tasks import ComposerClassifier
__all__ = ['composer_resnet']
log = logging.getLogger(__name__)
valid_model_names = ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
def composer_resnet(model_name: str,
num_classes: int = 1000,
weights: Optional[str] = None,
pretrained: bool = False,
groups: int = 1,
width_per_group: int = 64,
initializers: Optional[List[Initializer]] = None,
loss_name: str = 'soft_cross_entropy') -> ComposerClassifier:
"""Helper function to create a :class:`.ComposerClassifier` with a torchvision ResNet model.
From `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_ (He et al, 2015).
Args:
model_name (str): Name of the ResNet model instance. Either [``"resnet18"``, ``"resnet34"``, ``"resnet50"``, ``"resnet101"``,
``"resnet152"``].
num_classes (int, optional): The number of classes. Needed for classification tasks. Default: ``1000``.
weights (str, optional): If provided, pretrained weights can be specified, such as with ``IMAGENET1K_V2``. Default: ``None``.
pretrained (bool, optional): If True, use ImageNet pretrained weights. Default: ``False``. This parameter is deprecated and
will soon be removed in favor of ``weights``.
groups (int, optional): Number of filter groups for the 3x3 convolution layer in bottleneck blocks. Default: ``1``.
width_per_group (int, optional): Initial width for each convolution group. Width doubles after each stage.
Default: ``64``.
initializers (List[Initializer], optional): Initializers for the model. ``None`` for no initialization.
Default: ``None``.
loss_name (str, optional): Loss function to use. E.g. 'soft_cross_entropy' or
'binary_cross_entropy_with_logits'. Loss function must be in
:mod:`~composer.loss.loss`. Default: ``'soft_cross_entropy'``.
Returns:
ComposerModel: instance of :class:`.ComposerClassifier` with a torchvision ResNet model.
Example:
.. testcode::
from composer.models import composer_resnet
model = composer_resnet(model_name='resnet18') # creates a torchvision resnet18 for image classification
"""
valid_model_names = ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
if model_name not in valid_model_names:
raise ValueError(f'model_name must be one of {valid_model_names} instead of {model_name}.')
if loss_name not in loss_registry.keys():
raise ValueError(f'Unrecognized loss function: {loss_name}. Please ensure the '
'specified loss function is present in composer.loss.loss.py')
if loss_name == 'binary_cross_entropy_with_logits' and (initializers is None or
Initializer.LINEAR_LOG_CONSTANT_BIAS not in initializers):
log.warning('UserWarning: Using `binary_cross_entropy_with_logits` '
'without using `initializers.linear_log_constant_bias` can degrade '
'performance. '
'Please ensure you are using '
'`initializers.linear_log_constant_bias`.')
if initializers is None:
initializers = []
# Configure pretrained/weights based on torchvision version
if pretrained and weights:
raise ValueError(
'composer_resnet expects only one of ``pretrained`` or ``weights`` to be specified, but both were specified.'
)
if pretrained:
weights = 'IMAGENET1K_V2'
warnings.warn(
DeprecationWarning(
'The ``pretrained`` argument for composer_resnet is deprecated and will be removed in the future when torch 1.11 is no longer supported. Please use ``weights`` instead.'
))
# Instantiate model
model_fn = getattr(resnet, model_name)
model = None
if version.parse(torchvision.__version__) < version.parse('0.13.0'):
if weights:
pretrained = True
warnings.warn(
textwrap.dedent(f'The current torchvision version {torchvision.__version__} does not support the '
'``weights`` argument, so ``pretrained=True`` will be used instead. To enable '
'``weights``, please upgrade to the latest version of torchvision.'))
model = model_fn(pretrained=pretrained, num_classes=num_classes, groups=groups, width_per_group=width_per_group)
else:
model = model_fn(weights=weights, num_classes=num_classes, groups=groups, width_per_group=width_per_group)
# Grab loss function from loss registry
loss_fn = loss_registry[loss_name]
# Create metrics for train and validation
train_metrics = MulticlassAccuracy(num_classes=num_classes, average='micro')
val_metrics = MetricCollection([CrossEntropy(), MulticlassAccuracy(num_classes=num_classes, average='micro')])
# Apply Initializers to model
for initializer in initializers:
initializer = Initializer(initializer)
model.apply(initializer.get_initializer())
composer_model = ComposerClassifier(model, train_metrics=train_metrics, val_metrics=val_metrics, loss_fn=loss_fn)
return composer_model
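# --- Illustrative sketch (hypothetical addition; not part of the original module) ---
# With torchvision >= 0.13 the pretrained checkpoint is selected through ``weights``;
# the deprecated ``pretrained=True`` path maps to 'IMAGENET1K_V2'. A hypothetical call:
def _example_pretrained_resnet50() -> ComposerClassifier:
    return composer_resnet('resnet50', num_classes=1000, weights='IMAGENET1K_V2')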
| composer-dev | composer/models/resnet/model.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""ViT Small Patch 16 for image classification."""
from composer.models.vit_small_patch16.model import vit_small_patch16 as vit_small_patch16
__all__ = ['vit_small_patch16']
_task = 'Image Classification'
_dataset = 'ImageNet'
_name = 'ViT-Small-Patch16'
_quality = '74.52'
_metric = 'Top-1 Accuracy'
_ttt = '1d 59m'
_hparams = 'vit_small_patch16.yaml'
| composer-dev | composer/models/vit_small_patch16/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Implements ViT-S/16 as a :class:`.ComposerClassifier`."""
from composer.models.tasks import ComposerClassifier
__all__ = ['vit_small_patch16']
def vit_small_patch16(num_classes: int = 1000,
image_size: int = 224,
channels: int = 3,
dropout: float = 0.0,
embedding_dropout: float = 0.0):
"""Helper function to create a :class:`.ComposerClassifier` using a ViT-S/16 model.
See `Training data-efficient image transformers & distillation through attention <https://arxiv.org/pdf/2012.12877.pdf>`_
(Touvron et al, 2021) for details on ViT-S/16.
Args:
num_classes (int, optional): number of classes for the model. Default: ``1000``.
image_size (int, optional): input image size. If you have rectangular images, make sure your image
size is the maximum of the width and height. Default: ``224``.
channels (int, optional): number of image channels. Default: ``3``.
dropout (float, optional): 0.0 - 1.0 dropout rate. Default: ``0``.
embedding_dropout (float, optional): 0.0 - 1.0 embedding dropout rate. Default: ``0``.
Returns:
ComposerModel: instance of :class:`.ComposerClassifier` with a ViT-S/16 model.
"""
from vit_pytorch import ViT
model = ViT(
image_size=image_size,
channels=channels,
num_classes=num_classes,
dim=384, # embed dim/width
patch_size=16,
depth=12, # layers
heads=6,
mlp_dim=1536,
dropout=dropout,
emb_dropout=embedding_dropout)
composer_model = ComposerClassifier(module=model)
return composer_model
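# --- Illustrative sketch (hypothetical addition; not part of the original module) ---
# A hypothetical smoke test of the ViT-S/16 wrapper; the batch follows the
# (inputs, targets) convention that ComposerClassifier.forward expects.
def _example_vit_forward():
    import torch

    model = vit_small_patch16(num_classes=10, image_size=224)
    images = torch.randn(2, 3, 224, 224)
    targets = torch.randint(0, 10, (2,))
    logits = model((images, targets))  # forward only consumes the inputs
    return logits.shape  # torch.Size([2, 10])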
| composer-dev | composer/models/vit_small_patch16/model.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
## Code adapted from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Segmentation/nnUNet/
import numpy as np
import torch
import torch.nn as nn
normalizations = {
'instancenorm3d': nn.InstanceNorm3d,
'instancenorm2d': nn.InstanceNorm2d,
'batchnorm3d': nn.BatchNorm3d,
'batchnorm2d': nn.BatchNorm2d,
}
convolutions = {
'Conv2d': nn.Conv2d,
'Conv3d': nn.Conv3d,
'ConvTranspose2d': nn.ConvTranspose2d,
'ConvTranspose3d': nn.ConvTranspose3d,
}
def get_norm(name, out_channels):
if 'groupnorm' in name:
return nn.GroupNorm(32, out_channels, affine=True)
return normalizations[name](out_channels, affine=True)
def get_conv(in_channels, out_channels, kernel_size, stride, dim, bias=False):
conv = convolutions[f'Conv{dim}d']
padding = get_padding(kernel_size, stride)
return conv(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
def get_transp_conv(in_channels, out_channels, kernel_size, stride, dim):
conv = convolutions[f'ConvTranspose{dim}d']
padding = get_padding(kernel_size, stride)
output_padding = get_output_padding(kernel_size, stride, padding)
return conv(in_channels, out_channels, kernel_size, stride, padding, output_padding, bias=True)
def get_padding(kernel_size, stride):
#kernel_size_np = np.cast(np.ndarray, np.atleast_1d(kernel_size))
#stride_np = np.cast(np.ndarray, np.atleast_1d(stride))
kernel_size_np = np.atleast_1d(kernel_size)
stride_np = np.atleast_1d(stride)
padding_np = (kernel_size_np - stride_np + 1) / 2 # type: ignore
padding = tuple(int(p) for p in padding_np) # type: ignore
return padding if len(padding) > 1 else padding[0]
def get_output_padding(kernel_size, stride, padding):
kernel_size_np = np.atleast_1d(kernel_size)
stride_np = np.atleast_1d(stride)
padding_np = np.atleast_1d(padding)
out_padding_np = 2 * padding_np + stride_np - kernel_size_np
out_padding = tuple(int(p) for p in out_padding_np)
return out_padding if len(out_padding) > 1 else out_padding[0]
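# --- Illustrative sketch (hypothetical addition; not part of the original module) ---
# Worked example of the padding arithmetic above: for kernel_size=3, stride=2 the
# conv padding is int((3 - 2 + 1) / 2) = 1 and the transposed-conv output padding is
# 2 * 1 + 2 - 3 = 1. Per-axis arguments return tuples.
def _example_padding_arithmetic():
    assert get_padding(3, 2) == 1
    assert get_output_padding(3, 2, 1) == 1
    assert get_padding((3, 3), (2, 1)) == (1, 1)
    return True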
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, **kwargs):
super(ConvLayer, self).__init__()
self.conv = get_conv(in_channels, out_channels, kernel_size, stride, kwargs['dim'])
self.norm = get_norm(kwargs['norm'], out_channels)
self.lrelu = nn.LeakyReLU(negative_slope=kwargs['negative_slope'], inplace=True)
def forward(self, data):
out = self.conv(data)
out = self.norm(out)
out = self.lrelu(out)
return out
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, **kwargs):
super(ConvBlock, self).__init__()
self.conv1 = ConvLayer(in_channels, out_channels, kernel_size, stride, **kwargs)
self.conv2 = ConvLayer(out_channels, out_channels, kernel_size, 1, **kwargs)
def forward(self, input_data):
out = self.conv1(input_data)
out = self.conv2(out)
return out
class ResidBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, **kwargs):
super(ResidBlock, self).__init__()
self.conv1 = ConvLayer(in_channels, out_channels, kernel_size, stride, **kwargs)
self.conv2 = get_conv(out_channels, out_channels, kernel_size, 1, kwargs['dim'])
self.norm = get_norm(kwargs['norm'], out_channels)
self.lrelu = nn.LeakyReLU(negative_slope=kwargs['negative_slope'], inplace=True)
self.downsample = None
if max(stride) > 1 or in_channels != out_channels: # type: ignore
self.downsample = get_conv(in_channels, out_channels, kernel_size, stride, kwargs['dim'])
self.norm_res = get_norm(kwargs['norm'], out_channels)
def forward(self, input_data):
residual = input_data
out = self.conv1(input_data)
out = self.conv2(out)
out = self.norm(out)
if self.downsample is not None:
residual = self.downsample(residual)
residual = self.norm_res(residual)
out = self.lrelu(out + residual)
return out
class UpsampleBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, **kwargs):
super(UpsampleBlock, self).__init__()
self.transp_conv = get_transp_conv(in_channels, out_channels, stride, stride, kwargs['dim'])
self.conv_block = ConvBlock(2 * out_channels, out_channels, kernel_size, 1, **kwargs)
def forward(self, input_data, skip_data):
out = self.transp_conv(input_data)
out = torch.cat((out, skip_data), dim=1)
out = self.conv_block(out)
return out
class OutputBlock(nn.Module):
def __init__(self, in_channels, out_channels, dim):
super(OutputBlock, self).__init__()
self.conv = get_conv(in_channels, out_channels, kernel_size=1, stride=1, dim=dim, bias=True)
nn.init.constant_(self.conv.bias, 0)
def forward(self, input_data):
return self.conv(input_data)
| composer-dev | composer/models/unet/_layers.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""A U-Net model extending :class:`.ComposerModel`."""
import logging
from typing import Any, Dict, Optional, Sequence, Union
import torch
import torch.nn as nn
from torchmetrics import Metric
from composer.metrics.metrics import Dice
from composer.models.base import ComposerModel
from composer.models.unet.model import UNet as UNetModel
from composer.utils.import_helpers import MissingConditionalImportError
log = logging.getLogger(__name__)
__all__ = ['UNet']
class UNet(ComposerModel):
"""A U-Net model extending :class:`.ComposerModel`.
See U-Net: Convolutional Networks for Biomedical Image Segmentation (`Ronneberger et al, 2015`_)
on the U-Net architecture.
Args:
num_classes (int, optional): The number of classes. Needed for classification tasks. Default: ``3``.
.. _Ronneberger et al, 2015: https://arxiv.org/abs/1505.04597
"""
def __init__(self, num_classes: int = 3) -> None:
super().__init__()
try:
from monai.losses import DiceLoss
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='unet',
conda_package='monai',
conda_channel='conda-forge') from e
self.module = self.build_nnunet()
self.dice = Dice(num_classes=num_classes)
self.dloss = DiceLoss(include_background=False, softmax=True, to_onehot_y=True, batch=True)
self.closs = nn.CrossEntropyLoss()
def loss(self, outputs: Any, batch: Any, *args, **kwargs) -> Union[torch.Tensor, Sequence[torch.Tensor]]:
_, y = batch
y = y.squeeze(1) # type: ignore
loss = self.dloss(outputs, y)
loss += self.closs(outputs, y[:, 0].long())
return loss
@staticmethod
def metric_mean(name, outputs):
return torch.stack([out[name] for out in outputs]).mean(dim=0)
def get_metrics(self, is_train: bool = False) -> Dict[str, Metric]:
return {'Dice': self.dice}
def forward(self, batch: Any) -> torch.Tensor:
x, _ = batch
x = x.squeeze(1) # type: ignore
logits = self.module(x)
return logits
def inference2d(self, image):
"""Runs inference on a 3D image, by passing each depth slice through the model."""
batch_modulo = image.shape[2] % 64
if batch_modulo != 0:
batch_pad = 64 - batch_modulo
image = nn.ConstantPad3d((0, 0, 0, 0, batch_pad, 0), 0)(image)
image = torch.transpose(image.squeeze(0), 0, 1)
preds_shape = (image.shape[0], 4, *image.shape[2:])
preds = torch.zeros(preds_shape, dtype=image.dtype, device=image.device)
for start in range(0, image.shape[0] - 64 + 1, 64):
end = start + 64
with torch.no_grad():
pred = self.module(image[start:end])
preds[start:end] = pred.data
if batch_modulo != 0:
preds = preds[batch_pad:] # type: ignore
return torch.transpose(preds, 0, 1).unsqueeze(0)
def eval_forward(self, batch: Any, outputs: Optional[Any] = None):
assert self.training is False, 'For validation, model must be in eval mode'
image, _ = batch
pred = self.inference2d(image)
return pred
def build_nnunet(self) -> torch.nn.Module:
kernels = [[3, 3]] * 6
strides = [[1, 1]] + [[2, 2]] * 5
model = UNetModel(in_channels=4,
n_class=4,
kernels=kernels,
strides=strides,
dimension=2,
residual=True,
normalization_layer='batch',
negative_slope=0.01)
return model
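# --- Illustrative sketch (hypothetical addition; not part of the original module) ---
# ``inference2d`` expects a 5-D volume shaped (1, channels, depth, height, width): the
# depth axis is zero-padded at the front up to a multiple of 64, the volume is reshaped
# to a stack of 2-D slices of shape (depth, channels, height, width), and the slices are
# pushed through the 2-D nnU-Net in chunks of 64. A hypothetical smoke test with a random
# volume (requires the ``unet`` extra, i.e. monai, to be installed):
def _example_inference2d_shapes():
    model = UNet(num_classes=3)
    model.eval()
    volume = torch.randn(1, 4, 70, 64, 64)  # depth 70 is padded up to 128
    with torch.no_grad():
        preds = model.inference2d(volume)
    return preds.shape  # torch.Size([1, 4, 70, 64, 64]): 4 output classes per voxel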
| composer-dev | composer/models/unet/unet.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The Unet architecture used in image segmentation. The example we are using is for BRATS medical brain tumor dataset.
See the :doc:`Model Card </model_cards/unet>` for more details.
"""
from composer.models.unet.unet import UNet as UNet
__all__ = ['UNet']
_task = 'Image Segmentation'
_dataset = 'BRATS'
_name = 'UNet'
_quality = '69.1'
_metric = 'Dice'
_ttt = '21m'
_hparams = 'unet.yaml'
| composer-dev | composer/models/unet/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The Unet architecture used in image segmentation. The example we are using is for BRATS medical brain tumor dataset.
See the :doc:`Model Card </model_cards/unet>` for more details.
"""
import torch.nn as nn
from composer.models.unet._layers import ConvBlock, OutputBlock, ResidBlock, UpsampleBlock
__all__ = ['UNet']
class UNet(nn.Module):
"""Unet Architecture adapted from NVidia `Deep Learning Examples`_.
.. _Deep Learning Examples: https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Segmentation/nnUNet/
Args:
in_channels (int): Number of input channels.
n_class (int): Number of output layers.
kernels (list): Conv layer kernel sizes.
strides (list): Conv layer strides.
normalization_layer (str): Normalization layer type, one of (``"batch"``, ``"instance"``).
negative_slope (float): Leaky relu negative slope.
residual (bool): Use residual connections.
dimension (int): Filter dimensions.
"""
def __init__(
self,
in_channels,
n_class,
kernels,
strides,
normalization_layer,
negative_slope,
residual,
dimension,
):
super(UNet, self).__init__()
self.dim = dimension
self.n_class = n_class
self.residual = residual
self.negative_slope = negative_slope
self.norm = normalization_layer + f'norm{dimension}d'
self.filters = [min(2**(5 + i), 320 if dimension == 3 else 512) for i in range(len(strides))]
down_block = ResidBlock if self.residual else ConvBlock
self.input_block = self.get_conv_block(
conv_block=down_block,
in_channels=in_channels,
out_channels=self.filters[0],
kernel_size=kernels[0],
stride=strides[0],
)
self.downsamples = self.get_module_list(
conv_block=down_block,
in_channels=self.filters[:-1],
out_channels=self.filters[1:],
kernels=kernels[1:-1],
strides=strides[1:-1],
)
self.bottleneck = self.get_conv_block(
conv_block=down_block,
in_channels=self.filters[-2],
out_channels=self.filters[-1],
kernel_size=kernels[-1],
stride=strides[-1],
)
self.upsamples = self.get_module_list(
conv_block=UpsampleBlock,
in_channels=self.filters[1:][::-1],
out_channels=self.filters[:-1][::-1],
kernels=kernels[1:][::-1],
strides=strides[1:][::-1],
)
self.output_block = self.get_output_block(decoder_level=0)
self.apply(self.initialize_weights)
self.n_layers = len(self.upsamples) - 1
def forward(self, input_data):
out = self.input_block(input_data)
encoder_outputs = [out]
for downsample in self.downsamples:
out = downsample(out)
encoder_outputs.append(out)
out = self.bottleneck(out)
for idx, upsample in enumerate(self.upsamples):
out = upsample(out, encoder_outputs[self.n_layers - idx])
out = self.output_block(out)
return out
def get_conv_block(self, conv_block, in_channels, out_channels, kernel_size, stride):
return conv_block(
dim=self.dim,
stride=stride,
norm=self.norm,
kernel_size=kernel_size,
in_channels=in_channels,
out_channels=out_channels,
negative_slope=self.negative_slope,
)
def get_output_block(self, decoder_level):
return OutputBlock(in_channels=self.filters[decoder_level], out_channels=self.n_class, dim=self.dim)
def get_module_list(self, in_channels, out_channels, kernels, strides, conv_block):
layers = []
for in_channel, out_channel, kernel, stride in zip(in_channels, out_channels, kernels, strides):
conv_layer = self.get_conv_block(conv_block, in_channel, out_channel, kernel, stride)
layers.append(conv_layer)
return nn.ModuleList(layers)
def initialize_weights(self, module):
name = module.__class__.__name__.lower()
if name in ['conv2d']:
nn.init.kaiming_normal_(module.weight, a=self.negative_slope)
| composer-dev | composer/models/unet/model.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""A wrapper around `timm.create_model() <https://rwightman.github.io/pytorch-image-models/#load-a-pretrained-model>`_
used to create :class:`.ComposerClassifier`."""
from composer.models.timm.model import composer_timm as composer_timm
__all__ = ['composer_timm']
| composer-dev | composer/models/timm/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""A wrapper around `timm.create_model() <https://rwightman.github.io/pytorch-image-models/#load-a-pretrained-model>`_
used to create :class:`.ComposerClassifier`."""
from typing import Optional
from composer.models.tasks import ComposerClassifier
from composer.utils.import_helpers import MissingConditionalImportError
__all__ = ['composer_timm']
def composer_timm(model_name: str,
pretrained: bool = False,
num_classes: int = 1000,
drop_rate: float = 0.0,
drop_path_rate: Optional[float] = None,
drop_block_rate: Optional[float] = None,
global_pool: Optional[str] = None,
bn_momentum: Optional[float] = None,
bn_eps: Optional[float] = None) -> ComposerClassifier:
"""A wrapper around `timm.create_model() <https://rwightman.github.io/pytorch-image-models/#load-a-pretrained-
model>`_ used to create :class:`.ComposerClassifier`.
Args:
model_name (str): timm model name, e.g. ``"resnet50"``. List of models can be found at
`PyTorch Image Models <https://github.com/rwightman/pytorch-image-models>`_.
pretrained (bool, optional): Whether to load ImageNet-pretrained weights. Default: ``False``.
num_classes (int, optional): The number of classes. Needed for classification tasks. Default: ``1000``.
drop_rate (float, optional): Dropout rate. Default: ``0.0``.
drop_path_rate (float, optional): Drop path rate (model default if ``None``). Default: ``None``.
drop_block_rate (float, optional): Drop block rate (model default if ``None``). Default: ``None``.
global_pool (str, optional): Global pool type, one of (``"fast"``, ``"avg"``, ``"max"``, ``"avgmax"``, ``"avgmaxc"``). Model default if ``None``. Default: ``None``.
bn_momentum (float, optional): BatchNorm momentum override (model default if ``None``). Default: ``None``.
bn_eps (float, optional): BatchNorm epsilon override (model default if ``None``). Default: ``None``.
Returns:
ComposerModel: instance of :class:`.ComposerClassifier` with the specified TIMM model.
Resnet18 Example:
.. testcode::
from composer.models import composer_timm
model = composer_timm(model_name='resnet18') # creates a timm resnet18
"""
try:
import timm
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='timm', conda_package='timm>=0.5.4',
conda_channel=None) from e
model = timm.create_model( # type: ignore (third-party)
model_name=model_name,
pretrained=pretrained,
num_classes=num_classes,
drop_rate=drop_rate,
drop_path_rate=drop_path_rate,
drop_block_rate=drop_block_rate,
global_pool=global_pool,
bn_momentum=bn_momentum,
bn_eps=bn_eps)
composer_model = ComposerClassifier(module=model)
return composer_model
| composer-dev | composer/models/timm/model.py |
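As a usage sketch (hedged: it assumes the optional ``timm`` dependency is installed and, with ``pretrained=True``, that ImageNet weights can be downloaded), the wrapper above forwards its keyword arguments straight to ``timm.create_model``, so the classifier head and regularization can be customized at creation time:

from composer.models import composer_timm

# Pretrained ResNet-50 backbone with a 10-way classifier head and extra dropout.
model = composer_timm(
    model_name='resnet50',
    pretrained=True,   # downloads ImageNet weights via timm
    num_classes=10,    # replaces the default 1000-way head
    drop_rate=0.1,
)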
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import Callable, Optional
import torch
from torch import nn as nn
def round_channels(
channels: float,
width_multiplier: float,
divisor: int = 8,
min_value: Optional[int] = None,
) -> int:
"""Round number of channels after scaling with width multiplier.
This function ensures that channel counts halfway in-between divisors are rounded up.
Args:
channels (float): Number to round.
width_multiplier (float): Amount to scale `channels`.
divisor (int): Number to make the output divisible by.
min_value (int, optional): Minimum value the output can be. If not specified, defaults
to the ``divisor``.
"""
if not width_multiplier:
return int(channels)
channels *= width_multiplier
min_value = min_value or divisor
new_channels = max(min_value, int(channels + divisor / 2) // divisor * divisor)
if new_channels < 0.9 * channels: # increase channels if rounding decreases by >10%
new_channels += divisor
return new_channels
def calculate_same_padding(kernel_size, dilation, stride):
"""Calculates the amount of padding to use to get the "SAME" functionality in Tensorflow."""
return ((stride - 1) + dilation * (kernel_size - 1)) // 2
def drop_connect(inputs: torch.Tensor, drop_connect_rate: float, training: bool):
"""Randomly mask a set of samples. Provides similar regularization as stochastic depth.
Args:
inputs (torch.Tensor): Input tensor to mask.
drop_connect_rate (float): Probability of dropping each sample.
training (bool): Whether or not the model is training.
"""
if not training:
return inputs
keep_prob = 1 - drop_connect_rate
rand_tensor = keep_prob + torch.rand(
[inputs.size()[0], 1, 1, 1],
dtype=inputs.dtype,
device=inputs.device,
)
rand_tensor.floor_() # binarize
output = inputs.div(keep_prob) * rand_tensor
return output
class SqueezeExcite(nn.Module):
"""Squeeze Excite Layer.
Args:
in_channels (int): Number of channels in the input tensor.
latent_channels (int): Number of hidden channels.
act_layer (torch.nn.Module): Activation layer to use in block.
"""
def __init__(
self,
in_channels: int,
latent_channels: int,
act_layer: Callable[..., nn.Module] = nn.ReLU,
):
super().__init__()
self.global_avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_channels, latent_channels, kernel_size=1, bias=True)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(latent_channels, in_channels, kernel_size=1, bias=True)
self.gate_fn = torch.nn.Sigmoid()
def forward(self, x: torch.Tensor):
out = self.global_avg_pool(x)
out = self.conv_reduce(out)
out = self.act1(out)
out = self.conv_expand(out)
out = x * self.gate_fn(out)
return out
class DepthwiseSeparableConv(nn.Module):
"""Depthwise Separable Convolution layer.
Args:
in_channels (int): Number of channels in the input tensor.
out_channels (int): Number of channels in the output tensor.
kernel_size (int): Size of the convolving kernel.
stride (int): Stride of the convolution.
se_ratio (float): How much to scale `in_channels` for the hidden layer
dimensionality of the squeeze-excite module.
drop_connect_rate (float): Probability of dropping a sample before the
identity connection, provides regularization similar to stochastic
depth.
act_layer (torch.nn.Module): Activation layer to use in block.
norm_kwargs (dict): Normalization layer's keyword arguments.
norm_layer (torch.nn.Module): Normalization layer to use in block.
"""
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int,
se_ratio: float,
drop_connect_rate: float,
act_layer: Callable[..., nn.Module],
norm_kwargs: dict,
norm_layer: Callable[..., nn.Module] = nn.BatchNorm2d):
super().__init__()
self.drop_connect_rate = drop_connect_rate
self.has_residual = (in_channels == out_channels and stride == 1)
self.has_se = se_ratio is not None and se_ratio > 0.0
padding = calculate_same_padding(kernel_size, dilation=1, stride=stride)
self.conv_depthwise = nn.Conv2d(in_channels=in_channels,
out_channels=in_channels,
groups=in_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=False)
self.bn1 = norm_layer(in_channels, **norm_kwargs)
self.act1 = act_layer(inplace=True)
if self.has_se:
latent_channels = max(1, int(in_channels * se_ratio))
self.se = SqueezeExcite(in_channels, latent_channels, act_layer)
self.conv_pointwise = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
bias=False,
)
self.bn2 = norm_layer(out_channels, **norm_kwargs)
self.act2 = act_layer(inplace=True)
def forward(self, input: torch.Tensor):
residual = input
out = self.conv_depthwise(input)
out = self.bn1(out)
out = self.act1(out)
if self.has_se:
out = self.se(out)
out = self.conv_pointwise(out)
out = self.bn2(out)
out = self.act2(out)
if self.has_residual:
if self.drop_connect_rate > 0.0:
out = drop_connect(out, self.drop_connect_rate, self.training)
out += residual
return out
class MBConvBlock(nn.Module):
"""Mobile Inverted Residual Bottleneck Block.
This block is implemented as defined in
`MobileNetV2: Inverted Residuals and Linear Bottlenecks <https://arxiv.org/abs/1801.04381>`_ (Sandler et al, 2018).
Args:
in_channels (int): Number of channels in the input tensor.
out_channels (int): Number of channels in the output tensor.
kernel_size (int): Size of the convolving kernel.
stride (int): Stride of the convolution.
expand_ratio (int): How much to expand the input channels for the
depthwise convolution.
se_ratio (float): How much to scale `in_channels` for the hidden layer
dimensionality of the squeeze-excite module.
drop_connect_rate (float): Probability of dropping a sample before the
identity connection, provides regularization similar to stochastic
depth.
act_layer (torch.nn.Module): Activation layer to use in block.
norm_kwargs (dict): Normalization layer's keyword arguments.
norm_layer (torch.nn.Module): Normalization layer to use in block.
"""
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int,
expand_ratio: int,
se_ratio: float,
drop_connect_rate: float,
act_layer: Callable[..., nn.Module],
norm_kwargs: dict,
norm_layer: Callable[..., nn.Module] = nn.BatchNorm2d):
super().__init__()
self.drop_connect_rate = drop_connect_rate
self.has_residual = (in_channels == out_channels and stride == 1)
self.has_se = se_ratio is not None and se_ratio > 0.0
mid_channels = round_channels(in_channels, expand_ratio)
# Point-wise convolution expansion
self.conv1x1_expand = nn.Conv2d(in_channels, mid_channels, kernel_size=1, bias=False)
self.bn1 = norm_layer(mid_channels, **norm_kwargs)
self.act1 = act_layer(inplace=True)
# Depth-wise Convolution
padding = calculate_same_padding(kernel_size, dilation=1, stride=stride)
self.conv_depthwise = nn.Conv2d(in_channels=mid_channels,
out_channels=mid_channels,
groups=mid_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=False)
self.bn2 = norm_layer(mid_channels, **norm_kwargs)
self.act2 = act_layer(inplace=True)
# Squeeze and Excitation layer, if specified
if self.has_se:
latent_channels = max(1, int(in_channels * se_ratio))
self.se = SqueezeExcite(mid_channels, latent_channels, act_layer)
# Point-wise convolution contraction
self.conv1x1_contract = nn.Conv2d(mid_channels, out_channels, kernel_size=1, bias=False)
self.bn3 = norm_layer(out_channels, **norm_kwargs)
def forward(self, input: torch.Tensor):
residual = input
out = self.conv1x1_expand(input)
out = self.bn1(out)
out = self.act1(out)
out = self.conv_depthwise(out)
out = self.bn2(out)
out = self.act2(out)
if self.has_se:
out = self.se(out)
out = self.conv1x1_contract(out)
out = self.bn3(out)
if self.has_residual:
if self.drop_connect_rate:
out = drop_connect(out, self.drop_connect_rate, self.training)
out += residual
return out
| composer-dev | composer/models/efficientnetb0/_layers.py |
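The helpers above can be exercised directly. The following sketch (illustrative, not from the repository) shows how ``round_channels`` snaps scaled widths to multiples of the divisor, and that a strided ``MBConvBlock`` halves the spatial resolution while changing the channel count:

import torch
import torch.nn as nn
from composer.models.efficientnetb0._layers import MBConvBlock, round_channels

print(round_channels(32, width_multiplier=1.1))  # 32 -- rounding down stays within 10%
print(round_channels(32, width_multiplier=1.2))  # 40 -- snapped up to the next multiple of 8

block = MBConvBlock(in_channels=16, out_channels=24, kernel_size=3, stride=2,
                    expand_ratio=6, se_ratio=0.25, drop_connect_rate=0.0,
                    act_layer=nn.SiLU, norm_kwargs={'momentum': 0.1, 'eps': 1e-5})
out = block(torch.randn(2, 16, 32, 32))
print(out.shape)  # torch.Size([2, 24, 16, 16])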
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The EfficientNet model family is a set of convolutional neural networks that can be used as the basis for a variety
of vision tasks, but were initially designed for image classification. The model family was designed to reach the
highest accuracy for a given computation budget during inference by simultaneously scaling model depth, model width, and
image resolution according to an empirically determined scaling law.
See the :doc:`Model Card </model_cards/efficientnet>` for more details.
"""
from composer.models.efficientnetb0.model import composer_efficientnetb0 as composer_efficientnetb0
__all__ = ['composer_efficientnetb0']
_task = 'Image Classification'
_dataset = 'ImageNet'
_name = 'EfficientNet-B0'
_quality = '76.63'
_metric = 'Top-1 Accuracy'
_ttt = '21h 48m'
_hparams = 'efficientnetb0.yaml'
| composer-dev | composer/models/efficientnetb0/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""A :class:`.ComposerClassifier` wrapper around the EfficientNet-b0 architecture."""
from composer.models.efficientnetb0.efficientnets import EfficientNet
from composer.models.tasks import ComposerClassifier
__all__ = ['composer_efficientnetb0']
def composer_efficientnetb0(num_classes: int = 1000, drop_connect_rate: float = 0.2) -> ComposerClassifier:
"""Helper function to create a :class:`.ComposerClassifier` with an EfficientNet-b0 architecture.
See `EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks <https://arxiv.org/abs/1905.11946>`_
(Tan et al, 2019) for more details.
Args:
num_classes (int, optional): The number of classes. Needed for classification tasks. Default: ``1000``.
drop_connect_rate (float, optional): Probability of dropping a sample within a block before identity
connection. Default: ``0.2``.
Returns:
ComposerModel: instance of :class:`.ComposerClassifier` with an EfficientNet-B0 model.
Example:
.. testcode::
from composer.models import composer_efficientnetb0
model = composer_efficientnetb0() # creates EfficientNet-b0 for image classification
"""
model = EfficientNet.get_model_from_name(model_name='efficientnet-b0',
num_classes=num_classes,
drop_connect_rate=drop_connect_rate)
composer_model = ComposerClassifier(module=model)
return composer_model
| composer-dev | composer/models/efficientnetb0/model.py |
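A hedged usage sketch (it assumes the wrapped torch network is exposed as ``.module`` on the returned :class:`.ComposerClassifier`): the helper builds the EfficientNet-B0 variant, and the underlying network maps a 224x224 image to ``num_classes`` logits:

import torch
from composer.models import composer_efficientnetb0

model = composer_efficientnetb0(num_classes=10, drop_connect_rate=0.1)
logits = model.module(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 10])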
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""EfficientNet model.
Adapted from `(Generic) EfficientNets for PyTorch. <https://github.com/rwightman/gen-efficientnet-pytorch>`_.
"""
import math
import re
from typing import Callable, Optional
import torch
import torch.nn as nn
from composer.models.efficientnetb0._layers import (DepthwiseSeparableConv, MBConvBlock, calculate_same_padding,
round_channels)
__all__ = ['EfficientNet']
class EfficientNet(nn.Module):
"""EfficientNet model based on (`Tan et al, 2019 <https://arxiv.org/abs/1905.11946>`_).
Args:
num_classes (int): Size of the EfficientNet output, typically viewed
as the number of classes in a classification task.
width_multiplier (float, optional): How much to scale the EfficientNet-B0 channel
dimension throughout the model. Default: ``1.0``.
depth_multiplier (float, optional): How much to scale the EfficientNet-B0 depth. Default: ``1.0``.
drop_rate (float, optional): Dropout probability for the penultimate activations. Default: ``0.2``.
drop_connect_rate (float, optional): Probability of dropping a sample before the
identity connection, provides regularization similar to stochastic
depth. Default: ``0.2``.
act_layer (torch.nn.Module, optional): Activation layer to use in the model. Default: ``nn.SiLU``.
norm_kwargs (dict, optional): Normalization layer's keyword arguments. Default: ``{"momentum": 0.1, "eps": 1e-5}``.
norm_layer (torch.nn.Module, optional): Normalization layer to use in the model. Default: ``nn.BatchNorm2d``.
"""
# EfficientNet-B0 architecture specification.
# block_strings are decoded into block level hyperparameters.
# r=repeat, k=kernel_size, s=stride, e=expand_ratio, i=in_channels, o=out_channels, se=se_ratio.
_blocks_strings = [
'r1_k3_s1_e1_i32_o16_se0.25',
'r2_k3_s2_e6_i16_o24_se0.25',
'r2_k5_s2_e6_i24_o40_se0.25',
'r3_k3_s2_e6_i40_o80_se0.25',
'r3_k5_s1_e6_i80_o112_se0.25',
'r4_k5_s2_e6_i112_o192_se0.25',
'r1_k3_s1_e6_i192_o320_se0.25',
]
def __init__(self,
num_classes: int,
width_multiplier: float = 1.0,
depth_multiplier: float = 1.0,
drop_rate: float = 0.2,
drop_connect_rate: float = 0.2,
act_layer: Callable[..., nn.Module] = nn.SiLU,
norm_kwargs: Optional[dict] = None,
norm_layer: Callable[..., nn.Module] = nn.BatchNorm2d):
super(EfficientNet, self).__init__()
self.num_classes = num_classes
if norm_kwargs is None:
norm_kwargs = {'momentum': 0.1, 'eps': 1e-5}
in_channels = 3
out_channels = round_channels(32, width_multiplier)
padding = calculate_same_padding(kernel_size=3, dilation=1, stride=2)
self.conv_stem = nn.Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=2,
padding=padding,
bias=False,
)
self.bn1 = norm_layer(num_features=out_channels, **norm_kwargs)
self.act1 = act_layer(inplace=True)
# Count the number of blocks in the model
block_count = 0.
for block_string in self._blocks_strings:
_, num_repeat = self._decode_block_string(block_string)
block_count += num_repeat
# Decode block strings and add blocks
block_idx = 0.
blocks = []
block_args = {}
for block_string in self._blocks_strings:
block_args, num_repeat = self._decode_block_string(block_string)
# Scale channels and number of repeated blocks based on multipliers
block_args['in_channels'] = round_channels(
block_args['in_channels'],
width_multiplier,
)
block_args['out_channels'] = round_channels(
block_args['out_channels'],
width_multiplier,
)
num_repeat = int(math.ceil(depth_multiplier * num_repeat))
# Add activation, normalization layers, and drop connect
block_args['act_layer'] = act_layer
block_args['norm_kwargs'] = norm_kwargs
block_args['norm_layer'] = norm_layer
# Delete expand_ratio when set to 1 to use depthwise separable convolution layer
if block_args['expand_ratio'] == 1:
del block_args['expand_ratio']
for i in range(num_repeat):
# Linearly decay drop_connect_rate across model depth
block_args['drop_connect_rate'] = drop_connect_rate * block_idx / block_count
if 'expand_ratio' not in block_args:
blocks.append(DepthwiseSeparableConv(**block_args))
else:
blocks.append(MBConvBlock(**block_args))
block_idx += 1
# Only the first block in a stage can have stride != 1
if i == 0:
block_args['stride'] = 1
block_args['in_channels'] = block_args['out_channels']
self.blocks = nn.Sequential(*blocks)
in_channels = block_args['out_channels']
out_channels = round_channels(1280, width_multiplier)
self.conv_head = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self.bn2 = norm_layer(out_channels, **norm_kwargs)
self.act2 = act_layer(inplace=True)
self.global_avg_pool = nn.AdaptiveAvgPool2d(1)
self.dropout = nn.Dropout(drop_rate)
self.classifier = nn.Linear(out_channels, num_classes)
# Initialization from gen-efficientnet-pytorch repo
for m in self.modules():
if isinstance(m, torch.nn.Conv2d):
fan_out = (m.kernel_size[0] * m.kernel_size[1] * m.out_channels) // m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, torch.nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
elif isinstance(m, torch.nn.Linear):
fan_out = m.weight.size(0)
init_range = 1.0 / math.sqrt(fan_out)
m.weight.data.uniform_(-init_range, init_range)
m.bias.data.zero_()
def extract_features(self, input: torch.Tensor):
out = self.conv_stem(input)
out = self.bn1(out)
out = self.act1(out)
out = self.blocks(out)
out = self.conv_head(out)
out = self.bn2(out)
out = self.act2(out)
out = self.global_avg_pool(out)
return out.flatten(1)
def forward(self, input: torch.Tensor):
out = self.extract_features(input)
out = self.dropout(out)
return self.classifier(out)
@staticmethod
def get_model_from_name(model_name: str, num_classes, drop_connect_rate: float):
"""Instantiate an EfficientNet model family member based on the model_name string.
Args:
model_name (str): One of ``'efficientnet-b0'`` through ``'efficientnet-b7'``.
num_classes (int): Size of the EfficientNet output, typically viewed as the number of classes in a classification task.
drop_connect_rate (float): Probability of dropping a sample before the identity connection,
provides regularization similar to stochastic depth.
"""
# Coefficients: width, depth, res, dropout
model_arch = {
'efficientnet-b0': (1.0, 1.0, 224, 0.2),
'efficientnet-b1': (1.0, 1.1, 240, 0.2),
'efficientnet-b2': (1.1, 1.2, 260, 0.3),
'efficientnet-b3': (1.2, 1.4, 300, 0.3),
'efficientnet-b4': (1.4, 1.8, 380, 0.4),
'efficientnet-b5': (1.6, 2.2, 456, 0.4),
'efficientnet-b6': (1.8, 2.6, 528, 0.5),
'efficientnet-b7': (2.0, 3.1, 600, 0.5),
}
model_params = model_arch[model_name]
width_multiplier = model_params[0]
depth_multiplier = model_params[1]
drop_rate = model_params[3]
return EfficientNet(num_classes=num_classes,
width_multiplier=width_multiplier,
depth_multiplier=depth_multiplier,
drop_rate=drop_rate,
drop_connect_rate=drop_connect_rate)
def _decode_block_string(self, block_string: str):
"""Decodes an EfficientNet block specification string into a dictionary of keyword arguments for a block in the
architecture."""
arg_strings = block_string.split('_')
args = {}
for arg_string in arg_strings:
splits = re.split(r'(\d.*)', arg_string)
if len(splits) >= 2:
key, value = splits[:2]
args[key] = value
num_repeat = int(args['r'])
block_args = {
'kernel_size': int(args['k']),
'stride': int(args['s']),
'expand_ratio': int(args['e']),
'in_channels': int(args['i']),
'out_channels': int(args['o']),
'se_ratio': float(args['se']) if 'se' in args else None,
}
return block_args, num_repeat
| composer-dev | composer/models/efficientnetb0/efficientnets.py |
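To make the block-string encoding concrete, the sketch below (illustrative only; it calls the private ``_decode_block_string`` helper purely for demonstration) decodes one B0 stage specification and runs a forward pass through the assembled model:

import torch
from composer.models.efficientnetb0.efficientnets import EfficientNet

model = EfficientNet.get_model_from_name('efficientnet-b0', num_classes=10, drop_connect_rate=0.2)

block_args, num_repeat = model._decode_block_string('r2_k3_s2_e6_i16_o24_se0.25')
print(num_repeat)  # 2
print(block_args)  # {'kernel_size': 3, 'stride': 2, 'expand_ratio': 6,
                   #  'in_channels': 16, 'out_channels': 24, 'se_ratio': 0.25}

logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 10])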
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The `BERT <https://huggingface.co/docs/transformers/master/en/model_doc/bert>`_ model family using `Hugging Face
Transformers <https://huggingface.co/transformers/>`_."""
from composer.models.bert.model import create_bert_classification as create_bert_classification
from composer.models.bert.model import create_bert_mlm as create_bert_mlm
__all__ = ['create_bert_classification', 'create_bert_mlm']
| composer-dev | composer/models/bert/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Implements a BERT wrapper around a :class:`.ComposerTransformer`."""
from __future__ import annotations
from typing import Optional
from torchmetrics import MeanSquaredError
from torchmetrics.classification import MatthewsCorrCoef, MulticlassAccuracy
from torchmetrics.regression import SpearmanCorrCoef
from composer.metrics.nlp import BinaryF1Score, LanguageCrossEntropy, MaskedAccuracy
from composer.models.huggingface import HuggingFaceModel
from composer.utils.import_helpers import MissingConditionalImportError
__all__ = ['create_bert_mlm', 'create_bert_classification']
def create_bert_mlm(use_pretrained: Optional[bool] = False,
pretrained_model_name: Optional[str] = None,
model_config: Optional[dict] = None,
tokenizer_name: Optional[str] = None,
gradient_checkpointing: Optional[bool] = False):
"""BERT model based on |:hugging_face:| Transformers.
For more information, see `Transformers <https://huggingface.co/transformers/>`_.
Args:
gradient_checkpointing (bool, optional): Use gradient checkpointing. Default: ``False``.
use_pretrained (bool, optional): Whether to initialize the model with the pretrained weights. Default: ``False``.
pretrained_model_name (str, optional): Name of the Hugging Face model/config to load. Default: ``'bert-base-uncased'``.
model_config (dict, optional): The settings used to create a Hugging Face BertConfig. BertConfig is used to specify the
architecture of a Hugging Face model.
tokenizer_name (str, optional): Tokenizer name used to preprocess the dataset
and validate the model's inputs.
.. code-block::
{
"_name_or_path": "bert-base-uncased",
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": 0.1,
"classifier_dropout": null,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"layer_norm_eps": 1e-12,
"max_position_embeddings": 512,
"model_type": "bert",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 0,
"position_embedding_type": "absolute",
"transformers_version": "4.16.0",
"type_vocab_size": 2,
"use_cache": true,
"vocab_size": 30522
}
To create a BERT model for Masked Language Model pretraining:
.. testcode::
from composer.models import create_bert_mlm
model = create_bert_mlm()
"""
try:
import transformers
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='transformers') from e
if not model_config:
model_config = {}
if not pretrained_model_name:
pretrained_model_name = 'bert-base-uncased'
if use_pretrained:
assert transformers.AutoModelForMaskedLM.from_pretrained is not None, 'AutoModelForMaskedLM has from_pretrained method'
model = transformers.AutoModelForMaskedLM.from_pretrained(pretrained_model_name_or_path=pretrained_model_name,
**model_config)
else:
config = transformers.AutoConfig.from_pretrained(pretrained_model_name, **model_config)
assert transformers.AutoModelForMaskedLM.from_config is not None, 'AutoModelForMaskedLM has from_config method'
model = transformers.AutoModelForMaskedLM.from_config(config)
if gradient_checkpointing:
model.gradient_checkpointing_enable() # type: ignore
# setup the tokenizer
if tokenizer_name:
tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer_name)
else:
tokenizer = None
metrics = [LanguageCrossEntropy(ignore_index=-100), MaskedAccuracy(ignore_index=-100)]
return HuggingFaceModel(model=model, tokenizer=tokenizer, use_logits=True, metrics=metrics)
def create_bert_classification(num_labels: int = 2,
use_pretrained: bool = False,
pretrained_model_name: Optional[str] = None,
model_config: Optional[dict] = None,
tokenizer_name: Optional[str] = None,
gradient_checkpointing: bool = False):
"""BERT classification model based on |:hugging_face:| Transformers.
For more information, see `Transformers <https://huggingface.co/transformers/>`_.
Args:
num_labels (int, optional): The number of classes in the classification task. Default: ``2``.
gradient_checkpointing (bool, optional): Use gradient checkpointing. Default: ``False``.
use_pretrained (bool, optional): Whether to initialize the model with the pretrained weights. Default: ``False``.
pretrained_model_name (str, optional): Name of the Hugging Face model/config to load. Default: ``'bert-base-uncased'``.
model_config (dict, optional): The settings used to create a Hugging Face BertConfig. BertConfig is used to specify the
architecture of a Hugging Face model.
tokenizer_name (str, optional): Tokenizer name used to preprocess the dataset
and validate the model's inputs.
.. code-block::
{
"_name_or_path": "bert-base-uncased",
"architectures": [
"BertForSequenceClassification
],
"attention_probs_dropout_prob": 0.1,
"classifier_dropout": null,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1",
"2": "LABEL_2"
},
"initializer_range": 0.02,
"intermediate_size": 3072,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1,
"LABEL_2": 2
},
"layer_norm_eps": 1e-12,
"max_position_embeddings": 512,
"model_type": "bert",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 0,
"position_embedding_type": "absolute",
"transformers_version": "4.16.0",
"type_vocab_size": 2,
"use_cache": true,
"vocab_size": 30522
}
To create a BERT model for classification:
.. testcode::
from composer.models import create_bert_classification
model = create_bert_classification(num_labels=3) # if the task has three classes.
Note:
This function can be used to construct a BERT model for regression by setting ``num_labels == 1``.
This will have two noteworthy effects. First, it will switch the training loss to :class:`~torch.nn.MSELoss`.
Second, the returned :class:`.ComposerModel`'s train/validation metrics will be :class:`~torchmetrics.MeanSquaredError` and :class:`~torchmetrics.SpearmanCorrCoef`.
For the classification case (when ``num_labels > 1``), the training loss is :class:`~torch.nn.CrossEntropyLoss`, and the train/validation
metrics are :class:`~torchmetrics.MulticlassAccuracy` and :class:`~torchmetrics.MatthewsCorrCoef`, as well as :class:`.BinaryF1Score` if ``num_labels == 2``.
"""
try:
import transformers
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='transformers') from e
if not model_config:
model_config = {}
model_config['num_labels'] = num_labels
if not pretrained_model_name:
pretrained_model_name = 'bert-base-uncased'
if use_pretrained:
assert transformers.AutoModelForSequenceClassification.from_pretrained is not None, 'AutoModelForSequenceClassification has from_pretrained method'
model = transformers.AutoModelForSequenceClassification.from_pretrained(
pretrained_model_name_or_path=pretrained_model_name, **model_config)
else:
config = transformers.AutoConfig.from_pretrained(pretrained_model_name, **model_config)
assert transformers.AutoModelForSequenceClassification.from_config is not None, 'AutoModelForSequenceClassification has from_config method'
model = transformers.AutoModelForSequenceClassification.from_config(config)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
# setup the tokenizer
if tokenizer_name:
tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer_name)
else:
tokenizer = None
if num_labels == 1:
# Metrics for a regression model
metrics = [MeanSquaredError(), SpearmanCorrCoef()]
else:
# Metrics for a classification model
metrics = [
MulticlassAccuracy(num_classes=num_labels, average='micro'),
MatthewsCorrCoef(task='multiclass', num_classes=num_labels)
]
if num_labels == 2:
metrics.append(BinaryF1Score())
return HuggingFaceModel(model=model, tokenizer=tokenizer, use_logits=True, metrics=metrics)
| composer-dev | composer/models/bert/model.py |
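The regression/classification switch described in the note above depends only on ``num_labels``. A hedged sketch (it requires the optional ``transformers`` dependency and network access to fetch the ``bert-base-uncased`` config):

from composer.models import create_bert_classification

# num_labels == 1 -> MSE loss with MeanSquaredError / SpearmanCorrCoef metrics.
regression_model = create_bert_classification(num_labels=1)

# num_labels == 2 -> cross-entropy loss with MulticlassAccuracy, MatthewsCorrCoef, and BinaryF1Score.
binary_model = create_bert_classification(num_labels=2)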
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""A simple example convolutional neural network which can be used to classify MNIST data."""
from composer.models.classify_mnist.model import mnist_model as mnist_model
__all__ = ['mnist_model']
_task = 'Image Classification'
_dataset = 'MNIST'
_name = 'SimpleConvNet'
_quality = ''
_metric = 'MulticlassAccuracy'
_ttt = '?'
_hparams = 'classify_mnist_cpu.yaml'
| composer-dev | composer/models/classify_mnist/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""A simple convolutional neural network extending :class:`.ComposerClassifier`."""
from typing import List, Optional, Sequence, Union
import torch
import torch.nn as nn
from torch.nn import functional as F
from composer.models.initializers import Initializer
from composer.models.tasks import ComposerClassifier
__all__ = ['Model', 'mnist_model']
class Model(nn.Module):
"""Toy convolutional neural network architecture in pytorch for MNIST."""
def __init__(self, initializers: Sequence[Union[str, Initializer]], num_classes: int = 10):
super().__init__()
self.num_classes = num_classes
self.conv1 = nn.Conv2d(1, 16, (3, 3), padding=0)
self.conv2 = nn.Conv2d(16, 32, (3, 3), padding=0)
self.bn = nn.BatchNorm2d(32)
self.fc1 = nn.Linear(32 * 16, 32)
self.fc2 = nn.Linear(32, num_classes)
# Apply initializers after the layers are registered; calling ``self.apply`` before
# any submodules exist would be a no-op.
for initializer in initializers:
initializer = Initializer(initializer)
self.apply(initializer.get_initializer())
def forward(self, x):
out = self.conv1(x)
out = F.relu(out)
out = self.conv2(out)
out = self.bn(out)
out = F.relu(out)
out = F.adaptive_avg_pool2d(out, (4, 4))
out = torch.flatten(out, 1, -1)
out = self.fc1(out)
out = F.relu(out)
return self.fc2(out)
def mnist_model(num_classes: int = 10, initializers: Optional[List[Initializer]] = None):
"""Helper function to create a :class:`.ComposerClassifier` with a simple convolutional neural network.
Args:
num_classes (int, optional): The number of classes. Needed for classification tasks. Default: ``10``
initializers (List[Initializer], optional): list of Initializers
for the model. ``None`` for no initialization. Default: ``None``
Returns:
ComposerModel: instance of :class:`.ComposerClassifier` with a simple MNIST model.
Example:
.. testcode::
from composer.models import mnist_model
model = mnist_model()
"""
if initializers is None:
initializers = []
model = Model(initializers, num_classes)
composer_model = ComposerClassifier(module=model)
return composer_model
| composer-dev | composer/models/classify_mnist/model.py |
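An illustrative shape check (not part of the repository) for the toy network above: two 3x3 valid convolutions take a 28x28 input to 24x24, adaptive pooling reduces it to 4x4, and the two linear layers produce ``num_classes`` logits:

import torch
from composer.models.classify_mnist.model import Model

net = Model(initializers=[], num_classes=10)
logits = net(torch.randn(1, 1, 28, 28))
print(logits.shape)  # torch.Size([1, 10])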
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""A ResNet model family adapted for CIFAR10 image sizes.
See the :doc:`Model Card </model_cards/cifar_resnet>` for more details.
"""
from composer.models.resnet_cifar.model import composer_resnet_cifar as composer_resnet_cifar
__all__ = ['composer_resnet_cifar']
_metadata = {
'resnet9': {
'_task': 'Image Classification',
'_dataset': 'CIFAR10',
'_name': 'ResNet9',
'_quality': 'tbd',
'_metric': 'Top-1 Accuracy',
'_ttt': 'tbd',
'_hparams': 'resnet9_cifar10.yaml'
},
'resnet20': {
'_task': 'Image Classification',
'_dataset': 'CIFAR10',
'_name': 'ResNet20',
'_quality': 'tbd',
'_metric': 'Top-1 Accuracy',
'_ttt': 'tbd',
'_hparams': 'resnet20_cifar10.yaml'
},
'resnet56': {
'_task': 'Image Classification',
'_dataset': 'CIFAR10',
'_name': 'ResNet56',
'_quality': '93.1',
'_metric': 'Top-1 Accuracy',
'_ttt': '35m',
'_hparams': 'resnet56_cifar10.yaml'
}
}
| composer-dev | composer/models/resnet_cifar/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""ResNet models for CIFAR extending :class:`.ComposerClassifier`."""
from typing import List, Optional
from composer.models.initializers import Initializer
from composer.models.resnet_cifar.resnets import ResNet9, ResNetCIFAR
from composer.models.tasks import ComposerClassifier
__all__ = ['composer_resnet_cifar']
def composer_resnet_cifar(model_name: str,
num_classes: int = 10,
initializers: Optional[List[Initializer]] = None) -> ComposerClassifier:
"""Helper function to create a :class:`.ComposerClassifier` with a CIFAR ResNet models.
From `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_ (He et al, 2015).
ResNet9 is based on the model from myrtle.ai `blog`_.
Args:
model_name (str): ``"resnet_9"``, ``"resnet_20"``, or ``"resnet_56"``.
num_classes (int, optional): The number of classes. Needed for classification tasks. Default: ``10``.
initializers (List[Initializer], optional): Initializers for the model. ``None`` for no initialization.
Default: ``None``.
Returns:
ComposerModel: instance of :class:`.ComposerClassifier` with a CIFAR ResNet model.
Example:
.. testcode::
from composer.models import composer_resnet_cifar
model = composer_resnet_cifar(model_name="resnet_56") # creates a resnet56 for cifar image classification
.. _blog: https://myrtle.ai/learn/how-to-train-your-resnet-4-architecture/
"""
if initializers is None:
initializers = []
if model_name == 'resnet_9':
model = ResNet9(num_classes) # current initializers don't work with this architecture.
else:
model = ResNetCIFAR.get_model_from_name(model_name, initializers, num_classes)
composer_model = ComposerClassifier(module=model, num_classes=num_classes)
return composer_model
| composer-dev | composer/models/resnet_cifar/model.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The CIFAR ResNet torch module.
See the :doc:`Model Card </model_cards/resnet>` for more details.
"""
# Code below adapted from https://github.com/facebookresearch/open_lth
# and https://github.com/pytorch/vision
from typing import List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.resnet import BasicBlock
from composer.models import Initializer
__all__ = ['ResNetCIFAR', 'ResNet9']
class ResNetCIFAR(nn.Module):
"""A residual neural network as originally designed for CIFAR-10."""
class Block(nn.Module):
"""A ResNet block."""
def __init__(self, f_in: int, f_out: int, downsample: bool = False):
super(ResNetCIFAR.Block, self).__init__()
stride = 2 if downsample else 1
self.conv1 = nn.Conv2d(f_in, f_out, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(f_out)
self.conv2 = nn.Conv2d(f_out, f_out, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(f_out)
self.relu = nn.ReLU(inplace=True)
# Shortcut connection: identity when shapes match, otherwise a strided 1x1 convolution + BatchNorm.
if downsample or f_in != f_out:
self.shortcut = nn.Sequential(
nn.Conv2d(f_in, f_out, kernel_size=1, stride=2, bias=False),
nn.BatchNorm2d(f_out),
)
else:
self.shortcut = nn.Sequential()
def forward(self, x: torch.Tensor):
out = self.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
return self.relu(out)
def __init__(self, plan: List[Tuple[int, int]], initializers: List[Initializer], outputs: int = 10):
super(ResNetCIFAR, self).__init__()
outputs = outputs or 10
self.num_classes = outputs
# Initial convolution.
current_filters = plan[0][0]
self.conv = nn.Conv2d(3, current_filters, kernel_size=3, stride=1, padding=1, bias=False)
self.bn = nn.BatchNorm2d(current_filters)
self.relu = nn.ReLU(inplace=True)
# The subsequent blocks of the ResNet.
blocks = []
for segment_index, (filters, num_blocks) in enumerate(plan):
for block_index in range(num_blocks):
downsample = segment_index > 0 and block_index == 0
blocks.append(ResNetCIFAR.Block(current_filters, filters, downsample))
current_filters = filters
self.blocks = nn.Sequential(*blocks)
# Final fc layer. Size = number of filters in last segment.
self.fc = nn.Linear(plan[-1][0], outputs)
self.criterion = nn.CrossEntropyLoss()
for initializer in initializers:
initializer = Initializer(initializer)
self.apply(initializer.get_initializer())
def forward(self, x: torch.Tensor):
out = self.relu(self.bn(self.conv(x)))
out = self.blocks(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
@staticmethod
def is_valid_model_name(model_name: str):
valid_model_names = [f'resnet_{layers}' for layers in (20, 56)]
return (model_name in valid_model_names)
@staticmethod
def get_model_from_name(model_name: str, initializers: List[Initializer], outputs: int = 10):
"""The naming scheme for a ResNet is ``'resnet_D[_W]'``.
D is the model depth (e.g. ``'resnet_56'``) and W, if present, is the width of the first stage (default ``16``).
"""
if not ResNetCIFAR.is_valid_model_name(model_name):
raise ValueError('Invalid model name: {}'.format(model_name))
depth = int(model_name.split('_')[-1]) # for resnet56, depth 56, width 16
if len(model_name.split('_')) == 2:
width = 16
else:
width = int(model_name.split('_')[2])
if (depth - 2) % 3 != 0:
raise ValueError('Invalid ResNetCIFAR depth: {}'.format(depth))
num_blocks = (depth - 2) // 6
model_arch = {
56: [(width, num_blocks), (2 * width, num_blocks), (4 * width, num_blocks)],
20: [(width, num_blocks), (2 * width, num_blocks), (4 * width, num_blocks)],
}
return ResNetCIFAR(model_arch[depth], initializers, outputs)
# adapted from https://raw.githubusercontent.com/matthias-wright/cifar10-resnet/master/model.py
# under the MIT license
class ResNet9(nn.Module):
"""A 9-layer residual network, excluding BatchNorms and activation functions.
Based on the myrtle.ai `blog`_ and Deep Residual Learning for Image Recognition (`He et al, 2015`_).
Args:
num_classes (int, optional): The number of classes. Needed for classification tasks. Default: ``10``.
.. _blog: https://myrtle.ai/learn/how-to-train-your-resnet-4-architecture/
.. _He et al, 2015: https://arxiv.org/abs/1512.03385
"""
def __init__(self, num_classes: int = 10):
super().__init__()
self.body = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(num_features=64, momentum=0.9),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(num_features=128, momentum=0.9),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
BasicBlock(inplanes=128, planes=128, stride=1),
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(num_features=256, momentum=0.9),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(num_features=256, momentum=0.9),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
BasicBlock(inplanes=256, planes=256, stride=1),
)
self.fc = nn.Linear(in_features=256, out_features=num_classes, bias=True)
def forward(self, x):
out = self.body(x)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
| composer-dev | composer/models/resnet_cifar/resnets.py |
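A small worked example (illustrative, not from the repository) of how the depth in a ``'resnet_D'`` name turns into the per-stage block plan used by ``ResNetCIFAR``:

from composer.models.resnet_cifar.resnets import ResNetCIFAR

depth = 56
num_blocks = (depth - 2) // 6   # 9 blocks per stage for resnet_56
plan = [(16, num_blocks), (32, num_blocks), (64, num_blocks)]  # matches model_arch[56] with width 16

model = ResNetCIFAR.get_model_from_name('resnet_56', initializers=[], outputs=10)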
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The GPT-2 model family is set of transformer-based networks for autoregressive language modeling at various scales.
This family was originally proposed by OpenAI, and is trained on the OpenWebText dataset. It is useful for downstream
language generation tasks, such as summarization, translation, and dialog.
See the :doc:`Model Card </model_cards/GPT2>` for more details.
"""
from composer.models.gpt2.model import create_gpt2 as create_gpt2
__all__ = ['create_gpt2']
_metadata = {
'gpt2': {
'_task': 'Language Modeling',
'_dataset': 'OpenWebText',
'_name': 'GPT-2 52M',
'_quality': '30.88',
'_metric': 'Perplexity',
'_ttt': '02:44',
'_hparams': 'gpt2_52m.yaml'
},
'gpt2 -- TODO RENAME TO GPT2': {
'_task': 'Language Modeling',
'_dataset': 'OpenWebText',
'_name': 'GPT-2 83M',
'_quality': '26.57',
'_metric': 'Perplexity',
'_ttt': '04:52',
'_hparams': 'gpt2_83m.yaml'
},
'gpt2 --! TODO RENAME TO GPT2': {
'_task': 'Language Modeling',
'_dataset': 'OpenWebText',
'_name': 'GPT-2 125M',
'_quality': '24.04',
'_metric': 'Perplexity',
'_ttt': '08:25',
'_hparams': 'gpt2_125m.yaml'
}
}
| composer-dev | composer/models/gpt2/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""GPT-2 model based on `Hugging Face GPT-2 <https://huggingface.co/docs/transformers/master/en/model_doc/gpt2>`_.
Implemented as a wrapper around :class:`.HuggingFaceModel`.
"""
from __future__ import annotations
from typing import Optional
from composer.metrics.nlp import LanguageCrossEntropy, LanguagePerplexity
from composer.models.huggingface import HuggingFaceModel
from composer.utils.import_helpers import MissingConditionalImportError
__all__ = ['create_gpt2']
def create_gpt2(use_pretrained: Optional[bool] = False,
pretrained_model_name: Optional[str] = None,
model_config: Optional[dict] = None,
tokenizer_name: Optional[str] = None,
gradient_checkpointing: Optional[bool] = False):
"""Implements :class:`~composer.models.huggingface.HuggingFaceModel` to wrap `Hugging Face GPT-2 \
transformers <https://huggingface.co/docs/transformers/master/en/model_doc/gpt2#overview>`_. Logs training and
validation perplexity.
From `Language Models are Unsupervised Multitask Learners <https://d4mucfpksywv.cloudfront.net/better-language-models/language-models.pdf>`_ (Radford et al, 2019).
Args:
gradient_checkpointing (bool, optional): Use gradient checkpointing. Default: ``False``.
use_pretrained (bool, optional): Whether to initialize the model with the pretrained weights. Default: ``False``.
pretrained_model_name (str, optional): Name of the Hugging Face model/config to load. Default: ``'gpt2'``.
model_config (dict, optional): A dictionary providing a Hugging Face model configuration.
tokenizer_name (str, optional): Tokenizer name used to preprocess the dataset
and validate the model's inputs.
.. code-block::
{
"_name_or_path": "gpt2",
"activation_function": "gelu_new",
"architectures": ["GPT2LMHeadModel"],
"attn_pdrop": 0.1,
"bos_token_id": 50256,
"embd_pdrop": 0.1,
"eos_token_id": 50256,
"initializer_range": 0.02,
"layer_norm_epsilon": 1e-05,
"model_type": "gpt2",
"n_ctx": 1024,
"n_embd": 768,
"n_head": 12,
"n_inner": null,
"n_layer": 12,
"n_positions": 1024,
"reorder_and_upcast_attn": false,
"resid_pdrop": 0.1,
"scale_attn_by_inverse_layer_idx": false,
"scale_attn_weights": true,
"summary_activation": null,
"summary_first_dropout": 0.1,
"summary_proj_to_labels": true,
"summary_type": "cls_index",
"summary_use_proj": true,
"task_specific_params": {
"text-generation": {
"do_sample": true,
"max_length": 50 }
},
"transformers_version": "4.16.0",
"use_cache": true,
"vocab_size": 50257
}
To create a GPT-2 model for language modeling pretraining:
.. testcode::
from composer.models import create_gpt2
composer_model = create_gpt2()
"""
try:
import transformers
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='transformers') from e
if not model_config:
model_config = {}
if not pretrained_model_name:
pretrained_model_name = 'gpt2'
if use_pretrained:
assert transformers.AutoModelForCausalLM.from_pretrained is not None, 'AutoModelForCausalLM has from_pretrained method'
model = transformers.AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=pretrained_model_name,
**model_config)
else:
config = transformers.AutoConfig.from_pretrained(pretrained_model_name, **model_config)
assert transformers.AutoModelForCausalLM.from_config is not None, 'AutoModelForCausalLM has from_config method'
model = transformers.AutoModelForCausalLM.from_config(config)
if gradient_checkpointing:
model.gradient_checkpointing_enable() # type: ignore
# setup the tokenizer
if tokenizer_name:
tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer_name)
else:
tokenizer = None
return HuggingFaceModel(model=model,
tokenizer=tokenizer,
metrics=[LanguageCrossEntropy(), LanguagePerplexity()],
use_logits=True)
| composer-dev | composer/models/gpt2/model.py |
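Because ``model_config`` overrides are passed through to the Hugging Face config, smaller GPT-2 variants can be created by shrinking the architecture. A hedged sketch (it requires the optional ``transformers`` dependency and network access to fetch the base ``gpt2`` config; the override values are illustrative and do not correspond to a specific published model size):

from composer.models import create_gpt2

# Shrink the depth, heads, and embedding width of the base GPT-2 architecture.
small_gpt2 = create_gpt2(model_config={'n_layer': 6, 'n_head': 6, 'n_embd': 384})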
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""DeepLabV3 for image segmentation."""
from composer.models.deeplabv3.model import composer_deeplabv3 as composer_deeplabv3
__all__ = ['composer_deeplabv3']
| composer-dev | composer/models/deeplabv3/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""DeepLabV3 model extending :class:`.ComposerClassifier`."""
import functools
import textwrap
import warnings
from typing import Dict, Optional, Sequence
import torch
import torch.distributed as torch_dist
import torch.nn.functional as F
import torchvision
from packaging import version
from torchmetrics import MetricCollection
from torchvision.models import _utils, resnet
from composer.loss import DiceLoss, soft_cross_entropy
from composer.metrics import CrossEntropy, MIoU
from composer.models.initializers import Initializer
from composer.models.tasks import ComposerClassifier
from composer.utils import dist
__all__ = ['deeplabv3', 'composer_deeplabv3']
class SimpleSegmentationModel(torch.nn.Module):
def __init__(self, backbone, classifier):
super().__init__()
self.backbone = backbone
self.classifier = classifier
def forward(self, x):
input_shape = x.shape[-2:]
features = self.backbone(x)
logits = self.classifier(tuple(features.values()))
logits = F.interpolate(logits,
size=input_shape,
mode='bilinear',
align_corners=False,
recompute_scale_factor=False)
return logits
def deeplabv3(num_classes: int,
backbone_arch: str = 'resnet101',
backbone_weights: Optional[str] = None,
sync_bn: bool = True,
use_plus: bool = True,
initializers: Sequence[Initializer] = ()):
"""Helper function to build a mmsegmentation DeepLabV3 model.
Args:
num_classes (int): Number of classes in the segmentation task.
backbone_arch (str, optional): The architecture to use for the backbone. Must be either
[``'resnet50'``, ``'resnet101'``]. Default: ``'resnet101'``.
backbone_weights (str, optional): If specified, the PyTorch pre-trained weights to load for the backbone.
Currently, only ['IMAGENET1K_V1', 'IMAGENET1K_V2'] are supported. Default: ``None``.
sync_bn (bool, optional): If ``True``, replace all BatchNorm layers with SyncBatchNorm layers.
Default: ``True``.
use_plus (bool, optional): If ``True``, use DeepLabv3+ head instead of DeepLabv3. Default: ``True``.
initializers (Sequence[Initializer], optional): Initializers for the model. ``()`` for no initialization.
Default: ``()``.
Returns:
deeplabv3: A DeepLabV3 :class:`torch.nn.Module`.
Example:
.. code-block:: python
from composer.models.deeplabv3.deeplabv3 import deeplabv3
pytorch_model = deeplabv3(num_classes=150, backbone_arch='resnet101', backbone_weights=None)
"""
# check that the specified architecture is in the resnet module
if not hasattr(resnet, backbone_arch):
raise ValueError(f'backbone_arch must be part of the torchvision resnet module, got value: {backbone_arch}')
# change the model weight url if specified
if version.parse(torchvision.__version__) < version.parse('0.13.0'):
pretrained = False
if backbone_weights:
pretrained = True
if backbone_weights == 'IMAGENET1K_V1':
resnet.model_urls[backbone_arch] = 'https://download.pytorch.org/models/resnet101-63fe2227.pth'
elif backbone_weights == 'IMAGENET1K_V2':
resnet.model_urls[backbone_arch] = 'https://download.pytorch.org/models/resnet101-cd907fc2.pth'
else:
raise ValueError(
textwrap.dedent(f"""\
`backbone_weights` must be either "IMAGENET1K_V1" or "IMAGENET1K_V2"
if torchvision.__version__ < 0.13.0. `backbone_weights` was {backbone_weights}."""))
backbone = getattr(resnet, backbone_arch)(pretrained=pretrained,
replace_stride_with_dilation=[False, True, True])
else:
backbone = getattr(resnet, backbone_arch)(weights=backbone_weights,
replace_stride_with_dilation=[False, True, True])
# specify which layers to extract activations from
return_layers = {'layer1': 'layer1', 'layer4': 'layer4'} if use_plus else {'layer4': 'layer4'}
backbone = _utils.IntermediateLayerGetter(backbone, return_layers=return_layers)
try:
from mmseg.models import ASPPHead, DepthwiseSeparableASPPHead
except ImportError as e:
raise ImportError(
textwrap.dedent("""\
Either mmcv or mmsegmentation is not installed. To install mmcv, please run pip install mmcv-full==1.4.4 -f
https://download.openmmlab.com/mmcv/dist/{cu_version}/{torch_version}/index.html where {cu_version} and
{torch_version} refer to your CUDA and PyTorch versions, respectively. To install mmsegmentation, please
run pip install mmsegmentation==0.22.0 on command-line.""")) from e
world_size = dist.get_world_size()
if sync_bn and world_size == 1:
warnings.warn('sync_bn was true, but only one process is present for training. sync_bn will be ignored.')
norm_type = 'SyncBN' if sync_bn and world_size > 1 else 'BN'
norm_cfg = {'type': norm_type, 'requires_grad': True}
if use_plus:
# mmseg config:
# https://github.com/open-mmlab/mmsegmentation/blob/master/configs/_base_/models/deeplabv3plus_r50-d8.py
head = DepthwiseSeparableASPPHead(in_channels=2048,
in_index=-1,
channels=512,
dilations=(1, 12, 24, 36),
c1_in_channels=256,
c1_channels=48,
dropout_ratio=0.1,
num_classes=num_classes,
norm_cfg=norm_cfg,
align_corners=False)
else:
# mmseg config:
# https://github.com/open-mmlab/mmsegmentation/blob/master/configs/_base_/models/deeplabv3_r50-d8.py
head = ASPPHead(in_channels=2048,
in_index=-1,
channels=512,
dilations=(1, 12, 24, 36),
dropout_ratio=0.1,
num_classes=num_classes,
norm_cfg=norm_cfg,
align_corners=False)
model = SimpleSegmentationModel(backbone, head)
if initializers:
for initializer in initializers:
initializer_fn = Initializer(initializer).get_initializer()
# Only apply initialization to classifier head if pre-trained weights are used
if backbone_weights is None:
model.apply(initializer_fn)
else:
model.classifier.apply(initializer_fn)
if sync_bn and world_size > 1:
local_world_size = dist.get_local_world_size()
# List of ranks for each node, assumes that each node has the same number of ranks
num_nodes = world_size // local_world_size
process_group = None
if num_nodes > 1:
ranks_per_node = [
list(range(node * local_world_size, (node + 1) * local_world_size)) for node in range(num_nodes)
]
process_groups = [torch_dist.new_group(ranks) for ranks in ranks_per_node]
process_group = process_groups[dist.get_node_rank()]
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model, process_group=process_group)
return model
def composer_deeplabv3(num_classes: int,
backbone_arch: str = 'resnet101',
backbone_weights: Optional[str] = None,
sync_bn: bool = True,
use_plus: bool = True,
ignore_index: int = -1,
cross_entropy_weight: float = 1.0,
dice_weight: float = 0.0,
initializers: Sequence[Initializer] = ()):
"""Helper function to create a :class:`.ComposerClassifier` with a DeepLabv3(+) model. Logs
Mean Intersection over Union (MIoU) and Cross Entropy during training and validation.
From `Rethinking Atrous Convolution for Semantic Image Segmentation <https://arxiv.org/abs/1706.05587>`_
(Chen et al, 2017).
Args:
num_classes (int): Number of classes in the segmentation task.
backbone_arch (str, optional): The architecture to use for the backbone. Must be either
[``'resnet50'``, ``'resnet101'``]. Default: ``'resnet101'``.
backbone_weights (str, optional): If specified, the PyTorch pre-trained weights to load for the backbone.
Currently, only ['IMAGENET1K_V1', 'IMAGENET1K_V2'] are supported. Default: ``None``.
sync_bn (bool, optional): If ``True``, replace all BatchNorm layers with SyncBatchNorm layers.
Default: ``True``.
use_plus (bool, optional): If ``True``, use DeepLabv3+ head instead of DeepLabv3. Default: ``True``.
ignore_index (int): Class label to ignore when calculating the loss and other metrics. Default: ``-1``.
cross_entropy_weight (float): Weight to scale the cross entropy loss. Default: ``1.0``.
dice_weight (float): Weight to scale the dice loss. Default: ``0.0``.
initializers (Sequence[Initializer], optional): Initializers for the model. ``()`` for no initialization.
Default: ``()``.
Returns:
ComposerModel: instance of :class:`.ComposerClassifier` with a DeepLabv3(+) model.
Example:
.. code-block:: python
from composer.models import composer_deeplabv3
model = composer_deeplabv3(num_classes=150, backbone_arch='resnet101', backbone_weights=None)
"""
model = deeplabv3(backbone_arch=backbone_arch,
backbone_weights=backbone_weights,
use_plus=use_plus,
num_classes=num_classes,
sync_bn=sync_bn,
initializers=initializers)
train_metrics = MetricCollection(
[CrossEntropy(ignore_index=ignore_index),
MIoU(num_classes, ignore_index=ignore_index)])
val_metrics = MetricCollection(
[CrossEntropy(ignore_index=ignore_index),
MIoU(num_classes, ignore_index=ignore_index)])
ce_loss_fn = functools.partial(soft_cross_entropy, ignore_index=ignore_index)
dice_loss_fn = DiceLoss(softmax=True, batch=True, ignore_absent_classes=True)
def _combo_loss(output, target) -> Dict[str, torch.Tensor]:
loss = {'total': torch.zeros(1, device=output.device, dtype=output.dtype)}
if cross_entropy_weight:
loss['cross_entropy'] = ce_loss_fn(output, target)
loss['total'] += loss['cross_entropy'] * cross_entropy_weight
if dice_weight:
loss['dice'] = dice_loss_fn(output, target)
loss['total'] += loss['dice'] * dice_weight
return loss
composer_model = ComposerClassifier(module=model,
train_metrics=train_metrics,
val_metrics=val_metrics,
loss_fn=_combo_loss)
return composer_model
| composer-dev | composer/models/deeplabv3/model.py |
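The combined loss above weights each term independently, so the Dice term can be enabled without removing cross entropy. A hedged sketch (it requires the optional mmcv/mmsegmentation dependencies described in ``deeplabv3``; the loss weights are illustrative):

from composer.models import composer_deeplabv3

model = composer_deeplabv3(
    num_classes=150,
    backbone_arch='resnet50',
    backbone_weights=None,
    sync_bn=False,
    cross_entropy_weight=0.375,  # both terms are accumulated into loss['total']
    dice_weight=1.125,
)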
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Composer CLI."""
| composer-dev | composer/cli/__init__.py |
#!/usr/bin/env python3
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The Composer CLI launcher for distributed training."""
import contextlib
import datetime
import logging
import os
import signal
import subprocess
import sys
import tempfile
import time
import traceback
from argparse import ArgumentParser
from typing import Any, Dict, List
import psutil
import torch
import composer
from composer.utils import get_free_tcp_port
CLEANUP_TIMEOUT = datetime.timedelta(seconds=30)
log = logging.getLogger(__name__)
def _get_parser():
parser = ArgumentParser(description='Utility for launching distributed machine learning jobs.')
parser.add_argument('--version', action='version', version=f'MosaicML Composer {composer.__version__}')
required_args = parser.add_argument_group('required arguments')
parser.add_argument(
'-n',
'--nproc',
type=int,
help=('The number of processes to launch on this node. Overrides env var `LOCAL_WORLD_SIZE` if specified; '
'otherwise, defaults to `max(1, torch.cuda.device_count())`.'),
)
parser.add_argument(
'--stdout',
type=str,
default=None,
help=('Format string for a filename to dump the STDOUT from the non-local-rank-zero processes. '
'The local rank zero process will be piped through to STDOUT. The available format variables are: '
"'{rank}', '{local_rank}', '{world_size}', '{node_rank}', and '{local_world_size}'. If specified, "
"it is recommended to include '{rank}' or '{local_rank}' in the filename so each rank will write to its "
'own file. By default, the STDOUT of the non-local-rank-zero processes is discarded; instead, use the '
'FileLogger within Composer. This logger captures and saves the STDOUT of each process.'),
)
parser.add_argument(
'--stderr',
type=str,
default=None,
help=('Format string for a filename to dump the STDERR from the non-local-rank-zero processes. '
'The local rank zero process will be piped through to STDERR. The available format variables are: '
"'{rank}', '{local_rank}', '{world_size}', '{node_rank}', and '{local_world_size}'. If specified, "
"it is recommended to include '{rank}' or '{local_rank}' in the filename so each rank will write to its "
'own file. By default, the STDERR of the non-local-rank-zero processes is discarded; instead, use the '
'FileLogger within Composer. This logger captures and saves the STDERR of each process.'),
)
parser.add_argument('-v', '--verbose', action='store_true', help='If set, print verbose messages')
parser.add_argument(
'-m',
'--module_mode',
action='store_true',
help=('If set, run the training script as a module instead of as a script. '
'Cannot be used in conjunction with `command_mode`'),
)
parser.add_argument(
'-c',
'--command_mode',
action='store_true',
help=('If set, run the training script as a command (i.e. without `python`). '
'Cannot be used in conjunction with `module_mode`.'),
)
multinode_args = parser.add_argument_group(
'multi-node arguments',
description=('These arguments generally only need to be set when training in a multi-node '
'environment, i.e. when the world_size is bigger than nproc.'))
multinode_args.add_argument('--world_size',
type=int,
help=('The total number of processes to launch across all nodes. '
'Setting this to a value greater than nproc indicates a multi-node '
'environment. Overrides env var WORLD_SIZE. Defaults to nproc.'))
multinode_args.add_argument('--base_rank',
type=int,
help=('The rank of the lowest ranked process to launch on this node. '
'Specifying a base_rank B and an nproc N will spawn processes with '
'global ranks [B, B+1, ... B+N-1]. In a multi-node environment, '
'at least one of base_rank and node_rank must be specified. '
'If only one of base_rank and node_rank are provided, it is assumed '
'that all nodes have the same amount of processes, and that the two '
'values are related as node_rank * nproc = base_rank. If this is '
'not the case, both base_rank and node_rank must be provided. '
'Overrides env var BASE_RANK. Defaults to 0 in a single-node '
'environment.'))
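    # Worked example (illustrative): with nproc=8 on each of 2 nodes, node 0 launches global
    # ranks 0-7 (base_rank=0) and node 1 launches global ranks 8-15 (base_rank=8); i.e.,
    # base_rank = node_rank * nproc when every node runs the same number of processes.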
multinode_args.add_argument('--node_rank',
type=int,
help=('The rank of this node. See base_rank for information on when '
'this must be provided. Overrides env var NODE_RANK. Defaults to 0 '
'in a single-node environment.'))
multinode_args.add_argument('--master_addr',
type=str,
help=('The FQDN of the node hosting the C10d TCP store. For single-node '
'operation, this can generally be left as 127.0.0.1. Overrides env var '
'MASTER_ADDR. Defaults to 127.0.0.1 in a single-node environment.'))
multinode_args.add_argument('--master_port',
type=int,
help=('The port on the master hosting the C10d TCP store. If you are '
'running multiple trainers on a single node, this generally needs '
'to be unique for each one. Overrides env var MASTER_PORT. Defaults '
'to a random free port in a single-node environment.'))
required_args.add_argument('training_script',
type=str,
help=('The path to the training script used to initialize a single training '
'process. Should be followed by any command-line arguments the script '
'should be launched with.'))
required_args.add_argument('training_script_args',
nargs='...',
help='Any arguments for the training script, given in the expected order.')
return parser
def _parse_args():
parser = _get_parser()
args = parser.parse_args()
# Default values to env vars if they are not provided
if args.nproc is None:
if 'LOCAL_WORLD_SIZE' in os.environ:
args.nproc = int(os.environ['LOCAL_WORLD_SIZE'])
else:
args.nproc = torch.cuda.device_count()
if args.nproc == 0:
# This could happen if doing cpu-only training,
# which could cause torch.cuda.device_count() to return 0,
# and LOCAL_WORLD_SIZE (as set by MCLI) to be zero
args.nproc = 1
if args.nproc < 1:
raise ValueError('The nproc must be 1 or greater')
if args.world_size is None and 'WORLD_SIZE' in os.environ:
args.world_size = int(os.environ['WORLD_SIZE'])
if args.base_rank is None and 'BASE_RANK' in os.environ:
args.base_rank = int(os.environ['BASE_RANK'])
if args.node_rank is None and 'NODE_RANK' in os.environ:
args.node_rank = int(os.environ['NODE_RANK'])
if args.master_addr is None and 'MASTER_ADDR' in os.environ:
args.master_addr = os.environ['MASTER_ADDR']
if args.master_port is None and 'MASTER_PORT' in os.environ:
args.master_port = int(os.environ['MASTER_PORT'])
if args.world_size is None:
args.world_size = args.nproc
if args.world_size < args.nproc:
raise ValueError(f'world_size({args.world_size}) cannot be less than nproc({args.nproc})')
if args.world_size < 1:
raise ValueError('The world_size must be 1 or greater')
is_multinode = args.world_size > args.nproc
if is_multinode:
if args.base_rank is None and args.node_rank is None:
raise ValueError(f'In a multi-node environment, at least one of node_rank and base_rank must be provided.')
if args.node_rank is None:
if args.world_size % args.nproc != 0 or args.base_rank % args.nproc != 0:
raise ValueError('node_rank not provided, but unable to infer from base_rank since nodes appear to '
'have different amounts of processes. Please also specify node_rank.')
args.node_rank = args.base_rank // args.nproc
if args.base_rank is None:
if args.world_size % args.nproc != 0:
raise ValueError('base_rank not provided, but unable to infer from node_rank since nodes appear to '
'have different amounts of processes. Please also provide base_rank.')
args.base_rank = args.node_rank * args.nproc
if args.base_rank + args.nproc > args.world_size:
raise ValueError(f'Cannot initialize processes for node with base_rank({args.base_rank}) and '
f'nproc({args.nproc}) because this would mean creating a process with '
f'rank({args.base_rank + args.nproc - 1}), and all processes must have smaller rank than '
f'the world_size({args.world_size}).')
if args.master_addr is None:
raise ValueError('In a multi-node environment, master_addr is required.')
if args.master_port is None:
raise ValueError('In a multi-node environment, master_port is required.')
else:
if args.base_rank is not None and args.base_rank != 0:
raise ValueError(f'base_rank({args.base_rank}) != 0 is not valid in a single-node environment.')
args.base_rank = 0
if args.node_rank is not None and args.node_rank != 0:
raise ValueError(f'node_rank({args.node_rank}) != 0 is not valid in a single-node environment.')
args.node_rank = 0
if args.master_addr is None:
args.master_addr = '127.0.0.1'
if args.master_port is None:
args.master_port = get_free_tcp_port()
return args
@contextlib.contextmanager
def _patch_env(**environs: str):
"""Returns a context manager that patches ``os.environ`` with ``environs``.
The original ``os.environ`` values are restored at the end.
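    Example:
        A minimal sketch (``MY_VAR`` is a placeholder variable name):

        .. code-block::

            with _patch_env(MY_VAR='1'):
                assert os.environ['MY_VAR'] == '1'
            # On exit, MY_VAR is restored to its original value (or removed if it was unset).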
"""
# Adapted loosely from https://stackoverflow.com/a/34333710
# Capture the original environ values
original_environs = {k: os.environ.get(k) for k in environs}
# Patch the environment
for k, v in environs.items():
os.environ[k] = v
try:
# Run the context manager
yield
finally:
# Restore the original environ values
for k, v in original_environs.items():
if v is None:
del os.environ[k]
else:
os.environ[k] = v
def _launch_processes(
nproc: int,
world_size: int,
base_rank: int,
node_rank: int,
master_addr: str,
master_port: int,
module_mode: bool,
command_mode: bool,
training_script: str,
stdout_file_format: str,
stderr_file_format: str,
training_script_args: List[Any],
processes: Dict[int, subprocess.Popen],
):
log.info('Starting distributed environment on local node for global_rank(%s-%s)', base_rank, base_rank + nproc - 1)
log.info('Distributed KV store: tcp://%s:%s', master_addr, master_port)
for local_rank in range(nproc):
global_rank = base_rank + local_rank
if command_mode and module_mode:
raise ValueError('Either `command_mode` or `module_mode` should be set, but not both.')
cmd = []
if not command_mode:
cmd.append(sys.executable)
if module_mode:
cmd.append('-m')
cmd.append(training_script)
# Update the env with the distributed variables
with _patch_env(
RANK=str(global_rank),
WORLD_SIZE=str(world_size),
LOCAL_RANK=str(local_rank),
LOCAL_WORLD_SIZE=str(nproc),
NODE_RANK=str(node_rank),
MASTER_ADDR=master_addr,
MASTER_PORT=str(master_port),
PYTHONUNBUFFERED='1',
NCCL_ASYNC_ERROR_HANDLING='1',
):
# Populate the distributed variables in all launcher args
for arg in training_script_args:
cmd.append(os.path.expandvars(os.path.expanduser(arg)))
log.info('Launching process for local_rank(%s), global_rank(%s) with command(%s)', local_rank, global_rank,
cmd)
if local_rank == 0:
process = subprocess.Popen(
cmd,
text=True,
)
else:
def _get_file(format: str):
filename = format.format(
rank=global_rank,
world_size=world_size,
local_rank=local_rank,
local_world_size=nproc,
node_rank=node_rank,
)
return open(filename, 'x+')
stderr_file = _get_file(stderr_file_format)
stdout_file = _get_file(stdout_file_format)
process = subprocess.Popen(
cmd,
stdout=stdout_file,
stderr=stderr_file,
text=True,
)
process.stderr = stderr_file
process.stdout = stdout_file
processes[global_rank] = process
def _monitor_processes(processes: Dict[int, subprocess.Popen]):
try:
while True:
process_has_crashed = False
all_processes_finished = True
for global_rank, process in processes.items():
if process.poll() is None:
# the process is still running
all_processes_finished = False
continue
else:
# return code of 0 implies clean exit
if process.returncode != 0:
log.error(f'Rank {global_rank} crashed with exit code {process.returncode}.')
process_has_crashed = True
break
else:
# exited cleanly
log.info(f'Rank {global_rank} finished successfully.')
if process_has_crashed or all_processes_finished:
break
time.sleep(0.1)
except KeyboardInterrupt:
print('Ctrl-C received; terminating training processes.')
pass
def _print_process_exit_status(global_rank: int, process: subprocess.Popen):
if process.stdout is None:
output = None
else:
process.stdout.seek(0)
output = process.stdout.read()
if process.stderr is None:
stderr = None
else:
process.stderr.seek(0)
stderr = process.stderr.read()
exc = subprocess.CalledProcessError(
process.returncode,
cmd=process.args,
output=output,
stderr=stderr,
)
error_msg = [f'Global rank {global_rank} (PID {process.pid}) exited with code {process.returncode}']
if output is not None:
error_msg.extend([
f'----------Begin global rank {global_rank} STDOUT----------',
output,
f'----------End global rank {global_rank} STDOUT----------',
])
if stderr is not None:
error_msg.extend([
f'----------Begin global rank {global_rank} STDERR----------',
exc.stderr,
f'----------End global rank {global_rank} STDERR----------',
])
print('\n'.join(error_msg))
def _cleanup_processes(processes: Dict[int, subprocess.Popen]):
for global_rank, process in processes.items():
process.poll()
if process.returncode is None:
log.info('Killing global rank %s (PID %s) with SIGTERM', global_rank, process.pid)
# Assuming that child processes correctly handle SIGTERM to cleanup any children
try:
os.kill(process.pid, signal.SIGTERM)
except ProcessLookupError:
pass
current_time = datetime.datetime.now()
try:
print((f'Waiting up to {CLEANUP_TIMEOUT.seconds} seconds for all training processes to terminate. '
'Press Ctrl-C to exit immediately.'))
while datetime.datetime.now() - current_time < CLEANUP_TIMEOUT:
for process in processes.values():
process.poll()
if all(process.returncode is not None for process in processes.values()):
break
time.sleep(0.1)
except KeyboardInterrupt:
pass
for global_rank, process in processes.items():
process.poll()
if process.returncode is None:
log.warning('Failed to kill global rank %s (PID %s) with SIGTERM; terminating with SIGKILL instead',
global_rank, process.pid)
try:
proc = psutil.Process(process.pid)
except psutil.NoSuchProcess:
pass
else:
# If using SIGKILL, manually kill all child processes, since the main training process
# likely won't be able to intercept the signal and clean up its children.
for psutil_proc in [proc, *proc.children(recursive=True)]:
try:
os.kill(psutil_proc.pid, signal.SIGKILL)
except ProcessLookupError:
pass
for global_rank, process in processes.items():
process.poll()
if process.returncode is not None and process.returncode != 0:
if -process.returncode in (signal.SIGKILL, signal.SIGTERM):
# Negative return codes indicate the process was killed via a signal
# If the launcher script killed the training process (which would happen via SIGKILL or SIGTERM),
# then do not print the stack trace.
continue
# only print the processes that have actually crashed,
# not the ones that were killed
_print_process_exit_status(global_rank, process)
def _aggregate_process_returncode(processes: Dict[int, subprocess.Popen]) -> int:
for global_rank, process in processes.items():
process.poll()
if process.returncode is None:
log.error('Global rank %s (PID %s) has still not exited; return exit code 1.', global_rank, process.pid)
return 1
if process.returncode != 0:
log.error('Global rank %s (PID %s) exited with code %s', global_rank, process.pid, process.returncode)
return process.returncode
return 0
def main():
"""Entrypoint into the Composer CLI."""
args = _parse_args()
logging.basicConfig()
log.setLevel(logging.INFO if args.verbose else logging.WARN)
processes = {}
log_tmpdir = tempfile.TemporaryDirectory()
if args.stdout is None:
args.stdout = f'{log_tmpdir.name}/rank{{rank}}.stdout.txt'
if args.stderr is None:
args.stderr = f'{log_tmpdir.name}/rank{{rank}}.stderr.txt'
try:
_launch_processes(nproc=args.nproc,
world_size=args.world_size,
base_rank=args.base_rank,
node_rank=args.node_rank,
master_addr=args.master_addr,
master_port=args.master_port,
module_mode=args.module_mode,
command_mode=args.command_mode,
stdout_file_format=args.stdout,
stderr_file_format=args.stderr,
training_script=args.training_script,
training_script_args=args.training_script_args,
processes=processes)
_monitor_processes(processes)
except:
# Print the exception first, then kill the training processes, since killing
# may take up to CLEANUP_TIMEOUT seconds, and the user should know immediately
# what failed. No need to re-raise the exception, as `aggregate_process_returncode`
# will return an appropriate error code, which will cause the script to exit.
traceback.print_exc()
print('Killing training processes')
finally:
_cleanup_processes(processes)
log_tmpdir.cleanup()
return _aggregate_process_returncode(processes)
if __name__ == '__main__':
sys.exit(main())
| composer-dev | composer/cli/launcher.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Runs the Composer CLI."""
import sys
from composer.cli.launcher import main
sys.exit(main())
| composer-dev | composer/cli/__main__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Profiler Schedules."""
from typing import Callable
from composer.core.state import State
from composer.profiler.profiler_action import ProfilerAction
__all__ = ['cyclic_schedule']
def cyclic_schedule(
skip_first: int = 0,
wait: int = 0,
warmup: int = 1,
active: int = 4,
repeat: int = 1,
) -> Callable[[State], ProfilerAction]:
"""Profiler schedule function for a cyclic profiling window.
This function returns a schedule function that uses a cyclic profiling window. The resulting function can be
passed as the ``prof_schedule`` argument to the :class:`.Trainer`.
The cyclic window skips the first ``skip_first`` batches in every epoch. Then, it performs a cycle of
skipping ``wait`` batches, warming up for ``warmup`` batches, and recording ``active`` batches.
It repeats this cycle up to ``repeat`` times per epoch (or for the entire epoch, if ``repeat`` is 0).
This logic repeats every epoch.
Args:
skip_first (int, optional): Number of batches to skip profiling at epoch start. Defaults to ``0``.
wait (int, optional): For each profiling cycle, number of batches to skip at the beginning of the cycle.
Defaults to ``0``.
warmup (int, optional): For each profiling cycle, number of batches to be in the warmup state after skipping
``wait`` batches. Defaults to ``1``.
active (int, optional): For each profiling cycle, number of batches to record after warming up. Defaults to ``4``.
repeat (int, optional): Number of profiling cycles to perform per epoch. Set to ``0`` to record the entire epoch.
Defaults to ``1``.
Returns:
(State -> ProfilerAction): A ``prof_schedule`` for the :class:`.Trainer`.
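    Example:
        A minimal sketch (the argument values below are illustrative):

        .. code-block::

            from composer.profiler import cyclic_schedule

            # Within each epoch: skip the first 10 batches, then repeatedly wait 2 batches,
            # warm up for 1 batch, and record 3 batches, for at most 2 cycles per epoch.
            prof_schedule = cyclic_schedule(skip_first=10, wait=2, warmup=1, active=3, repeat=2)
            # ``prof_schedule`` can then be passed as the ``prof_schedule`` argument of the Trainer.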
"""
def schedule(state: State):
        # do wait, then warmup, then active; repeat this cycle up to `repeat` times per epoch
cycle_len = wait + warmup + active
batch_idx = int(state.timestamp.batch_in_epoch)
if batch_idx < skip_first:
return ProfilerAction.SKIP
if repeat != 0 and batch_idx >= cycle_len * repeat + skip_first:
# exhausted the repeat
return ProfilerAction.SKIP
position_in_cycle = (batch_idx - skip_first) % cycle_len
if position_in_cycle < wait:
return ProfilerAction.SKIP
if position_in_cycle < wait + warmup:
return ProfilerAction.WARMUP
is_last_batch_in_epoch = state.dataloader_len is not None and state.timestamp.batch_in_epoch == state.dataloader_len - 1
if position_in_cycle == cycle_len - 1 or is_last_batch_in_epoch:
return ProfilerAction.ACTIVE_AND_SAVE
return ProfilerAction.ACTIVE
return schedule
| composer-dev | composer/profiler/profiler_schedule.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Outputs profiling data in JSON trace format."""
from __future__ import annotations
import gzip
import json
import os
import pathlib
import queue
import tempfile
import textwrap
import time
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
from composer.loggers import Logger
from composer.profiler.json_trace_merger import merge_traces
from composer.profiler.profiler_action import ProfilerAction
from composer.profiler.trace_handler import TraceHandler
from composer.utils import (FORMAT_NAME_WITH_DIST_AND_TIME_TABLE, FORMAT_NAME_WITH_DIST_TABLE, dist,
ensure_folder_is_empty, format_name_with_dist, format_name_with_dist_and_time)
if TYPE_CHECKING:
from composer.core import State, Timestamp
__all__ = ['JSONTraceHandler']
class JSONTraceHandler(TraceHandler): # noqa: D101
__doc__ = f"""Records trace events in Chrome JSON trace format.
See `this document <https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview>`_
for more information.
Traces are output to ``output_directory``. Traces can be visualized using the Chrome Trace Viewer.
To view in a Google Chrome browser, navigate to ``chrome://tracing`` and load the JSON trace file.
Args:
folder (str, optional): Format string for the trace file folder. Defaults to ``'{{run_name}}/traces'``.
The following format variables are available:
{textwrap.indent(FORMAT_NAME_WITH_DIST_TABLE, prefix=' ')}
For example, if the ``run_name`` is ``'awesome_training_run'``, and the default ``folder`` of
``'{{run_name}}/traces'`` is used, traces will be stored in ``'awesome_training_run/traces'``.
filename (str, optional): A format string describing how to name trace files.
(default: ``'ep{{epoch}}-ba{{batch}}-rank{{rank}}.json'``)
At the end of each batch where :meth:`~composer.profiler.Profiler.get_action` returns
:attr:`~composer.profiler._profiler_action.ProfilerAction.ACTIVE_AND_SAVE`, trace files are saved
approximately to ``{{folder}}/{{filename.format(...)}}``.
The following format variables are available:
{textwrap.indent(FORMAT_NAME_WITH_DIST_AND_TIME_TABLE, prefix=' ')}
Consider the following scenario, where:
* The :attr:`~.State.run_name` is ``'awesome-training-run'``
* The default ``trace_folder='{{run_name}}/traces'`` is used.
* The default ``name='ep{{epoch}}-ba{{batch}}-rank{{rank}}.json'`` is used.
* The current epoch count is ``1``.
* The current batch count is ``42``.
Each rank (process) will save traces to::
awesome-training-run/traces/ep1-ba42-rank0.json
awesome-training-run/traces/ep1-ba42-rank1.json
awesome-training-run/traces/ep1-ba42-rank2.json
...
remote_file_name (str, optional): Format string for the trace file's remote name.
(default: ``'{{run_name}}/traces/ep{{epoch}}-ba{{batch}}-rank{{rank}}.json'``)
Whenever a trace file is saved, it is also uploaded as a remote file according to this format string.
The same format variables as for ``filename`` are available.
.. seealso:: :doc:`Uploading Files</trainer/file_uploading>` for notes for file uploading.
Leading slashes (``'/'``) will be stripped.
To disable uploading trace files, set this parameter to ``None``.
merged_trace_filename (str, optional): Format string for the merged trace filename.
            (default: ``'merged_trace.json'``)
Each rank writes a separate trace file at the end of each profiling cycle. However, when visualizing
traces, it is generally helpful to merge traces together into a single file. This allows the traces
            across all ranks to be shown in a single view.
The same format variables as for ``folder`` are available. The merged trace file is saved
approximately to ``{{folder}}/{{merged_trace_filename.format(...)}}`` on the local rank zero
process for each node.
If specified (the default), the local rank zero process merges together all traces files from that node,
across all profiling cycles, into a single trace file. The merged trace file is written to the filename
specified by this format string. There will be one merged trace file per node.
To disable merging, set this parameter to ``None``.
.. warning::
Trace merging blocks the training loop. When profiling live training runs, it is recommended to
disable trace merging by setting this parameter to ``None``. Instead, traces should be merged together
in a post-processing step. See :mod:`composer.profiler.json_trace_merger` for additional info.
merged_trace_remote_file_name (str, optional): Format string for the merged trace file's remote file name.
(default: ``'{{run_name}}/traces/merged_trace.json'``)
The same format variables as for ``folder`` are available.
This parameter has no effect if ``merged_trace_filename`` is None.
To disable uploading merged trace files, set this parameter to ``None``.
overwrite (bool, optional): Whether to overwrite existing traces. (default: ``False``)
If ``False``, the :meth:`trace_folder` (as determined by the ``trace_folder`` argument)
must be empty when training starts.
num_traces_to_keep (int, optional): The number of traces to keep locally. The oldest traces
are removed first. Set to ``-1`` to keep all traces locally. (default: ``-1``)
Traces will be removed after they have been uploaded. For example, when this handler
is used in conjunction with the :class:`.RemoteUploaderDownloader`, set this
parameter to ``0`` to immediately delete traces from the local disk after they have been uploaded to
the object store.
This parameter only controls how many traces are kept locally; traces are not deleted from
remote file systems.
Attributes:
saved_traces (List[Tuple[Timestamp, List[pathlib.Path]]]): The trace timestamps and filepaths.
This list contains tuples of the save timestamp and the trace filepaths.
        This list will have at most ``num_traces_to_keep`` entries. The latest trace
will be at the end.
The index of a filepath in each list corresponds to the global rank of the process that wrote that file.
Each filepath is valid only on the process's (rank's) node.
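    Example:
        A minimal sketch of attaching this handler to a :class:`.Profiler` (how the profiler is
        wired into the :class:`.Trainer` is described in the Trainer documentation; the argument
        values below are illustrative):

        .. code-block::

            from composer.profiler import JSONTraceHandler, Profiler, cyclic_schedule

            trace_handler = JSONTraceHandler(folder='{{run_name}}/traces', num_traces_to_keep=3)
            profiler = Profiler(
                schedule=cyclic_schedule(wait=0, warmup=1, active=4, repeat=1),
                trace_handlers=[trace_handler],
            )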
"""
def __init__(
self,
folder: str = '{run_name}/traces',
filename: str = 'ep{epoch}-ba{batch}-rank{rank}.json',
remote_file_name: Optional[str] = '{run_name}/traces/ep{epoch}-ba{batch}-rank{rank}.json',
merged_trace_filename: Optional[str] = 'merged_trace.json',
merged_trace_remote_file_name: Optional[str] = '{run_name}/traces/merged_trace.json',
*,
overwrite: bool = False,
num_traces_to_keep: int = -1,
):
self.folder = folder
self.overwrite = overwrite
self.filename = filename
self.remote_file_name = remote_file_name
self.merged_trace_filename = merged_trace_filename
self.merged_trace_remote_file_name = merged_trace_remote_file_name
self.saved_traces: List[Tuple[Timestamp, List[pathlib.Path]]] = []
self.num_traces_to_keep = num_traces_to_keep
self._queue: queue.Queue[str] = queue.Queue()
self._is_trace_active = False
self._save_at_batch_end = False
def init(self, state: State, logger: Logger) -> None:
del logger # unused
trace_folder = format_name_with_dist(self.folder, run_name=state.run_name)
os.makedirs(trace_folder, exist_ok=True)
if not self.overwrite:
ensure_folder_is_empty(trace_folder)
# Ensure all ranks checked that the folder is empty before proceeding
# remove any existing merged trace file
if self.merged_trace_filename is not None:
merged_trace_filename = os.path.join(
trace_folder,
format_name_with_dist(self.merged_trace_filename, state.run_name),
)
merged_trace_dirname = os.path.dirname(merged_trace_filename)
if merged_trace_dirname:
if os.path.exists(merged_trace_filename):
os.remove(merged_trace_filename)
dist.barrier()
def batch_start(self, state: State, logger: Logger) -> None:
        del logger  # unused
if state.profiler is None:
raise RuntimeError(('The Composer Profiler was not enabled, which is required to use the '
f'{type(self).__name__}. To enable, set the `prof_schedule` argument of the Trainer.'))
if state.profiler.schedule(state) != ProfilerAction.SKIP and not self._is_trace_active:
# Starting a new profiling cycle
wall_clock_ns = time.time_ns()
self._record_event(
name='process_name',
ph='M', # metadata
wall_clock_ns=wall_clock_ns,
tid=os.getpid(),
pid=dist.get_global_rank(),
args={'name': f'Rank {dist.get_global_rank()} training loop process'})
self._record_event(
name='thread_name',
ph='M', # metadata
wall_clock_ns=wall_clock_ns,
tid=os.getpid(),
pid=dist.get_global_rank(),
args={'name': f'Training Loop'})
self._record_event(
name='thread_sort_index',
ph='M', # metadata
wall_clock_ns=wall_clock_ns,
tid=os.getpid(),
pid=dist.get_global_rank(),
args={'sort_index': 0}) # training loop thread should be first
self._record_event(
name='global_rank',
ph='M', # metadata
wall_clock_ns=wall_clock_ns,
tid=os.getpid(),
pid=dist.get_global_rank(),
args={'value': dist.get_global_rank()})
self._record_event(
name='process_sort_index',
ph='M', # metadata
wall_clock_ns=wall_clock_ns,
tid=os.getpid(),
pid=dist.get_global_rank(),
args={'sort_index': dist.get_global_rank()}) # sort index for processes should be the global rank
# Synchronize the clocks
            # Each rank will record a timestamp at approximately the same real world time
clock_sync_a = time.time_ns()
            dist.barrier()  # synchronize all ranks
clock_sync_time_ns = time.time_ns()
dist.barrier() # another barrier to bound the error
clock_sync_b = time.time_ns()
clock_sync_error_bound = clock_sync_b - clock_sync_a
self._record_event(
name='clock_sync_timestamp_us',
ph='M', # metadata
wall_clock_ns=wall_clock_ns,
tid=os.getpid(),
pid=dist.get_global_rank(),
args={'value': clock_sync_time_ns // 1000})
self._record_event(
name='clock_sync_error_bound',
ph='M', # metadata
wall_clock_ns=wall_clock_ns,
tid=os.getpid(),
pid=dist.get_global_rank(),
args={'value': clock_sync_error_bound // 1000})
self._is_trace_active = True
if state.profiler.schedule(state) == ProfilerAction.ACTIVE_AND_SAVE:
self._save_at_batch_end = True
def batch_end(self, state: State, logger: Logger) -> None:
assert state.profiler is not None
timestamp = state.timestamp
trace_folder = format_name_with_dist(self.folder, run_name=state.run_name)
if self._save_at_batch_end:
# no longer active, but was previously active.
            # Empty the queue and save the trace file
trace_filename = os.path.join(
trace_folder,
format_name_with_dist_and_time(self.filename, state.run_name, timestamp),
)
trace_dirname = os.path.dirname(trace_filename)
if trace_dirname:
os.makedirs(trace_dirname, exist_ok=True)
with open(trace_filename, 'w+') as f:
is_first_line = True
f.write('[\n')
while True:
try:
s = self._queue.get_nowait()
except queue.Empty:
break
if not is_first_line:
s = ',\n' + s
is_first_line = False
f.write(s)
f.write('\n]\n')
if self.remote_file_name is not None:
remote_file_name = format_name_with_dist_and_time(self.remote_file_name, state.run_name, timestamp)
logger.upload_file(remote_file_name=remote_file_name,
file_path=trace_filename,
overwrite=self.overwrite)
# Gather the filenames
trace_files = [pathlib.Path(x) for x in dist.all_gather_object(trace_filename)]
self.saved_traces.append((timestamp, trace_files))
# Ensure that all traces have been saved.
dist.barrier()
if self.merged_trace_filename is not None and dist.get_local_rank() == 0:
# Merge together all traces from the node into one file
start_rank = dist.get_global_rank()
end_rank = dist.get_global_rank() + dist.get_local_world_size()
trace_files_to_merge = trace_files[start_rank:end_rank]
merged_trace_filename = os.path.join(
trace_folder,
format_name_with_dist(
self.merged_trace_filename,
state.run_name,
),
)
merged_trace_dirname = os.path.dirname(merged_trace_filename)
if merged_trace_dirname:
os.makedirs(merged_trace_dirname, exist_ok=True)
if os.path.exists(merged_trace_filename):
# Include the existing merged trace in the new trace
with tempfile.NamedTemporaryFile('x+', delete=False) as f:
merge_traces(f.name, merged_trace_filename, *trace_files_to_merge)
os.rename(f.name, merged_trace_filename)
else:
# Write the trace directly
merge_traces(merged_trace_filename, *trace_files_to_merge)
if self.merged_trace_remote_file_name is not None:
merged_trace_remote_file_name = format_name_with_dist(
self.merged_trace_remote_file_name,
state.run_name,
)
logger.upload_file(
remote_file_name=merged_trace_remote_file_name,
                        file_path=merged_trace_filename,
overwrite=True,
)
# delete old trace files
if self.num_traces_to_keep >= 0:
while len(self.saved_traces) > self.num_traces_to_keep:
timestamp, checkpoint_filepaths = self.saved_traces[0]
if dist.get_global_rank() < len(checkpoint_filepaths):
# Remove this rank's trace
os.remove(checkpoint_filepaths[dist.get_global_rank()])
del self.saved_traces[0]
self._is_trace_active = False
self._save_at_batch_end = False
def process_duration_event(
self,
name: str,
categories: Union[List[str], Tuple[str, ...]],
is_start: bool,
timestamp: Timestamp,
wall_clock_time_ns: int,
) -> None:
ph = 'B' if is_start else 'E'
args = {}
args['epoch'] = timestamp.epoch.value
args['batch'] = timestamp.batch.value
self._record_event(
name=name,
categories=','.join(categories),
ph=ph,
wall_clock_ns=wall_clock_time_ns,
pid=dist.get_global_rank(),
args=args,
tid=os.getpid(),
)
def process_instant_event(
self,
name: str,
categories: Union[List[str], Tuple[str, ...]],
timestamp: Timestamp,
wall_clock_time_ns: int,
) -> None:
args = {}
args['epoch'] = timestamp.epoch.value
args['batch'] = timestamp.batch.value
self._record_event(
name=name,
categories=','.join(categories),
ph='i',
wall_clock_ns=wall_clock_time_ns,
args=args,
pid=dist.get_global_rank(),
tid=os.getpid(),
            s='p',  # mark instant event at the process level
)
def process_counter_event(self, name: str, categories: Union[List[str], Tuple[str, ...]], timestamp: Timestamp,
wall_clock_time_ns: int, values: Dict[str, Union[int, float]]) -> None:
self._record_event(
name=name,
categories=','.join(categories),
ph='C', # counter event
wall_clock_ns=wall_clock_time_ns,
pid=dist.get_global_rank(),
tid=os.getpid(),
args=values,
)
def _record_event(self, name: str, ph: str, wall_clock_ns: int, pid: int, tid: int, categories: str = '', **kwargs):
"""Helper function to record an event in the trace.
Args:
name (str): Event name
            categories (str): Comma-separated string of event categories
ph (str): Event type. Should be one of the following
Duration Events: ``B`` (begin), ``E`` (end)
Complete Events: ``X``
Instant Events: ``i``
Counter Events: ``C``
Async Events: ``b`` (nestable start), ``n`` (nestable instant), ``e`` (nestable end)
Flow events: ``s`` (start), ``t`` (step), ``f`` (end)
Sample events: ``P``
Object Events ``N`` (created), ``O`` (snapshot), ``D`` (destroyed)
Metadata Events: ``M``
Memory Dump Events: ``V`` (global), ``v`` (process)
Mark Events: ``R``
Clock Sync Events ``c``
wall_clock_ns (int): Wall clock time, in nanoseconds.
tid (int): :meth:`threading.get_ident` value for the event
            pid (int): :meth:`os.getpid` value for the event
kwargs: Any extra info to record with the event, such as event specific fields.
"""
data = {
'name': name,
'cat': categories,
'ph': ph,
'ts': wall_clock_ns // 1000, # tracing clock timestamp, in microseconds
'pid': pid,
'tid': tid,
**kwargs,
}
entry = json.dumps(data, indent=None)
self._queue.put_nowait(entry)
def process_chrome_json_trace_file(self, filepath: pathlib.Path) -> None:
with (gzip.open(filepath, 'rt') if str(filepath).endswith('.gz') else open(filepath, 'r')) as f:
# It may be an incomplete trace file that is missing the closing ] bracket, as is permitted
# in the chrome json format spec
trace_data_str = f.read().strip()
if trace_data_str.startswith('[') and not trace_data_str.endswith(']'):
trace_data_str += ']'
trace_data = json.loads(trace_data_str)
if isinstance(trace_data, dict):
event_list = trace_data['traceEvents']
else:
event_list = trace_data
if not isinstance(event_list, list):
raise TypeError('A trace file should either be a dict or a list')
for entry in event_list:
entry['pid'] = dist.get_global_rank() # override the PID to the global rank
entry_s = json.dumps(entry, indent=None)
self._queue.put_nowait(entry_s)
| composer-dev | composer/profiler/json_trace_handler.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Profiler to collect :mod:`torch` performance metrics during training."""
from __future__ import annotations
import json
import os
import textwrap
from typing import TYPE_CHECKING, Optional, OrderedDict
import torch.profiler
from torch.profiler.profiler import ProfilerAction as TorchProfilerAction
from composer.core.callback import Callback
from composer.loggers import Logger
from composer.profiler.profiler_action import ProfilerAction
from composer.utils import (FORMAT_NAME_WITH_DIST_AND_TIME_TABLE, FORMAT_NAME_WITH_DIST_TABLE, dist,
ensure_folder_is_empty, format_name_with_dist, format_name_with_dist_and_time)
if TYPE_CHECKING:
from composer.core import State
__all__ = ['TorchProfiler']
class TorchProfiler(Callback): # noqa: D101
__doc__ = f"""Profile the execution using the :class:`PyTorch Profiler <torch.profiler.profile>`.
Profiling results are stored in TensorBoard format in the directory specified by ``folder``.
.. note::
The Composer :class:`.Trainer` automatically creates an instance of this
:class:`.TorchProfiler` callback whenever any of the PyTorch Profiler arguments
(``torch_prof_record_shapes``, ``torch_prof_profile_memory``, ``torch_prof_with_stack``, or
``torch_prof_with_flops``) are enabled.
When using the Composer :class:`.Trainer`, one does not need to directly create an
instance of this :class:`.TorchProfiler` callback.
To view profiling results, run::
        pip install tensorboard torch_tb_profiler
tensorboard --logdir path/to/torch/trace_folder
.. note::
See :doc:`profiler` for additional usage details on the :class:`torch.profiler.profile`.
.. note::
Enabling shape and stack tracing results in additional overhead.
When ``record_shapes=True`` is specified, the profiler will temporarily hold references to tensors which
may prevent certain optimizations that depend on the reference count and can introduce extra tensor copies.
Args:
folder (str, optional): Format string for the folder containing the Torch Profiler trace files.
Defaults to ``'{{run_name}}/torch_traces'``.
The following format variables are available:
{textwrap.indent(FORMAT_NAME_WITH_DIST_TABLE, prefix=' ')}
For example, if the ``run_name`` is ``'awesome_training_run'``, and the default ``folder`` of
``'{{run_name}}/torch_traces'`` is used, Torch Profiler traces will be stored in
``'awesome_training_run/torch_traces'``.
filename (str, optional): A format string describing how to name Torch Profiler trace files.
Defaults to ``'rank{{rank}}.{{batch}}.pt.trace.json'``.
At the end of each batch where :meth:`~composer.profiler.Profiler.get_action` returns
:attr:`~composer.profiler._profiler_action.ProfilerAction.ACTIVE_AND_SAVE`, trace files are saved
approximately to ``{{folder.format(...)}}/{{filename.format(...)}}``.
The following format variables are available:
{textwrap.indent(FORMAT_NAME_WITH_DIST_AND_TIME_TABLE, prefix=' ')}
Consider the following scenario, where:
* The :attr:`~.State.run_name` is ``'awesome-training-run'``.
* The default ``trace_folder='{{run_name}}/torch_traces'`` is used.
* The default ``name='rank{{rank}}.{{batch}}.pt.trace.json'`` is used.
* The current epoch count is ``1``.
* The current batch count is ``42``.
Each rank (process) will save traces to::
            awesome-training-run/torch_traces/rank0.42.pt.trace.json
            awesome-training-run/torch_traces/rank1.42.pt.trace.json
            awesome-training-run/torch_traces/rank2.42.pt.trace.json
...
remote_file_name (str, optional): Format string for a Torch Profiler trace file's remote file name.
Defaults to ``'{{run_name}}/torch_traces/rank{{rank}}.{{batch}}.pt.trace.json'``.
Whenever a trace file is saved, it is also uploaded as a file according to this format string.
The same format variables as for ``filename`` are available.
.. seealso:: :doc:`Uploading Files</trainer/file_uploading>` for notes for file uploading.
Leading slashes (``'/'``) will be stripped.
To disable uploading trace files, set this parameter to ``None``.
        overwrite (bool, optional): Whether to overwrite existing Torch Profiler traces. Defaults to False.
If False, then the trace folder as determined by ``folder`` must be empty.
use_gzip (bool, optional): Whether to use gzip for the trace. Defaults to False.
If True, ``'.gz'`` will be appended ``filename`` and ``remote_file_name``
(if they do not already end in ``'.gz'``).
record_shapes (bool, optional): Whether to record tensor shapes. Defaults to False.
profile_memory (bool, optional): Whether to profile memory. Defaults to True.
with_stack (bool, optional): Whether to record stack info. Defaults to False.
with_flops (bool, optional): Whether to estimate flops for operators. Defaults to True.
num_traces_to_keep (int, optional): The number of trace files to keep locally. Defaults to -1.
If set to -1, then all traces files are kept locally.
After a trace has been saved and uploaded, the oldest traces are removed until
``num_traces_to_keep`` traces remain. This parameter only controls how many traces are kept locally;
traces are not deleted from remote file systems.
It can be useful to set this parameter to ``0`` when using a remote file uploader such as the
:class:`.RemoteUploaderDownloader`. This combination will minimize local
disk usage by deleting trace files immediately after they have been uploaded to the object store.
Attributes:
saved_traces (List[Tuple[Timestamp, List[pathlib.Path]]]): The trace timestamps and filepaths.
This list contains tuples of the save timestamp and the trace filepaths.
This list will have at most ``num_traces_to_keep`` entries. The latest trace
will be at the end.
The index of a filepath in each list corresponds to the global rank of the process that wrote that file.
Each filepath is valid only on the process's (rank's) node.
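    Example:
        A minimal sketch of constructing this callback directly (normally the :class:`.Trainer`
        creates it automatically when the ``torch_prof_*`` arguments above are enabled; the
        argument values below are illustrative):

        .. code-block::

            from composer.profiler import TorchProfiler

            torch_profiler = TorchProfiler(
                folder='{{run_name}}/torch_traces',
                record_shapes=True,
                profile_memory=True,
            )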
"""
def __init__(
self,
folder: str = '{run_name}/torch_traces',
filename: str = 'rank{rank}.{batch}.pt.trace.json',
remote_file_name: Optional[str] = '{run_name}/torch_traces/rank{rank}.{batch}.pt.trace.json',
*,
overwrite: bool = False,
use_gzip: bool = False,
record_shapes: bool = False,
profile_memory: bool = True,
with_stack: bool = False,
with_flops: bool = True,
num_traces_to_keep: int = -1,
) -> None:
self.overwrite = overwrite
self.folder = folder
if use_gzip and not filename.endswith('.gz'):
filename += '.gz'
self.filename = filename
if use_gzip and remote_file_name is not None and not remote_file_name.endswith('.gz'):
remote_file_name += '.gz'
self.remote_file_name = remote_file_name
self.record_shapes = record_shapes
self.profile_memory = profile_memory
self.with_stack = with_stack
self.with_flops = with_flops
self.num_traces_to_keep = num_traces_to_keep
self.saved_traces = OrderedDict()
self.profiler: Optional[torch.profiler.profile] = None
def init(self, state: State, logger: Logger) -> None:
if state.profiler is None:
raise RuntimeError(('The Composer Profiler was not enabled, which is required to use the '
f'{type(self).__name__}. To enable, set the `prof_schedule` argument of the Trainer.'))
folder_name = format_name_with_dist(self.folder, state.run_name)
os.makedirs(folder_name, exist_ok=True)
if not self.overwrite:
ensure_folder_is_empty(folder_name)
dist.barrier()
def scheduler_fn(torch_profiler_step: int) -> TorchProfilerAction:
del torch_profiler_step # the torch profiler step is unused. Using the composer timestamp instead.
assert state.profiler is not None
composer_profiler_action = state.profiler.schedule(state)
if composer_profiler_action == ProfilerAction.ACTIVE_AND_SAVE:
return TorchProfilerAction.RECORD_AND_SAVE
if composer_profiler_action == ProfilerAction.ACTIVE:
return TorchProfilerAction.RECORD
if composer_profiler_action == ProfilerAction.WARMUP:
return TorchProfilerAction.WARMUP
assert composer_profiler_action == ProfilerAction.SKIP, f'unexpected action: {composer_profiler_action}'
return TorchProfilerAction.NONE
def handler_fn(prof: torch.profiler.profiler.profile):
assert state.profiler is not None
timestamp = state.timestamp
trace_file_name = os.path.join(
folder_name,
format_name_with_dist_and_time(self.filename, run_name=state.run_name, timestamp=timestamp),
)
trace_file_dirname = os.path.dirname(trace_file_name)
if trace_file_dirname:
os.makedirs(trace_file_dirname, exist_ok=True)
prof.export_chrome_trace(trace_file_name)
state.profiler.record_chrome_json_trace_file(trace_file_name)
if self.remote_file_name is not None:
trace_remote_file_name = format_name_with_dist_and_time(self.remote_file_name,
run_name=state.run_name,
timestamp=timestamp)
trace_remote_file_name = trace_remote_file_name.lstrip('/')
logger.upload_file(remote_file_name=trace_remote_file_name,
file_path=trace_file_name,
overwrite=self.overwrite)
if self.num_traces_to_keep >= 0:
while len(self.saved_traces) > self.num_traces_to_keep:
# self.saved_traces is an ordered dict, so the zeroth item will be the oldest checkpoint
timestamp, filepaths = next(iter(self.saved_traces.items()))
if dist.get_global_rank() < len(filepaths):
# Remove this rank's checkpoint
os.remove(filepaths[dist.get_global_rank()])
del self.saved_traces[timestamp]
self.profiler = torch.profiler.profile(
schedule=scheduler_fn,
on_trace_ready=handler_fn,
record_shapes=self.record_shapes,
profile_memory=self.profile_memory,
with_stack=self.with_stack,
with_flops=self.with_flops,
)
self.profiler.__enter__()
def batch_end(self, state: State, logger: Logger) -> None:
del state, logger # unused
assert self.profiler is not None
self.profiler.add_metadata_json('global_rank', json.dumps(dist.get_global_rank()))
self.profiler.step()
def batch_start(self, state: State, logger: Logger) -> None:
del state # unused
assert self.profiler is not None
logger.log_traces({'profiler/state': self.profiler.current_action.name})
def close(self, state: State, logger: Logger) -> None:
del state, logger # unused
if self.profiler is not None:
self.profiler.__exit__(None, None, None)
self.profiler = None
| composer-dev | composer/profiler/torch_profiler.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Performance profiling tools.
The profiler gathers performance metrics during a training run that can be used to diagnose bottlenecks and
facilitate model development.
The metrics gathered include:
* Duration of each :class:`.Event` during training
* Time taken by the data loader to return a batch
* Host metrics such as CPU, system memory, disk and network utilization over time
* Execution order, latency and attributes of PyTorch operators and GPU kernels (see :doc:`torch:profiler`)
See the :doc:`Profiling Guide </trainer/performance_tutorials/profiling>` for additional information.
"""
from composer.profiler.json_trace_handler import JSONTraceHandler
from composer.profiler.marker import Marker
from composer.profiler.profiler import Profiler
from composer.profiler.profiler_action import ProfilerAction
from composer.profiler.profiler_schedule import cyclic_schedule
from composer.profiler.system_profiler import SystemProfiler
from composer.profiler.torch_profiler import TorchProfiler
from composer.profiler.trace_handler import TraceHandler
# All needs to be defined properly for sphinx autosummary
__all__ = [
'Marker',
'Profiler',
'ProfilerAction',
'TraceHandler',
'cyclic_schedule',
'JSONTraceHandler',
'SystemProfiler',
'TorchProfiler',
]
| composer-dev | composer/profiler/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Action states for the :class:`Profiler` that define whether or not events are being recorded to the trace file."""
from composer.utils import StringEnum
__all__ = ['ProfilerAction']
class ProfilerAction(StringEnum):
"""Action states for the :class:`Profiler` that define whether or not events are being recorded to the trace file.
Attributes:
SKIP: Do not record new events to the trace. Any events started during ``ACTIVE`` or ``WARMUP`` will be recorded upon finish.
WARMUP: Record all events to the trace `except` those requiring a warmup period to initialize data structures (e.g., :doc:`profiler`).
ACTIVE: Record all events to the trace.
ACTIVE_AND_SAVE: Record all events and save the trace at the end of the batch.
"""
SKIP = 'skip'
WARMUP = 'warmup'
ACTIVE = 'active'
ACTIVE_AND_SAVE = 'active_and_save'
| composer-dev | composer/profiler/profiler_action.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Profiler Marker."""
from __future__ import annotations
import functools
import time
from types import TracebackType
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, Union
from composer.profiler.trace_handler import TraceHandler
if TYPE_CHECKING:
from composer.core import State, Timestamp
__all__ = ['Marker']
class Marker:
"""Profiler Marker.
Used by the :class:`.Engine` to measure the duration of :class:`.Event` during training.
.. note::
:class:`.Marker` should not be instantiated directly; instead use :meth:`.Profiler.marker`.
Markers can record the following types of events:
#. Duration: Records the start and stop time of an event of interest (:meth:`.Marker.start()`, :meth:`.Marker.finish()`).
#. Instant: Record time a particular event occurs, but not the full duration (:meth:`.Marker.instant()`).
#. Counter: The value of a variable at given time (:meth:`.Marker.counter()`).
A :class:`.Marker` can also be used as a context manager or decorator to record a duration:
#. Use a :class:`.Marker` with a context manager:
.. testsetup::
from composer.profiler import Profiler, cyclic_schedule
profiler = Profiler(schedule=cyclic_schedule(), trace_handlers=[])
profiler.bind_to_state(state)
.. doctest::
>>> def something_to_measure():
... print("something_to_measure")
>>> marker = profiler.marker("foo")
>>> with marker:
... something_to_measure()
something_to_measure
#. Use a :class:`.Marker` as a decorator:
.. testsetup::
from composer.profiler import Profiler, cyclic_schedule
profiler = Profiler(schedule=cyclic_schedule(), trace_handlers=[])
profiler.bind_to_state(state)
.. doctest::
>>> marker = profiler.marker("foo")
>>> @marker
... def something_to_measure():
... print("something_to_measure")
>>> something_to_measure()
something_to_measure
"""
def __init__(self, state: State, should_record: Callable[[State], bool], trace_handlers: Sequence[TraceHandler],
name: str, record_instant_on_start: bool, record_instant_on_finish: bool,
categories: Union[List[str], Tuple[str, ...]]) -> None:
self.state = state
self.trace_handlers = trace_handlers
self.name = name
self.categories = categories
self.record_instant_on_start = record_instant_on_start
self.record_instant_on_finish = record_instant_on_finish
self.should_record = should_record
self._started = False
self._recorded_start = False
def _record_duration_event(self, is_start: bool, wall_clock_time_ns: int, timestamp: Timestamp):
"""Record a duration event."""
for handler in self.trace_handlers:
handler.process_duration_event(
name=self.name,
categories=self.categories,
timestamp=timestamp,
is_start=is_start,
wall_clock_time_ns=wall_clock_time_ns,
)
def _record_instant_event(self, wall_clock_time_ns: int, timestamp: Timestamp):
"""Record an instant event."""
for handler in self.trace_handlers:
handler.process_instant_event(
name=self.name,
categories=self.categories,
timestamp=timestamp,
wall_clock_time_ns=wall_clock_time_ns,
)
def _record_counter_event(self, wall_clock_time_ns: int, timestamp: Timestamp,
values: Dict[str, Union[int, float]]) -> None:
"""Record a counter invent."""
for handler in self.trace_handlers:
handler.process_counter_event(
name=self.name,
categories=self.categories,
wall_clock_time_ns=wall_clock_time_ns,
timestamp=timestamp,
values=values,
)
def start(self) -> None:
"""Record the start of a duration event.
To record the duration of an event, invoke :meth:`.Marker.start` followed by :meth:`.Marker.finish`.
.. testsetup::
from composer.profiler import Profiler, cyclic_schedule
profiler = Profiler(schedule=cyclic_schedule(), trace_handlers=[])
profiler.bind_to_state(state)
.. doctest::
>>> def something_to_measure():
... print("something_to_measure")
>>> marker = profiler.marker("foo")
>>> marker.start()
>>> something_to_measure()
something_to_measure
>>> marker.finish()
"""
if self._started:
raise RuntimeError(
f'Attempted to start profiler event {self.name}; however, this marker is already started')
self._recorded_start = self.should_record(self.state)
if self._recorded_start:
wall_clock_time = time.time_ns()
self._record_duration_event(
is_start=True,
wall_clock_time_ns=wall_clock_time,
timestamp=self.state.timestamp,
)
if self.record_instant_on_start:
self._record_instant_event(
timestamp=self.state.timestamp,
wall_clock_time_ns=wall_clock_time,
)
self._started = True
def finish(self) -> None:
"""Record the end of a duration event.
See :meth:`.Marker.start()` for a usage example.
"""
if not self._started:
raise RuntimeError(
f'Attempted to finish profiler event {self.name}; however, this profiler event is not yet started')
wall_clock_time = time.time_ns()
self._record_duration_event(
is_start=False,
timestamp=self.state.timestamp,
wall_clock_time_ns=wall_clock_time,
)
if self.record_instant_on_finish:
self._record_instant_event(
wall_clock_time_ns=wall_clock_time,
timestamp=self.state.timestamp,
)
self._started = False
def instant(self) -> None:
"""Record an instant event.
To record an instant event:
.. testsetup::
from composer.profiler import Profiler, cyclic_schedule
profiler = Profiler(schedule=cyclic_schedule(), trace_handlers=[])
profiler.bind_to_state(state)
.. doctest::
>>> def something_to_measure():
... print("something_to_measure")
>>> marker = profiler.marker("instant")
>>> marker.instant()
>>> something_to_measure()
something_to_measure
"""
if self.should_record(self.state):
self._record_instant_event(
wall_clock_time_ns=time.time_ns(),
timestamp=self.state.timestamp,
)
def counter(self, values: Dict[str, Union[float, int]]) -> None:
"""Record a counter event.
To record a counter event:
.. testsetup::
from composer.profiler import Profiler, cyclic_schedule
profiler = Profiler(schedule=cyclic_schedule(), trace_handlers=[])
profiler.bind_to_state(state)
.. doctest::
>>> marker = profiler.marker("foo")
>>> counter_event = 5
>>> marker.counter({"counter_event": counter_event})
>>> counter_event = 10
>>> marker.counter({"counter_event": counter_event})
"""
if self.should_record(self.state):
self._record_counter_event(
wall_clock_time_ns=time.time_ns(),
values=values,
timestamp=self.state.timestamp,
)
def __enter__(self) -> Marker:
self.start()
return self
def __exit__(self, exc_type: Optional[Type[BaseException]], exc: Optional[BaseException],
traceback: Optional[TracebackType]) -> None:
del exc_type, exc, traceback # unused
self.finish()
def __call__(self, func: Optional[Callable[..., Any]] = None) -> Callable[..., Any]:
if func is None:
# for decorators of the style @Marker(),
# return self so it's equivalent to @Marker
return self
@functools.wraps(func)
def wrapped(*args: Any, **kwargs: Any):
with self:
func(*args, **kwargs)
return wrapped
| composer-dev | composer/profiler/marker.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Merge trace files together.
To run:
.. code-block::
python -m composer.profiler.json_trace_merger -o merged_trace_output.json path/to/input_file_1.json path/to/input_file_2.json ...
To view the traces, open a Google Chrome browser window, navigate to ``chrome://tracing`` and load the ``merged_trace_output.json``
to visualize the trace.
"""
import argparse
import json
import pathlib
from typing import Dict, List, Tuple, Union
__all__ = ['merge_traces']
def _load_trace(file: Union[str, pathlib.Path]) -> Union[Dict, List]:
with open(file, 'r') as f:
trace_str = f.read().strip()
if trace_str.startswith('['):
if trace_str.endswith('}'):
trace_str += ']'
if trace_str.endswith(','):
                trace_str = trace_str[:-1] + ']'
return json.loads(trace_str)
def _get_global_rank_from_file(file: Union[str, pathlib.Path]) -> int:
trace_json = _load_trace(file)
if isinstance(trace_json, list):
for event in trace_json:
if event['ph'] == 'M' and event['name'] == 'global_rank':
return event['args']['value']
else:
assert isinstance(trace_json, dict)
return trace_json['global_rank']
raise RuntimeError('global rank not found in file')
def _get_rank_to_clock_syncs(trace_files: Tuple[Union[str, pathlib.Path], ...]) -> Dict[int, int]:
rank_to_clock_sync: Dict[int, int] = {}
for filename in trace_files:
rank = _get_global_rank_from_file(filename)
trace_json = _load_trace(filename)
if isinstance(trace_json, list):
for event in trace_json:
if event['ph'] == 'M' and event['name'] == 'clock_sync_timestamp_us':
clock_sync = event['args']['value']
rank_to_clock_sync[rank] = clock_sync
break
else:
assert isinstance(trace_json, dict)
if trace_json.get('clock_sync_timestamp_us') is not None:
rank_to_clock_sync[rank] = trace_json['clock_sync_timestamp_us']
return rank_to_clock_sync
def merge_traces(output_file: Union[str, pathlib.Path], *trace_files: Union[str, pathlib.Path]):
"""Merge profiler output JSON trace files together.
This function will update the trace events such that:
- The ``pid`` will be set to the global rank.
- The ``ts`` is synchronized with that of the rank 0 process.
- The backward pass process appears below the forward process.
Args:
output_file (str | pathlib.Path): The file to write the merged trace to
trace_files (str | pathlib.Path): Variable number of trace files to merge together
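    Example:
        A minimal sketch (the file names below are illustrative; substitute the per-rank trace
        files produced by a profiled run):

        .. code-block::

            from composer.profiler.json_trace_merger import merge_traces

            merge_traces('merged_trace.json', 'ep1-ba42-rank0.json', 'ep1-ba42-rank1.json')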
"""
ranks_to_clock_sync = _get_rank_to_clock_syncs(trace_files)
rank_to_backwards_thread = {}
rank_to_seen_threads = {rank: set() for rank in ranks_to_clock_sync.keys()}
# Local rank zero will be the lowest global rank
# Use that as the base timestamp for clock syncing
lowest_rank = float('inf')
for k in ranks_to_clock_sync:
lowest_rank = min(k, lowest_rank)
assert isinstance(lowest_rank, int), 'there should be at least one rank'
rank_zero_clock_sync = ranks_to_clock_sync[lowest_rank]
with open(output_file, 'w+') as output_f:
is_first_line = True
output_f.write('[')
for trace_filename in trace_files:
rank = _get_global_rank_from_file(trace_filename)
clock_sync_diff = rank_zero_clock_sync - ranks_to_clock_sync[rank]
with open(trace_filename, 'r') as trace_f:
trace_data = json.load(trace_f)
if isinstance(trace_data, list):
trace_list = trace_data
else:
assert isinstance(trace_data, dict)
trace_list = trace_data['traceEvents']
for event in trace_list:
if 'pid' not in event:
# we need the pid to merge
continue
if 'tid' not in event:
continue
if 'PyTorch Profiler' in str(event['tid']):
# skip this line; it pollutes the UI
continue
if 'ts' in event:
event['ts'] = event['ts'] + clock_sync_diff
event['pid'] = rank
if event['tid'] not in rank_to_seen_threads[rank]:
# By default, make all threads display last
# The training loop thread later sets itself as thread 0
# and the backwards pass thread is set as thread 1
if not is_first_line:
output_f.write(',')
output_f.write('\n ')
json.dump(
{
'name': 'thread_sort_index',
'ph': 'M',
'pid': rank,
'tid': event['tid'],
'args': {
'sort_index': 99999,
}
}, output_f)
rank_to_seen_threads[rank].add(event['tid'])
is_first_line = False
if event['name'] == 'MulBackward0':
rank_to_backwards_thread[rank] = event['tid']
if not is_first_line:
output_f.write(',')
is_first_line = False
output_f.write(f'\n ')
json.dump(event, output_f)
for pid, tid in rank_to_backwards_thread.items():
output_f.write(',\n ')
json.dump({
'name': 'thread_sort_index',
'ph': 'M',
'pid': pid,
'tid': tid,
'args': {
'sort_index': 1
}
}, output_f)
output_f.write('\n]\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_files', type=str, nargs='+', help='Input files')
parser.add_argument('-o', '--output_file', help='Output File', required=True)
args = parser.parse_args()
output_file = args.output_file
input_files = args.input_files
merge_traces(output_file, *input_files)
| composer-dev | composer/profiler/json_trace_merger.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Profiler to record system level metrics."""
from __future__ import annotations
import threading
import time
from typing import TYPE_CHECKING, Dict, cast
import psutil
from composer.core import Callback
if TYPE_CHECKING:
from composer.core import State
from composer.loggers import Logger
from composer.profiler import Profiler
__all__ = ['SystemProfiler']
class SystemProfiler(Callback):
"""The SystemProfiler records system level metrics.
.. note::
The Composer :class:`.Trainer` automatically creates an instance of this
:class:`.SystemProfiler` callback whenever any of the System Profiler arguments (``sys_prof_cpu``,
``sys_prof_memory``, ``sys_prof_disk``, or ``sys_prof_net``) are enabled.
When using the Composer :class:`.Trainer`, one does not need to directly create an
instance of this :class:`.SystemProfiler` callback.
Args:
profile_cpu (bool): Whether to record cpu statistics (Default: ``True``)
profile_memory (bool): Whether to record memory statistics (Default: ``False``)
profile_disk (bool): Whether to record disk I/O statistics (Default: ``False``)
profile_net (bool): Whether to record network I/O statistics (Default: ``False``)
stats_thread_interval_seconds (float): Interval to record system-level stats, in seconds. (Default: every ``0.5`` seconds)
"""
def __init__(self,
profile_cpu: bool = True,
profile_memory: bool = False,
profile_disk: bool = False,
profile_net: bool = False,
stats_thread_interval_seconds: float = 0.5) -> None:
self.profile_cpu = profile_cpu
self.profile_disk = profile_disk
self.profile_memory = profile_memory
self.profile_net = profile_net
self.stats_thread_interval_seconds = stats_thread_interval_seconds
self.finished_event = threading.Event()
def init(self, state: State, logger: Logger):
del logger # unused
if state.profiler is None:
raise RuntimeError(('The Composer Profiler was not enabled, which is required to use the '
f'{type(self).__name__}. To enable, set the `prof_schedule` argument of the Trainer.'))
# Start the stats thread
self.finished_event.clear()
threading.Thread(target=self._stats_thread, daemon=True, args=[state.profiler]).start()
def close(self, state: State, logger: Logger) -> None:
self.finished_event.set()
def _stats_thread(self, profiler: Profiler):
        """Gathers requested system metrics at the :attr:`SystemProfiler.stats_thread_interval_seconds` interval."""
        from composer.callbacks import memory_monitor
psutil.disk_io_counters.cache_clear()
psutil.net_io_counters.cache_clear()
if self.profile_cpu:
psutil.cpu_percent() # spin it once to clear the default 0.0 value on the first call
while not self.finished_event.is_set():
if self.profile_cpu:
cpu_percent = psutil.cpu_percent()
profiler.marker(name='cpu', categories=['cpu']).counter({'cpu_percent': cpu_percent})
if self.profile_memory:
cuda_memory_stats = memory_monitor._get_memory_report()
for name, val in cuda_memory_stats.items():
profiler.marker(f'memory/cuda/{name}', categories=['memory']).counter({name: val})
swap_memory = psutil.swap_memory()
profiler.marker('memory/swap', categories=['memory']).counter({
                    'used_gb': swap_memory.used / 2**30,
                    'free_gb': swap_memory.free / 2**30
})
virtual_memory = psutil.virtual_memory()
profiler.marker('memory/virtual', categories=['memory']).counter({
                    'used_gb': virtual_memory.used / 2**30,
                    'available_gb': virtual_memory.available / 2**30
})
if self.profile_disk:
disk_io_counters = cast(Dict[str, psutil._common.sdiskio], psutil.disk_io_counters(perdisk=True))
for disk_name, disk_stats in disk_io_counters.items():
for field_name in ('read_count', 'write_count', 'read_bytes', 'write_bytes', 'read_time',
'write_time', 'busy_time'):
profiler.marker(f'disk/{disk_name}/{field_name}',
                                        categories=['disk']).counter({field_name: getattr(disk_stats, field_name)})
if self.profile_net:
net_io_counters = cast(Dict[str, psutil._common.snetio], psutil.net_io_counters(pernic=True))
for nic, nic_stats in net_io_counters.items():
profiler.marker(f'network/{nic}/kb_sent',
                                    categories=['net']).counter({'kb_sent': nic_stats.bytes_sent / 2**10})
profiler.marker(f'network/{nic}/kb_recv',
                                    categories=['net']).counter({'kb_recv': nic_stats.bytes_recv / 2**10})
time.sleep(self.stats_thread_interval_seconds)
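# --- Illustrative usage sketch (not part of the original module) ---
# With the Composer Trainer, a SystemProfiler is normally created for you by the
# corresponding Profiler arguments; a rough sketch, assuming a trace handler list
# is supplied elsewhere:
#
#   from composer.profiler import Profiler, cyclic_schedule
#
#   profiler = Profiler(
#       schedule=cyclic_schedule(wait=0, warmup=1, active=4, repeat=1),
#       trace_handlers=[],
#       sys_prof_cpu=True,      # -> SystemProfiler(profile_cpu=True)
#       sys_prof_memory=True,   # -> SystemProfiler(profile_memory=True)
#   )
#
# The callback then samples psutil counters every `stats_thread_interval_seconds`
# seconds on a daemon thread until `close()` sets `finished_event`.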
| composer-dev | composer/profiler/system_profiler.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Profiler Trace Handler."""
from __future__ import annotations
import abc
import pathlib
from typing import TYPE_CHECKING, Dict, List, Tuple, Union
from composer.core.callback import Callback
if TYPE_CHECKING:
from composer.core import Timestamp
__all__ = ['TraceHandler']
class TraceHandler(Callback, abc.ABC):
"""Base class for Composer Profiler trace handlers.
Subclasses should implement :meth:`process_duration_event`, :meth:`process_instant_event`,
:meth:`process_counter_event`, and :meth:`process_chrome_json_trace_file` to record trace events.
Since :class:`TraceHandler` subclasses :class:`.Callback`, a trace handler can run on any
:class:`.Event` (such as on :attr:`.Event.INIT` to open files or on :attr:`.Event.BATCH_END` to periodically dump
data to files) and use :meth:`.Callback.close` to perform any cleanup.
"""
def process_duration_event(
self,
name: str,
categories: Union[List[str], Tuple[str, ...]],
is_start: bool,
timestamp: Timestamp,
wall_clock_time_ns: int,
) -> None:
"""Invoked whenever there is a duration event to record.
This method is called twice for each duration event -- once with ``is_start = True``,
and then again with ``is_start = False``. Interleaving events are not permitted.
Specifically, for each event (identified by the ``name``), a call with ``is_start = True`` will be followed
by a call with ``is_start = False`` before another call with ``is_start = True``.
Args:
name (str): The name of the event.
categories (Union[List[str], Tuple[str, ...]]): The categories for the event.
is_start (bool): Whether the event is a start event or end event.
timestamp (Timestamp): Snapshot of the training time.
wall_clock_time_ns (int): The :py:func:`time.time_ns` corresponding to the event.
"""
del name, categories, is_start, timestamp, wall_clock_time_ns # unused
pass
def process_instant_event(
self,
name: str,
categories: Union[List[str], Tuple[str, ...]],
timestamp: Timestamp,
wall_clock_time_ns: int,
) -> None:
"""Invoked whenever there is an instant event to record.
Args:
name (str): The name of the event.
categories (List[str] | Tuple[str, ...]): The categories for the event.
timestamp (Timestamp): Snapshot of current training time.
wall_clock_time_ns (int): The :py:func:`time.time_ns` corresponding to the event.
"""
del name, categories, timestamp, wall_clock_time_ns # unused
pass
def process_counter_event(
self,
name: str,
categories: Union[List[str], Tuple[str, ...]],
timestamp: Timestamp,
wall_clock_time_ns: int,
values: Dict[str, Union[int, float]],
) -> None:
"""Invoked whenever there is an counter event to record.
Args:
name (str): The name of the event.
categories (List[str] | Tuple[str, ...]): The categories for the event.
timestamp (Timestamp): The timestamp.
wall_clock_time_ns (int): The :py:func:`time.time_ns` corresponding to the event.
values (Dict[str, int | float]): The values corresponding to this counter event.
"""
del name, categories, timestamp, wall_clock_time_ns, values # unused
pass
def process_chrome_json_trace_file(self, filepath: pathlib.Path) -> None:
"""Invoked when there are events in Chrome JSON format to record.
See `this document <https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview>`_
for more information.
Args:
filepath (pathlib.Path): The filepath to a Chrome JSON trace file.
"""
del filepath # unused
pass
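# --- Illustrative sketch (not part of the original module) ---
# A minimal custom handler: only duration events are overridden here, the other
# hooks keep the no-op defaults above. The class name is hypothetical; it would
# be passed to `Profiler(trace_handlers=[PrintingTraceHandler()])`.
#
#   class PrintingTraceHandler(TraceHandler):
#
#       def process_duration_event(self, name, categories, is_start, timestamp,
#                                  wall_clock_time_ns) -> None:
#           phase = 'start' if is_start else 'end'
#           print(f'[{wall_clock_time_ns}] {name} ({phase}) categories={list(categories)}')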
| composer-dev | composer/profiler/trace_handler.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Composer Profiler."""
from __future__ import annotations
import logging
import pathlib
from typing import TYPE_CHECKING, Callable, Dict, List, Sequence, Tuple, Union
from composer.profiler.marker import Marker
from composer.profiler.profiler_action import ProfilerAction
from composer.profiler.system_profiler import SystemProfiler
from composer.profiler.torch_profiler import TorchProfiler
from composer.profiler.trace_handler import TraceHandler
from composer.utils import ensure_tuple
if TYPE_CHECKING:
from composer.core import Callback, State
__all__ = ['Profiler']
log = logging.getLogger(__name__)
class Profiler:
"""Composer Profiler.
See the :doc:`Profiling Guide </trainer/performance_tutorials/profiling>` for additional information.
Args:
schedule ((State) -> ProfilerAction): The profiling scheduling function.
It takes the training state and returns a :class:`.ProfilerAction`.
For convenience, Composer includes a :meth:`~composer.profiler.cyclic_schedule.cyclic_schedule` helper.
.. testsetup::
from composer.profiler import Profiler, cyclic_schedule
original_profiler_init = Profiler.__init__
def new_profiler_init(self, dummy_ellipsis=None, **kwargs):
if 'trace_handlers' not in kwargs:
kwargs['trace_handlers'] = []
original_profiler_init(self, **kwargs)
Profiler.__init__ = new_profiler_init
.. testcode::
from composer.profiler import Profiler, cyclic_schedule
profiler = Profiler(
...,
schedule=cyclic_schedule(
skip_first=1,
wait=0,
warmup=1,
active=4,
repeat=1,
),
)
trace_handlers (TraceHandler | Sequence[TraceHandler]): Trace handlers which record and
save profiling data to traces.
sys_prof_cpu (bool, optional): Whether to record cpu statistics. (default: ``True``).
sys_prof_memory (bool, optional): Whether to record memory statistics. (default: ``False``).
sys_prof_disk (bool, optional): Whether to record disk statistics. (default: ``False``).
sys_prof_net (bool, optional): Whether to record network statistics. (default: ``False``).
sys_prof_stats_thread_interval_seconds (float, optional): Interval to record stats, in seconds.
(default: ``0.5``).
torch_prof_folder (str, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_filename (str, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_remote_file_name (str, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_overwrite (bool, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_use_gzip (bool, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_record_shapes (bool, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_profile_memory (bool, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_with_stack (bool, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_with_flops (bool, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
torch_prof_num_traces_to_keep (int, optional): See :class:`~composer.profiler.torch_profiler.TorchProfiler`.
"""
def __init__(
self,
schedule: Callable[[State], ProfilerAction],
trace_handlers: List[TraceHandler],
sys_prof_cpu: bool = True,
sys_prof_memory: bool = False,
sys_prof_disk: bool = False,
sys_prof_net: bool = False,
sys_prof_stats_thread_interval_seconds: float = 0.5,
torch_prof_folder: str = '{run_name}/torch_traces',
torch_prof_filename: str = 'rank{rank}.{batch}.pt.trace.json',
torch_prof_remote_file_name: str = '{run_name}/torch_traces/rank{rank}.{batch}.pt.trace.json',
torch_prof_overwrite: bool = False,
torch_prof_use_gzip: bool = False,
torch_prof_record_shapes: bool = False,
torch_prof_profile_memory: bool = True,
torch_prof_with_stack: bool = False,
torch_prof_with_flops: bool = True,
torch_prof_num_traces_to_keep: int = -1,
) -> None:
self._names_to_markers: Dict[str, Marker] = {}
self._trace_handlers = list(ensure_tuple(trace_handlers))
self.schedule = schedule
self.state = None
self._callbacks: List[Callback] = []
if sys_prof_cpu or sys_prof_memory or sys_prof_disk or sys_prof_net:
self._callbacks.append(
SystemProfiler(profile_cpu=sys_prof_cpu,
profile_memory=sys_prof_memory,
profile_disk=sys_prof_disk,
profile_net=sys_prof_net,
stats_thread_interval_seconds=sys_prof_stats_thread_interval_seconds))
if torch_prof_record_shapes or torch_prof_profile_memory or torch_prof_with_stack or torch_prof_with_flops:
self._callbacks.append(
TorchProfiler(filename=torch_prof_filename,
folder=torch_prof_folder,
remote_file_name=torch_prof_remote_file_name,
num_traces_to_keep=torch_prof_num_traces_to_keep,
overwrite=torch_prof_overwrite,
record_shapes=torch_prof_record_shapes,
profile_memory=torch_prof_profile_memory,
use_gzip=torch_prof_use_gzip,
with_stack=torch_prof_with_stack,
with_flops=torch_prof_with_flops))
def bind_to_state(
self,
state: State,
):
"""Bind the profiler to the ``state``.
.. note::
The :class:`.Trainer` automatically invokes this method.
Args:
state (State): The training state.
"""
self.state = state
self.state.callbacks.extend(self._callbacks)
self.state.callbacks.extend(self._trace_handlers)
@property
def trace_handlers(self):
"""Profiler trace handlers."""
return self._trace_handlers
@trace_handlers.setter
def trace_handlers(self, trace_handlers: Union[TraceHandler, Sequence[TraceHandler]]):
"""Profiler trace handlers."""
self._trace_handlers[:] = ensure_tuple(trace_handlers)
def record_chrome_json_trace_file(self, filepath: Union[str, pathlib.Path]):
"""Record trace events in Chrome JSON format in the trace handlers.
See `this document <https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview>`_
for more information about Chrome JSON format.
.. note::
For custom profiling, it is recommended to use :meth:`marker` instead of manually creating a Chrome JSON
            trace file. By default, the Composer Profiler automatically saves :class:`.Marker` events in Chrome
JSON format.
This method exists for external profilers that natively record events in Chrome JSON format (such as the
:class:`~composer.profiler.torch_profiler.TorchProfiler`). These profilers can use this method to route
their profiling traces to the Composer profiler :attr:`~trace_handlers` so events from both the Composer
Profiler and external profilers are recorded in the same trace file.
"""
for recorder in self.trace_handlers:
recorder.process_chrome_json_trace_file(pathlib.Path(filepath))
def marker(
self,
name: str,
actions: Sequence[ProfilerAction] = (ProfilerAction.WARMUP, ProfilerAction.ACTIVE,
ProfilerAction.ACTIVE_AND_SAVE),
record_instant_on_start: bool = False,
record_instant_on_finish: bool = False,
categories: Union[List[str], Tuple[str, ...]] = (),
) -> Marker:
"""Create and get an instance of a :class:`.Marker`.
If a :class:`.Marker` with the specified ``name`` does not already exist, it will be created.
Otherwise, the existing instance will be returned.
.. note::
:meth:`.Profiler.marker()` should be used to construct markers. :class:`.Marker` **should not** be
instantiated directly by the user.
For example:
.. testsetup:: composer.profiler.profiler.Profiler.marker
from composer.profiler import Profiler, cyclic_schedule
profiler = Profiler(schedule=cyclic_schedule(), trace_handlers=[])
profiler.bind_to_state(state)
state.profiler = profiler
.. doctest:: composer.profiler.profiler.Profiler.marker
>>> marker = profiler.marker("foo")
>>> marker
<composer.profiler.marker.Marker object at ...>
Please see :meth:`.Marker.start()` and :meth:`.Marker.finish()` for usage on creating markers to measure duration events,
:meth:`.Marker.instant()` for usage on creating markers to mark instant events and :meth:`.Marker.counter()` for usage on
creating markers for counting.
Args:
name (str): The name for the :class:`.Marker`.
actions (Sequence[ProfilerAction], optional): :class:`.ProfilerAction` states to record on.
Defaults to (:attr:`~.ProfilerAction.WARMUP`, :attr:`~.ProfilerAction.ACTIVE`,
:attr:`~.ProfilerAction.ACTIVE_AND_SAVE`).
record_instant_on_start (bool, optional): Whether to record an instant event whenever the marker is started.
Defaults to ``False``.
record_instant_on_finish (bool, optional): Whether to record an instant event whenever the marker is finished.
Defaults to ``False``.
            categories (Union[List[str], Tuple[str, ...]], optional): Categories for this marker. Defaults to ``()``.
Returns:
Marker: Marker instance.
"""
if self.state is None:
raise RuntimeError('Profiler.bind_to_state() must be invoked before the Profiler can be used.')
if name not in self._names_to_markers:
def should_record(state: State) -> bool:
return self.schedule(state) in actions
self._names_to_markers[name] = Marker(
state=self.state,
trace_handlers=self.trace_handlers,
name=name,
should_record=should_record,
record_instant_on_start=record_instant_on_start,
record_instant_on_finish=record_instant_on_finish,
categories=categories,
)
self._names_to_markers[name].categories = categories
return self._names_to_markers[name]
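# --- Illustrative usage sketch (not part of the original module) ---
# Putting the pieces together: build a Profiler, let the Trainer bind it to the
# training State, then time a custom region with a Marker. JSONTraceHandler and
# the schedule values are assumptions for illustration only.
#
#   from composer.profiler import JSONTraceHandler, cyclic_schedule
#
#   profiler = Profiler(
#       schedule=cyclic_schedule(wait=0, warmup=1, active=4, repeat=1),
#       trace_handlers=[JSONTraceHandler(folder='traces')],
#   )
#   # ...pass `profiler` to the Trainer, which calls `profiler.bind_to_state(state)`
#
#   with profiler.marker('my_custom_region', categories=['custom']):
#       run_expensive_step()   # hypothetical user function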
| composer-dev | composer/profiler/profiler.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Functional API for applying algorithms in your own training loop.
.. code-block:: python
from composer import functional as cf
from torchvision import models
model = models.resnet50()
# replace some layers with blurpool
cf.apply_blurpool(model)
# replace some layers with squeeze-excite
cf.apply_squeeze_excite(model, latent_channels=64, min_channels=128)
"""
from composer.algorithms.alibi import apply_alibi
from composer.algorithms.augmix import augmix_image
from composer.algorithms.blurpool import apply_blurpool
from composer.algorithms.channels_last import apply_channels_last
from composer.algorithms.colout import colout_batch
from composer.algorithms.cutmix import cutmix_batch
from composer.algorithms.cutout import cutout_batch
from composer.algorithms.ema import compute_ema
from composer.algorithms.factorize import apply_factorization
from composer.algorithms.fused_layernorm import apply_fused_layernorm
from composer.algorithms.gated_linear_units import apply_gated_linear_units
from composer.algorithms.ghost_batchnorm import apply_ghost_batchnorm
from composer.algorithms.gradient_clipping import apply_gradient_clipping
from composer.algorithms.gyro_dropout import apply_gyro_dropout
from composer.algorithms.label_smoothing import smooth_labels
from composer.algorithms.layer_freezing import freeze_layers
from composer.algorithms.low_precision_groupnorm import apply_low_precision_groupnorm
from composer.algorithms.low_precision_layernorm import apply_low_precision_layernorm
from composer.algorithms.mixup import mixup_batch
from composer.algorithms.progressive_resizing import resize_batch
from composer.algorithms.randaugment import randaugment_image
from composer.algorithms.selective_backprop import select_using_loss, should_selective_backprop
from composer.algorithms.seq_length_warmup import set_batch_sequence_length
from composer.algorithms.squeeze_excite import apply_squeeze_excite
from composer.algorithms.stochastic_depth import apply_stochastic_depth
from composer.algorithms.weight_standardization import apply_weight_standardization
# All must be manually defined so sphinx automodule will work properly
__all__ = [
'apply_alibi',
'augmix_image',
'apply_blurpool',
'apply_channels_last',
'colout_batch',
'compute_ema',
'cutmix_batch',
'cutout_batch',
'apply_factorization',
'apply_fused_layernorm',
'apply_gated_linear_units',
'apply_ghost_batchnorm',
'apply_gradient_clipping',
'apply_low_precision_layernorm',
'apply_low_precision_groupnorm',
'smooth_labels',
'freeze_layers',
'mixup_batch',
'resize_batch',
'randaugment_image',
'should_selective_backprop',
'select_using_loss',
'set_batch_sequence_length',
'apply_squeeze_excite',
'apply_stochastic_depth',
'apply_weight_standardization',
'apply_gyro_dropout',
]
| composer-dev | composer/functional/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helpers for running distributed data parallel training."""
import collections
import logging
import warnings
from contextlib import contextmanager, nullcontext
from typing import Any, Callable, ContextManager, Dict, Optional, Sequence, Union, cast
import torch
from packaging import version
from torch.nn.parallel import DistributedDataParallel
from torchmetrics import Metric, MetricCollection
from composer.core import Precision
from composer.core.state import State
from composer.trainer.meta_safe_apply import meta_safe_apply
from composer.utils import StringEnum, dist, ensure_tuple, using_torch_2_0
__all__ = ['DDPSyncStrategy', 'ddp_sync_context', 'prepare_ddp_module', 'prepare_fsdp_module']
log = logging.getLogger(__name__)
class DDPSyncStrategy(StringEnum):
"""How and when gradient synchronization should happen.
Attributes:
        SINGLE_AUTO_SYNC: The default behavior. Gradients are synchronized as they are
            computed, but only for the final microbatch of a batch. This is the most efficient
strategy, but can lead to errors when ``find_unused_parameters`` is set, since
it is possible different microbatches may use different sets of parameters,
leading to an incomplete sync.
MULTI_AUTO_SYNC: The default behavior when ``find_unused_parameters`` is set.
Gradients are synchronized as they are computed for all microbatches. This ensures
complete synchronization, but is less efficient than :attr:`SINGLE_AUTO_SYNC`. This
efficiency gap is usually small, as long as either DDP syncs are a small portion
of the trainer's overall runtime, or the number of microbatches per batch is
relatively small.
FORCED_SYNC: Gradients are manually synchronized only after all gradients have been
computed for the final microbatch of a batch. Like :attr:`MULTI_AUTO_SYNC`, this
strategy ensures complete gradient synchronization, but this tends to be slower than
:attr:`MULTI_AUTO_SYNC`. This is because ordinarily syncs can happen in parallel
with the ``loss.backward()`` computation, meaning syncs can be mostly complete by
the time that function finishes. However, in certain circumstances, syncs may take
a very long time to complete - if there are also a lot of microbatches per batch,
this strategy may be optimal.
"""
SINGLE_AUTO_SYNC = 'single_auto_sync'
MULTI_AUTO_SYNC = 'multi_auto_sync'
FORCED_SYNC = 'forced_sync'
@contextmanager
def ddp_sync_context(state: State, is_final_microbatch: bool, sync_strategy: Union[str, DDPSyncStrategy]):
"""A context manager for handling the :class:`DDPSyncStrategy`.
Args:
state (State): The state of the :class:`.Trainer`.
is_final_microbatch (bool): Whether or not the context is being used during the final
microbatch of the gradient accumulation steps.
sync_strategy (str | DDPSyncStrategy): The ddp sync strategy to use. If a string
is provided, the string must be one of the values in :class:`DDPSyncStrategy`.
"""
if not isinstance(state.model, DistributedDataParallel):
yield
return
assert state.optimizers is not None, 'optimizers have not been initialized'
sync_strategy = DDPSyncStrategy(sync_strategy)
no_sync_context = cast(Callable[[], ContextManager], state.model.no_sync)
auto_sync_context = nullcontext
if sync_strategy == DDPSyncStrategy.SINGLE_AUTO_SYNC:
context = auto_sync_context if is_final_microbatch else no_sync_context
with context():
yield
elif sync_strategy == DDPSyncStrategy.MULTI_AUTO_SYNC:
with auto_sync_context():
yield
elif sync_strategy == DDPSyncStrategy.FORCED_SYNC:
try:
with no_sync_context():
yield
finally:
if is_final_microbatch:
for optimizer in state.optimizers:
for group in optimizer.param_groups:
for p in group['params']:
if p.grad is not None:
dist.all_reduce(p.grad)
p.grad = p.grad / dist.get_world_size()
else:
raise ValueError('Unknown sync strategy', sync_strategy)
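# --- Illustrative usage sketch (not part of the original module) ---
# How a microbatch loop is expected to use ddp_sync_context: gradients only
# all-reduce when the chosen strategy allows it. `state`, `microbatches`, and
# `loss_fn` are placeholders for objects the caller owns.
#
#   for i, microbatch in enumerate(microbatches):
#       is_final = (i == len(microbatches) - 1)
#       with ddp_sync_context(state, is_final, DDPSyncStrategy.SINGLE_AUTO_SYNC):
#           loss = loss_fn(state.model(microbatch))
#           loss.backward()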
def prepare_ddp_module(module: torch.nn.Module, find_unused_parameters: bool) -> torch.nn.Module:
"""Wraps the module in a :class:`torch.nn.parallel.DistributedDataParallel` object if running distributed training.
Args:
module (torch.nn.Module): The module to wrap.
find_unused_parameters (bool): Whether or not to do a pass over the autograd graph
to find parameters to not expect gradients for. This is useful if there are some
parameters in the model that are not being trained.
"""
if dist.is_available() and dist.is_initialized():
if any((p.requires_grad for p in module.parameters())):
log.debug('Wrapping model with DistributedDataParallel')
ddp_model = DistributedDataParallel(module, find_unused_parameters=find_unused_parameters)
return ddp_model
return module
if dist.is_available():
raise RuntimeError('Please call dist.initialize_dist() before calling ddp.prepare_module()')
raise RuntimeError('When the world size is > 1, ``torch.distributed`` must be used. However, it is '
'not available in your installation of PyTorch. Please install or build PyTorch '
'with distributed support.')
def prepare_fsdp_module(model: torch.nn.Module, optimizers: Optional[Union[torch.optim.Optimizer,
Sequence[torch.optim.Optimizer]]],
fsdp_config: Dict[str, Any], precision: Precision) -> None:
"""Prepare a module (assumed ComposerModel) and optimizer for use with :class:`torch.distributed.fsdp.FullyShardedDataParallel`.
Args:
model (torch.nn.Module): The model to wrap.
optimizers (torch.optim.Optimizer | Sequence[torch.optim.Optimizer], optional): The optimizer for `model`, assumed to have a single param group := model.parameters().
fsdp_config (Dict[str, Any]): The FSDP config.
precision: (Precision): The precision being used by the Trainer, used to fill in defaults for FSDP `mixed_precision` settings.
"""
if version.parse(torch.__version__) < version.parse('1.13.0'):
raise RuntimeError('To use FSDP with Composer, you must use torch>=1.13.0.')
is_torch_2_0 = using_torch_2_0()
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (CheckpointImpl,
apply_activation_checkpointing,
checkpoint_wrapper)
from torch.distributed.fsdp import FullyShardedDataParallel
if not is_torch_2_0:
from torch.distributed.fsdp.flatten_params_wrapper import FlattenParamsWrapper
from composer.trainer.mosaic_fsdp import (MosaicFullyShardedDataParallel, backward_prefetch_map, get_cpu_offload,
get_mixed_precision, sharding_map)
if optimizers:
optimizers_tuple = ensure_tuple(optimizers)
if len(optimizers_tuple) != 1:
raise NotImplementedError(f'Only one optimizer is supported; found {len(optimizers_tuple)} optimizers')
# clearing optimizer param groups and state
# that will be recreated at the end of prepare_fsdp_module
optim = optimizers_tuple[0]
optim.param_groups.clear()
optim.state.clear()
sharding_map_key = fsdp_config.get('sharding_strategy', 'FULL_SHARD').upper()
sharding_strategy = sharding_map[sharding_map_key]
cpu_offload = get_cpu_offload(cpu_offload=fsdp_config.get('cpu_offload', False))
mixed_precision = fsdp_config.get('mixed_precision', 'DEFAULT')
keep_low_precision_grads = fsdp_config.get('keep_low_precision_grads', False)
mixed_precision, param_dtype, _, _ = get_mixed_precision(precision,
mixed_precision=mixed_precision,
keep_low_precision_grads=keep_low_precision_grads)
# Note: FSDP does support the use of torch.float32 with sharding.
# They just never expected a user to pass in torch.float32 into mixed_precision as a param_dtype.
# See: https://github.com/pytorch/pytorch/issues/90584
# The PR fixing this bug is merged into PyTorch, but it hasn't made its way into a release yet.
# Instead a user needs to pass in `None` as param_dtype to have the parameters as torch.float32.
# TODO: remove these checks when PyTorch has a release that includes the fix.
if sharding_map_key != 'NO_SHARD':
if (precision == Precision.AMP_FP16 and param_dtype not in [torch.float16, None] or
precision == Precision.AMP_BF16 and param_dtype not in [torch.bfloat16, None]):
raise ValueError(
f'FSDP in PyTorch 1.13 does not support precision `{precision}` with sharding strategy `{sharding_strategy}` '
f'and param_dtype `{param_dtype}.` Consider using one of the predefined mixed_precision strategies '
"(choose: `'FULL'`, `'DEFAULT'`, `'PURE'`)")
if param_dtype == torch.float32:
raise ValueError(
f'FSDP in PyTorch 1.13 does not support param_dtype `{param_dtype}` with sharding_strategy `{sharding_map_key}` '
f'Consider using `amp` or `bf16` for precision or setting param_dtype in mixed_precision to `None` '
f'with sharding strategy `{sharding_map_key}.`')
backward_prefetch = backward_prefetch_map[fsdp_config.get('backward_prefetch', 'BACKWARD_POST').upper()]
min_params = int(float(fsdp_config.get('min_params', 1e9)))
activation_checkpointing = fsdp_config.get('activation_checkpointing', False)
activation_cpu_offload = fsdp_config.get('activation_cpu_offload', False)
sync_module_states = fsdp_config.get('sync_module_states', False)
forward_prefetch = fsdp_config.get('forward_prefetch', False)
limit_all_gathers = fsdp_config.get('limit_all_gathers', False)
ignored_modules = fsdp_config.get('ignored_modules', None)
state_dict_type = fsdp_config.get('state_dict_type', 'full')
activation_checkpointing_reentrant = fsdp_config.get('activation_checkpointing_reentrant', True)
# We choose to not wrap the ComposerModel directly, but instead wrap any submodules like `ComposerModel.model`
# This makes it safer to call ComposerModel-specific functions like 'eval_forward' that
# may make calls to sharded submodules. If we only wrap the submodules, then any call that ComposerModel makes
    # to an FSDP-wrapped submodule's `forward()` function will be safe and all-gather the necessary weights before `forward()`.
for obj_name, obj in model.named_children():
if not isinstance(obj, (Metric, MetricCollection)):
# Skip wrapping submodules which are explicitly marked with no wrap
if hasattr(obj, '_fsdp_wrap') and not bool(obj._fsdp_wrap):
continue
def _param_init_fn(module: torch.nn.Module) -> None:
# A dictionary of all tied parameter pointers to module names
tied_pointers = {}
# Goes through all modules finding which weights have the same pointers
for name, mod in module.named_modules():
for attr in ['weight', 'bias']:
if hasattr(mod, attr):
ptr = id(getattr(mod, attr))
ptr_attr = (ptr, attr)
name_list = tied_pointers.get(ptr_attr, [])
name_list.append(name)
tied_pointers[ptr_attr] = name_list
# Creates a dictionary of module names that should be tied together
tied_mod_names = collections.defaultdict(list)
# Creates a set of modules we should not initialize
should_not_init_params = set()
for ptr_attr_type, mod_names in tied_pointers.items():
# No modules for this pointer are tied
if len(mod_names) == 1:
continue
_, attr_type = ptr_attr_type
first = next(mod_names.__iter__())
for elem in mod_names:
should_not_init_params.add('.'.join([elem, attr_type]))
tied_mod_names[(first, attr_type)].append(elem)
# Make sure at least one of the tied parameters is initialized
should_not_init_params.remove('.'.join([first, attr_type]))
meta_safe_apply(module,
lambda t: torch.empty_like(t, device=f'cuda:{torch.cuda.current_device()}'),
should_not_init_params,
module_name='')
if len(tied_mod_names) > 0:
warnings.warn(('The passed in model appears to have tied weights. In order to '
'support effective weight tying, the tied modules need to be '
'in the same FSDP module. If the weights are not properly tied '
'it can lead to loss spikes. We have tried our best to ensure '
'the tied weights are in the same FSDP module.'))
# Redoes weight tying
for name_attr, tied_names in tied_mod_names.items():
name, attr = name_attr
src_mod = module.get_submodule(name)
# We need to make sure the source and destination
# modules end up in the same FSDP module otherwise
# with sharding weight tying gets violated
src_mod._fsdp_wrap = False # type: ignore
src_params = getattr(src_mod, attr)
for tied_name in tied_names:
dest_mod = module.get_submodule(tied_name)
dest_mod._fsdp_wrap = False # type: ignore
setattr(dest_mod, attr, src_params)
if hasattr(obj, 'param_init_fn') and isinstance(obj.param_init_fn, Callable):
module.apply(obj.param_init_fn)
elif hasattr(module, 'reset_parameters') and isinstance(module.reset_parameters, Callable):
module.reset_parameters()
else:
raise ValueError(
f'Object `{obj_name}` does not have a ``param_init_fn`` or a ``reset_parameters`` function. '
'This leaves parameters without initialization. Please add a ``param_init_fn`` or ``reset_parameters`` '
f'to module `{obj_name}`.')
# Choose which modules to FSDP wrap according to the following priority:
# If module has attribute `module._fsdp_wrap = ...`, always respect it
# Otherwise wrap if root object `obj.fsdp_wrap_fn(module)` is true
            # Or if unwrapped params in module is greater than or equal to fsdp_config.min_params
def __auto_wrap_policy(module: torch.nn.Module, recurse: bool, nonwrapped_numel: int) -> bool:
if recurse:
return True
else:
if hasattr(module, '_fsdp_wrap'):
return bool(module._fsdp_wrap)
is_large = nonwrapped_numel >= min_params
if hasattr(obj, 'fsdp_wrap_fn') and isinstance(obj.fsdp_wrap_fn, Callable):
return obj.fsdp_wrap_fn(module) or is_large
else:
return is_large
if is_torch_2_0:
def _auto_wrap_policy_new(module: torch.nn.Module, recurse: bool, nonwrapped_numel: int) -> bool:
return __auto_wrap_policy(module, recurse, nonwrapped_numel)
_auto_wrap_policy = _auto_wrap_policy_new
else:
def _auto_wrap_policy_old(module: torch.nn.Module, recurse: bool, unwrapped_params: int) -> bool:
return __auto_wrap_policy(module, recurse, unwrapped_params)
_auto_wrap_policy = _auto_wrap_policy_old
fsdp_obj = MosaicFullyShardedDataParallel(
obj,
sharding_strategy=sharding_strategy,
auto_wrap_policy=_auto_wrap_policy,
cpu_offload=cpu_offload,
mixed_precision=mixed_precision,
backward_prefetch=backward_prefetch,
ignored_modules=ignored_modules,
param_init_fn=_param_init_fn,
device_id=torch.cuda.current_device(),
sync_module_states=sync_module_states,
forward_prefetch=forward_prefetch,
limit_all_gathers=limit_all_gathers,
)
# Activation Checkpointing
if activation_checkpointing or activation_cpu_offload:
if not activation_checkpointing_reentrant:
first_wrap_fn = lambda m: checkpoint_wrapper(m, checkpoint_impl=CheckpointImpl.NO_REENTRANT
) if activation_checkpointing else (lambda module:
module)
second_wrap_fn = (
lambda module: checkpoint_wrapper(
first_wrap_fn(module), # type: ignore reportGeneralTypeIssues
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
offload_to_cpu=True)) if activation_cpu_offload else first_wrap_fn
else:
first_wrap_fn = checkpoint_wrapper if activation_checkpointing else (lambda module: module)
second_wrap_fn = (
lambda module: checkpoint_wrapper(
first_wrap_fn(module), # type: ignore reportGeneralTypeIssues
offload_to_cpu=True)) if activation_cpu_offload else first_wrap_fn
# Choose which modules to activation checkpoint according to the following priority:
# If module has attribute `module._activation_checkpointing = ...`, always respect it
# Otherwise checkpoint if root object `obj.activation_checkpointing_fn(module)` is true
def _check_fn(module: torch.nn.Module) -> bool:
if not is_torch_2_0 and isinstance(module, FlattenParamsWrapper):
return False
if isinstance(module, FullyShardedDataParallel):
return False
if hasattr(module, '_activation_checkpointing'):
return bool(module._activation_checkpointing)
if hasattr(obj, 'activation_checkpointing_fn') and isinstance(obj.activation_checkpointing_fn,
Callable):
return obj.activation_checkpointing_fn(module)
return False
apply_activation_checkpointing(
fsdp_obj,
checkpoint_wrapper_fn=second_wrap_fn, # type: ignore
check_fn=_check_fn, # type: ignore
)
setattr(model, obj_name, fsdp_obj)
# Print FSDP wrapped model and FSDP config if `verbose=True`
if fsdp_config.get('verbose', False):
print(f'FSDP: Wrapped Model:')
print(model)
print(f'FSDP: Using sharding_strategy={sharding_strategy}')
print(f'FSDP: Using cpu_offload={cpu_offload}')
print(f'FSDP: Using mixed_precision={mixed_precision}')
print(f'FSDP: Using backward_prefetch={backward_prefetch}')
print(f'FSDP: Using min_params={min_params}')
print(f'FSDP: Using activation_checkpointing={activation_checkpointing}')
print(f'FSDP: Using activation_cpu_offload={activation_cpu_offload}')
print(f'FSDP: Using sync_module_states={sync_module_states}')
print(f'FSDP: Using forward_prefetch={forward_prefetch}')
print(f'FSDP: Using limit_all_gathers={limit_all_gathers}')
print(f'FSDP: Using state_dict_type={state_dict_type}')
# Rebuild optimizer now that parameters are sharded
if optimizers:
optimizers_tuple = ensure_tuple(optimizers)
optim = optimizers_tuple[0]
optim.param_groups.clear()
optim.add_param_group({'params': list(model.parameters())})
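# --- Illustrative configuration sketch (not part of the original module) ---
# A rough example of an `fsdp_config` dict accepted by prepare_fsdp_module and of
# the per-module attributes the wrap/checkpoint policies above respect. All keys
# shown are read above; the model attributes are hypothetical.
#
#   from composer.core import Precision
#
#   fsdp_config = {
#       'sharding_strategy': 'FULL_SHARD',
#       'mixed_precision': 'DEFAULT',
#       'backward_prefetch': 'BACKWARD_POST',
#       'min_params': 1e8,
#       'activation_checkpointing': True,
#       'verbose': True,
#   }
#
#   model.model.block0._fsdp_wrap = True                     # force FSDP wrapping
#   model.model.embedding._activation_checkpointing = False  # opt out of checkpointing
#
#   prepare_fsdp_module(model, optimizers=optimizer, fsdp_config=fsdp_config,
#                       precision=Precision.AMP_BF16)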
| composer-dev | composer/trainer/dist_strategy.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
# Released under BSD 3-Clause License,
# Copyright (c) Facebook, Inc. and its affiliates.
"""Updates FSDPs _auto_wrap to enable module_kwargs and custom process_group cache."""
import functools
import warnings
from typing import Any, Callable, Dict, Set, Tuple, Union, cast
import torch
import torch.nn as nn
from torch import distributed
from torch.distributed import ProcessGroup
from torch.distributed.fsdp import (BackwardPrefetch, CPUOffload, FullyShardedDataParallel, MixedPrecision,
ShardingStrategy)
from torch.distributed.fsdp._utils import _contains_batchnorm, _override_batchnorm_mixed_precision
from torch.distributed.fsdp.wrap import _or_policy, _wrap, _wrap_batchnorm_individually
from composer.core import Precision
from composer.utils import dist
__all__ = [
'sharding_map',
'backward_prefetch_map',
'get_torch_dtype',
'get_mixed_precision',
'get_cpu_offload',
'get_process_group',
'MosaicFullyShardedDataParallel',
]
sharding_map = {
'NO_SHARD': ShardingStrategy.NO_SHARD,
'SHARD_GRAD_OP': ShardingStrategy.SHARD_GRAD_OP,
'FULL_SHARD': ShardingStrategy.FULL_SHARD,
}
backward_prefetch_map = {
'NONE': None,
'BACKWARD_PRE': BackwardPrefetch.BACKWARD_PRE,
'BACKWARD_POST': BackwardPrefetch.BACKWARD_POST,
}
def get_torch_dtype(dtype: Union[Precision, str]):
"""Convert common string representations of dtypes to torch dtypes."""
dtype = dtype.value if isinstance(dtype, Precision) else dtype
if dtype in ['float32', 'torch.float32', 'fp32']:
return torch.float32
elif dtype in ['float16', 'torch.float16', 'half', 'fp16', 'amp', 'amp_fp16']:
return torch.float16
elif dtype in ['bfloat16', 'bfloat', 'torch.bfloat16', 'bf16', 'amp_bf16']:
return torch.bfloat16
elif dtype in ['float8', 'torch.float8', 'fp8', 'amp_fp8']:
if hasattr(torch, 'float8'):
raise NotImplementedError('Torch has enabled float8. This should be updated to `return torch.float8`')
else:
warnings.warn('We use torch.bfloat16 by default for amp_fp8 as there is no fp8 datatype in PyTorch yet.')
return torch.bfloat16
else:
raise ValueError(f'Not sure how to convert dtype={dtype} to a torch dtype.')
def get_mixed_precision(precision, mixed_precision='DEFAULT', keep_low_precision_grads=False):
"""Helper function for configuring mixed_precision."""
param_dtype = None
reduce_dtype = None
buffer_dtype = None
if isinstance(mixed_precision, dict):
param_dtype = mixed_precision.get('param_dtype', None)
if param_dtype is not None:
param_dtype = get_torch_dtype(param_dtype)
reduce_dtype = mixed_precision.get('reduce_dtype', None)
if reduce_dtype is not None:
reduce_dtype = get_torch_dtype(reduce_dtype)
buffer_dtype = mixed_precision.get('buffer_dtype', None)
if buffer_dtype is not None:
buffer_dtype = get_torch_dtype(buffer_dtype)
elif isinstance(mixed_precision, str):
mixed_precision = mixed_precision.upper()
if mixed_precision == 'FULL':
pass
elif mixed_precision == 'DEFAULT':
param_dtype = get_torch_dtype(precision)
buffer_dtype = get_torch_dtype(precision)
elif mixed_precision == 'PURE':
param_dtype = get_torch_dtype(precision)
reduce_dtype = get_torch_dtype(precision)
buffer_dtype = get_torch_dtype(precision)
else:
raise ValueError(f'Unable to interpret mixed_precision={mixed_precision}')
else:
raise ValueError(f'Unable to interpret mixed_precision={mixed_precision}')
mixed_precision = MixedPrecision(
param_dtype=param_dtype,
reduce_dtype=reduce_dtype,
buffer_dtype=buffer_dtype,
keep_low_precision_grads=keep_low_precision_grads,
)
return mixed_precision, param_dtype, reduce_dtype, buffer_dtype
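# --- Illustrative example (not part of the original module) ---
# What the string presets above resolve to when precision='amp_bf16':
#
#   'FULL'    -> param_dtype=None,           reduce_dtype=None,           buffer_dtype=None
#   'DEFAULT' -> param_dtype=torch.bfloat16, reduce_dtype=None,           buffer_dtype=torch.bfloat16
#   'PURE'    -> param_dtype=torch.bfloat16, reduce_dtype=torch.bfloat16, buffer_dtype=torch.bfloat16
#
# A dict such as {'param_dtype': 'bf16', 'reduce_dtype': 'fp32'} may also be passed
# for finer-grained control; each value goes through get_torch_dtype above.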
def get_cpu_offload(cpu_offload=False):
"""Helper fn for configuring cpu_offload."""
cpu_offload = CPUOffload(offload_params=True) if cpu_offload else None
if cpu_offload is not None:
raise ValueError('FSDP CPU Offload not supported yet.')
return cpu_offload
def get_process_group(pg, process_group_cache=None):
"""Helper function for configuring and/or retrieving process groups."""
    warnings.warn('Instantiating FSDP with custom process groups is an experimental feature.')
    # Return regular process_groups as is, no caching
if pg is None or isinstance(pg, ProcessGroup):
return pg
world_size = dist.get_world_size()
local_world_size = dist.get_local_world_size()
# Handle special str process_group cases
if pg == 'self':
pg = 'set1'
warnings.warn(f"Converting process_group='self' to process_group='{pg}'")
elif pg == 'node':
pg = f'set{local_world_size}'
warnings.warn(f"Converting process_group='node' to process_group='{pg}'")
elif pg == 'local_rank_across_nodes':
pg = f'mod{local_world_size}'
warnings.warn(f"Converting process_group='local_rank_across_nodes' to process_group='{pg}'")
# Handle str and Union[List[int], Tuple[int]] process_group cases
if isinstance(pg, str) and pg.startswith('set'):
k = int(pg.strip('set'))
world_size = dist.get_world_size()
if world_size % k != 0:
raise RuntimeError(f'{world_size} must be divisible by set size ({k})')
start = dist.get_global_rank() // k * k
ranks = tuple(range(start, start + k))
elif isinstance(pg, str) and pg.startswith('mod'):
k = int(pg.strip('mod'))
world_size = dist.get_world_size()
if world_size % k != 0:
raise RuntimeError(f'{world_size} must be divisible by mod ({k})')
ranks = tuple(range(dist.get_global_rank() % k, world_size, k))
elif isinstance(pg, (list, tuple)):
ranks = tuple(pg)
else:
raise ValueError(f'Unsure how to setup process_group={pg}')
if process_group_cache is not None and ranks in process_group_cache:
warnings.warn(
            f'On rank={dist.get_global_rank()} using cached process group with {ranks=}. ' +\
            'If the intention was to use a new process group, a new process group can be instantiated and passed ' +\
            "in as an argument (`'process_group': newly_instantiated_process_group_object,`)"
)
return process_group_cache[ranks]
warnings.warn(
f'Composer is instantiating custom process groups with {ranks=} (on rank={dist.get_global_rank()}). ' +\
'This is an experimental feature.'
)
ranks_per_subgroup_list = list(set(dist.all_gather_object(ranks)))
current_group, _subgroups = distributed.distributed_c10d.new_subgroups_by_enumeration(ranks_per_subgroup_list)
if process_group_cache is not None:
process_group_cache[ranks] = current_group
return current_group
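# --- Illustrative example (not part of the original module) ---
# How the 'setK' / 'modK' shorthands above resolve on a hypothetical 8-GPU run
# (global ranks 0..7), evaluated on the rank-5 process:
#
#   'set4' -> contiguous blocks of 4: rank 5 joins ranks (4, 5, 6, 7)
#   'mod4' -> grouped by rank % 4:    rank 5 joins ranks (1, 5)
#   'self' -> rewritten to 'set1', i.e. a group containing only rank 5
#   'node' -> rewritten to f'set{local_world_size}'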
def _custom_recursive_wrap(module: nn.Module,
auto_wrap_policy: Callable,
wrapper_cls: Callable,
ignored_modules: Set[nn.Module],
ignored_params: Set[nn.Parameter],
process_group_cache: Dict[Tuple[int], Any],
only_wrap_children: bool = False,
**kwargs: Any) -> Tuple[nn.Module, int]:
"""Updates FSDPs _recursive_wrap to enable module_kwargs and custom process_group cache.
modified version of
https://github.com/pytorch/pytorch/blob/d922c29a22e4bf0fba49526f7536395eb8cd66f4/torch/distributed/fsdp/wrap.py#L353
which recursively wraps modules as FSDP modules for parameter sharding.
    This modification enables the user to pass custom FSDP arguments for every wrapped module.
The added process_group_cache enables different FSDP modules to, when appropriate, use the
same process group instead of instantiating a new process group.
Automatically wrap child modules of *module* that meet the given
criteria with :func:`auto_wrap`. Does not rely on _ConfigAutoWrap.
Args:
module (nn.Module):
module to recursively wrap
auto_wrap_policy (Callable):
A callable specifying a policy to recursively wrap layers with FSDP.
ignored_modules (Set[torch.nn.Module]): Modules to ignore when
wrapping.
ignored_params (Set[torch.nn.Parameter]): Parameters to ignore when
wrapping; these should be the parameters contained in the modules
in ``ignored_modules``.
process_group_cache (Dict[Tuple[int], Any]): a cache of process_group to
use instead of potentially instantiating a new process_group
Returns:
(nn.Module, int):
            Wrapped module and the number of parameters wrapped recursively.
"""
assert auto_wrap_policy is not None, 'Must specify auto_wrap_policy.'
assert wrapper_cls is not None, 'Must specify wrapper_cls'
# Make sure no child is already wrapped.
for _, child in module.named_modules():
if child in ignored_modules:
continue
try:
assert not isinstance(child, cast(type, wrapper_cls))
except TypeError:
# wrapper_cls is a function as opposed to a class type, just bypass above check.
pass
# We count all params, assuming none of them are already wrapped.
num_params = sum(p.numel() for p in module.parameters() if p not in ignored_params)
assert auto_wrap_policy is not None
if auto_wrap_policy(module=module, recurse=True, unwrapped_params=num_params):
total_wrapped_params = 0
# Iterate through the children, recursively wrap if necessary
for name, child in module.named_children():
if child in ignored_modules:
continue
wrapped_child, num_wrapped_params = _custom_recursive_wrap(
module=child,
auto_wrap_policy=auto_wrap_policy,
wrapper_cls=wrapper_cls,
ignored_modules=ignored_modules,
ignored_params=ignored_params,
process_group_cache=process_group_cache,
**kwargs,
)
setattr(module, name, wrapped_child)
# Keep track of how many parameters have been wrapped
total_wrapped_params += num_wrapped_params
# decide if we need to wrap the current module,
# since the left over parameters exceed the number of params to wrap
remainder = num_params - total_wrapped_params
module_kwargs = auto_wrap_policy(module=module, recurse=False, unwrapped_params=remainder)
if not only_wrap_children and module_kwargs:
module_kwargs = module_kwargs if isinstance(module_kwargs, dict) else {}
# backward_prefetch_map
if 'sharding_strategy' in module_kwargs and module_kwargs['sharding_strategy'] not in sharding_map.values():
module_kwargs['sharding_strategy'] = sharding_map[module_kwargs['sharding_strategy'].upper()]
if 'backward_prefetch' in module_kwargs and module_kwargs[
'backward_prefetch'] not in backward_prefetch_map.values():
module_kwargs['backward_prefetch'] = backward_prefetch_map[module_kwargs['backward_prefetch'].upper()]
if 'cpu_offload' in module_kwargs and not isinstance(module_kwargs['cpu_offload'], CPUOffload):
module_kwargs['cpu_offload'] = get_cpu_offload(cpu_offload=module_kwargs['cpu_offload'].upper())
if 'mixed_precision' in module_kwargs and not isinstance(module_kwargs['mixed_precision'], MixedPrecision):
# `precision` needs to set `'mixed_precision'`, but `precision` is not part of fsdp kwargs
raise NotImplementedError(
f"Automated setting of custom per module mixed_precision is not implemented, but it can be set if `isinstance(module_kwargs['mixed_precision'], MixedPrecision)`"
)
if 'process_group' in module_kwargs:
module_kwargs['process_group'] = get_process_group(module_kwargs['process_group'], process_group_cache)
final_kwargs = {**kwargs, **module_kwargs}
# Leaf node or final wrapping of the remainder both happen here.
return _wrap(module, wrapper_cls, **final_kwargs), num_params
else:
return module, total_wrapped_params
return module, 0
class MosaicFullyShardedDataParallel(FullyShardedDataParallel):
"""Updates FSDP's _auto_wrap to enable module_kwargs."""
def _auto_wrap(
self,
auto_wrap_kwargs: Dict[str, Any],
fsdp_kwargs: Dict[str, Any],
) -> None:
"""Updates _auto_wrap to enable module_kwargs.
modified version of
https://github.com/pytorch/pytorch/blob/d922c29a22e4bf0fba49526f7536395eb8cd66f4/torch/distributed/fsdp/fully_sharded_data_parallel.py#L1252
FSDP's _auto_wrap recursively wraps modules as FSDP modules for parameter sharding.
        This modification enables the user to pass custom FSDP arguments for every wrapped module.
The added process_group_cache enables different FSDP modules to, when appropriate, use the
same process group instead of instantiating a new process group.
Recursively auto wraps the root module given by the key "module" in
``auto_wrap_kwargs`` with the arguments in ``auto_wrap_kwargs`` and
``fsdp_kwargs``.
Precondition: ``auto_wrap_policy`` contains the arguments expected by
``_recursive_wrap()``, where ``auto_wrap_policy`` is not ``None``.
``fsdp_kwargs`` contains all FSDP arguments except ``module``.
"""
auto_wrap_policy = auto_wrap_kwargs['auto_wrap_policy']
root_module = auto_wrap_kwargs['module']
assert auto_wrap_policy is not None
# For auto wrapping, submodules should not already be wrapped with FSDP
# since double wrapping is not supported
for module_name, module in root_module.named_modules():
if isinstance(module, FullyShardedDataParallel):
raise ValueError(f'Expected {module_name} to NOT be FullyShardedDataParallel '
'if using an `auto_wrap_policy`')
mixed_precision = fsdp_kwargs['mixed_precision']
if mixed_precision is not None and _contains_batchnorm(root_module):
_override_batchnorm_mixed_precision(root_module)
auto_wrap_policy = functools.partial(_or_policy, policies=[_wrap_batchnorm_individually, auto_wrap_policy])
warnings.warn('Both mixed precision and an `auto_wrap_policy` were specified '
'for FSDP, where the wrapped module has batch norm submodules. '
'The batch norm submodules will be wrapped as separate FSDP '
'instances with mixed precision disabled since some batch norm '
'kernels do not support low precision.')
auto_wrap_kwargs['auto_wrap_policy'] = auto_wrap_policy
auto_wrap_kwargs['process_group_cache'] = {}
_custom_recursive_wrap(**auto_wrap_kwargs, **fsdp_kwargs)
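# --- Illustrative sketch (not part of the original module) ---
# The patched recursive wrap above lets an auto-wrap policy return a dict of
# per-module FSDP kwargs instead of a plain bool. A rough example of such a
# policy; `MyTransformerBlock` is a hypothetical module type:
#
#   def my_auto_wrap_policy(module, recurse, unwrapped_params):
#       if recurse:
#           return True
#       if isinstance(module, MyTransformerBlock):
#           # wrap this module, overriding a couple of FSDP arguments just for it
#           return {'sharding_strategy': 'SHARD_GRAD_OP',
#                   'backward_prefetch': 'BACKWARD_PRE'}
#       return unwrapped_params >= 1e8   # returning a plain bool still works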
| composer-dev | composer/trainer/mosaic_fsdp.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helpers for the `DeepSpeed <https://www.deepspeed.ai>`_ integration with Composer."""
import copy
import warnings
from typing import Any, Dict, cast
import torch
import torch.utils.data
from composer.core import Batch, Precision, State
from composer.utils import dist, map_collection
__all__ = ['_fix_batch_precision_for_deepspeed', '_parse_deepspeed_config']
def _add_batch_config(config: Dict[str, Any], state: State):
if state.dataloader is None:
raise ValueError(
'When using DeepSpeed, the `train_dataloader` must be specified when constructing the Trainer.')
try:
batch_size = state.dataloader.batch_size # type: ignore as we catch the exception
except AttributeError as e:
raise RuntimeError('DeepSpeed requires the `state.dataloader` to have a `batch_size` attribute.') from e
assert state.device_train_microbatch_size is not None
if batch_size % state.device_train_microbatch_size != 0:
# DeepSpeed will throw an error in this configuration.
raise ValueError('The Mosaic trainer has been configured to use batch size='
f'{batch_size}, but this is not divisible by the '
f'train device microbatch size={state.device_train_microbatch_size}. '
'This is unsupported when using DeepSpeed.')
train_batch_size = batch_size * dist.get_world_size()
# Per the check at the start of this function, the following division is always clean.
grad_accum = batch_size // state.device_train_microbatch_size
if 'train_batch_size' in config:
ds_train_batch_size = config['train_batch_size']
if ds_train_batch_size != train_batch_size:
raise ValueError(f'Provided DeepSpeed configuration specifies batch size={ds_train_batch_size}, '
f'but the Mosaic trainer has been configured with batch size={train_batch_size}.')
else:
config['train_batch_size'] = train_batch_size
if 'gradient_accumulation_steps' not in config:
config['gradient_accumulation_steps'] = grad_accum
if 'train_micro_batch_size_per_gpu' in config:
ds_per_gpu_microbatch_size = config['train_micro_batch_size_per_gpu']
if ds_per_gpu_microbatch_size != state.device_train_microbatch_size:
raise ValueError('Provided DeepSpeed configuration specifies per-GPU microbatch size='
f'{ds_per_gpu_microbatch_size}, but the Mosaic trainer has been '
f'configured with per-GPU microbatch size={state.device_train_microbatch_size}.')
else:
config['train_micro_batch_size_per_gpu'] = state.device_train_microbatch_size
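# --- Illustrative example (not part of the original module) ---
# Worked numbers for the batch-size bookkeeping above, assuming 8 ranks, a
# per-rank dataloader batch_size of 32 and device_train_microbatch_size of 8:
#
#   train_batch_size               = 32 * 8  = 256   (global batch size DeepSpeed sees)
#   gradient_accumulation_steps    = 32 // 8 = 4
#   train_micro_batch_size_per_gpu = 8
#
# A pre-existing train_batch_size or train_micro_batch_size_per_gpu in the
# user-supplied DeepSpeed config must agree with the derived value, otherwise a
# ValueError is raised above.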
def _ensure_no_optim_in_config(config: Dict[str, Any]):
if 'optimizer' in config:
raise ValueError(('The DeepSpeed configuration specifies an optimizer, but the Mosaic '
'trainer will override this setting.'))
if 'scheduler' in config:
raise ValueError(('The DeepSpeed configuration specifies a scheduler, but the Mosaic '
'trainer will override this setting.'))
def _add_precision_config(config: Dict[str, Any], state: State):
precision = state.precision
# Verify DeepSpeed config is consistent with state.precision if set. DeepSpeed precision config
# has many different ways to specify approximately the same thing. See https://www.deepspeed.ai/docs/config-json/.
ds_precision = None
if 'fp16' in config and 'enabled' in config['fp16'] and config['fp16']['enabled']:
ds_precision = Precision.AMP_FP16
elif 'bf16' in config and 'enabled' in config['bf16'] and config['bf16']['enabled']:
ds_precision = Precision.AMP_BF16
elif 'amp' in config and 'enabled' in config['amp'] and config['amp']['enabled']:
ds_precision = Precision.AMP_FP16
if ds_precision is not None and ds_precision != precision:
raise ValueError((f'Provided DeepSpeed configuration specifies precision={ds_precision}, '
f'but the Mosaic trainer has been configured with precision={precision}.'))
# Set DeepSpeed config based on state.precision if not set
if precision == Precision.AMP_FP16 and 'fp16' not in config:
config['fp16'] = cast(Dict[str, Any], {'enabled': True})
elif precision == Precision.AMP_BF16 and 'bf16' not in config:
config['bf16'] = cast(Dict[str, Any], {'enabled': True})
def _parse_deepspeed_config(
config: Dict[str, Any],
state: State,
) -> Dict[str, Any]:
"""Parses the provided DeepSpeed config for compatibility with the Mosaic trainer.
Broadly speaking, this function does three things.
1. Check for settings that are unsupported, like DeepSpeed optimizers.
2. Check for inconsistencies between Mosaic trainer config and DeepSpeed config.
3. Use Mosaic trainer config to fill in some defaults for DeepSpeed config.
Args:
config (Dict[str, Any]): The DeepSpeed config to use. Must follow the format specified
in `DeepSpeed's documentation <https://www.deepspeed.ai/docs/config-json/>`_.
state (State): The state of the trainer.
Returns:
Dict[str, Any]: The DeepSpeed config updated with values from the arguments passed to the
:class:`.Trainer`.
Raises:
ValueError: If any of the values in the DeepSpeed config conflict with arguments passed
to the trainer.
RuntimeError: If the batch size of the train dataloader in the provided state is not set.
"""
new_config = copy.deepcopy(config)
_add_batch_config(new_config, state)
_ensure_no_optim_in_config(new_config)
_add_precision_config(new_config, state)
if 'zero_allow_untested_optimizer' in new_config and not new_config['zero_allow_untested_optimizer']:
warnings.warn(('Provided DeepSpeed configuration specifies zero_allow_untested_optimizer=False. '
'This causes DeepSpeed to reject certain Mosaic optimizers that are known to '
'work well with DeepSpeed.'))
new_config['zero_allow_untested_optimizer'] = True
return new_config
def _convert_fp32_tensor_to_fp16(tensor: torch.Tensor):
if tensor.dtype == torch.float32:
return tensor.half()
return tensor
def _convert_fp32_tensor_to_bf16(tensor: torch.Tensor):
if tensor.dtype == torch.float32:
return tensor.to(torch.bfloat16)
return tensor
def _fix_batch_precision_for_deepspeed(batch: Batch, precision: Precision) -> Batch:
"""Ensures that a batch is properly formatted for DeepSpeed precisions, if active.
.. note:: Just because the precision is set to FP16 doesn't mean the entire batch can
be FP16 too. For example, integer tensors are common in inputs and outputs of
various models, and these must not be converted. The assumption here is
that a tensor should only be converted to FP16 if it was given in FP32.
Args:
batch (Batch): The batch of data to adjust the precision for.
precision (Precision): The precision to use.
Returns:
        Batch: The batch with its precision adjusted to the specified precision.
"""
if precision == Precision.AMP_FP16:
return map_collection(batch, _convert_fp32_tensor_to_fp16) # type: ignore
elif precision == Precision.AMP_BF16:
return map_collection(batch, _convert_fp32_tensor_to_bf16) # type: ignore
return batch
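# --- Illustrative example (not part of the original module) ---
# Sketch of the selective conversion above: float32 tensors are cast, everything
# else (e.g. integer token ids) is left untouched. The batch contents are
# hypothetical.
#
#   batch = {'input_ids': torch.ones(4, 8, dtype=torch.int64),
#            'pixel_values': torch.rand(4, 3, 32, 32)}              # float32
#   batch = _fix_batch_precision_for_deepspeed(batch, Precision.AMP_FP16)
#   # batch['input_ids'].dtype    -> torch.int64   (unchanged)
#   # batch['pixel_values'].dtype -> torch.float16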
| composer-dev | composer/trainer/_deepspeed.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Train models with flexible insertion of algorithms."""
from composer.trainer.trainer import Trainer
__all__ = ['Trainer']
| composer-dev | composer/trainer/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
# Source code is compiled from a modified version of:
# https://github.com/pytorch/pytorch/blob/v1.13.0/torch/nn/modules/module.py
# Link to PyTorch License File: https://github.com/pytorch/pytorch/blob/master/LICENSE
# TODO: This code will need to be removed when PyTorch correctly supports delayed initialization
# with meta tensors.
"""Helper function to safely call .apply for initializing meta tensors in PyTorch."""
from typing import Set
import torch
from torch.nn.parameter import Parameter
def meta_safe_apply(self, fn, ignored_modules: Set, module_name: str):
"""Applies the function recursively to a module's children and the module itself.
This variant allows us to ignore modules to apply the function.
The function is a slightly modified version of the one from PyTorch:
https://github.com/pytorch/pytorch/blob/v1.13.0/torch/nn/modules/module.py#L637
Args:
self: the module to apply fn to.
fn: the function to apply to each submodule.
ignored_modules: a set of module names to which ``fn`` should not be applied.
module_name: the current module's name.
"""
for name, module in self.named_children():
module_name_list = [module_name, name]
if module_name == '':
module_name_list = [name]
curr_module_name = concatenate_strings(module_name_list)
meta_safe_apply(module, fn, ignored_modules, curr_module_name)
def compute_should_use_set_data(tensor, tensor_applied):
if torch._has_compatible_shallow_copy_type(tensor, tensor_applied):
# If the new tensor has compatible tensor type as the existing tensor,
# the current behavior is to change the tensor in-place using `.data =`,
# and the future behavior is to overwrite the existing tensor. However,
# changing the current behavior is a BC-breaking change, and we want it
# to happen in future releases. So for now we introduce the
# `torch.__future__.get_overwrite_module_params_on_conversion()`
# global flag to let the user control whether they want the future
# behavior of overwriting the existing tensor or not.
return not torch.__future__.get_overwrite_module_params_on_conversion()
else:
return False
for key, param in self._parameters.items():
curr_name = concatenate_strings([module_name, key])
if param is None or curr_name in ignored_modules:
continue
# Tensors stored in modules are graph leaves, and we don't want to
# track autograd history of `param_applied`, so we have to use
# `with torch.no_grad():`
with torch.no_grad():
param_applied = fn(param)
should_use_set_data = compute_should_use_set_data(param, param_applied)
if should_use_set_data:
param.data = param_applied
out_param = param
else:
assert isinstance(param, Parameter)
assert param.is_leaf
out_param = Parameter(param_applied, param.requires_grad)
self._parameters[key] = out_param
if param.grad is not None:
with torch.no_grad():
grad_applied = fn(param.grad)
should_use_set_data = compute_should_use_set_data(param.grad, grad_applied)
if should_use_set_data:
assert out_param.grad is not None
out_param.grad.data = grad_applied
else:
assert param.grad.is_leaf
out_param.grad = grad_applied.requires_grad_(param.grad.requires_grad)
for key, buf in self._buffers.items():
if buf is not None:
self._buffers[key] = fn(buf)
return self
def concatenate_strings(str_list, delim='.'):
"""Concatenates a list of strings together with a delimiter in between the strings in the list.
Args:
str_list: a list of string to join.
delim: the delimiter to separate all strings
"""
return delim.join(str_list)
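# Editor's example (illustrative, not part of the library): meta_safe_apply uses
# concatenate_strings to build dotted parameter paths such as 'encoder.layer1.weight',
# which are then checked against the ``ignored_modules`` set.
if __name__ == '__main__':
    assert concatenate_strings(['encoder', 'layer1', 'weight']) == 'encoder.layer1.weight'
    assert concatenate_strings(['fc', 'bias'], delim='/') == 'fc/bias'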
| composer-dev | composer/trainer/meta_safe_apply.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from collections import defaultdict
from typing import Optional, Union
import torch
from torch.cuda.amp.grad_scaler import GradScaler, OptState, _refresh_per_optimizer_state
from torch.optim import Optimizer
from composer.utils import dist
__all__ = ['ClosureGradScaler']
class ClosureGradScaler(GradScaler):
"""ClosureGradScaler allows for gradient scaling during with closures.
We use closures with optimizers (see `here <https://pytorch.org/docs/stable/optim.html>`__)
during training in order to support certain algorithms like
:class:`~composer.algorithms.SAM`. This class allows us to perform gradient
scaling (see `here <https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler>`__)
along with the use of closures during training.
Args:
ddp_reduce_scalar_and (Callable[[bool], bool]): A function that performs a
ddp reduction with an `and` operation. Used to determine whether
or not to continue computing an optimizer's `step` based on the presence
of `inf/nan` in the gradients.
ddp_reduce_tensor_sum (Callable[[Tensor], Tensor]): A function that performs
a ddp reduction across tensors with a `sum` operation. Used to aggregate
`inf/nan` information stored in tensors across devices.
"""
def _force_scaler_ready(self, optimizer: Optimizer):
optimizer_state = self._per_optimizer_states[id(optimizer)]
optimizer_state['stage'] = OptState.READY
def _empty_all_grads(self, optimizer):
for group in optimizer.param_groups:
for param in group['params']:
if param.grad is not None:
param.grad = None
def _unscale_grads_and_continue(self, optimizer: Optimizer):
if (not self._enabled):
return True
self._check_scale_growth_tracker('step')
optimizer_state = self._per_optimizer_states[id(optimizer)]
if optimizer_state['stage'] is OptState.STEPPED:
raise RuntimeError('step() has already been called since the last update().')
if optimizer_state['stage'] is OptState.READY:
self.unscale_(optimizer)
inf_detected = sum(v.item() for v in optimizer_state['found_inf_per_device'].values())
return not inf_detected
def step(self, optimizer: Optimizer, *args, **kwargs):
"""Step the optimizer with amp.
Always called before the optimizer step. Checks if the optimizer can handle AMP closures (currently only
Composer's SAM optimizer). If so, it passes an AMP-modified closure to the optimizer.
"""
closure = kwargs['closure']
def _amp_closure(**kwargs):
self._force_scaler_ready(optimizer)
self._empty_all_grads(optimizer)
retval: float = closure(**kwargs)
should_continue = self._unscale_grads_and_continue(optimizer)
other_should_continue = dist.all_gather_object(should_continue)
return retval if all(other_should_continue) else None
return optimizer.step(closure=_amp_closure) # type: ignore
# Mostly copied from original grad_scaler implementation
# See: https://pytorch.org/docs/stable/_modules/torch/cuda/amp/grad_scaler.html#GradScaler
def update(self, new_scale: Optional[Union[float, torch.FloatTensor]] = None):
"""Updates the scale factor.
If any optimizer steps were skipped, the scale is multiplied by ``backoff_factor``
to reduce it. If ``growth_interval`` non-skipped iterations occurred consecutively,
the scale is multiplied by ``growth_factor`` to increase it.
Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
used directly; it is used to fill GradScaler's internal scale tensor. So, if
``new_scale`` was a tensor, later in-place changes to that tensor will not further
affect the scale that the GradScaler uses internally.)
.. warning::
This method should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
been invoked for all optimizers used this iteration.
Args:
new_scale (float | FloatTensor, optional): New scale factor. (default: ``None``)
"""
if not self._enabled:
return
_scale, _growth_tracker = self._check_scale_growth_tracker('update')
if new_scale is not None:
# Accept a new user-defined scale.
if isinstance(new_scale, float):
self._scale.fill_(new_scale) # type: ignore[union-attr]
else:
reason = 'new_scale should be a float or a 1-element torch.cuda.FloatTensor with requires_grad=False.'
assert isinstance(new_scale, torch.cuda.FloatTensor), reason # type: ignore[attr-defined]
assert new_scale.numel() == 1, reason
assert new_scale.requires_grad is False, reason
self._scale.copy_(new_scale) # type: ignore[union-attr]
else:
# Consume shared inf/nan data collected from optimizers to update the scale.
# If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
found_infs = [
found_inf.to(device=_scale.device, non_blocking=True)
for state in self._per_optimizer_states.values()
for found_inf in state['found_inf_per_device'].values()
]
assert len(found_infs) > 0, 'No inf checks were recorded prior to update.'
found_inf_combined = found_infs[0]
if len(found_infs) > 1:
for i in range(1, len(found_infs)):
found_inf_combined += found_infs[i]
# This is the only line changed from original grad_scaler implementation
dist.all_reduce(found_inf_combined, reduce_operation='SUM')
torch._amp_update_scale_(_scale, _growth_tracker, found_inf_combined, self._growth_factor,
self._backoff_factor, self._growth_interval)
# To prepare for next iteration, clear the data collected from optimizers this iteration.
self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
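# Editor's sketch (not part of the library): rough shape of a closure-based training step
# with ClosureGradScaler. ``loss_fn`` is a hypothetical zero-argument callable that runs
# the forward pass and returns a scalar loss tensor; the scaler decides, in agreement
# across ranks, whether the optimizer step should actually be taken.
def _example_closure_step(scaler: ClosureGradScaler, optimizer: Optimizer, loss_fn) -> None:
    def closure(**kwargs):
        loss = loss_fn()
        scaler.scale(loss).backward()
        return loss.item()
    scaler.step(optimizer, closure=closure)
    scaler.update()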
| composer-dev | composer/trainer/_scaler.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from collections import Counter
from torch.optim.lr_scheduler import CosineAnnealingLR, CosineAnnealingWarmRestarts, ExponentialLR, MultiStepLR, StepLR
from composer.core import PyTorchScheduler
def scale_pytorch_scheduler(scheduler: PyTorchScheduler, ssr: float):
"""Makes a learning rate schedule take a different number of epochs.
Training for less time is a strong baseline approach to speeding up
training, provided that the training still gets through the entire
learning rate schedule. E.g., training for half as long often yields
little accuracy degradation, provided that the learning rate schedule
is rescaled to take half as long as well. In contrast, if the schedule
is not rescaled, training for half as long would mean simply stopping
halfway through the training curve, which does not reach nearly as
high an accuracy.
To see the difference, consider training for half as long using a cosine
annealing learning rate schedule. If the schedule is not rescaled,
training ends while the learning rate is still ~0.5. If the schedule is
rescaled, training ends after passing through the full cosine
curve, at a learning rate near 0.
.. doctest::
>>> from composer.trainer._scale_schedule import scale_pytorch_scheduler
>>> from torch.optim.lr_scheduler import CosineAnnealingLR
>>> scheduler = CosineAnnealingLR(optimizer, T_max=90)
>>> scale_pytorch_scheduler(scheduler, ssr=0.5)
Args:
scheduler: A learning rate schedule object. Must be one of:
* ``torch.optim.lr_scheduler.CosineAnnealingLR``
* ``torch.optim.lr_scheduler.CosineAnnealingWarmRestarts``
* ``torch.optim.lr_scheduler.ExponentialLR``
* ``torch.optim.lr_scheduler.MultiStepLR``
* ``torch.optim.lr_scheduler.StepLR``
ssr: the factor by which to scale the duration of the schedule. E.g., 0.5
makes the schedule take half as many epochs and 2.0 makes it
take twice as many epochs.
Raises:
ValueError: If ``scheduler`` is not an instance of one of the above types.
"""
if ssr <= 0:
raise ValueError('Scale schedule ratio must be a positive value.')
if ssr == 1.0:
# If it's 1.0, then scaling is a no-op
return
# Applies SSR on a pytorch scheduler in place.
if isinstance(scheduler, StepLR):
scheduler.step_size = int(scheduler.step_size * ssr) # type: ignore -- unknown attribute
elif isinstance(scheduler, MultiStepLR):
milestones = scheduler.milestones # type: ignore -- unknown attribute
milestones = Counter([int(ms * ssr) for ms in milestones])
scheduler.milestones = milestones # type: ignore -- unknown attribute
elif isinstance(scheduler, CosineAnnealingLR):
scheduler.T_max = int(scheduler.T_max * ssr) # type: ignore -- unknown attribute
elif isinstance(scheduler, CosineAnnealingWarmRestarts):
# TODO: account for warmups
scheduler.T_0 = int(scheduler.T_0 * ssr) # type: ignore -- unknown attribute
elif isinstance(scheduler, ExponentialLR):
factor = 1 / ssr
scheduler.gamma = scheduler.gamma**factor # type: ignore -- unknown attribute
else:
raise ValueError(f'Scale schedule being applied to unrecognized Scheduler {scheduler}. '
'Please implement your scheduler as a function instead.')
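# Editor's example (illustrative, not part of the library): scaling a StepLR schedule in
# place with ssr=0.5 halves its step_size. The optimizer below is a throwaway SGD over a
# single dummy parameter, used only so the scheduler can be constructed.
if __name__ == '__main__':
    import torch
    _opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
    _sched = StepLR(_opt, step_size=30)
    scale_pytorch_scheduler(_sched, ssr=0.5)
    assert _sched.step_size == 15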
| composer-dev | composer/trainer/_scale_schedule.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Train models."""
from __future__ import annotations
import collections.abc
import contextlib
import datetime
import itertools
import logging
import os
import random
import re
import time
import warnings
from collections import defaultdict
from copy import deepcopy
from pathlib import Path
from typing import Any, Callable, ContextManager, Dict, Iterable, List, Optional, Sequence, TextIO, Tuple, Union, cast
import coolname
import torch
import torch.distributed
import torch.nn as nn
import torch.utils.data
from torch.cuda.amp.grad_scaler import GradScaler, _refresh_per_optimizer_state
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, DistributedSampler
from torchmetrics import Metric
from composer.callbacks import CheckpointSaver, OptimizerMonitor
from composer.core import (Algorithm, AlgorithmPass, Batch, BreakEpochException, Callback, DataSpec, Engine, Evaluator,
Event, Precision, PyTorchScheduler, State, Time, Timestamp, TimeUnit, TrainerMode,
ensure_data_spec, ensure_evaluator, ensure_time, get_precision_context,
validate_eval_automicrobatching)
from composer.devices import Device, DeviceCPU, DeviceGPU, DeviceMPS, DeviceTPU
from composer.loggers import (ConsoleLogger, Logger, LoggerDestination, ProgressBarLogger, RemoteUploaderDownloader,
WandBLogger)
from composer.models import ComposerModel
from composer.optim import ComposerScheduler, DecoupledSGDW, compile_composer_scheduler
from composer.profiler import Profiler
from composer.trainer._deepspeed import _fix_batch_precision_for_deepspeed, _parse_deepspeed_config
from composer.trainer._scale_schedule import scale_pytorch_scheduler
from composer.trainer._scaler import ClosureGradScaler
from composer.trainer.dist_strategy import DDPSyncStrategy, ddp_sync_context, prepare_ddp_module, prepare_fsdp_module
from composer.utils import (ExportFormat, MissingConditionalImportError, ObjectStore, Transform, checkpoint, dist,
ensure_tuple, export_with_logger, extract_hparams, format_name_with_dist, get_device,
get_file, is_tpu_installed, map_collection, maybe_create_object_store_from_uri,
maybe_create_remote_uploader_downloader_from_uri, model_eval_mode, parse_uri,
reproducibility)
if is_tpu_installed():
import torch_xla.core.xla_model as xm
import torch_xla.distributed.parallel_loader as pl
log = logging.getLogger(__name__)
__all__ = ['Trainer']
# syntax to shorten the Scheduler type annotations
Scheduler = Union[ComposerScheduler, PyTorchScheduler]
def _raise_missing_argument_exception(arg_name: str):
raise ValueError((f'{arg_name} is a required argument and must be specified when constructing the '
f'{Trainer.__name__} or when calling {Trainer.__name__}.{Trainer.fit.__name__}(). '
f'To fix, please specify `{arg_name}` via {Trainer.__name__}({arg_name}=...) or '
f'{Trainer.__name__}.{Trainer.fit.__name__}({arg_name}=...).'))
def _scale_max_duration_by_ssr(
scale_schedule_ratio: float,
orig_max_duration: Optional[Time[int]],
) -> Optional[Time[int]]:
if orig_max_duration is None:
return None
max_duration = cast(Time[int], orig_max_duration * scale_schedule_ratio)
log.info(f'max_duration changed from {orig_max_duration} to {max_duration}')
if max_duration.value == 0:
raise ValueError('Scale schedule has reduced the max_duration to 0. Set a higher ratio or use more epochs.')
return max_duration
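# Editor's sketch (not part of the library): with a scale schedule ratio of 0.5, a
# 10-epoch max_duration is scaled down to 5 epochs; a ``None`` duration passes through unchanged.
def _example_scale_max_duration() -> Optional[Time[int]]:
    return _scale_max_duration_by_ssr(0.5, Time(10, TimeUnit.EPOCH))  # -> 5 epochs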
def _get_default_scheduler_frequency(schedulers: Optional[Union[Scheduler, Sequence[Scheduler]]]):
has_pytorch_scheduler = any(isinstance(scheduler, PyTorchScheduler) for scheduler in ensure_tuple(schedulers))
if has_pytorch_scheduler:
log.info(('Stepping schedulers every epoch, as a PyTorch scheduler was provided. '
'The trainer cannot automatically convert the parameters (e.g. step_size, T_max) of the '
'PyTorch scheduler to be in terms of batches. If the PyTorch scheduler should be stepped '
'every batch, set `step_schedulers_every_batch=True`.'))
return TimeUnit.EPOCH
else:
log.info(('Stepping schedulers every batch. '
'To step schedulers every epoch, set `step_schedulers_every_batch=False`.'))
return TimeUnit.BATCH
def _filter_metrics(metrics: Dict[str, Metric], metric_names: Optional[List[str]]) -> Dict[str, Metric]:
"""Filter the metrics based on the given metric_names as regex strings (e.g. 'Accuracy', 'f1' for 'BinaryF1Score', 'Top-.' for 'Top-1 Accuracy' and 'Top-2 Accuracy', etc). If no metric_names are provided, all metrics will be returned."""
metrics = deepcopy(metrics)
if not metric_names:
return metrics
else:
filtered_metrics = {}
for name, metric in metrics.items():
if any(re.match(f'.*{metric_name}.*', name, re.IGNORECASE) for metric_name in metric_names):
filtered_metrics[name] = metric
return filtered_metrics
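# Editor's sketch (not part of the library): _filter_metrics matches each requested name
# case-insensitively as the regex '.*<name>.*' against the metric keys, so 'Accuracy' keeps
# 'MulticlassAccuracy' but drops 'CrossEntropy'. Only the keys matter to the filtering, so
# plain sentinel objects stand in for torchmetrics.Metric instances here.
def _example_filter_metrics_by_name() -> List[str]:
    metrics = {'MulticlassAccuracy': object(), 'CrossEntropy': object()}
    filtered = _filter_metrics(metrics, ['Accuracy'])  # type: ignore[arg-type]
    return sorted(filtered)  # -> ['MulticlassAccuracy']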
def _validate_precision(precision: Precision, device: Device):
if isinstance(device, DeviceCPU) and precision != Precision.FP32:
raise ValueError(f'{precision} is not supported for CPU training.')
def _compile_schedulers(
schedulers: Optional[Union[Scheduler, Sequence[Scheduler]]],
state: State,
scale_schedule_ratio: float,
) -> List[PyTorchScheduler]:
compiled_schedulers = []
for scheduler in ensure_tuple(schedulers):
if isinstance(scheduler, PyTorchScheduler):
scale_pytorch_scheduler(scheduler, scale_schedule_ratio)
compiled_schedulers.append(scheduler)
else: # it's a composer scheduler
compiled_schedulers.append(compile_composer_scheduler(scheduler, state, scale_schedule_ratio))
return compiled_schedulers
def _set_evaluator_interval_and_subset_num_batches(
evaluators: Sequence[Evaluator],
eval_interval: Union[int, str, Time, Callable[[State, Event], bool]],
subset_num_batches: int,
):
# convert eval_dataloader to `List[Evaluator]`
for evaluator in evaluators:
if evaluator.subset_num_batches is None:
evaluator.subset_num_batches = subset_num_batches
if evaluator.eval_interval is None:
evaluator.eval_interval = eval_interval
eval_dataloader = evaluator.dataloader.dataloader
if isinstance(eval_dataloader, collections.abc.Sized) and (evaluator.subset_num_batches is None or
evaluator.subset_num_batches == -1):
try:
dataloader_len = len(eval_dataloader)
except TypeError:
dataloader_len = None
if dataloader_len is None:
raise ValueError('eval_subset_num_batches must be set when using an infinite sized '
'eval_dataloader where length is `None`. Otherwise, evaluation will '
'run forever and never terminate.')
def _is_auto_microbatching(device_train_microbatch_size: Optional[Union[int, str]], device: Device):
if device_train_microbatch_size == 'auto':
warnings.warn(("Setting `device_train_microbatch_size='auto'` is an experimental feature which may cause "
'uncaught CUDA Out of Memory errors. In this case, please manually '
'set device_train_microbatch_size to an integer instead.'))
if not isinstance(device, DeviceGPU):
raise ValueError(
'Can only use adaptive device_train_microbatch_size on GPU. Please set device_train_microbatch_size >= 1.'
)
return True
else:
return False
def _get_initial_device_train_microbatch_size(device_train_microbatch_size: Optional[Union[int, str]],
auto_microbatching: bool,
train_dataloader: Optional[Iterable]) -> Optional[int]:
"""Sets initial value of device_train_microbatch_size.
If auto_microbatching, sets initial `device_train_microbatch_size` to per rank batch size. If
`train_dataloader` is not set yet, returns None and this function will be called again when
`train_dataloader` is set, such as when `fit()` is called.
"""
if device_train_microbatch_size is None or auto_microbatching:
# Return None, this function will be called again when `train_dataloader` is set
if train_dataloader is None:
return None
try:
batch_size = getattr(train_dataloader, 'batch_size')
except AttributeError as e:
# Error message when `device_train_microbatch_size` is None
# Note: This code path will be removed after `auto` is made default
if device_train_microbatch_size is None:
raise ValueError(
'`device_train_microbatch_size` must be set when `state.train_dataloader` does not have a `batch_size` attribute.'
) from e
# Error message when `device_train_microbatch_size` is 'auto'
raise AttributeError(
"`device_train_microbatch_size='auto'` requires the `state.train_dataloader` to have a `batch_size` attribute."
) from e
return batch_size
elif isinstance(device_train_microbatch_size, int):
return device_train_microbatch_size
else:
raise ValueError("device_train_microbatch_size must be an int or ``'auto'``")
def _is_cuda_oom(e: RuntimeError):
"""Determines if error is CUDA Out of Memory and if auto_microbatching is enabled."""
if 'CUDA out of memory' in str(e):
return True
# With batch_norm, large batch sizes sometimes result in cuDNN instead of Cuda OOMs.
if 'cuDNN error: CUDNN_STATUS_NOT_SUPPORTED. This error may appear if you passed in a non-contiguous input.' in str(
e):
warnings.warn('Encountered "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED. This error may appear if you passed in '
'a non-contiguous input." This can happen when the batch_size is too large for the GPU, so '
'auto_microbatching will rerun with a smaller microbatch size, but there may be a user '
'error with non-contiguous inputs.')
return True
return False
def _adjust_device_train_microbatch_size(state: State):
"""Adjust device_train_microbatch_size if we encounter OOM.
Args:
state (State): State of trainer.
"""
# If any rank hit CUDA OOM, update device_train_microbatch_size and retry. Raise runtime error
# if training 1 sample at a time still resulted in CUDA out of memory.
assert state.device_train_microbatch_size is not None
if state.device_train_microbatch_size == 1:
raise RuntimeError(('CUDA out of memory. The train loop failed with an internal microbatch of size 1. '
'The GPU does not have enough memory to process even 1 sample during train.'))
else:
original_microbatch_size = state.device_train_microbatch_size
state.device_train_microbatch_size = max(int(original_microbatch_size / 2), 1)
warnings.warn(
RuntimeWarning('CUDA out of memory detected. Train microbatch size will be decreased from '
f'{original_microbatch_size} -> {state.device_train_microbatch_size}.'))
# Clear gradients in case failure happened during backwards pass
if hasattr(state, 'outputs'):
del state.outputs
if hasattr(state, 'loss'):
del state.loss
for optimizer in state.optimizers:
optimizer.zero_grad(set_to_none=True)
if state.scaler is not None:
state.scaler._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
torch.cuda.empty_cache()
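# Editor's sketch (not part of the library): the backoff schedule that repeated CUDA OOMs
# walk through. The microbatch size is halved on every failure until a single sample no
# longer fits, at which point a RuntimeError is raised.
def _example_microbatch_backoff(start: int = 128) -> List[int]:
    sizes = [start]
    while sizes[-1] > 1:
        sizes.append(max(sizes[-1] // 2, 1))
    return sizes  # e.g. [128, 64, 32, 16, 8, 4, 2, 1]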
def _adjust_device_eval_microbatch_size(evaluator: Evaluator):
"""Adjust device_eval_microbatch_size if we encounter OOM.
Args:
evaluator (Evaluator): The current evaluator.
"""
# If any rank hit CUDA OOM, update device_eval_microbatch_size and retry. Raise runtime error
# if evaluating 1 sample at a time still resulted in CUDA out of memory.
assert evaluator.device_eval_microbatch_size is not None
if evaluator.device_eval_microbatch_size == 1:
raise RuntimeError(('CUDA out of memory. The eval loop failed with an internal microbatch of size 1. '
'The GPU does not have enough memory to process even 1 sample during eval.'))
else:
original_microbatch_size = evaluator.device_eval_microbatch_size
evaluator.device_eval_microbatch_size = max(int(original_microbatch_size / 2), 1)
warnings.warn(
RuntimeWarning('CUDA out of memory detected. Eval microbatch size will be decreased from '
f'{original_microbatch_size} -> {evaluator.device_eval_microbatch_size}.'))
torch.cuda.empty_cache()
def _distribute_and_get_random_seed(seed: Optional[int], device: Device):
if not seed:
seed = reproducibility.get_random_seed()
# Ensure that each process has a seed = rank_zero_seed + global_rank
# This "deterministically different" seed behavior is required to be able
# to restore seeds when resuming from checkpoints, since only the
# `rank_zero_seed` is stored on state.
if seed < 0 or seed > reproducibility.MAX_SEED:
raise ValueError(f'Invalid seed: {seed}. It must be in the range [0, 2**32 - 1).')
# using int64 to prevent overflow
rank_zero_seed = device.tensor_to_device(torch.tensor([seed], dtype=torch.int64))
if dist.get_world_size() > 1:
dist.broadcast(rank_zero_seed, src=0)
rank_zero_seed = rank_zero_seed.item()
assert isinstance(rank_zero_seed, int)
seed = rank_zero_seed + dist.get_global_rank()
return rank_zero_seed, seed
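# Editor's sketch (not part of the library): each rank derives its seed as
# rank_zero_seed + global_rank, so rank_zero_seed=42 on 4 ranks yields seeds 42, 43, 44,
# and 45. The seeds are deterministically different yet restorable from a checkpoint,
# since only rank_zero_seed is stored on state.
def _example_per_rank_seeds(rank_zero_seed: int = 42, world_size: int = 4) -> List[int]:
    return [rank_zero_seed + global_rank for global_rank in range(world_size)]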
def _get_ddp_sync_strategy(ddp_sync_strategy: Optional[Union[str, DDPSyncStrategy]], find_unused_parameters: bool):
if ddp_sync_strategy is None:
if find_unused_parameters:
ddp_sync_strategy = DDPSyncStrategy.MULTI_AUTO_SYNC
else:
ddp_sync_strategy = DDPSyncStrategy.SINGLE_AUTO_SYNC
else:
ddp_sync_strategy = DDPSyncStrategy(ddp_sync_strategy)
return ddp_sync_strategy
def _get_precision_context(precision: Precision, deepspeed_enabled: bool):
if deepspeed_enabled:
return contextlib.nullcontext()
return get_precision_context(precision)
def _generate_run_name() -> str:
# change coolname randomness for different names with same seed
coolname.replace_random(random.Random(os.urandom(128)))
# prefixing with the time so experiments sorted alphabetically will have the latest experiment last
generated_run_name = str(int(time.time())) + '-' + coolname.generate_slug(2)
run_name_list = [generated_run_name]
# ensure all ranks have the same experiment name
dist.broadcast_object_list(run_name_list)
generated_run_name = run_name_list[0]
return generated_run_name
class Trainer:
"""Train models with Composer algorithms.
The trainer supports models that are :class:`~composer.models.base.ComposerModel` instances.
The :class:`.Trainer` is highly customizable and can support a wide variety of workloads.
See the :doc:`training guide</trainer/using_the_trainer>` for more information.
Example
--------
Train a model and save a checkpoint:
.. testcode::
import os
from composer import Trainer
### Create a trainer
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
max_duration="1ep",
eval_dataloader=eval_dataloader,
optimizers=optimizer,
schedulers=scheduler,
device="cpu",
eval_interval="1ep",
save_folder="checkpoints",
save_filename="ep{epoch}.pt",
save_interval="1ep",
save_overwrite=True,
)
# Fit and run evaluation for 1 epoch.
# Save a checkpoint after 1 epoch as specified during trainer creation.
trainer.fit()
Load the checkpoint and resume training:
.. testcode::
# Get the saved checkpoint filepath
checkpoint_path = trainer.saved_checkpoints.pop()
# Create a new trainer with the `load_path` argument set to the checkpoint path.
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
max_duration="2ep",
eval_dataloader=eval_dataloader,
optimizers=optimizer,
schedulers=scheduler,
device="cpu",
eval_interval="1ep",
load_path=checkpoint_path,
)
# Continue training and running evaluation where the previous trainer left off
# until the new max_duration is reached.
# In this case it will be one additional epoch to reach 2 epochs total.
trainer.fit()
Args:
model (ComposerModel): The model to train. Can be user-defined or one of the models included
with Composer.
.. seealso:: :mod:`composer.models` for models built into Composer.
train_dataloader (Iterable | DataSpec | dict, optional): The dataloader, :class:`.DataSpec`,
or dict of :class:`.DataSpec` kwargs for the training data. In order to specify custom
preprocessing steps on each data batch, specify a :class:`.DataSpec` instead of a dataloader.
It is recommended that the dataloader, whether specified directly or as part of a :class:`.DataSpec`,
should be a :class:`torch.utils.data.DataLoader`.
.. note:: The ``train_dataloader`` should yield per-rank batches. Each per-rank batch
will then be further divided based on the ``device_train_microbatch_size`` parameter. For example, if the
desired optimization batch size is ``2048`` and training is happening across 8 GPUs, then each
``train_dataloader`` should yield a batch of size ``2048 / 8 = 256``. If ``device_train_microbatch_size = 128``,
then the per-rank batch will be divided into ``256 / 128 = 2`` microbatches of size ``128``.
If ``train_dataloader`` is not specified when constructing the trainer, it must be specified when invoking
:meth:`.Trainer.fit`.
train_dataloader_label (str, optional): The label for the train dataloader. (default: ``'train'``)
This label is used to index the training metrics in
:attr:`.State.train_metrics`.
This parameter has no effect if ``train_dataloader`` is not specified.
train_subset_num_batches (int, optional): If specified, finish every epoch early after training
on this many batches. This parameter has no effect if it is greater than ``len(train_dataloader)``.
If ``-1``, then the entire dataloader will be iterated over. (default: ``-1``)
When using the profiler, it can be helpful to set this parameter to the length of the profile schedule.
This setting will end each epoch early to avoid additional training that will not be profiled.
This parameter is ignored if ``train_dataloader`` is not specified.
max_duration (Time | str | int, optional): The maximum duration to train. Can be an integer, which will be
interpreted to be epochs, a str (e.g. ``1ep``, or ``10ba``), or a :class:`.Time` object.
If ``max_duration`` is not specified when constructing the trainer, ``duration`` must be specified when invoking
:meth:`.Trainer.fit`.
algorithms (Algorithm | Sequence[Algorithm], optional): The algorithms to use during training. If ``None``, then
no algorithms will be used. (default: ``None``)
.. seealso:: :mod:`composer.algorithms` for the different algorithms built into Composer.
algorithm_passes (AlgorithmPass | Tuple[AlgorithmPass, int] | Sequence[AlgorithmPass | Tuple[AlgorithmPass, int]], optional):
Optional list of passes to change order in which algorithms are applied. These passes are merged with the
default passes specified in :class:`.Engine`. If ``None``, then no additional passes will be used.
(default: ``None``)
.. seealso:: :class:`composer.core.Engine` for more information.
optimizers (torch.optim.Optimizer, optional): The optimizer.
If ``None``, will be set to ``DecoupledSGDW(model.parameters(), lr=0.1)``. (default: ``None``)
.. seealso:: :mod:`composer.optim` for the different optimizers built into Composer.
schedulers (PyTorchScheduler | ComposerScheduler | Sequence[PyTorchScheduler | ComposerScheduler], optional):
The learning rate schedulers. If ``[]`` or ``None``, the learning rate will be constant.
(default: ``None``).
.. seealso:: :mod:`composer.optim.scheduler` for the different schedulers built into Composer.
scale_schedule_ratio (float, optional): Ratio by which to scale the training duration and learning rate
schedules. (default: ``1.0``)
E.g., ``0.5`` makes the schedule take half as many epochs and ``2.0`` makes it take twice as
many epochs. ``1.0`` means no change.
This parameter has no effect if ``schedulers`` is not specified.
.. note ::
Training for less time, while rescaling the learning rate schedule,
is a strong baseline approach to speeding up training. E.g., training
for half duration often yields minor accuracy degradation,
provided that the learning rate schedule is also rescaled to take half as long.
To see the difference, consider training for half as long using a cosine
annealing learning rate schedule. If the schedule is not rescaled,
training ends while the learning rate is still ~0.5 of the initial LR.
If the schedule is rescaled with ``scale_schedule_ratio``, the LR schedule
would finish the entire cosine curve, ending with a learning rate near zero.
step_schedulers_every_batch (bool, optional): By default, native
`PyTorch schedulers <https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate>`_
are updated every epoch, while :doc:`Composer Schedulers</trainer/schedulers>` are updated every step.
Setting this to ``True`` will force schedulers to be stepped every batch,
while ``False`` means schedulers are stepped every epoch. ``None`` indicates the default behavior.
(default: ``None``)
eval_dataloader (DataLoader | DataSpec | Evaluator | Sequence[Evaluator], optional): The :class:`.DataLoader`,
:class:`.DataSpec`, :class:`.Evaluator`, or sequence of evaluators for the evaluation data.
To evaluate one or more specific metrics across one or more datasets, pass in an
:class:`.Evaluator`. If a :class:`.DataSpec` or :class:`.DataLoader` is passed in, then all
metrics returned by ``model.get_metrics()`` will be used during evaluation.
``None`` results in no evaluation. (default: ``None``)
eval_interval (int | str | Time | (State, Event) -> bool, optional): Specifies how frequently to run evaluation.
An integer, which will be interpreted to be epochs, a str (e.g. ``1ep``, or ``10ba``), a :class:`.Time`
object, or a callable.
Defaults to ``1`` (evaluate every epoch).
If an integer (in epochs), :class:`.Time` string, or :class:`.Time` instance, the evaluator will be run
with this frequency. :class:`.Time` strings or :class:`.Time` instances must have units of
:attr:`.TimeUnit.BATCH` or :attr:`.TimeUnit.EPOCH`.
Set to ``0`` to disable evaluation.
If a callable, it should take two arguments (:class:`.State`, :class:`.Event`) and return a bool
representing whether the evaluator should be invoked. The event will be either :attr:`.Event.BATCH_END`
or :attr:`.Event.EPOCH_END`.
This ``eval_interval`` will apply to any :class:`.Evaluator` in ``eval_dataloader`` that does not specify
an ``eval_interval`` or if a dataloader is passed in directly. This parameter has no effect if
``eval_dataloader`` is not specified.
When specifying a time string or integer for ``eval_interval``, the evaluator(s) are also run at ``Event.FIT_END`` if the interval doesn't
evenly divide the training duration.
eval_subset_num_batches (int, optional): If specified, evaluate on this many batches. Defaults to ``-1``,
which means to iterate over the entire dataloader.
This parameter has no effect if ``eval_dataloader`` is not specified, it is greater than
``len(eval_dataloader)``, or ``eval_dataloader`` is an :class:`.Evaluator` and ``subset_num_batches``
was specified as part of the :class:`.Evaluator`.
callbacks (Callback | Sequence[Callback], optional): The callbacks to run during training. If ``None``,
then no callbacks will be run. (default: ``None``).
.. seealso:: :mod:`composer.callbacks` for the different callbacks built into Composer.
loggers (LoggerDestination | Sequence[LoggerDestination], optional): The destinations to log training information to.
.. seealso:: :mod:`composer.loggers` for the different loggers built into Composer.
run_name (str, optional): A name for this training run. If not specified, the timestamp will be combined with a
:doc:`coolname <coolname:index>`, e.g. ``1654298855-electric-zebra``.
progress_bar (bool): Whether to show a progress bar. (default: ``True``)
log_to_console (bool): Whether to print logging statements to the console. (default: ``False``)
console_stream (TextIO | str, optional): The stream to write to. If a string, it can either be
``'stdout'`` or ``'stderr'``. (default: :attr:`sys.stderr`)
console_log_interval (int | str | Time, optional): Specifies how frequently to log metrics to console.
An integer, which will be interpreted to be epochs, a str (e.g. ``1ep``, or ``10ba``), or a :class:`.Time`
object. (default: ``1ba``)
Defaults to ``1ba`` (log metrics every batch).
If an integer (in epochs), :class:`.Time` string, or :class:`.Time` instance, the metrics will be logged
with this frequency. :class:`.Time` strings or :class:`.Time` instances must have units of
:attr:`.TimeUnit.BATCH` or :attr:`.TimeUnit.EPOCH`.
Set to ``0`` to disable metrics logging to console.
log_traces (bool): Whether to log traces or not. (default: ``False``)
auto_log_hparams (bool): Whether to automatically extract hyperparameters. (default: ``False``)
load_path (str, optional): The path format string to an existing checkpoint file.
It can be a path to a file on the local disk, a URL, or if ``load_object_store`` is set, the object name
for a checkpoint in a cloud bucket. If a URI is specified, ``load_object_store`` does not need to be set.
When using `Deepspeed ZeRO <https://www.deepspeed.ai/tutorials/zero/>`_, checkpoints are sharded by rank.
Instead of hard-coding the rank in the ``path``, use the following format variables:
+------------------------+-------------------------------------------------------+
| Variable | Description |
+========================+=======================================================+
| ``{rank}`` | The global rank, as returned by |
| | :func:`~.dist.get_global_rank`. |
+------------------------+-------------------------------------------------------+
| ``{local_rank}`` | The local rank of the process, as returned by |
| | :func:`~.dist.get_local_rank`. |
+------------------------+-------------------------------------------------------+
| ``{node_rank}`` | The node rank, as returned by |
| | :func:`~.dist.get_node_rank`. |
+------------------------+-------------------------------------------------------+
For example, suppose that checkpoints are stored in the following structure:
.. code-block::
my_model/ep1-rank0.tar
my_model/ep1-rank1.tar
my_model/ep1-rank2.tar
...
Then, ``load_path`` should be set to ``my_model/ep1-rank{rank}.tar``, and all ranks will load the
correct state.
If ``None`` then no checkpoint will be loaded. (default: ``None``)
load_object_store (Union[ObjectStore, LoggerDestination], optional): If the ``load_path`` is in an
object store (i.e. AWS S3 or Google Cloud Storage), an instance of :class:`.ObjectStore` or
:class:`.LoggerDestination` which will be used to retrieve the checkpoint. Otherwise, if the
checkpoint is a local filepath, set to ``None``. Also, it can be ``None`` if the ``load_path`` is
an S3 URI because the appropriate object store will be automatically constructed in that case.
Ignored if ``load_path`` is ``None``.
(default: ``None``)
Example:
.. testsetup::
import composer.trainer
composer.trainer.trainer.checkpoint.load_checkpoint = lambda *args, **kwargs: None
.. testcode::
from composer import Trainer
from composer.utils import LibcloudObjectStore
# Create the object store provider with the specified credentials
creds = {"key": "object_store_key",
"secret": "object_store_secret"}
store = LibcloudObjectStore(provider="s3",
container="my_container",
provider_kwargs=creds)
checkpoint_path = "./path_to_the_checkpoint_in_object_store"
# Create a trainer which will load a checkpoint from the specified object store
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
max_duration="10ep",
eval_dataloader=eval_dataloader,
optimizers=optimizer,
schedulers=scheduler,
device="cpu",
eval_interval="1ep",
load_path=checkpoint_path,
load_object_store=store,
)
load_weights_only (bool, optional): Whether or not to only restore the weights from the checkpoint without
restoring the associated state. Ignored if ``load_path`` is ``None``. (default: ``False``)
load_strict_model_weights (bool, optional): Whether to ensure that the set of weights in the checkpoint
exactly matches those in the model. Ignored if ``load_path`` is ``None``. (default: ``False``)
load_progress_bar (bool, optional): Display the progress bar for downloading the checkpoint.
Ignored if ``load_path`` is either ``None`` or a local file path. (default: ``True``)
load_ignore_keys (List[str] | (Dict) -> None, optional): A list of paths for the ``state_dict`` of the checkpoint,
which, when provided, will be ignored from the state_dict before a checkpoint is loaded. Each path is a list
of strings specifying the keys to index into ``state_dict`` joined together with `/` as a separator (as PyTorch
uses `.` in parameter names). If a prefix is provided, all children are also ignored (see Example 2).
See :mod:`composer.core.state` for the structure of state_dict.
Example 1: ``load_ignore_keys = ["state/model/layer1.weights", "state/model/layer1.bias"]`` would ignore
layer 1 weights and bias.
Example 2: ``load_ignore_keys = ["state/model/*"]`` would ignore the entire model, which would have the same
effect as the previous example if there was only 1 layer.
Example 3: ``load_ignore_keys = ["state/model/layer*.weights"]`` would ignore all weights in the model.
Example 4: ``load_ignore_keys = ["state/rank_zero_seed", "rng"]`` would reset all randomness when
loading the checkpoint.
If a callable, it should take one argument which is the state_dict. The callable is free to arbitrarily modify
the state_dict before it is loaded.
(default: ``None``)
load_exclude_algorithms (List[str], optional): A list of algorithm names to exclude from loading.
By default, algorithms with `required_on_load=True` which were enabled when training the loaded
checkpoint are automatically applied unless they conflict with a user specified algorithm. These
algorithms often change the model, and not applying them could result in certain layers not having
weights loaded.
Example 1: ``load_exclude_algorithms = ["BlurPool"]`` would exclude BlurPool from loading.
Example 2: ``load_exclude_algorithms = ["FusedLayerNorm", "Alibi"]`` would exclude FusedLayerNorm and Alibi from loading.
(default: ``None``)
save_folder (str, optional): Format string for the folder where checkpoints are saved.
If ``None``, checkpoints will not be saved. Can also be a URI for S3 paths only.
In the case of an S3 URI, the appropriate `~.RemoteUploaderDownloader` object will be created
automatically. (default: ``None``)
.. seealso:: :class:`~.CheckpointSaver`
.. note::
For fine-grained control on checkpoint saving (e.g. to save different types of checkpoints
at different intervals), leave this parameter as ``None``, and instead pass
instance(s) of :class:`~.CheckpointSaver` directly as ``callbacks``.
save_filename (str, optional): A format string describing how to name checkpoints.
This parameter has no effect if ``save_folder`` is ``None``.
(default: ``"ep{epoch}-ba{batch}-rank{rank}.pt"``)
.. seealso:: :class:`~.CheckpointSaver`
save_latest_filename (str, optional): A format string for the name of a symlink
(relative to ``save_folder``) that points to the last saved checkpoint.
This parameter has no effect if ``save_folder`` is ``None``.
To disable symlinking, set this to ``None``. (default: ``"latest-rank{rank}.pt"``)
.. seealso:: :class:`~.CheckpointSaver`
save_overwrite (bool, optional): Whether existing checkpoints should be overridden.
This parameter has no effect if ``save_folder`` is None. (default: ``False``)
.. seealso:: :class:`~.CheckpointSaver`
save_interval (Time | str | int | (State, Event) -> bool): A :class:`Time`, time-string, integer (in epochs),
or a function that takes (state, event) and returns a boolean whether a checkpoint should be saved.
This parameter has no effect if ``save_folder`` is ``None``. (default: ``'1ep'``)
.. seealso:: :class:`~.CheckpointSaver`
save_weights_only (bool, optional): Whether to save only the model weights instead of the entire training
state. This parameter has no effect if ``save_folder`` is ``None``. (default: ``False``)
.. seealso:: :class:`~.CheckpointSaver`
save_num_checkpoints_to_keep (int, optional): The number of checkpoints to keep locally. The oldest checkpoints
are removed first. Set to ``-1`` to keep all checkpoints locally. (default: ``-1``)
Checkpoints will be removed after they have been uploaded. For example, when this callback
is used in conjunction with the :class:`.RemoteUploaderDownloader`, set this
parameter to ``0`` to immediately delete checkpoints from the local disk after they have been uploaded to
the object store.
This parameter only controls how many checkpoints are kept locally; checkpoints are not deleted from
remote file systems.
autoresume (bool, optional): Whether or not to enable autoresume, which allows for stopping and resuming
training. This allows use of spot instances, as the training run is now fault tolerant. This parameter
requires ``save_folder`` and ``run_name`` to be specified and ``save_overwrite`` to be ``False``.
(default: ``False``)
When enabled, the save_folder is checked for checkpoints of the format ``"{save_folder}/{save_latest_filename}"``,
which are loaded to continue training. If no local checkpoints are found, each logger is checked for potential
remote checkpoints named ``"{save_folder}/{save_latest_filename}"``. Finally, if no logged checkpoints are found, ``load_path`` is
used to load a checkpoint if specified. This should only occur at the start of a run using autoresume.
For example, to run a fine-tuning run on a spot instance, ``load_path`` would be set to the original
weights and an object store logger would be added. In the original run, ``load_path`` would be used
to get the starting checkpoint. For any future restarts, such as due to the spot instance being killed,
the loggers would be queried for the latest checkpoint, which would be downloaded from the object store and
used to resume training.
deepspeed_config (Dict[str, Any], optional): Configuration for DeepSpeed, formatted as a JSON
according to `DeepSpeed's documentation <https://www.deepspeed.ai/docs/config-json/>`_. (default: ``None``)
To use DeepSpeed with default values, set to the empty dictionary ``{}``.
To disable DeepSpeed (the default), set to ``None``.
fsdp_config (Dict[str, Any], optional): Configuration for FSDP.
See :doc:`FSDP Documentation </notes/distributed_training>` for more details.
To use FSDP with default values, set to the empty dictionary ``{}``. To
disable FSDP, set to ``None``. (default: ``None``)
device (Device | str, optional): The device to use for training, which can be ``'cpu'``, ``'gpu'``,
``'tpu'``, or ``'mps'``. (default: ``None``)
The default behavior sets the device to ``'gpu'`` if CUDA is available, and otherwise ``'cpu'``.
precision (Precision | str, optional): Numerical precision to use for training. One of ``fp32``, ``amp_bf16``
or ``amp_fp16`` (recommended). (default: ``Precision.FP32`` if training on CPU; ``Precision.AMP_FP16`` if
training on GPU)
device_train_microbatch_size (Union[int, str], optional): The number of samples to process on each device per
microbatch during training. Gradients are summed over the microbatches per device. If set to ``auto``,
dynamically decreases device_train_microbatch_size if microbatch is too large for GPU. (default: ``None``)
.. note:: This is implemented by taking the batch yielded by the ``train_dataloader`` and splitting
it into sections of size ``device_train_microbatch_size``. If the batch size of the dataloader
is not divisible by ``device_train_microbatch_size``, the last section will be potentially smaller.
seed (int, optional): The seed used in randomization. If ``None``, then a random seed
will be created. (default: ``None``)
.. note:: In order to get reproducible results, call the
:func:`.seed_all` function at the start of your script with the seed
passed to the trainer. This will ensure any initialization done before the trainer init
(ex. model weight initialization) also uses the provided seed.
.. seealso:: :mod:`composer.utils.reproducibility` for more details on reproducibility.
deterministic_mode (bool, optional): Run the model deterministically. (default: ``False``)
.. note:: This is an experimental feature. Performance degradations expected. Certain Torch modules may
not have deterministic implementations, which will result in a crash.
.. note:: In order to get reproducible results, call the
:func:`.configure_deterministic_mode` function at the start of your script.
This will ensure any initialization done before the trainer init also runs deterministically.
.. seealso:: :mod:`composer.utils.reproducibility` for more details on reproducibility.
dist_timeout (float, optional): Timeout, in seconds, for initializing the distributed process group.
(default: ``1800.0``)
ddp_sync_strategy (str | DDPSyncStrategy, optional): The strategy to use for synchronizing gradients.
Leave unset to let the trainer auto-configure this. See :class:`.DDPSyncStrategy`
for more details.
profiler (Profiler, optional): The profiler, if profiling should be enabled. (default: ``None``)
.. seealso::
See the :doc:`Profiling Guide </trainer/performance_tutorials/profiling>` for
additional information.
python_log_level (str, optional): The Python log level to use for log statements in the :mod:`composer`
module. (default: ``None``). If it is ``None``, python logging will not be configured (i.e.
``logging.basicConfig`` won't be called).
.. seealso:: The :mod:`logging` module in Python.
Attributes:
state (State): The :class:`.State` object used to store training state.
evaluators (List[Evaluator]): The :class:`.Evaluator` objects to use for validation
during training.
logger (Logger): The :class:`.Logger` used for logging.
engine (Engine): The :class:`.Engine` used for running callbacks and algorithms.
"""
def __init__(
self,
*,
# The Model
model: ComposerModel,
# Train Dataloader
train_dataloader: Optional[Union[Iterable, DataSpec, Dict[str, Any]]] = None,
train_dataloader_label: str = 'train',
train_subset_num_batches: int = -1,
# Stopping Condition
max_duration: Optional[Union[int, str, Time]] = None,
# Algorithms
algorithms: Optional[Union[Algorithm, Sequence[Algorithm]]] = None,
# Engine Pass Registration
algorithm_passes: Optional[Union[AlgorithmPass, Tuple[AlgorithmPass, int],
Sequence[Union[AlgorithmPass, Tuple[AlgorithmPass, int]]]]] = None,
# Optimizers and Scheduling
optimizers: Optional[torch.optim.Optimizer] = None,
schedulers: Optional[Union[ComposerScheduler, PyTorchScheduler, Sequence[Union[ComposerScheduler,
PyTorchScheduler]]]] = None,
scale_schedule_ratio: float = 1.0,
step_schedulers_every_batch: Optional[bool] = None,
# Evaluators
eval_dataloader: Optional[Union[Iterable, DataSpec, Evaluator, Sequence[Evaluator]]] = None,
eval_interval: Union[int, str, Time, Callable[[State, Event], bool]] = 1,
eval_subset_num_batches: int = -1,
# Callbacks and Logging
callbacks: Optional[Union[Callback, Sequence[Callback]]] = None,
loggers: Optional[Union[LoggerDestination, Sequence[LoggerDestination]]] = None,
run_name: Optional[str] = None,
progress_bar: bool = True,
log_to_console: bool = False,
console_stream: Union[str, TextIO] = 'stderr',
console_log_interval: Union[int, str, Time] = '1ba',
log_traces: bool = False,
auto_log_hparams: bool = False,
# Load Checkpoint
load_path: Optional[str] = None,
load_object_store: Optional[Union[ObjectStore, LoggerDestination]] = None,
load_weights_only: bool = False,
load_strict_model_weights: bool = False,
load_progress_bar: bool = True,
load_ignore_keys: Optional[Union[List[str], Callable[[Dict], None]]] = None,
load_exclude_algorithms: Optional[List[str]] = None,
# Save Checkpoint
save_folder: Optional[str] = None,
save_filename: str = 'ep{epoch}-ba{batch}-rank{rank}.pt',
save_latest_filename: Optional[str] = 'latest-rank{rank}.pt',
save_overwrite: bool = False,
save_interval: Union[str, int, Time, Callable[[State, Event], bool]] = '1ep',
save_weights_only: bool = False,
save_num_checkpoints_to_keep: int = -1,
# Graceful Resumption
autoresume: bool = False,
# DeepSpeed
deepspeed_config: Optional[Dict[str, Any]] = None,
fsdp_config: Optional[Dict[str, Any]] = None,
# System/Numerics
device: Optional[Union[str, Device]] = None,
precision: Optional[Union[str, Precision]] = None,
device_train_microbatch_size: Optional[Union[int, str]] = None,
# Reproducibility
seed: Optional[int] = None,
deterministic_mode: bool = False,
# Distributed Training
dist_timeout: float = 1800.0,
ddp_sync_strategy: Optional[Union[str, DDPSyncStrategy]] = None,
# Profiling
profiler: Optional[Profiler] = None,
# Python logging
python_log_level: Optional[str] = None,
):
self.auto_log_hparams = auto_log_hparams
self.python_log_level = python_log_level
if self.python_log_level is not None:
logging.basicConfig(
# Example of format string
# 2022-06-29 11:22:26,152: rank0[822018][MainThread]: INFO: composer.trainer.trainer: Using precision Precision.FP32
# Including the PID and thread name to help with debugging dataloader workers and callbacks that spawn background
# threads / processes
format=
f'%(asctime)s: rank{dist.get_global_rank()}[%(process)d][%(threadName)s]: %(levelname)s: %(name)s: %(message)s'
)
logging.getLogger('composer').setLevel(self.python_log_level.upper())
algorithms = list(ensure_tuple(algorithms))
# Device
device = get_device(device)
# Determine whether DeepSpeed and FSDP are enabled
self.deepspeed_config = deepspeed_config
self.fsdp_config = fsdp_config
self.deepspeed_enabled = self.deepspeed_config is not None
self.fsdp_enabled = self.fsdp_config is not None
# Precision
if precision is None:
precision = Precision.AMP_FP16 if isinstance(device, DeviceGPU) else Precision.FP32
elif isinstance(precision, str):
precision = Precision(precision)
_validate_precision(precision, device)
# Distributed
if self.deepspeed_enabled or self.fsdp_enabled or dist.get_world_size() > 1:
# Deepspeed and FSDP both require torch.distributed to be initialized, even if the world size is 1
# And torch.distributed is always required for multi-rank training
dist.initialize_dist(device, dist_timeout)
# Handle FSDP sharding
if self.fsdp_config is not None:
prepare_fsdp_module(model, optimizers, self.fsdp_config, precision)
# Reproducibility
rank_zero_seed, seed = _distribute_and_get_random_seed(seed, device)
# If hparams is used to create the Trainer this function is called twice
# which is okay because all runs with the hparams codepath will do this
reproducibility.seed_all(seed)
if deterministic_mode:
reproducibility.configure_deterministic_mode()
# Optimizers and Schedulers
if not optimizers:
optimizers = DecoupledSGDW(model.parameters(), lr=0.1)
# hard-coding the optimizer in the warning, as repr(optimizers) would print an annoying, multi-line warning
warnings.warn(('No optimizer was specified. Defaulting to '
f"{type(optimizers).__name__}(lr={optimizers.defaults['lr']})"))
num_optimizers = len(ensure_tuple(optimizers))
if num_optimizers != 1:
raise NotImplementedError(f'Only one optimizer is supported; found {num_optimizers} optimizers')
# Move the model and optimizers to the device
if not (self.deepspeed_enabled or self.fsdp_enabled):
# check if model is already on tpu
if isinstance(device, DeviceTPU) and 'xla' not in str(next(model.parameters()).device):
raise ValueError(
'Use model.to(xm.xla_device()) to set the model to the TPU before providing to the trainer.')
else:
model = device.module_to_device(model)
# Move any remaining optimizer parameters onto the device
# It is possible that optimizer initialize created some internal tensors on CPU
# that need to be moved onto GPU.
optimizers = map_collection(optimizers, device.optimizer_to_device)
# Microbatching
auto_microbatching = _is_auto_microbatching(device_train_microbatch_size, device=device)
if auto_microbatching and profiler:
raise ValueError("`device_train_microbatch_size='auto'` is not compatible with the profiler. It is "
"recommended to run a mini-run with `device_train_microbatch_size='auto'` to identify "
'the optimal device_train_microbatch_size value and then manually specify that in a '
'second run with profiler.')
# If auto_microbatching is True or `device_train_microbatch_size` is not specified, the microbatch size
# will be determined when dataloader is specified. train_dataloader is parsed after `Event.INIT` or in
# fit()
device_train_microbatch_size = _get_initial_device_train_microbatch_size(device_train_microbatch_size,
auto_microbatching, None)
assert not isinstance(device_train_microbatch_size, str)
# Run Name
if run_name is None:
if autoresume:
raise ValueError('When autoresume=True, the `run_name` must be specified.')
run_name = _generate_run_name()
log.info('Run name: %s', run_name)
# Create the State
self.state = State(
rank_zero_seed=rank_zero_seed,
algorithms=algorithms,
model=model,
device=device,
callbacks=callbacks,
device_train_microbatch_size=device_train_microbatch_size,
auto_microbatching=auto_microbatching,
precision=precision,
optimizers=optimizers,
run_name=run_name,
deepspeed_config=deepspeed_config,
fsdp_config=fsdp_config,
)
# Profiler
if profiler is not None:
warnings.warn('The profiler is enabled. Using the profiler adds additional overhead when training.')
self.state.profiler = profiler
self.state.profiler.bind_to_state(self.state)
# Console Logging
loggers = list(ensure_tuple(loggers))
if progress_bar and log_to_console:
warnings.warn(
'Setting both `progress_bar` and `log_to_console` to True is not recommended and will '
'lead to duplicate logs and weird formatting issues. Please set one of them to False for a better logging experience.'
)
if any(isinstance(x, ProgressBarLogger) for x in loggers):
warnings.warn(
Warning((
f'Specifying the {ProgressBarLogger.__name__} via `loggers` is not recommended as '
'any values set for the following Trainer arguments will be ignored: `progress_bar`, `console_stream`, or `log_traces`. '
'The recommended way of enabling a progress bar is to set `progress_bar` to True instead of '
f'constructing a {ProgressBarLogger.__name__} instance.')))
else:
if progress_bar:
loggers.append(ProgressBarLogger(stream=console_stream, log_traces=log_traces))
# Console Logging
if any(isinstance(x, ConsoleLogger) for x in loggers):
warnings.warn(
Warning((
f'Specifying the {ConsoleLogger.__name__} via `loggers` is not recommended as '
'any values set for the following Trainer arguments will be ignored: `log_to_console`, `console_stream`, `log_traces`, and `console_log_interval`. '
'The recommended way of enabling console logging is to set `log_to_console` to True instead of '
f'constructing a {ConsoleLogger.__name__} instance.')))
else:
if log_to_console:
loggers.append(
ConsoleLogger(stream=console_stream, log_interval=console_log_interval, log_traces=log_traces))
if save_folder is not None:
remote_ud = maybe_create_remote_uploader_downloader_from_uri(save_folder, loggers)
if remote_ud is not None:
loggers.append(remote_ud)
# Logger
self.logger = Logger(state=self.state, destinations=loggers)
if save_latest_filename is not None:
remote_ud_has_format_string = [
isinstance(logger_destination, RemoteUploaderDownloader) and
logger_destination.file_path_format_string != '{remote_file_name}'
for logger_destination in self.logger.destinations
]
if any(remote_ud_has_format_string):
raise ValueError(
'Specifying a `file_path_format_string` to a `RemoteUploaderDownloader` is not currently supported while using `save_latest_filename`. '
'Please specify the path formatting via `save_folder`, `save_filename`, and `save_latest_filename`')
# Callbacks
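        # Logger destinations are themselves callbacks; prepending them keeps them ahead of user callbacks in
        # `state.callbacks`.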
self.state.callbacks[:] = list(cast(List[Callback], loggers)) + self.state.callbacks
# Checkpoint Saving
self._checkpoint_saver = None
latest_remote_file_name = None
if save_folder is not None:
_, _, parsed_save_folder = parse_uri(save_folder)
            # If the user passes a URI with s3:// and a bucket name but no other path, we assume they just want
            # their checkpoints saved directly in their bucket.
if parsed_save_folder == '':
folder = '.'
remote_file_name = save_filename
latest_remote_file_name = save_latest_filename
# If they actually specify a path, then we use that for their local save path
# and we prefix save_filename with that path for remote_file_name.
else:
folder = parsed_save_folder
remote_file_name = str(Path(parsed_save_folder) / Path(save_filename))
if save_latest_filename is not None:
latest_remote_file_name = str(Path(parsed_save_folder) / Path(save_latest_filename))
else:
latest_remote_file_name = None
self._checkpoint_saver = CheckpointSaver(
folder=folder,
filename=save_filename,
remote_file_name=remote_file_name,
latest_filename=save_latest_filename,
latest_remote_file_name=latest_remote_file_name,
overwrite=save_overwrite,
weights_only=save_weights_only,
save_interval=save_interval,
num_checkpoints_to_keep=save_num_checkpoints_to_keep,
)
self.state.callbacks.append(self._checkpoint_saver)
# The Engine
self.engine = Engine(state=self.state, logger=self.logger, algorithm_passes=algorithm_passes)
# Set the logger
self.state.model.logger = self.logger
# Run Event.INIT
self.engine.run_event(Event.INIT)
# Log hparams.
if self.auto_log_hparams:
self.local_hparams = extract_hparams(locals())
self.logger.log_hyperparameters(self.local_hparams)
# Log gpus and nodes.
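        # Note: `lstrip('Device')` strips a character *set* rather than a prefix, but it yields the intended
        # short name for the current device classes (e.g. DeviceGPU -> 'gpu', DeviceCPU -> 'cpu').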
device_name = self.state.device.__class__.__name__.lstrip('Device').lower()
self.logger.log_hyperparameters({
'num_nodes': int(dist.get_world_size() / dist.get_local_world_size()),
f'num_{device_name}s_per_node': dist.get_local_world_size(),
'node_name': os.environ.get('NODENAME', 'unknown because NODENAME environment variable not set')
})
if not isinstance(self.state.model, ComposerModel):
raise ValueError('Provided model should be a subclass of ComposerModel.')
        # After running Event.INIT, set the "optional" elements of state that could be passed in on FIT instead of INIT.
# Setting these attributes here ensures that algorithms do not depend on unavailable attributes during Event.INIT
# Metrics and Evaluators
# Set state.train_metrics and state.eval_metrics here to allow callbacks / algs to potentially
# change the model, which could change what metrics are computed
self.state.train_metrics = deepcopy(self.state.model.get_metrics(is_train=True))
self.state.eval_metrics = {}
if eval_dataloader is None:
evaluators: List[Evaluator] = []
else:
eval_metrics = deepcopy(self.state.model.get_metrics(is_train=False))
model_metric_names = [str(k) for k in eval_metrics.keys()]
evaluators = [
ensure_evaluator(evaluator, default_metric_names=model_metric_names)
for evaluator in ensure_tuple(eval_dataloader)
]
# match metric names to model metrics
self.state.eval_metrics = {
evaluator.label: _filter_metrics(eval_metrics, evaluator.metric_names) for evaluator in evaluators
}
_set_evaluator_interval_and_subset_num_batches(
evaluators=evaluators,
eval_interval=eval_interval,
subset_num_batches=eval_subset_num_batches,
)
for evaluator in evaluators:
validate_eval_automicrobatching(evaluator.auto_microbatching, self.state.device)
if len(evaluators) == 0:
if eval_subset_num_batches != -1:
raise ValueError(
f'Specifying `eval_subset_num_batches={eval_subset_num_batches}` without an `eval_dataloader` '
'has no effect. If trying to run an evaluator, make sure `eval_dataloader` is specified. '
'Otherwise, set `eval_subset_num_batches` to default value -1.')
if eval_interval != 1:
raise ValueError(
f'Specifying `eval_interval={eval_interval}` without an `eval_dataloader` has no effect. '
'If trying to run an evaluator, make sure `eval_dataloader` is specified. Otherwise, '
'set `eval_interval` to default value 1.')
self.state.evaluators = evaluators
# Train Dataloader
self._train_data_spec = None if train_dataloader is None else ensure_data_spec(train_dataloader)
if self._train_data_spec is not None:
self.state.set_dataloader(self._train_data_spec.dataloader, train_dataloader_label,
train_subset_num_batches)
if isinstance(self.state.device, DeviceTPU):
self.state.train_dataloader = pl.MpDeviceLoader(self.state.dataloader, xm.xla_device())
else:
self.state.train_dataloader = self.state.dataloader
self.state.device_train_microbatch_size = _get_initial_device_train_microbatch_size(
self.state.device_train_microbatch_size, self.state.auto_microbatching, self.state.train_dataloader)
# Max Duration
if max_duration is not None:
self.state.max_duration = ensure_time(max_duration, TimeUnit.EPOCH)
self.logger.log_hyperparameters({'rank_zero_seed': rank_zero_seed})
# Schedulers
self.state.schedulers = _compile_schedulers(schedulers, self.state, scale_schedule_ratio)
if scale_schedule_ratio != 1.0:
if len(self.state.schedulers) == 0:
raise ValueError('Specifying `scale_schedule_ratio` without `schedulers` has no effect.')
self.state.max_duration = _scale_max_duration_by_ssr(scale_schedule_ratio, self.state.max_duration)
if step_schedulers_every_batch is None:
self._scheduler_step_frequency = _get_default_scheduler_frequency(schedulers)
else:
self._scheduler_step_frequency = TimeUnit.BATCH if step_schedulers_every_batch else TimeUnit.EPOCH
# Some algorithms require specific settings
self._backwards_create_graph = any(map(lambda x: x.backwards_create_graph, self.state.algorithms))
self._find_unused_parameters = any(map(lambda x: x.find_unused_parameters, self.state.algorithms))
self._ddp_sync_strategy = _get_ddp_sync_strategy(ddp_sync_strategy, self._find_unused_parameters)
# If using DDP or DeepSpeed, we need to wrap the ComposerModel
# But store a reference to the original model for functions like `eval_forward`, `get_metrics`, etc.
self._original_model = self.state.model
if not isinstance(self._original_model, ComposerModel):
raise ValueError('self.state.model must be a subclass of ComposerModel.')
# Configure Deepspeed
if self.state.deepspeed_config is not None:
for callback in self.state.callbacks:
if isinstance(callback, OptimizerMonitor):
                    raise ValueError('OptimizerMonitor is not supported with DeepSpeed because DeepSpeed clears '
                                     'the gradients in the last call to .backward(). See '
                                     'https://github.com/microsoft/DeepSpeed/issues/2329 for more details.')
try:
import deepspeed
except ImportError as e:
raise MissingConditionalImportError(
extra_deps_group='deepspeed',
conda_package='deepspeed>=0.5.5',
conda_channel=None,
) from e
self.state.deepspeed_config = _parse_deepspeed_config(self.state.deepspeed_config, state=self.state)
optimizer = ensure_tuple(self.state.optimizers)[0]
log.debug('Initializing deepspeed')
(self.state.model, self.state.optimizers, _, _) = deepspeed.initialize(config=self.state.deepspeed_config,
model=self.state.model,
optimizer=optimizer)
# Since the DeepSpeed ZeRO optimizer does not inherit torch.optim.Optimizer, the schedulers must be
            # compiled and bound BEFORE DeepSpeed initialization. However, this is OK, as the DeepSpeed ZeRO
# optimizer uses the same underlying parameter groups as the original optimizer. See
# * https://github.com/microsoft/DeepSpeed/blob/fee73135980e78f8be7e1a3ff556751623ef6aaa/deepspeed/runtime/zero/stage_1_and_2.py#L1911-L1917
# * https://github.com/microsoft/DeepSpeed/blob/ef17c89570ceae5b26a5f886e9d8cd0941afc0ac/deepspeed/runtime/zero/stage3.py#L2532-L2538
# In addition, the deepspeed engine is responsible for serializing the model and optimizer state,
# so these attributes should not be serialized with the composer state.
if 'model' in self.state.serialized_attributes:
self.state.serialized_attributes.remove('model')
if 'optimizers' in self.state.serialized_attributes:
self.state.serialized_attributes.remove('optimizers')
# If using DeepSpeed, the model must be loaded from checkpoint after the engine has been
# initialized, but if using PyTorch DDP, the model must be loaded before it is wrapped with
# DDP.
# suppressing GradScaler warnings as they are always created
# self._use_grad_scaling() will raise a RuntimeError if grad scaling is not available when it is required
warnings.filterwarnings(action='ignore', message='torch.cuda.amp.GradScaler')
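        # Use the closure-aware grad scaler only when optimizers will be stepped with closures; otherwise the
        # stock GradScaler is sufficient.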
self.state.scaler = ClosureGradScaler() if self._use_closures() else GradScaler()
# suppressing FSDP warning when auto grad accum exits the forward pass before completing
warnings.filterwarnings(action='ignore', message='Forward order differs from that of the first iteration')
# Load Checkpoint
self._rng_state = None
# If autoresume is enabled, first check for existing checkpoints to load
if autoresume:
log.info('Searching for a previous checkpoint to autoresume')
if save_folder is None:
raise ValueError('The `save_folder` must be specified when autoresume is enabled.')
if save_overwrite:
raise ValueError(
'The flag `save_overwrite` must be False when autoresume is enabled as autoresume always loads the '
'latest existing checkpoint in `save_folder`.')
if save_latest_filename is None:
raise ValueError(
'The `save_latest_filename` must be specified so autoresume knows where to load checkpoints from.')
if run_name is None:
raise ValueError(
'The `run_name` must be specified when using autoresume so Event.INIT is run with the correct run name.'
)
remote_ud_has_multiple_concurrent_uploads = [
isinstance(logger_destination, RemoteUploaderDownloader) and
logger_destination._num_concurrent_uploads != 1 for logger_destination in self.logger.destinations
]
if any(remote_ud_has_multiple_concurrent_uploads):
raise ValueError(
                    'Multiple concurrent uploads are not currently supported when using autoresume. Please set `num_concurrent_uploads` to 1 '
'for all `RemoteUploaderDownloader` instances.')
assert latest_remote_file_name is not None
autoresume_checkpoint_path = self._get_autoresume_checkpoint(
save_folder=save_folder,
save_latest_filename=save_latest_filename,
save_latest_remote_file_name=latest_remote_file_name,
loggers=loggers,
load_progress_bar=load_progress_bar)
# Found latest checkpoint path, load that instead
if autoresume_checkpoint_path:
load_path = autoresume_checkpoint_path
# Disable object_store since _get_autoresume_checkpoint will download the checkpoint
                # to the save folder, if needed.
load_object_store = None
# Disable `load_weights_only` since this applies only to the initial training run
load_weights_only = False
log.info('Autoresuming training from checkpoint')
else:
log.info('No previous autoresume checkpoint found')
# Actually load the checkpoint from potentially updated arguments
if load_path is not None:
if load_object_store is None:
load_object_store = maybe_create_object_store_from_uri(load_path)
if isinstance(load_object_store, WandBLogger):
import wandb
if wandb.run is None:
load_object_store.init(self.state, self.logger)
_, _, parsed_load_path = parse_uri(load_path)
self._rng_state = checkpoint.load_checkpoint(
state=self.state,
logger=self.logger,
path=parsed_load_path,
object_store=load_object_store,
load_weights_only=load_weights_only,
strict_model_weights=load_strict_model_weights,
progress_bar=load_progress_bar,
ignore_keys=load_ignore_keys,
exclude_algorithms=load_exclude_algorithms,
algorithm_passes=self.engine.algorithm_passes,
)
self.state.run_name = run_name
self.engine.run_event(Event.AFTER_LOAD)
# reseed here. This helps with a couple of issues:
# 1. rng state may change at Event.INIT/Event.AFTER_LOAD. For example, if an algorithm
# creates a new module and module parameters are initialized randomly, rng state will
# change. This reseeding nullifies such effects.
# 2. While resuming from a checkpoint, we want to spin dataloader and bring it back to the
# same state as at the time of the checkpoint. Therefore, spinning needs to start from the
# same rng state as in the original run.
log.info(f'Setting seed to {self.state.seed}')
reproducibility.seed_all(self.state.seed)
if not (self.deepspeed_enabled or self.fsdp_enabled) and dist.get_world_size() > 1:
# Only wrap the module if required
self.state.model = prepare_ddp_module(self.state.model, self._find_unused_parameters)
@property
def saved_checkpoints(self) -> List[str]:
"""Returns list of saved checkpoints.
.. note::
            For DeepSpeed, which saves a file on every rank, only the files corresponding to the process's rank
will be shown.
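
        For example, a minimal sketch, assuming the trainer was constructed with a ``save_folder`` and has
        already saved at least one checkpoint (the filenames below are hypothetical and depend on
        ``save_filename`` and ``save_interval``):

        .. code-block:: python

            trainer.fit()
            print(trainer.saved_checkpoints)  # e.g. ['ep1-ba100-rank0.pt']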
"""
if self._checkpoint_saver is None:
return []
return self._checkpoint_saver.saved_checkpoints
def _try_checkpoint_download(self, latest_checkpoint_path: str, save_latest_remote_file_name: str,
loggers: Sequence[LoggerDestination], load_progress_bar: bool) -> None:
"""Attempts to download the checkpoint from the logger destinations."""
log.debug(
f'Trying to download {save_latest_remote_file_name} to {latest_checkpoint_path} on rank {dist.get_global_rank()}'
)
for logger in loggers:
try:
# Fetch from logger. If it succeeds, stop trying the rest of the loggers
get_file(
path=save_latest_remote_file_name,
destination=latest_checkpoint_path,
object_store=logger,
overwrite=True,
progress_bar=load_progress_bar,
)
break
except (NotImplementedError, FileNotFoundError):
log.info(f'Checkpoint not found in: {logger}')
# Ignore errors caused by no checkpoint saved with logger
pass
def _get_autoresume_checkpoint(
self,
save_folder: str,
save_latest_filename: str,
save_latest_remote_file_name: str,
loggers: Sequence[LoggerDestination],
load_progress_bar: bool,
) -> Optional[str]:
"""Determines the load path when using autoresume.
First, check the ``save_folder`` for the latest checkpoint.
If no latest checkpoint is found locally, then check each logger for the latest checkpoint, and download
it to the ``save_folder``.
Returns:
Optional[str]: The path to the latest checkpoint, if found, otherwise None.
"""
save_latest_filename = format_name_with_dist(save_latest_filename, self.state.run_name)
save_folder = format_name_with_dist(save_folder, self.state.run_name)
save_latest_remote_file_name = format_name_with_dist(save_latest_remote_file_name, self.state.run_name)
latest_checkpoint_path = os.path.join(save_folder, save_latest_filename)
log.info(
f'Looking for autoresume checkpoint: {save_latest_remote_file_name} (remote), {latest_checkpoint_path} (local)'
)
if self.deepspeed_enabled or self.state.fsdp_sharded_state_dict_enabled:
# If latest checkpoint is not saved locally, try to fetch from loggers
if not os.path.exists(latest_checkpoint_path):
log.debug(f'Attempting to download the checkpoint on to rank {dist.get_global_rank()}')
os.makedirs(save_folder, exist_ok=True)
self._try_checkpoint_download(latest_checkpoint_path, save_latest_remote_file_name, loggers,
load_progress_bar)
# List of whether the checkpoint exists on each rank
latest_checkpoint_exists = dist.all_gather_object(os.path.exists(latest_checkpoint_path))
if all(latest_checkpoint_exists): # All paths exist, so return the path.
return latest_checkpoint_path
# Require all ranks to have their own local checkpoint if we wish to restore from it for
# deepspeed or fsdp + sharding
elif any(latest_checkpoint_exists): # Some but not all exist, which is very bad.
missing_ranks = [n for (n, exist) in enumerate(latest_checkpoint_exists) if not exist]
mode = 'Deepspeed' if self.deepspeed_enabled else 'FSDP sharding'
raise RuntimeError(f'{mode} was enabled, but checkpoints missing on ranks: {missing_ranks}')
else: # None of the paths exists, so no autoresume necessary.
return None
else:
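            # Monolithic (non-sharded) checkpoints: only one copy is needed per node, so local rank 0 downloads
            # the file (if necessary) and the remaining ranks wait before verifying that it exists.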
# broadcast the local checkpoint path to all ranks
latest_checkpoint_path_list = [os.path.abspath(latest_checkpoint_path)]
dist.broadcast_object_list(latest_checkpoint_path_list, src=0)
latest_checkpoint_path = latest_checkpoint_path_list[0]
# broadcast the remote checkpoint path to all ranks
save_latest_remote_file_name_list = [save_latest_remote_file_name]
dist.broadcast_object_list(save_latest_remote_file_name_list, src=0)
save_latest_remote_file_name = save_latest_remote_file_name_list[0]
# try to download the checkpoint on local rank 0 of all nodes
if dist.get_local_rank() == 0 and not os.path.exists(latest_checkpoint_path):
log.debug(f'Attempting to download the checkpoint {save_latest_remote_file_name} on to all nodes')
os.makedirs(save_folder, exist_ok=True)
self._try_checkpoint_download(latest_checkpoint_path, save_latest_remote_file_name, loggers,
load_progress_bar)
signal_file_path = os.path.join(os.path.dirname(latest_checkpoint_path),
'.local_rank0_completed_autoresume')
if dist.get_local_rank() == 0:
with open(signal_file_path, 'wb') as f:
f.write(b'local_rank0_completed_autoresume')
# avoid the collective call until the local rank zero has finished trying to download the checkpoint
# so that we don't timeout for large downloads
with dist.local_rank_zero_download_and_wait(signal_file_path):
dist.barrier()
# At this point the rank 0 filepath should exist on all ranks if the download succeeded
# list of whether the checkpoint exists on each rank
latest_checkpoint_exists = dist.all_gather_object(os.path.exists(latest_checkpoint_path))
log.debug(
f'Checkpoint {latest_checkpoint_path} exists on rank {dist.get_global_rank()}? {os.path.exists(latest_checkpoint_path)}'
)
if not latest_checkpoint_exists[0]:
# If the checkpoint doesn't exist on rank 0, don't crash, so the initial autoresume run can succeed
return None
elif not all(latest_checkpoint_exists):
raise RuntimeError('Downloading the checkpoint to all nodes failed')
return latest_checkpoint_path
def fit(
self,
*,
# Train Dataloader
train_dataloader: Optional[Union[Iterable, DataSpec, Dict[str, Any]]] = None,
train_dataloader_label: str = 'train',
train_subset_num_batches: Optional[int] = None,
# Timing
duration: Optional[Union[int, str, Time[int]]] = None,
reset_time: bool = False,
# Schedulers
schedulers: Optional[Union[ComposerScheduler, PyTorchScheduler, Sequence[Union[ComposerScheduler,
PyTorchScheduler]]]] = None,
scale_schedule_ratio: float = 1.0,
step_schedulers_every_batch: Optional[bool] = None,
# Evaluation
eval_dataloader: Optional[Union[Iterable, DataSpec, Evaluator, Sequence[Evaluator]]] = None,
eval_subset_num_batches: int = -1,
eval_interval: Union[int, str, Time, Callable[[State, Event], bool]] = 1,
# Numerics
device_train_microbatch_size: Optional[Union[int, str]] = None,
precision: Optional[Union[str, Precision]] = None,
):
"""Train the model.
The Composer :class:`.Trainer` supports multiple calls to :meth:`.fit`. Any arguments specified during
the call to :meth:`.fit` will override the values specified when constructing the :class:`.Trainer`.
All arguments are optional, with the following exceptions:
* The ``train_dataloader`` must be specified here if not provided when constructing the :class:`.Trainer`.
* The ``duration`` must be specified here if not provided when constructing the :class:`.Trainer`,
or if this is a subsequent call to :meth:`.fit`.
For example, the following are equivalent:
.. testcode::
# The `train_dataloader` and `duration` can be specified
# when constructing the Trainer
trainer_1 = Trainer(
model=model,
train_dataloader=train_dataloader,
max_duration="1ep",
)
trainer_1.fit()
# Or, these arguments can be specified on `fit()`
trainer_2 = Trainer(model)
trainer_2.fit(
train_dataloader=train_dataloader,
duration="1ep"
)
When invoking :meth:`.fit` for a subsequent time, either ``reset_time`` or ``duration`` must be specified.
Otherwise, it is ambiguous for how long to train.
* If ``reset_time`` is True, then :meth:`.fit` will train for the same amount of time as the previous
call (or for ``duration`` if that parameter is also specified). The :attr:`.State.timestamp` will be reset,
causing :class:`.ComposerScheduler` and :class:`.Algorithm` instances to start from the beginning, as if it
is a new training run. Model gradients, optimizer states, and native PyTorch schedulers will not be reset.
* If ``reset_time`` is False, then :meth:`.fit` will train for the amount of time specified by
``duration``. The :attr:`.State.max_duration` will be incremented by ``duration``.
For example:
.. testcode::
# Construct the trainer
trainer = Trainer(max_duration="1ep")
# Train for 1 epoch
trainer.fit()
assert trainer.state.timestamp.epoch == "1ep"
# Reset the time to 0, then train for 1 epoch
trainer.fit(reset_time=True)
assert trainer.state.timestamp.epoch == "1ep"
# Train for another epoch (2 epochs total)
trainer.fit(duration="1ep")
assert trainer.state.timestamp.epoch == "2ep"
# Train for another batch (2 epochs + 1 batch total)
# It's OK to switch time units!
trainer.fit(duration="1ba")
assert trainer.state.timestamp.epoch == "2ep"
assert trainer.state.timestamp.batch_in_epoch == "1ba"
# Reset the time, then train for 3 epochs
trainer.fit(reset_time=True, duration="3ep")
assert trainer.state.timestamp.epoch == "3ep"
Args:
train_dataloader (Iterable | DataSpec | Dict[str, Any], optional): See :class:`.Trainer`.
train_dataloader_label (str, optional): See :class:`.Trainer`.
train_subset_num_batches (int, optional): See :class:`.Trainer`.
reset_time (bool): Whether to reset the :attr:`.State.timestamp` to zero values. Defaults to False.
If ``True``, the timestamp will be zeroed out, causing :class:`.ComposerScheduler` and
:class:`.Algorithm` instances to start from the beginning, as if it is a new training run. The model
will be trained for ``duration``, if specified, or for :attr:`.State.max_duration`, which would have
been provided when constructing the :class:`.Trainer` or by a previous call to :meth:`.fit`.
.. note::
Model gradients, optimizer states, and native PyTorch schedulers will not be reset.
If ``False`` (the default), training time will be incremented from where the previous call to
:meth:`.fit` finished (or from zero, if a new training run).
The :attr:`~.State.max_duration` will be incremented by the ``duration`` parameter.
duration (Time[int] | str | int, optional): The duration to train. Can be an integer, which will be
interpreted to be epochs, a str (e.g. ``1ep``, or ``10ba``), or a :class:`.Time` object.
If ``reset_time`` is False (the default), then :attr:`.State.max_duration` will be converted
into the same units as this parameter (if necessary), and then the max duration incremented by the
value of this parameter.
If ``reset_time`` is True, then :attr:`.State.max_duration` will be set to this parameter.
optimizers (torch.optim.Optimizer | Sequence[torch.optim.Optimizer], optional): See :class:`.Trainer`.
schedulers (PyTorchScheduler | ComposerScheduler | Sequence[PyTorchScheduler | ComposerScheduler], optional): See :class:`.Trainer`.
scale_schedule_ratio (float, optional): See :class:`.Trainer`.
step_schedulers_every_batch (bool, optional): See :class:`.Trainer`.
eval_dataloader (Iterable | DataSpec | Evaluator | Sequence[Evaluator], optional): See :class:`.Trainer`.
eval_subset_num_batches (int, optional): See :class:`.Trainer`.
eval_interval (int | str | Time | (State, Event) -> bool, optional): See :class:`.Trainer`.
device_train_microbatch_size (int | str, optional): See :class:`.Trainer`.
precision (Precision | str, optional): See :class:`.Trainer`.
"""
# Train Dataloader
if train_dataloader is not None:
self._train_data_spec = ensure_data_spec(train_dataloader)
self.state.set_dataloader(self._train_data_spec.dataloader, train_dataloader_label)
self.state.train_dataloader = self.state.dataloader
self.state.device_train_microbatch_size = _get_initial_device_train_microbatch_size(
self.state.device_train_microbatch_size, self.state.auto_microbatching, self.state.train_dataloader)
if self._train_data_spec is None:
_raise_missing_argument_exception('train_dataloader')
if train_subset_num_batches is not None:
self.state.dataloader_len = train_subset_num_batches
# Reset Time
if reset_time:
self.state.timestamp = Timestamp()
# Max Duration
if duration is not None:
duration = ensure_time(duration, TimeUnit.EPOCH)
# Effectively increment the max duration (if not resetting the Time)
# or set the max_duration (if resetting the time -- self.state.timestamp.get(duration.unit) will be 0)
# It is important to set the duration, rather than incrementing it, as ``duration`` could be in
# different units than ``max_duration``
self.state.max_duration = duration + self.state.timestamp.get(duration.unit)
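            # Worked example (hypothetical numbers): if 2 epochs have already elapsed and duration='3ep',
            # max_duration becomes 3ep + 2ep = 5ep; with reset_time=True the timestamp is zero, so it is just 3ep.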
if self.state.max_duration is None:
_raise_missing_argument_exception('max_duration')
if self.state.dataloader_len is None and self.state.max_duration.unit == TimeUnit.EPOCH:
raise ValueError(
('max_duration cannot be specified in epochs when using an infinite dataloader. Please either '
'provide a dataloader with a length, specify max_duration in batches, samples, or tokens, or provide '
'train_subset_num_batches.'))
if self.state.max_duration <= self.state.timestamp.get(self.state.max_duration.unit) and not reset_time:
raise ValueError(
(f'The max_duration ({self.state.max_duration}) is less than or equal to the elapsed training duration '
f'({self.state.timestamp.get(self.state.max_duration.unit)}). No training would occur. '
'Please provide the `duration` or specify `reset_time=True` in Trainer.fit().'))
# Scale Schedule Ratio and Schedulers
if scale_schedule_ratio != 1.0:
            # Not scaling the schedulers if the ratio is 1.0 in case the scheduler cannot be scaled
# (e.g. a custom LambdaLR). However, since 1.0 implies no scaling, it is still possible
# to train with it.
self.state.max_duration = _scale_max_duration_by_ssr(scale_schedule_ratio, self.state.max_duration)
if schedulers is not None:
self.state.schedulers = _compile_schedulers(schedulers, self.state, scale_schedule_ratio)
if step_schedulers_every_batch is None:
self._scheduler_step_frequency = _get_default_scheduler_frequency(schedulers)
else:
self._scheduler_step_frequency = TimeUnit.BATCH if step_schedulers_every_batch else TimeUnit.EPOCH
else:
if scale_schedule_ratio != 1.0:
raise ValueError('Specifying `scale_schedule_ratio` without `schedulers` has no effect.')
if step_schedulers_every_batch is not None:
raise ValueError('Specifying `step_schedulers_every_batch` without `schedulers` has no effect.')
# Evaluators
if eval_dataloader is not None:
# Need to use the `original_model` rather than `state.model`, as `state.model`
# could be DDP / DeepSpeed wrapped.
eval_metrics = self._original_model.get_metrics(is_train=False)
metric_names = [str(k) for k in eval_metrics.keys()]
evaluators = [
ensure_evaluator(evaluator, default_metric_names=metric_names)
for evaluator in ensure_tuple(eval_dataloader)
]
# match metric names to model metrics
self.state.eval_metrics = {
evaluator.label: _filter_metrics(eval_metrics, evaluator.metric_names) for evaluator in evaluators
}
_set_evaluator_interval_and_subset_num_batches(
evaluators=evaluators,
eval_interval=eval_interval,
subset_num_batches=eval_subset_num_batches,
)
for evaluator in evaluators:
validate_eval_automicrobatching(evaluator.auto_microbatching, self.state.device)
if len(evaluators) == 0:
if eval_subset_num_batches != -1:
raise ValueError('Specifying `eval_subset_num_batches` without an `eval_dataloader` has no effect.')
if eval_interval != 1:
raise ValueError('Specifying `eval_interval` without an `eval_dataloader` has no effect.')
self.state.evaluators = evaluators
# Microbatching
if device_train_microbatch_size is not None:
self.state.auto_microbatching = _is_auto_microbatching(device_train_microbatch_size,
device=self.state.device)
if self.state.auto_microbatching and self.state.profiler:
raise ValueError("`device_train_microbatch_size='auto'` is not compatible with the profiler. It is "
"recommended to run a mini-run with `device_train_microbatch_size='auto'` to identify "
'the optimal device_train_microbatch_size value and then manually specify that in a '
'second run with profiler.')
self.state.device_train_microbatch_size = _get_initial_device_train_microbatch_size(
device_train_microbatch_size, self.state.auto_microbatching, self.state.train_dataloader)
# Precision
if precision is not None:
if Precision(precision) != self.state.precision:
if self.deepspeed_enabled:
raise ValueError('Changing the precision when using DeepSpeed is not supported')
precision = Precision(precision)
_validate_precision(precision, self.state.device)
self.state.precision = precision
# update scaler since precision was provided
self.state.scaler = ClosureGradScaler() if self._use_closures() else GradScaler()
self._train_loop()
def close(self):
"""Shutdown the trainer.
.. seealso:: :meth:`.Engine.close` for additional information.
"""
self.engine.close()
dist.barrier()
def _ensure_metrics_device_and_dtype(self, metrics: Dict[str, Metric]):
# HACK: DeepSpeed somehow manages to convert metric internal states to its own dtype. When
# running with FP16, this tends to result in overflows. Let's assume FP32 is good enough.
for name, metric in metrics.items():
# Safety check to ensure the metric and data are on the same device. Normally not
# needed because the metric is automatically on the same device as the model.
# See https://torchmetrics.readthedocs.io/en/latest/pages/overview.html for details.
metrics[name] = self.state.device.module_to_device(metric)
metric.set_dtype(torch.float32) # type: ignore
return metrics
def _compute_and_log_metrics(self, dataloader_label: str, metrics: Dict[str, Metric]):
"""Computes metrics, logs the results, and updates the state with the deep-copied metrics.
Args:
dataloader_label (str): The dataloader label.
metrics (Dict[str, Metric]): The metrics to compute.
"""
metrics = deepcopy(metrics)
# log computed metrics
computed_metrics = {}
for metric_name, metric in metrics.items():
computed_metrics[metric_name] = metric.compute()
self.logger.log_metrics(
{f'metrics/{dataloader_label}/{name}': val for (name, val) in computed_metrics.items()},)
# store metric instances
for metric_name, metric in metrics.items():
assert isinstance(metric, Metric)
if dataloader_label == 'train':
self.state.train_metrics[metric_name] = metric
self.state.train_metric_values[metric_name] = computed_metrics[metric_name]
else:
if dataloader_label not in self.state.eval_metrics:
self.state.eval_metrics[dataloader_label] = {}
self.state.eval_metrics[dataloader_label][metric_name] = metric
self.state.eval_metric_values[metric_name] = computed_metrics[metric_name]
def _spin_dataloaders(self):
"""Spin the dataloaders to restore sampler state.
        Only one batch must be loaded to seed the sampler's generator. Since only the first batch is being loaded, the
dataloader may not be completely iterated through.
"""
log.debug('Spinning the dataloaders')
# spin the evaluator dataloaders once to initialize its sampler deterministically
# so it does not affect any other RNG reads
eval_state = self.state.dataset_resumption.get('eval', {})
for evaluator in self.state.evaluators:
dataloader = evaluator.dataloader.dataloader
if isinstance(dataloader, DataLoader) and isinstance(dataloader.sampler, DistributedSampler):
dataloader.sampler.set_epoch(0)
if evaluator.label not in eval_state:
for _ in dataloader:
break
# spin the train dataloader's sampler to get to the state of the desired epoch
dataloader = self.state.dataloader
assert dataloader is not None, 'train dataloader is set on state after FIT_START'
if 'train' not in self.state.dataset_resumption:
for epoch in range(int(self.state.timestamp.epoch)):
if isinstance(dataloader, DataLoader) and isinstance(dataloader.sampler, DistributedSampler):
dataloader.sampler.set_epoch(epoch)
for _ in dataloader:
break
def _accumulate_time_across_ranks(
self,
num_samples: int,
num_tokens: int,
batch_time: datetime.timedelta,
) -> Tuple[int, int, datetime.timedelta]:
"""Accumulate the number of samples and tokens across ranks.
Returns a (num_samples, num_tokens, batch_time) tuple.
"""
# Samples and tokens should be summed
# Batch time should be the value from rank 0
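        # Hypothetical example: with 2 ranks each reporting 8 samples and 1024 tokens, and rank 0 measuring a
        # 0.5 s batch, this returns (16, 2048, 0.5 s) on every rank.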
sample_token_tensor = self.state.device.tensor_to_device(
torch.tensor([num_samples, num_tokens], dtype=torch.int))
dist.all_reduce(sample_token_tensor, reduce_operation='SUM')
batch_time_tensor = self.state.device.tensor_to_device(
torch.tensor([batch_time.total_seconds()], dtype=torch.float32))
dist.broadcast(batch_time_tensor, src=0)
batch_time = datetime.timedelta(seconds=batch_time_tensor[0].cpu().item())
return int(sample_token_tensor[0].cpu().item()), int(sample_token_tensor[1].cpu().item()), batch_time
def _train_loop(self) -> None:
"""Run training for the specified number of epochs and log results."""
# print training start
log.info('Using precision %s', self.state.precision)
self.logger.log_hyperparameters(
{'enabled_algorithms/' + algo.__class__.__name__: True for algo in self.state.algorithms})
assert self.state.dataloader is not None, 'dataloader is set in __init__() or fit()'
assert self._train_data_spec is not None, 'The train data spec is set in __init__() or fit()'
assert self.state.scaler is not None, 'scaler should have been set in __init__()'
self.engine.run_event(Event.FIT_START)
use_grad_scaling = self._use_grad_scaling(self.state.precision, self.state.scaler)
self._spin_dataloaders()
if self.state.timestamp.batch_in_epoch == 0 and self._rng_state is not None:
# only restore the rng state here if the step in the current epoch is zero.
reproducibility.load_rng_state(self._rng_state)
self._rng_state = None
self.state.model.train()
finished_epoch_early = False
last_wct = datetime.datetime.now()
while self.state.timestamp < self.state.max_duration:
try:
if int(self.state.timestamp.batch_in_epoch) == 0:
self.engine.run_event(Event.EPOCH_START)
self.logger.log_metrics({'trainer/epoch': int(self.state.timestamp.epoch)})
dataloader = self.state.dataloader
if isinstance(dataloader, DataLoader) and isinstance(dataloader.sampler, DistributedSampler):
dataloader.sampler.set_epoch(int(self.state.timestamp.epoch))
for batch_idx, self.state.batch in enumerate(self._iter_dataloader(TrainerMode.TRAIN)):
# Don't spin if dataloader handles it internally. Otherwise, if resuming, skip dataloader forward
if 'train' not in self.state.dataset_resumption and batch_idx < int(
self.state.timestamp.batch_in_epoch):
# Restore the RNG state immediately before the next batch is yielded from the dataloader
if batch_idx + 1 == int(self.state.timestamp.batch_in_epoch) and self._rng_state is not None:
reproducibility.load_rng_state(self._rng_state)
self._rng_state = None
continue
self.state.batch = self.state.device.batch_to_device(self.state.batch)
self.state.batch = self._train_data_spec.device_transforms(self.state.batch)
rank_num_samples = self._train_data_spec.get_num_samples_in_batch(self.state.batch)
rank_num_tokens = self._train_data_spec.get_num_tokens_in_batch(self.state.batch)
if self.deepspeed_enabled:
self.state.batch = _fix_batch_precision_for_deepspeed(self.state.batch, self.state.precision)
self.engine.run_event(Event.AFTER_DATALOADER)
self.engine.run_event(Event.BATCH_START)
self.logger.log_metrics({
'trainer/global_step': int(self.state.timestamp.batch),
'trainer/batch_idx': self.state.timestamp.batch_in_epoch.value,
})
total_loss_dict = self._train_batch(use_grad_scaling)
if use_grad_scaling:
self.state.scaler.update()
# total_loss_dict can be None if gradient scaling failed
if total_loss_dict is not None:
map_collection(total_loss_dict, dist.all_reduce)
total_loss_dict = {
k: loss.cpu().item() / dist.get_world_size() for k, loss in total_loss_dict.items()
}
self.state.total_loss_dict = total_loss_dict
self.logger.log_metrics(total_loss_dict)
# The scheduler step.step() and compute_and_log_metrics() are going to be included in the
# next batch's wall clock time. The time accumulation must be done here so schedulers
# have the latest timing information
now = datetime.datetime.now()
batch_time = now - last_wct
total_num_samples, total_num_tokens, batch_time = self._accumulate_time_across_ranks(
rank_num_samples,
rank_num_tokens,
batch_time,
)
# `now` is actually in the past, but want to include the time it takes to perform this reduction
last_wct = now
if self._scheduler_step_frequency == TimeUnit.BATCH:
for scheduler in self.state.schedulers:
scheduler.step()
if self.state.train_metrics is not None:
self._compute_and_log_metrics(
dataloader_label='train',
metrics=self.state.train_metrics,
)
self.state.timestamp = self.state.timestamp.to_next_batch(
samples=total_num_samples,
tokens=total_num_tokens,
duration=batch_time,
)
self.engine.run_event(Event.BATCH_END)
# Pause the timing during evaluation
# Evaluation time is tracked separately in state.eval_timestamp
duration = datetime.datetime.now() - last_wct
self._run_evaluators(Event.BATCH_END)
last_wct = datetime.datetime.now() - duration
self.engine.run_event(Event.BATCH_CHECKPOINT)
if self.state.timestamp >= self.state.max_duration:
                            # If max_duration is specified in batches, samples, or tokens, and
                            # the max_duration is reached mid-epoch, then break out of the dataloader
# to finish the epoch early and finish training.
finished_epoch_early = True
break
if not finished_epoch_early or self.state.dataloader_len == self.state.timestamp.batch_in_epoch:
# Trigger the epoch end events if the dataloader was exhausted.
# This happens if the "break" did not trigger above, or if it
# did (e.g. duration specified in samples/batches/tokens), but it is still
# the end of the dataloader (i.e. next(dataloader) would raise StopIteration)
if self.state.train_metrics is not None:
self._compute_and_log_metrics(
dataloader_label='train',
metrics=self.state.train_metrics,
)
if self._scheduler_step_frequency == TimeUnit.EPOCH:
for scheduler in self.state.schedulers:
scheduler.step()
self.state.timestamp = self.state.timestamp.to_next_epoch()
self.engine.run_event(Event.EPOCH_END)
# Pause the timing during evaluation
# Evaluation time is tracked separately in state.eval_timestamp
duration = datetime.datetime.now() - last_wct
self._run_evaluators(Event.EPOCH_END)
last_wct = datetime.datetime.now() - duration
self.engine.run_event(Event.EPOCH_CHECKPOINT)
except BreakEpochException:
log.info(f'Skipping the rest of Epoch {int(self.state.timestamp.epoch)}')
self.engine.run_event(Event.FIT_END)
self._run_evaluators(Event.FIT_END)
def _eval_train_metrics(self, device_batch):
assert self._train_data_spec is not None, 'The train data spec should be set on __init__ or fit()'
assert self.state.train_metrics is not None, 'The train metrics should be set on __init__ or fit()'
with torch.no_grad(),\
model_eval_mode(self.state.model),\
_get_precision_context(self.state.precision, self.deepspeed_enabled):
eval_outputs = self._original_model.eval_forward(device_batch, self.state.outputs)
for _, metric in self.state.train_metrics.items():
self._original_model.update_metric(
device_batch,
eval_outputs,
metric,
)
def _run_evaluators(self, event: Event):
"""Runs evaluators periodically during training."""
for evaluator in self.state.evaluators:
assert evaluator.eval_interval is not None, 'eval_interval should have been set on __init__() or fit()'
assert evaluator.subset_num_batches is not None, 'subset_num_batches should have been set on __init__() or fit()'
if evaluator.eval_interval(self.state, event):
self._eval_loop(
evaluator=evaluator,
subset_num_batches=evaluator.subset_num_batches,
metrics=self.state.eval_metrics[evaluator.label],
)
def _train_batch(self, use_grad_scaling: bool) -> Dict[str, torch.Tensor]:
"""Compute loss by training on a full batch of data.
Adaptively change microbatch size if enabled to maximize GPU usage.
Args:
use_grad_scaling (bool): Enables gradient scaling.
Returns:
Dict[str, torch.Tensor]: a dictionary containing the total loss and individual losses if available.
"""
assert self._train_data_spec is not None, 'The train data spec should be set on __init__ or fit()'
# Cache the device batch, because `self.state.batch` gets overridden in microbatching loop.
# Any in-place changes to a microbatch will be reflected in the device batch.
device_batch = self.state.batch
# Retry until we successfully complete training and return loss
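        # With auto microbatching, a CUDA OOM on any rank sets a flag that is all-reduced across ranks; the
        # microbatch size is then adjusted via _adjust_device_train_microbatch_size and the whole batch is retried.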
while True:
# Reset train_metrics on every batch
# Placing reset here ensures that if auto grad accum catches an OOM, incomplete metric state is cleared
if self.state.train_metrics is not None:
for _, metric in self.state.train_metrics.items():
metric.reset()
total_loss_dict = {'loss/train/total': self.state.device.tensor_to_device(torch.zeros(size=(1,)))}
found_cuda_oom = 0 # int since bool BOR not supported on all torch.distributed backends
try:
assert self.state.scaler is not None
assert self.state.device_train_microbatch_size is not None
microbatches = self._train_data_spec.split_batch(device_batch, self.state.device_train_microbatch_size)
if self._use_closures():
for optimizer in self.state.optimizers:
if use_grad_scaling:
self.state.scaler.step(optimizer,
closure=lambda loss_dict=total_loss_dict, **kwargs: self.
_train_microbatches(microbatches, loss_dict, **kwargs))
else:
optimizer.step(closure=lambda **kwargs: self._train_microbatches(
microbatches, total_loss_dict, **kwargs).item())
else:
self._train_microbatches(microbatches, total_loss_dict)
if not self.deepspeed_enabled:
for optimizer in self.state.optimizers:
if use_grad_scaling:
self.state.scaler.step(optimizer)
else:
if isinstance(self.state.device, DeviceTPU):
xm.optimizer_step(optimizer, barrier=True)
else:
optimizer.step()
except RuntimeError as e:
if self.state.auto_microbatching and _is_cuda_oom(e):
log.debug((f"Rank {dist.get_global_rank()} OOM'd."))
found_cuda_oom = 1
else:
raise
if self.state.auto_microbatching:
# Propagate across all ranks if any rank hit CUDA OOM
found_cuda_oom = self.state.device.tensor_to_device(torch.tensor([found_cuda_oom], dtype=torch.uint8))
dist.all_reduce(found_cuda_oom, reduce_operation='MAX')
if found_cuda_oom.item() == 1:
_adjust_device_train_microbatch_size(self.state)
# Skip return and rerun after handling oom
continue
# Log microbatch and return loss if we've completed without OOMing.
assert self.state.device_train_microbatch_size is not None
self.logger.log_metrics({'trainer/device_train_microbatch_size': self.state.device_train_microbatch_size})
return total_loss_dict
def _train_microbatches(self,
microbatches: Sequence[Batch],
total_loss_dict: Dict[str, torch.Tensor],
ddp_sync: bool = True) -> torch.Tensor:
"""Iterate over microbatches and compute the loss that will be used to step the optimizer.
Args:
microbatches (Sequence[Batch]): The microbatches which make up the batch.
total_loss_dict (Dict[str, torch.tensor]): Dictionary containing individual losses and their sum aggregated across all
microbatches.
ddp_sync (bool): True to sync gradients between devices on every backwards
pass and False to only sync gradients after each device has finished
                computing a gradient on its entire set of microbatches. (default: ``True``)
"""
if ddp_sync or not isinstance(self.state.model, DistributedDataParallel):
context = contextlib.nullcontext
else:
context = cast(Callable[[], ContextManager], self.state.model.no_sync)
assert self._train_data_spec is not None
with context():
self.engine.run_event(Event.BEFORE_TRAIN_BATCH)
assert self.state.optimizers is not None
assert self.state.scaler is not None
use_grad_scaling = self._use_grad_scaling(self.state.precision, self.state.scaler)
if not self.deepspeed_enabled:
for optimizer in self.state.optimizers:
try:
optimizer.zero_grad(set_to_none=True)
except TypeError:
optimizer.zero_grad()
            # Total number of samples in the full batch; used to weight each microbatch's contribution to the loss
current_batch_size = sum([self._train_data_spec.get_num_samples_in_batch(batch) for batch in microbatches])
# Cache batch, which will be overwritten by microbatches. Restore after microbatches complete
current_batch = self.state.batch
for microbatch_idx, self.state.batch in enumerate(microbatches):
is_final_microbatch = microbatch_idx + 1 == len(microbatches)
microbatch_loss_dict = self._train_microbatch(use_grad_scaling, current_batch_size, is_final_microbatch)
# Aggregate each loss in microbatch_loss_dict into total_loss_dict
for k, microbatch_loss in microbatch_loss_dict.items():
loss_key = f'loss/train/{k}'
if loss_key not in total_loss_dict:
total_loss_dict[loss_key] = self.state.device.tensor_to_device(torch.zeros(size=(1,)))
total_loss_dict[loss_key] += microbatch_loss
# Restore batch
self.state.batch = current_batch
# Unscale gradients before `Event.AFTER_TRAIN_BATCH`
if use_grad_scaling:
for optimizer in ensure_tuple(self.state.optimizers):
self.state.scaler.unscale_(optimizer)
self.engine.run_event(Event.AFTER_TRAIN_BATCH)
return total_loss_dict['loss/train/total']
def _train_microbatch(self, use_grad_scaling: bool, current_batch_size: int,
is_final_microbatch: bool) -> Dict[str, torch.Tensor]:
"""Train and compute the loss of ``state.batch``, which is assumed to be a single microbatch.
Args:
use_grad_scaling (bool): Whether to use gradient scaling.
            current_batch_size (int): The number of samples in the full batch, summed across all microbatches.
            is_final_microbatch (bool): Whether the current microbatch is the last one.
"""
assert self.state.scaler is not None
assert self._train_data_spec is not None
# Cache the device batch, because `self.state.batch` gets overridden in microbatching loop
device_batch = deepcopy(self.state.batch)
microbatch_num_samples = self._train_data_spec.get_num_samples_in_batch(self.state.batch)
sync_context = contextlib.nullcontext() if self.deepspeed_enabled else ddp_sync_context(
self.state,
is_final_microbatch,
self._ddp_sync_strategy,
)
with sync_context:
# forward pass
self.engine.run_event(Event.BEFORE_FORWARD)
with _get_precision_context(self.state.precision, self.deepspeed_enabled):
self.state.outputs = self.state.model(self.state.batch)
self.engine.run_event(Event.AFTER_FORWARD)
# loss
self.engine.run_event(Event.BEFORE_LOSS)
with _get_precision_context(self.state.precision, self.deepspeed_enabled):
self.state.loss = self._original_model.loss(self.state.outputs, self.state.batch)
assert self.state.loss is not None
self.engine.run_event(Event.AFTER_LOSS)
# backward
self.engine.run_event(Event.BEFORE_BACKWARD)
microbatch_loss_dict = {}
# If total loss key is present, copy loss
if isinstance(self.state.loss, dict) and ('total' in self.state.loss):
microbatch_loss = self.state.loss['total'] # type: ignore
microbatch_loss_dict = self.state.loss.copy()
# If total loss key is not present, sum individual losses
else:
microbatch_loss = self.state.device.tensor_to_device(torch.zeros(size=(1,)))
for loss in ensure_tuple(self.state.loss):
microbatch_loss.add_(loss.mean())
# Copy the loss if it is a dictionary
if isinstance(self.state.loss, dict):
microbatch_loss_dict = self.state.loss.copy()
# If not, create a dictionary with generic loss names
elif len(ensure_tuple(self.state.loss)) > 1:
microbatch_loss_dict = {f'loss{i}': loss for i, loss in enumerate(ensure_tuple(self.state.loss))}
# Include total loss
microbatch_loss_dict['total'] = microbatch_loss
# For each loss to log: detach, clone, mean, then multiply by (microbatch size) / (batch size)
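            # Hypothetical example: for a 64-sample batch split into four 16-sample microbatches, each logged
            # loss is weighted by 16/64 = 0.25, so the per-key totals accumulated across microbatches approximate
            # the full-batch mean loss.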
for k, loss in microbatch_loss_dict.items():
microbatch_loss_dict[k] = loss.detach().clone().mean() * (microbatch_num_samples / current_batch_size)
if use_grad_scaling:
microbatch_loss = cast(torch.Tensor, self.state.scaler.scale(microbatch_loss))
if self.deepspeed_enabled:
self.state.deepspeed_model.backward(microbatch_loss)
else:
# Scale loss based on the number of samples in the microbatch to maintain gradient numerics
microbatch_loss.mul_(microbatch_num_samples / current_batch_size)
microbatch_loss.backward(create_graph=self._backwards_create_graph)
self.engine.run_event(Event.AFTER_BACKWARD)
# Use microbatch outputs to update training metrics
if self.state.train_metrics is not None:
self.state.train_metrics = self._ensure_metrics_device_and_dtype(self.state.train_metrics)
self._eval_train_metrics(device_batch)
if self.deepspeed_enabled:
self.state.deepspeed_model.step()
return microbatch_loss_dict
def predict(
self,
dataloader: Union[DataLoader, DataSpec],
subset_num_batches: int = -1,
*,
return_outputs: bool = True,
):
"""Output model prediction on the provided data.
There are two ways to access the prediction outputs.
1. With ``return_outputs`` set to True, the batch predictions will be collected into a list and returned.
2. Via a custom callback, which can be used with ``return_outputs`` set to False.
This technique can be useful if collecting all the outputs from the dataloader would exceed available memory,
and you want to write outputs directly to files. For example:
.. testsetup::
predict_dl = train_dataloader
.. testcode::
import os
import torch
from torch.utils.data import DataLoader
from composer import Trainer, Callback
from composer.loggers import Logger
class PredictionSaver(Callback):
def __init__(self, folder: str):
self.folder = folder
os.makedirs(self.folder, exist_ok=True)
def predict_batch_end(self, state: State, logger: Logger) -> None:
name = f'batch_{int(state.predict_timestamp.batch)}.pt'
filepath = os.path.join(self.folder, name)
torch.save(state.outputs, filepath)
# Also upload the files
logger.upload_file(remote_file_name=name, file_path=filepath)
trainer = Trainer(
...,
callbacks=PredictionSaver('./predict_outputs'),
)
trainer.predict(predict_dl, return_outputs=False)
print(sorted(os.listdir('./predict_outputs')))
.. testoutput::
['batch_1.pt', ...]
Args:
dataloader (DataLoader | DataSpec): The :class:`.DataLoader` or
:class:`.DataSpec` for the prediction data.
subset_num_batches (int, optional): If specified, only perform model prediction
on this many batches. This parameter has no effect if it is greater than ``len(dataloader)``.
If ``-1``, then the entire loader will be iterated over. (default: ``-1``)
return_outputs (bool, optional): If True (the default), then prediction outputs will be (recursively)
moved to cpu and accumulated into a list. Otherwise, prediction outputs are discarded after each
batch.
Returns:
List: A list of batch outputs, if ``return_outputs`` is True. Otherwise, an empty list.
"""
if isinstance(dataloader, DataSpec):
data_spec = dataloader
else:
data_spec = DataSpec(dataloader)
# Bind the dataloader to the state, but be able to restore the previous dataloader afterwards
original_dataloader = self.state.dataloader
original_dataloader_label = self.state.dataloader_label
original_dataloader_len = self.state.dataloader_len
self.state.set_dataloader(data_spec.dataloader, 'predict', subset_num_batches)
assert self.state.dataloader is not None, 'Already set the dataloader'
# Reset the predict timestamp
self.state.predict_timestamp = Timestamp()
last_wct = datetime.datetime.now()
outputs = []
cpu_device = DeviceCPU()
with torch.no_grad(), model_eval_mode(self.state.model):
self.engine.run_event(Event.PREDICT_START)
for self.state.batch in self._iter_dataloader(TrainerMode.PREDICT):
# Move the batch onto the device
self.state.batch = self.state.device.batch_to_device(self.state.batch)
# Perform any device transforms
if data_spec.device_transforms is not None:
self.state.batch = data_spec.device_transforms(self.state.batch)
# Count the batch size and num tokens before any events run
rank_num_samples = data_spec.get_num_samples_in_batch(self.state.batch)
rank_num_tokens = data_spec.get_num_tokens_in_batch(self.state.batch)
# Fix the batch if using DeepSpeed
if self.deepspeed_enabled:
self.state.batch = _fix_batch_precision_for_deepspeed(self.state.batch, self.state.precision)
self.engine.run_event(Event.PREDICT_BATCH_START)
self.engine.run_event(Event.PREDICT_BEFORE_FORWARD)
with _get_precision_context(self.state.precision, self.deepspeed_enabled):
self.state.outputs = self.state.model(self.state.batch)
self.engine.run_event(Event.PREDICT_AFTER_FORWARD)
if return_outputs:
outputs.append(cpu_device.batch_to_device(self.state.outputs))
now = datetime.datetime.now()
batch_time = now - last_wct
total_num_samples, total_num_tokens, batch_time = self._accumulate_time_across_ranks(
num_samples=rank_num_samples,
num_tokens=rank_num_tokens,
batch_time=batch_time,
)
last_wct = now
self.state.predict_timestamp = self.state.predict_timestamp.to_next_batch(samples=total_num_samples,
tokens=total_num_tokens,
duration=batch_time)
self.engine.run_event(Event.PREDICT_BATCH_END)
self.engine.run_event(Event.PREDICT_END)
# Restore the dataloader
self.state.set_dataloader(original_dataloader, original_dataloader_label)
if original_dataloader_len is not None:
self.state.dataloader_len = original_dataloader_len
return outputs
def eval(
self,
eval_dataloader: Optional[Union[Iterable, DataSpec, Evaluator, Sequence[Evaluator]]] = None,
subset_num_batches: int = -1,
):
"""Run evaluation loop.
        Results are stored in ``trainer.state.eval_metrics``. The ``eval_dataloader`` can be provided either to
        this eval() call or to the Trainer init().
Examples:
.. testcode::
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
max_duration="2ep",
device="cpu",
)
trainer.fit()
# run eval
trainer.eval(
eval_dataloader=eval_dataloader,
)
Or, if the ``eval_dataloader`` is provided during init:
.. testcode::
trainer = Trainer(
model=model,
eval_dataloader=eval_dataloader,
train_dataloader=train_dataloader,
max_duration="2ep",
device="cpu",
)
trainer.fit()
# eval_dataloader already provided:
trainer.eval()
For multiple metrics or dataloaders, use :class:`.Evaluator` to provide
identifier names. For example, to run the GLUE task:
.. code:: python
from composer.core import Evaluator
from composer.models.nlp_metrics import BinaryF1Score
glue_mrpc_task = Evaluator(
label='glue_mrpc',
dataloader=mrpc_dataloader,
metric_names=['BinaryF1Score', 'MulticlassAccuracy']
)
glue_mnli_task = Evaluator(
label='glue_mnli',
dataloader=mnli_dataloader,
metric_names=['MulticlassAccuracy']
)
trainer = Trainer(
...,
eval_dataloader=[glue_mrpc_task, glue_mnli_task],
...
)
The metrics used are defined in your model's ``get_metrics()`` method. For more information,
see :doc:`/trainer/evaluation`.
Args:
eval_dataloader (DataLoader | DataSpec | Evaluator | Sequence[Evaluator], optional): Dataloaders
for evaluation. If not provided, defaults to using the
``eval_dataloader`` provided to the trainer init().
            subset_num_batches (int, optional): Evaluate on this many batches. Defaults to ``-1`` (the entire
                dataloader). Can also be provided to the Trainer init() as ``eval_subset_num_batches``.
"""
if eval_dataloader is not None:
eval_passed_in = True
eval_metrics = deepcopy(self._original_model.get_metrics(is_train=False))
metric_names = [str(k) for k in eval_metrics.keys()]
evaluators = [
ensure_evaluator(evaluator, default_metric_names=metric_names)
for evaluator in ensure_tuple(eval_dataloader)
]
if self.state.eval_metrics:
for evaluator in evaluators:
if evaluator.label in self.state.eval_metrics:
warnings.warn(
                            f'eval_dataloader label \'{evaluator.label}\' was already provided in '
                            'trainer initialization. Existing data for that label will be overwritten. '
                            'To prevent this in the future, assign unique label names.',
category=UserWarning)
# match metric names to model metrics
log.info(f'Added {[e.label for e in evaluators]} to eval_metrics.')
self.state.eval_metrics.update({e.label: _filter_metrics(eval_metrics, e.metric_names) for e in evaluators})
_set_evaluator_interval_and_subset_num_batches(
evaluators=evaluators,
eval_interval='1ep', # ignored
subset_num_batches=subset_num_batches,
)
for evaluator in evaluators:
validate_eval_automicrobatching(evaluator.auto_microbatching, self.state.device)
self.state.evaluators.extend(evaluators) # Add evaluators to state.evaluators
else:
eval_passed_in = False
if not self.state.evaluators:
raise ValueError('eval_dataloader must be provided to either Trainer init() or eval().')
evaluators = self.state.evaluators
for evaluator in evaluators:
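            # A subset_num_batches passed to eval() takes precedence; -1 keeps the evaluator's configured value.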
eval_subset_num_batches = evaluator.subset_num_batches if subset_num_batches == -1 else subset_num_batches
self._eval_loop(
evaluator=evaluator,
metrics=self.state.eval_metrics[evaluator.label],
subset_num_batches=eval_subset_num_batches,
)
if eval_passed_in:
self.state.evaluators.remove(evaluator) # Remove them from state once eval is finished.
def _eval_loop(
self,
evaluator: Evaluator,
metrics: Dict[str, Metric],
subset_num_batches: Optional[int] = None,
):
"""Evaluate the model and log appropriate metrics.
Args:
evaluator (Evaluator): The evaluator to use for evaluation.
metrics (Dict[str, Metric]): Dictionary mapping metric names to metrics to evaluate against.
subset_num_batches (int, optional): If specified, evaluate on this many batches. Defaults to ``None``,
which is treated as ``-1`` (iterate over the entire dataloader).
"""
if subset_num_batches is None:
subset_num_batches = -1
# back up the original dataloader on the state, so we can restore it after evaluation is finished
original_dataloader = self.state.dataloader
original_dataloader_label = self.state.dataloader_label
original_num_batches = self.state.dataloader_len
# Unpack data_spec
data_spec = evaluator.dataloader
# Reset the eval timestamp
self.state.eval_timestamp = Timestamp()
last_wct = datetime.datetime.now()
with torch.no_grad(), model_eval_mode(self.state.model):
self.state.set_dataloader(data_spec.dataloader, evaluator.label, subset_num_batches)
assert self.state.dataloader is not None, 'dataloader is set'
self.engine.run_event(Event.EVAL_START)
metrics = self._ensure_metrics_device_and_dtype(metrics)
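# Reset the metrics so the reported results reflect only this evaluation pass.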
for _, metric in metrics.items():
metric.reset()
dataloader = self.state.dataloader
if isinstance(dataloader, DataLoader) and isinstance(dataloader.sampler, DistributedSampler):
# The distributed sampler uses `set_epoch` to set the random seed
# Because evaluation can run on each batch, we use the batch to seed the sampler
# so each evaluation will get a proper shuffle.
# The epoch provided to `set_epoch` need not be sequential, so this is fine.
dataloader.sampler.set_epoch(int(self.state.timestamp.batch))
for self.state.batch in self._iter_dataloader(TrainerMode.EVAL):
self.state.batch = self.state.device.batch_to_device(self.state.batch)
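# Apply any on-device batch transforms defined by the DataSpec.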
if data_spec.device_transforms is not None:
self.state.batch = data_spec.device_transforms(self.state.batch)
# Count the batch size and num tokens before any events run
rank_num_samples = data_spec.get_num_samples_in_batch(self.state.batch)
rank_num_tokens = data_spec.get_num_tokens_in_batch(self.state.batch)
if self.deepspeed_enabled:
self.state.batch = _fix_batch_precision_for_deepspeed(self.state.batch, self.state.precision)
self.engine.run_event(Event.EVAL_BATCH_START)
# Cache the device batch, because `self.state.batch` gets overridden in microbatching loop
device_batch = self.state.batch
# Retry until we successfully complete evaluation
while True:
# Note: We use uint8 instead of bool as BOR is not supported on all torch.distributed backends
found_cuda_oom = 0
try:
for self.state.batch in data_spec.split_batch(device_batch,
evaluator.device_eval_microbatch_size):
self.engine.run_event(Event.EVAL_BEFORE_FORWARD)
with _get_precision_context(self.state.precision, self.deepspeed_enabled):
self.state.outputs = self._original_model.eval_forward(self.state.batch)
target = None
self.engine.run_event(Event.EVAL_AFTER_FORWARD)
# Run in same precision context to avoid NaNs
with _get_precision_context(self.state.precision, self.deepspeed_enabled):
if isinstance(self.state.device, DeviceMPS):
# torchmetrics math has numerical errors on M1 devices
# running the compute on CPU instead
outputs = self.state.outputs.cpu()
else:
outputs = self.state.outputs
if hasattr(self._original_model, 'validate'):
for _, metric in self.state.train_metrics.items():
metric.update(outputs, target)
else:
for _, metric in metrics.items():
self._original_model.update_metric(
self.state.batch,
outputs,
metric,
)
except RuntimeError as e:
if evaluator.auto_microbatching and _is_cuda_oom(e):
log.debug((f"Rank {dist.get_global_rank()} OOM'd."))
found_cuda_oom = 1
else:
raise
if evaluator.auto_microbatching:
# Propagate across all ranks if any rank hit CUDA OOM
found_cuda_oom = self.state.device.tensor_to_device(
torch.tensor([found_cuda_oom], dtype=torch.uint8))
dist.all_reduce(found_cuda_oom, reduce_operation='MAX')
if found_cuda_oom.item() == 1:
_adjust_device_eval_microbatch_size(evaluator)
# Skip return and rerun after handling oom
continue
# Log device_eval_microbatch_size if auto_microbatching is enabled
self.logger.log_metrics({
f'trainer/{evaluator.label}/device_eval_microbatch_size':
evaluator.device_eval_microbatch_size
})
# Break if we've successfully completed eval without OOMing.
break
now = datetime.datetime.now()
batch_time = now - last_wct
total_num_samples, total_num_tokens, batch_time = self._accumulate_time_across_ranks(
num_samples=rank_num_samples,
num_tokens=rank_num_tokens,
batch_time=batch_time,
)
self.state.eval_timestamp = self.state.eval_timestamp.to_next_batch(
samples=total_num_samples,
tokens=total_num_tokens,
duration=batch_time,
)
last_wct = now
self.engine.run_event(Event.EVAL_BATCH_END)
self._compute_and_log_metrics(dataloader_label=evaluator.label, metrics=metrics)
self.engine.run_event(Event.EVAL_END)
self.state.set_dataloader(original_dataloader, original_dataloader_label)
if original_num_batches is not None:
self.state.dataloader_len = original_num_batches
def _use_grad_scaling(self, precision: Union[str, Precision], scaler: Optional[GradScaler]) -> bool:
"""Determines based on precision when to use grad scaling.
By default, the pytorch GradScaler is a no-op if running on
unsupported hardware. Here we raise a RuntimeError instead.
Args:
precision (Precision): Numerical precision, based on the Precision Enum.
scaler (GradScaler): Used to make sure that the scaler is enabled when
using grad scaling.
Raises:
RuntimeError:
Occurs when attempting to use grad scaling without the scaler
enabled. Likely due to hardware not supporting the provided precision.
"""
if self.deepspeed_enabled:
return False
precision = Precision(precision)
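# Gradient scaling is only needed for fp16 AMP; bf16 and fp32 have enough dynamic range
# to train without a scaler.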
use_grad_scaling = precision == Precision.AMP_FP16
if use_grad_scaling and (scaler is None or not scaler.is_enabled()):
raise RuntimeError(f'Attempting to use grad scaling with {precision}, but scaler is not enabled. '
f'Potentially your hardware does not support Precision {precision}.')
return use_grad_scaling
def _iter_dataloader(self, trainer_mode: TrainerMode):
"""Helper method to iterate over the dataloader.
This method yields up to :attr:`.State.dataloader_len` batches from the dataloader. In addition, if the
profiler is enabled, the dataloader latency is recorded via the :class:`.Marker` API.
Args:
trainer_mode (TrainerMode): Specifies which mode the trainer is in.
"""
assert self.state.dataloader is not None, 'the dataloader should be set before calling this method'
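# If dataloader_len is unknown (e.g., an iterable-style dataset with no defined length), iterate
# until the dataloader is exhausted; otherwise, cap iteration at dataloader_len batches.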
if self.state.dataloader_len is None:
dataloader_iter = iter(self.state.dataloader)
else:
dataloader_iter = itertools.islice(self.state.dataloader, int(self.state.dataloader_len))
while True:
try:
# [BEFORE/AFTER]_DATALOADER only runs while training
if trainer_mode == TrainerMode.TRAIN:
self.engine.run_event(Event.BEFORE_DATALOADER)
batch = next(dataloader_iter)
except StopIteration:
# [BEFORE/AFTER]_DATALOADER only runs while training
if trainer_mode == TrainerMode.TRAIN:
# Event.AFTER_DATALOADER is normally called in the train loop. However, if we
# encounter StopIteration, the train loop will not run. Accordingly, we need to
# explicitly call the engine to run marker.finish() for the dataloader marker.
# Otherwise, we will encounter an error at the start of the next epoch when
# Event.BEFORE_DATALOADER tries to start an unfinished marker.
self.engine.run_marker_only_event(Event.AFTER_DATALOADER)
break
yield batch
def _use_closures(self) -> bool:
"""Determines based on precision and optimizers whether to use closures.
We default to using closures unless AMP is enabled, in which case we only allow closures when using optimizers
with the _step_supports_amp_closure flag.
"""
if self.deepspeed_enabled:
return False
if isinstance(self.state.device, DeviceTPU):
return False
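# Outside of fp16 AMP, closures are always safe to use. With fp16 AMP, the GradScaler only
# supports closures for optimizers that opt in via the _step_supports_amp_closure flag.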
if self.state.precision != Precision.AMP_FP16:
return True
if self.state.optimizers is None:
raise RuntimeError('state.optimizers must be set before `_use_closures` can be determined')
return all(
getattr(optimizer, '_step_supports_amp_closure', False)
for optimizer in ensure_tuple(self.state.optimizers))
def save_checkpoint(
self,
name: str = 'ep{epoch}-ba{batch}-rank{rank}',
*,
weights_only: bool = False,
):
"""Checkpoint the training :class:`~.State`.
Args:
name (str, optional): See :func:`.save_checkpoint`.
weights_only (bool, optional): See :func:`.save_checkpoint`.
Returns:
str or None: See :func:`.save_checkpoint`.
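Example (a minimal sketch; saves only the model weights using the default filename template):
.. code:: python
checkpoint_path = trainer.save_checkpoint(weights_only=True)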
"""
return checkpoint.save_checkpoint(
state=self.state,
filename=name,
weights_only=weights_only,
)
def export_for_inference(
self,
save_format: Union[str, ExportFormat],
save_path: str,
save_object_store: Optional[ObjectStore] = None,
sample_input: Optional[Any] = None,
transforms: Optional[Sequence[Transform]] = None,
):
"""Export a model for inference.
Args:
save_format (Union[str, ExportFormat]): Format to export to. Either ``"torchscript"`` or ``"onnx"``.
save_path (str): The path for storing the exported model. It can be a path to a file on the local disk,
a URL, or if ``save_object_store`` is set, the object name
in a cloud bucket. For example, ``my_run/exported_model``.
save_object_store (ObjectStore, optional): If ``save_path`` is an object name in a cloud bucket
(i.e. AWS S3 or Google Cloud Storage), an instance of
:class:`~.ObjectStore` which will be used
to store the exported model. If this is set to ``None``, will save to ``save_path`` using the trainer's
logger. (default: ``None``)
sample_input (Any, optional): Example model inputs used for tracing. This is needed for "onnx" export.
The ``sample_input`` need not match the batch size you intend to use for inference. However, the model
should accept the ``sample_input`` as is. (default: ``None``)
transforms (Sequence[Transform], optional): transformations (usually optimizations) that should
be applied to the model. Each Transform should be a callable that takes a model and returns a modified model.
Returns:
None
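Example (a minimal sketch; the save path is illustrative):
.. code:: python
trainer.export_for_inference(save_format='torchscript', save_path='exported_model.pt')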
"""
export_model = self.state.model.module if self.state.is_model_ddp else self.state.model
if not isinstance(export_model, nn.Module):
raise ValueError(f'Exporting Model requires type torch.nn.Module, got {type(export_model)}')
if sample_input is None and save_format == 'onnx':
sample_input = self.state.batch
export_with_logger(model=export_model,
save_format=save_format,
save_path=save_path,
logger=self.logger,
save_object_store=save_object_store,
sample_input=(sample_input, {}),
transforms=transforms)
| composer-dev | composer/trainer/trainer.py |