# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Base class for logger callback."""
from __future__ import annotations
import pathlib
from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence, Union
import numpy as np
import torch
from composer.core.callback import Callback
if TYPE_CHECKING:
from composer.core import State
__all__ = ['LoggerDestination']
class LoggerDestination(Callback, ABC):
"""Base class for logger destination.
As this class extends :class:`~.callback.Callback`, logger destinations can run on any training loop
:class:`.Event`. For example, it may be helpful to run on
:attr:`.Event.EPOCH_END` to perform any flushing at the end of every epoch.
Example:
.. doctest::
>>> from composer.loggers import LoggerDestination
>>> from composer.trainer import Trainer
>>> class MyLogger(LoggerDestination):
... def log_hyperparameters(self, data):
... print(f'Hyperparameters: {data}')
>>> logger = MyLogger()
>>> trainer = Trainer(
... ...,
... loggers=[logger]
... )
Hyperparameters: {'num_nodes': ...}
Hyperparameters: {'rank_zero_seed': ...}
"""
def log_hyperparameters(self, hyperparameters: Dict[str, Any]):
"""Log hyperparameters, configurations, and settings.
Logs any parameter/configuration/setting that doesn't vary during the run.
Args:
hyperparameters (Dict[str, Any]): A dictionary mapping hyperparameter names
(strings) to their values (Any).
"""
del hyperparameters # unused
pass
def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
"""Log metrics or parameters that vary during training.
Args:
metrics (Dict[str, float]): Dictionary mapping metric name (str) to metric
scalar value (float)
step (Optional[int], optional): The current step or batch of training at the
time of logging. Defaults to None. If not specified the specific
LoggerDestination implementation will choose a step (usually a running
counter).
"""
del metrics, step # unused
pass
def log_traces(self, traces: Dict[str, Any]):
"""Log traces. Logs any debug-related data like algorithm traces.
Args:
traces (Dict[str, Any]): Dictionary mapping trace names (str) to trace values
(Any).
"""
del traces
pass
def log_images(
self,
images: Union[np.ndarray, torch.Tensor, Sequence[Union[np.ndarray, torch.Tensor]]],
name: str = 'Images',
channels_last: bool = False,
step: Optional[int] = None,
masks: Optional[Dict[str, Union[np.ndarray, torch.Tensor, Sequence[Union[np.ndarray, torch.Tensor]]]]] = None,
mask_class_labels: Optional[Dict[int, str]] = None,
use_table: bool = True,
):
"""Log images. Logs any tensors or arrays as images.
Args:
images (np.ndarray | torch.Tensor | Sequence[np.ndarray | torch.Tensor]): An image or sequence
of images to log.
name (str): The name of the image(s). (Default: ``'Images'``)
channels_last (bool): Whether the channel dimension is last (``True``) or first (``False``).
(Default: ``False``)
step (Optional[int], optional): The current step or batch of training at the
time of logging. Defaults to None. If not specified the specific
LoggerDestination implementation will choose a step (usually a running
counter).
masks (Dict[str, np.ndarray | torch.Tensor | Sequence[np.ndarray | torch.Tensor]], optional): A dictionary
mapping the mask name (e.g. predictions or ground truth) to a sequence of masks.
mask_class_labels (Dict[int, str], optional): Dictionary mapping label id to its name. Used for labelling
each color in the mask.
use_table (bool): Whether to make a table of the images or not. (default: ``True``). Only for use
with WandB.
"""
del images, name, channels_last, step, masks, mask_class_labels, use_table
pass
def upload_file(
self,
state: State,
remote_file_name: str,
file_path: pathlib.Path,
*,
overwrite: bool,
):
"""Handle uploading a file stored at ``file_path`` to a file named ``remote_file_name``.
Subclasses should implement this method to store logged files (e.g. copy it to another folder or upload it to
an object store). However, not all loggers need to implement this method.
For example, the :class:`.ProgressBarLogger` does not implement this method, as it cannot
handle file uploads.
.. note::
* This method will block the training loop. For optimal performance, it is recommended that this
method copy the file to a temporary directory, enqueue the copied file for processing, and return.
Then, use a background thread(s) or process(s) to read from this queue to perform any I/O.
* After this method returns, training can resume, and the contents of ``file_path`` may change (or the file may be
deleted). Thus, if processing the file in the background (as is recommended), it is necessary to first
copy the file to a temporary directory. Otherwise, the original file may no longer exist, or the logged
file can be corrupted (e.g., if the logger destination is reading from file while the training loop
is writing to it).
.. seealso:: :doc:`Uploading Files</trainer/file_uploading>` for notes for file uploading.
Args:
state (State): The training state.
remote_file_name (str): The name of the file.
file_path (pathlib.Path): The file path.
overwrite (bool): Whether to overwrite an existing file with the same ``remote_file_name``, if one exists.
"""
del state, remote_file_name, file_path, overwrite # unused
pass
def download_file(
self,
remote_file_name: str,
destination: str,
overwrite: bool = False,
progress_bar: bool = True,
):
"""Handle downloading a file named ``remote_file_name`` to ``destination``.
Args:
remote_file_name (str): The name of the file.
destination (str): The destination filepath.
overwrite (bool): Whether to overwrite an existing file at ``destination``. Defaults to ``False``.
progress_bar (bool, optional): Whether to show a progress bar. Ignored if ``remote_file_name`` is a local file.
(default: ``True``)
"""
del remote_file_name, destination, overwrite, progress_bar # unused
raise NotImplementedError
def can_upload_files(self) -> bool:
"""Indicates whether LoggerDestination can upload files.
Returns ``False`` by default; derived logger classes that implement :meth:`upload_file` should return ``True``.
Returns:
bool: Whether the class supports uploading files.
"""
return False
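
# The sketch below is not part of Composer; it is a minimal illustration of how a custom
# destination can implement the hooks defined above. It prints metrics as they arrive and
# follows the recommendation from the ``upload_file`` docstring: block only long enough to
# copy the file into a staging directory, then hand the upload off to a background thread.
# The class name ``PrintAndStageLogger`` and its helpers are hypothetical.
import queue as _queue
import shutil
import tempfile
import threading


class PrintAndStageLogger(LoggerDestination):

    def __init__(self) -> None:
        self._stage_dir = tempfile.mkdtemp()
        self._work: _queue.Queue = _queue.Queue()
        # A daemon thread drains the staged files in the background.
        threading.Thread(target=self._drain, daemon=True).start()

    def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
        print(f'step={step}: {metrics}')

    def upload_file(self, state: State, remote_file_name: str, file_path: pathlib.Path, *,
                    overwrite: bool):
        # Copy synchronously (cheap), then return so training can resume.
        staged_path = shutil.copy2(file_path, self._stage_dir)
        self._work.put((remote_file_name, staged_path, overwrite))

    def can_upload_files(self) -> bool:
        return True

    def _drain(self) -> None:
        while True:
            remote_file_name, staged_path, overwrite = self._work.get()
            ...  # perform the actual upload (e.g. to an object store) here
            self._work.task_done()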
# End of file: composer/loggers/logger_destination.py (repo: composer-dev)
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Logs metrics to the console and show a progress bar."""
from __future__ import annotations
import os
import sys
from typing import TYPE_CHECKING, Any, Dict, List, Optional, TextIO, Union
import tqdm.auto
import yaml
from composer.core.time import TimeUnit
from composer.loggers.logger import Logger, format_log_data_value
from composer.loggers.logger_destination import LoggerDestination
from composer.utils import dist, is_notebook
if TYPE_CHECKING:
from composer.core import State, Timestamp
__all__ = ['ProgressBarLogger']
_IS_TRAIN_TO_KEYS_TO_LOG = {
True: ['loss/train'],
False: [
'metrics/eval/Accuracy',
'metrics/eval/BinaryAccuracy',
'metrics/eval/MulticlassAccuracy',
'metrics/eval/MultilabelAccuracy',
],
}
class _ProgressBar:
def __init__(
self,
total: Optional[int],
position: Optional[int],
bar_format: str,
file: TextIO,
metrics: Dict[str, Any],
keys_to_log: List[str],
timestamp_key: str,
unit: str = 'it',
) -> None:
self.keys_to_log = keys_to_log
self.metrics = metrics
self.position = position
self.timestamp_key = timestamp_key
self.file = file
is_atty = is_notebook() or os.isatty(self.file.fileno())
self.pbar = tqdm.auto.tqdm(
total=total,
position=position,
bar_format=bar_format,
file=file,
ncols=None if is_atty else 120,
dynamic_ncols=is_atty,
# We set `leave=False` so TQDM does not jump around, but we emulate `leave=True` behavior when closing
# by printing a dummy newline and refreshing to force tqdm to print to a stale line
# But on k8s, we need `leave=True`, as it would otherwise overwrite the pbar in place
# If in a notebook, always set leave=True, as otherwise Jupyter would remove the progress bars
leave=True if is_notebook() else not is_atty,
postfix=metrics,
unit=unit,
)
def log_data(self, data: Dict[str, Any]):
formatted_data = {}
for (k, v) in data.items():
# Check if any substring of the key matches the keys to log
if any(key_to_log in k for key_to_log in self.keys_to_log):
formatted_data[k] = format_log_data_value(v)
self.metrics.update(formatted_data)
self.pbar.set_postfix(self.metrics)
def update(self, n=1):
self.pbar.update(n=n)
def update_to_timestamp(self, timestamp: Timestamp):
n = int(getattr(timestamp, self.timestamp_key))
n = n - self.pbar.n
self.update(int(n))
def close(self):
if is_notebook():
# If in a notebook, always refresh before closing, so the
# finished progress is displayed
self.pbar.refresh()
else:
if self.position != 0:
# Force a (potentially hidden) progress bar to re-render itself
# Don't render the dummy pbar (at position 0), since that will clear a real pbar (at position 1)
self.pbar.refresh()
# Create a newline that will not be erased by leave=False. This allows for the finished pbar to be cached in the terminal
# This emulates `leave=True` without progress bar jumping
if not self.file.closed:
print('', file=self.file, flush=True)
self.pbar.close()
def state_dict(self) -> Dict[str, Any]:
pbar_state = self.pbar.format_dict
return {
'total': pbar_state['total'],
'position': self.position,
'bar_format': pbar_state['bar_format'],
'metrics': self.metrics,
'keys_to_log': self.keys_to_log,
'n': pbar_state['n'],
'timestamp_key': self.timestamp_key,
}
class ProgressBarLogger(LoggerDestination):
"""Log metrics to the console and optionally show a progress bar.
.. note::
This logger is automatically instantiated by the trainer via the ``progress_bar`` and ``console_stream``
options. This logger does not need to be created manually.
`TQDM <https://github.com/tqdm/tqdm>`_ is used to display progress bars.
During training, the progress bar logs the batch and training loss.
During validation, the progress bar logs the batch and validation accuracy.
Example progress bar output::
Epoch 1: 100%|██████████| 64/64 [00:01<00:00, 53.17it/s, loss/train=2.3023]
Epoch 1 (val): 100%|██████████| 20/20 [00:00<00:00, 100.96it/s, accuracy/val=0.0995]
Args:
stream (str | TextIO, optional): The console stream to use. If a string, it can either be ``'stdout'`` or
``'stderr'``. (default: :attr:`sys.stderr`)
log_traces (bool): Whether to log traces or not. (default: ``False``)
"""
def __init__(
self,
stream: Union[str, TextIO] = sys.stderr,
log_traces: bool = False,
) -> None:
# The dummy pbar is to fix issues when streaming progress bars over k8s, where the progress bar in position 0
# doesn't update until it is finished.
# Need to have a dummy progress bar in position 0, so the "real" progress bar in position 1 doesn't jump around
self.dummy_pbar: Optional[_ProgressBar] = None
self.train_pbar: Optional[_ProgressBar] = None
self.eval_pbar: Optional[_ProgressBar] = None
# set the stream
if isinstance(stream, str):
if stream.lower() == 'stdout':
stream = sys.stdout
elif stream.lower() == 'stderr':
stream = sys.stderr
else:
raise ValueError(f'stream must be one of ("stdout", "stderr", TextIO-like), got {stream}')
self.should_log_traces = log_traces
self.stream = stream
self.state: Optional[State] = None
self.hparams: Dict[str, Any] = {}
self.hparams_already_logged_to_console: bool = False
@property
def show_pbar(self) -> bool:
return dist.get_local_rank() == 0
def log_hyperparameters(self, hyperparameters: Dict[str, Any]):
# Lazy logging of hyperparameters.
self.hparams.update(hyperparameters)
def _log_hparams_to_console(self):
if dist.get_local_rank() == 0:
self._log_to_console('*' * 30)
self._log_to_console('Config:')
self._log_to_console(yaml.dump(self.hparams))
self._log_to_console('*' * 30)
self.hparams_already_logged_to_console = True
def log_traces(self, traces: Dict[str, Any]):
if self.should_log_traces:
for trace_name, trace in traces.items():
trace_str = format_log_data_value(trace)
self._log_to_console(f'[trace]: {trace_name}:' + trace_str + '\n')
def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
for metric_name, metric_value in metrics.items():
# Only log metrics and losses to pbar.
if 'metric' in metric_name or 'loss' in metric_name:
self.log_to_pbar(data={metric_name: metric_value})
def log_to_pbar(self, data: Dict[str, Any]):
# log to progress bar
current_pbar = self.eval_pbar if self.eval_pbar is not None else self.train_pbar
if current_pbar:
# Route the data to whichever progress bar is currently active
current_pbar.log_data(data)
def _log_to_console(self, log_str: str):
"""Logs to the console, avoiding interleaving with a progress bar."""
current_pbar = self.eval_pbar if self.eval_pbar is not None else self.train_pbar
if current_pbar:
# use tqdm.write to avoid interleaving
current_pbar.pbar.write(log_str)
else:
# write directly to self.stream; no active progress bar
print(log_str, file=self.stream, flush=True)
def _build_pbar(self, state: State, is_train: bool) -> _ProgressBar:
"""Builds a pbar.
* If ``state.max_duration.unit`` is :attr:`.TimeUnit.EPOCH`, then a new progress bar will be created for each epoch.
Mid-epoch evaluation progress bars will be labeled with the batch and epoch number.
* Otherwise, one progress bar will be used for all of training. Evaluation progress bars will be labeled
with the time (in units of ``max_duration.unit``) at which evaluation runs.
"""
# Always using position=1 to avoid jumping progress bars
# In jupyter notebooks, no need for the dummy pbar, so use the default position
position = None if is_notebook() else 1
desc = f'{state.dataloader_label:15}'
max_duration_unit = None if state.max_duration is None else state.max_duration.unit
if max_duration_unit == TimeUnit.EPOCH or max_duration_unit is None:
total = int(state.dataloader_len) if state.dataloader_len is not None else None
timestamp_key = 'batch_in_epoch'
unit = TimeUnit.BATCH
n = state.timestamp.epoch.value
if self.train_pbar is None and not is_train:
# epochwise eval results refer to model from previous epoch (n-1)
n = n - 1 if n > 0 else 0
if self.train_pbar is None:
desc += f'Epoch {n:3}'
else:
# For evaluation mid-epoch, show the total batch count
desc += f'Batch {int(state.timestamp.batch):3}'
desc += ': '
else:
if is_train:
assert state.max_duration is not None, 'max_duration should be set if training'
unit = max_duration_unit
total = state.max_duration.value
# pad for the expected length of an eval pbar -- which is 14 characters (see the else logic below)
desc = desc.ljust(len(desc) + 14)
else:
unit = TimeUnit.BATCH
total = int(state.dataloader_len) if state.dataloader_len is not None else None
value = int(state.timestamp.get(max_duration_unit))
# Longest unit name is sample (6 characters)
desc += f'{max_duration_unit.name.capitalize():6} {value:5}: '
timestamp_key = unit.name.lower()
return _ProgressBar(
file=self.stream,
total=total,
position=position,
keys_to_log=_IS_TRAIN_TO_KEYS_TO_LOG[is_train],
# In a notebook, the `bar_format` should not include the {bar}, as otherwise
# it would appear twice.
bar_format=desc + ' {l_bar}' + ('' if is_notebook() else '{bar:25}') + '{r_bar}{bar:-1b}',
unit=unit.value.lower(),
metrics={},
timestamp_key=timestamp_key,
)
def init(self, state: State, logger: Logger) -> None:
del logger # unused
if not is_notebook():
# Notebooks don't need the dummy progress bar; otherwise, it would be visible.
self.dummy_pbar = _ProgressBar(
file=self.stream,
position=0,
total=1,
metrics={},
keys_to_log=[],
bar_format='{bar:-1b}',
timestamp_key='',
)
self.state = state
def fit_start(self, state: State, logger: Logger) -> None:
if not self.hparams_already_logged_to_console:
self._log_hparams_to_console()
def predict_start(self, state: State, logger: Logger) -> None:
if not self.hparams_already_logged_to_console:
self._log_hparams_to_console()
def epoch_start(self, state: State, logger: Logger) -> None:
if self.show_pbar and not self.train_pbar:
self.train_pbar = self._build_pbar(state=state, is_train=True)
def eval_start(self, state: State, logger: Logger) -> None:
if not self.hparams_already_logged_to_console:
self._log_hparams_to_console()
if self.show_pbar:
self.eval_pbar = self._build_pbar(state, is_train=False)
def batch_end(self, state: State, logger: Logger) -> None:
if self.train_pbar:
self.train_pbar.update_to_timestamp(state.timestamp)
def eval_batch_end(self, state: State, logger: Logger) -> None:
if self.eval_pbar:
self.eval_pbar.update_to_timestamp(state.eval_timestamp)
def epoch_end(self, state: State, logger: Logger) -> None:
# Only close progress bars at epoch end if the duration is in epochs, since
# a new pbar will be created for each epoch
# If the duration is in other units, then one progress bar will be used for all of training.
assert state.max_duration is not None, 'max_duration should be set'
if self.train_pbar and state.max_duration.unit == TimeUnit.EPOCH:
self.train_pbar.close()
self.train_pbar = None
def close(self, state: State, logger: Logger) -> None:
del state, logger # unused
# Close any open progress bars
if self.eval_pbar:
self.eval_pbar.close()
self.eval_pbar = None
if self.train_pbar:
self.train_pbar.close()
self.train_pbar = None
if self.dummy_pbar:
self.dummy_pbar.close()
self.dummy_pbar = None
def eval_end(self, state: State, logger: Logger) -> None:
if self.eval_pbar:
self.eval_pbar.close()
self.eval_pbar = None
def state_dict(self) -> Dict[str, Any]:
return {
'train_pbar': self.train_pbar.state_dict() if self.train_pbar else None,
'eval_pbar': self.eval_pbar.state_dict() if self.eval_pbar else None,
}
def load_state_dict(self, state: Dict[str, Any]) -> None:
if state['train_pbar']:
n = state['train_pbar'].pop('n')
train_pbar = self._ensure_backwards_compatibility(state['train_pbar'])
self.train_pbar = _ProgressBar(file=self.stream, **train_pbar)
self.train_pbar.update(n=n)
if state['eval_pbar']:
n = state['eval_pbar'].pop('n')
eval_pbar = self._ensure_backwards_compatibility(state['eval_pbar'])
self.eval_pbar = _ProgressBar(file=self.stream, **eval_pbar)
self.eval_pbar.update(n=n)
def _ensure_backwards_compatibility(self, state: Dict[str, Any]) -> Dict[str, Any]:
# ensure backwards compatible with mosaicml<=v0.8.0 checkpoints
state.pop('epoch_style', None)
# old checkpoints do not have timestamp_key
if 'timestamp_key' not in state:
if 'unit' not in state:
raise ValueError('Either unit or timestamp_key must be in pbar state of checkpoint.')
unit = state['unit']
assert isinstance(unit, TimeUnit)
state['timestamp_key'] = unit.name.lower()
# new format expects unit as str, not TimeUnit
if 'unit' in state and isinstance(state['unit'], TimeUnit):
state['unit'] = state['unit'].value.lower()
return state
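
# Illustrative round trip (values are hypothetical) of the progress-bar checkpoint state
# handled by ``ProgressBarLogger.state_dict()`` / ``load_state_dict()`` above:
#
#   saved = {
#       'train_pbar': {
#           'total': 64, 'position': 1, 'bar_format': '...',
#           'metrics': {'loss/train': '2.3023'}, 'keys_to_log': ['loss/train'],
#           'n': 32, 'timestamp_key': 'batch_in_epoch',
#       },
#       'eval_pbar': None,
#   }
#
# On load, ``'n'`` is popped and replayed via ``_ProgressBar.update(n)``, and the remaining
# keys (after ``_ensure_backwards_compatibility``) are passed directly to the ``_ProgressBar``
# constructor.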
# End of file: composer/loggers/progress_bar_logger.py (repo: composer-dev)
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Base classes, functions, and variables for logger."""
from __future__ import annotations
import collections.abc
import operator
import pathlib
from functools import reduce
from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence, Union
import numpy as np
import torch
from composer.utils import ensure_tuple, format_name_with_dist
if TYPE_CHECKING:
from composer.core import State
from composer.loggers.logger_destination import LoggerDestination
__all__ = ['LoggerDestination', 'Logger', 'format_log_data_value']
class Logger:
"""An interface to record training data.
The :class:`.Trainer`, instances of :class:`.Callback`, and
instances of :class:`~composer.core.algorithm.Algorithm` invoke the logger to record data such as
the epoch, training loss, and custom metrics as provided by individual callbacks and algorithms.
This class does not store any data itself; instead, it routes all data to the ``destinations``.
Each destination (e.g. the :class:`~composer.loggers.file_logger.FileLogger`,
:class:`~composer.loggers.in_memory_logger.InMemoryLogger`) is responsible for storing the data itself
(e.g. writing it to a file or storing it in memory).
Args:
state (State): The training state.
destinations (LoggerDestination | Sequence[LoggerDestination], optional):
The logger destinations, to where logging data will be sent. (default: ``None``)
Attributes:
destinations (Sequence[LoggerDestination]):
A sequence of :class:`~.LoggerDestination` to where logging calls will be sent.
"""
def __init__(
self,
state: State,
destinations: Optional[Union[LoggerDestination, Sequence[LoggerDestination]]] = None,
):
self.destinations = ensure_tuple(destinations)
self._state = state
def log_traces(self, traces: Dict[str, Any]):
for destination in self.destinations:
destination.log_traces(traces)
def log_hyperparameters(self, parameters: Dict[str, Any]):
for destination in self.destinations:
destination.log_hyperparameters(parameters)
def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
if step is None:
step = self._state.timestamp.batch.value
for destination in self.destinations:
destination.log_metrics(metrics, step)
def log_images(
self,
images: Union[np.ndarray, torch.Tensor, Sequence[Union[np.ndarray, torch.Tensor]]],
name: str = 'Images',
channels_last: bool = False,
step: Optional[int] = None,
masks: Optional[Dict[str, Union[np.ndarray, torch.Tensor, Sequence[Union[np.ndarray, torch.Tensor]]]]] = None,
mask_class_labels: Optional[Dict[int, str]] = None,
use_table: bool = True,
):
"""Log images. Logs any tensors or arrays as images.
Args:
images (np.ndarray | torch.Tensor | Sequence[np.ndarray | torch.Tensor]): An image or sequence
of images to log.
name (str): The name of the image(s). (Default: ``'Images'``)
channels_last (bool): Whether the channel dimension is last (``True``) or first (``False``).
(Default: ``False``)
step (int, optional): The current step or batch of training at the
time of logging. Defaults to None. If not specified the specific
LoggerDestination implementation will choose a step (usually a running
counter).
masks (Dict[str, np.ndarray | torch.Tensor | Sequence[np.ndarray | torch.Tensor]], optional): A dictionary
mapping the mask name (e.g. predictions or ground truth) to a sequence of masks.
mask_class_labels (Dict[int, str], optional): Dictionary mapping label id to its name. Used for labelling
each color in the mask.
use_table (bool): Whether to make a table of the images or not. (default: ``True``). Only for use
with WandB.
"""
if step is None:
step = self._state.timestamp.batch.value
for destination in self.destinations:
destination.log_images(images, name, channels_last, step, masks, mask_class_labels, use_table)
def upload_file(
self,
remote_file_name: str,
file_path: Union[pathlib.Path, str],
*,
overwrite: bool = False,
):
"""Upload ``file_path`` as a file named ``remote_file_name``.
Both ``file_path`` and ``remote_file_name`` can be specified as format strings.
See :func:`~.composer.utils.file_helpers.format_name_with_dist` for more information.
.. seealso:: :doc:`Uploading Files</trainer/file_uploading>` for notes for file uploading.
Args:
remote_file_name (str): A format string for the name of the file.
file_path (str | pathlib.Path): A format string for the file path.
overwrite (bool, optional): Whether to overwrite an existing file with the same ``remote_file_name``.
(default: ``False``)
"""
file_path = format_name_with_dist(format_str=str(file_path), run_name=self._state.run_name)
file_path = pathlib.Path(file_path)
for destination in self.destinations:
destination.upload_file(
state=self._state,
remote_file_name=format_name_with_dist(format_str=remote_file_name, run_name=self._state.run_name),
file_path=file_path,
overwrite=overwrite,
)
def has_file_upload_destination(self) -> bool:
"""Determines if the logger has a destination which supports uploading files.
Needed for checking if a model can be exported via this logger.
Returns:
bool: Whether any of the destinations support uploading files.
"""
for destination in self.destinations:
if destination.can_upload_files():
return True
return False
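
# Typical use from a callback or algorithm (illustrative): ``logger.log_metrics({'loss/train': 2.3})``
# fans the call out to every destination, filling in ``step`` from ``state.timestamp.batch`` when it
# is omitted; ``logger.log_hyperparameters(...)`` and ``logger.log_traces(...)`` behave the same way,
# minus the step handling.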
def format_log_data_value(data: Any) -> str:
"""Recursively formats a given log data value into a string.
Args:
data: Data to format.
Returns:
str: ``data`` as a string.
"""
if data is None:
return 'None'
if isinstance(data, str):
return f"\"{data}\""
if isinstance(data, int):
return str(data)
if isinstance(data, float):
return f'{data:.4f}'
if isinstance(data, torch.Tensor):
if data.shape == () or reduce(operator.mul, data.shape, 1) == 1:
return format_log_data_value(data.cpu().item())
return 'Tensor of shape ' + str(data.shape)
if isinstance(data, collections.abc.Mapping):
output = ['{ ']
for k, v in data.items():
assert isinstance(k, str)
v = format_log_data_value(v)
output.append(f"\"{k}\": {v}, ")
output.append('}')
return ''.join(output)
if isinstance(data, collections.abc.Iterable):
return '[' + ', '.join(format_log_data_value(v) for v in data) + ']'
# Unknown format catch-all
return str(data)
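
# A few illustrative outputs of ``format_log_data_value`` (inputs chosen for this example):
#
#   format_log_data_value(0.91)                   -> '0.9100'
#   format_log_data_value('cat')                  -> '"cat"'
#   format_log_data_value({'acc': 0.91, 'n': 3})  -> '{ "acc": 0.9100, "n": 3, }'
#   format_log_data_value(torch.zeros(2, 3))      -> 'Tensor of shape torch.Size([2, 3])'
#   format_log_data_value([1, 2.5])               -> '[1, 2.5000]'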
# End of file: composer/loggers/logger.py (repo: composer-dev)
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Log files to an object store."""
from __future__ import annotations
import logging
import multiprocessing
import os
import pathlib
import queue
import shutil
import tempfile
import threading
import time
import uuid
import warnings
from multiprocessing.context import SpawnProcess
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union
from urllib.parse import urlparse
from composer.loggers.logger import Logger
from composer.loggers.logger_destination import LoggerDestination
from composer.utils import (LibcloudObjectStore, ObjectStore, ObjectStoreTransientError, OCIObjectStore, S3ObjectStore,
SFTPObjectStore, dist, format_name_with_dist, get_file, retry)
if TYPE_CHECKING:
from composer.core import State
log = logging.getLogger(__name__)
__all__ = ['RemoteUploaderDownloader']
def _build_remote_backend(remote_backend_name: str, backend_kwargs: Dict[str, Any]):
remote_backend_name_to_cls = {
's3': S3ObjectStore,
'oci': OCIObjectStore,
'sftp': SFTPObjectStore,
'libcloud': LibcloudObjectStore
}
remote_backend_cls = remote_backend_name_to_cls.get(remote_backend_name, None)
if remote_backend_cls is None:
raise ValueError(
f'The remote backend {remote_backend_name} is not supported. Please use one of ({list(remote_backend_name_to_cls.keys())})'
)
return remote_backend_cls(**backend_kwargs)
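
# For example (illustrative), ``_build_remote_backend('s3', {'bucket': 'my-bucket'})`` constructs
# an ``S3ObjectStore(bucket='my-bucket')``; an unrecognized backend name raises a ``ValueError``
# listing the supported schemes.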
class RemoteUploaderDownloader(LoggerDestination):
r"""Logger destination that uploads (downloads) files to (from) a remote backend.
This logger destination handles calls to :meth:`.Logger.upload_file`
and uploads files to :class:`.ObjectStore`, such as AWS S3 or Google Cloud Storage. To minimize the training
loop performance hit, it supports background uploads.
.. testcode:: composer.loggers.remote_uploader_downloader.RemoteUploaderDownloader.__init__
from composer.loggers import RemoteUploaderDownloader
from composer.utils import LibcloudObjectStore
remote_uploader_downloader = RemoteUploaderDownloader(
bucket_uri="s3://my-bucket",
)
# Construct the trainer using this logger
trainer = Trainer(
...,
loggers=[remote_uploader_downloader],
)
or
.. testcode:: composer.loggers.remote_uploader_downloader.RemoteUploaderDownloader.__init__
from composer.loggers import RemoteUploaderDownloader
from composer.utils import LibcloudObjectStore
remote_uploader_downloader = RemoteUploaderDownloader(
bucket_uri="libcloud://my-bucket",
backend_kwargs={
'provider': 's3',
'container': 'my-bucket',
'provider_kwargs': {
'key': 'AKIA...',
'secret': '*********',
'region': 'ap-northeast-1',
},
},
)
# Construct the trainer using this logger
trainer = Trainer(
...,
loggers=[remote_uploader_downloader],
)
or
.. testcode:: composer.loggers.remote_uploader_downloader.RemoteUploaderDownloader.__init__
from composer.loggers import RemoteUploaderDownloader
from composer.trainer import Trainer
remote_uploader_downloader = RemoteUploaderDownloader(
bucket_uri="libcloud://my-gcs-bucket",
backend_kwargs={
"provider": "google_storage",
"container": "my-gcs-bucket",
"key_environ": "MY_HMAC_ACCESS_ID", # Name of env variable for HMAC access id.
"secret_environ": "MY_HMAC_SECRET", # Name of env variable for HMAC secret.
},
)
# Construct the trainer using this logger
trainer = Trainer(
...,
loggers=[remote_uploader_downloader],
)
.. note::
This callback blocks the training loop only long enough to copy each file to the staging folder; the
uploading itself happens in the background. Here are some additional tips for minimizing the performance impact:
* Set ``use_procs=True`` (the default) to use background processes, instead of threads, to perform the file
uploads. Processes are recommended to ensure that the GIL is not blocking the training loop when
performing CPU operations on uploaded files (e.g. computing and comparing checksums). Network I/O always
occurs in the background.
* Provide a RAM disk path for the ``upload_staging_folder`` parameter. Copying files to stage on RAM will be
faster than writing to disk. However, there must be sufficient excess RAM, or :exc:`MemoryError`\s may
be raised.
Args:
bucket_uri (str): The remote uri for the bucket to use (e.g. s3://my-bucket).
As individual :class:`.ObjectStore` instances are not necessarily thread safe, each worker will construct
its own :class:`.ObjectStore` instance from ``remote_backend`` and ``backend_kwargs``.
backend_kwargs (Dict[str, Any]): The keyword arguments to construct the remote backend indicated by ``bucket_uri``.
As individual :class:`.ObjectStore` instances are not necessarily thread safe, each worker will construct
its own :class:`.ObjectStore` instance from ``remote_backend`` and ``backend_kwargs``.
file_path_format_string (str, optional): A format string used to determine the remote file path (within the specified bucket).
The following format variables are available:
+------------------------+-------------------------------------------------------+
| Variable | Description |
+========================+=======================================================+
| ``{remote_file_name}`` | The name of the file being logged. |
+------------------------+-------------------------------------------------------+
| ``{run_name}`` | The name of the training run. See |
| | :attr:`.State.run_name`. |
+------------------------+-------------------------------------------------------+
| ``{rank}`` | The global rank, as returned by |
| | :func:`~composer.utils.dist.get_global_rank`. |
+------------------------+-------------------------------------------------------+
| ``{local_rank}`` | The local rank of the process, as returned by |
| | :func:`~composer.utils.dist.get_local_rank`. |
+------------------------+-------------------------------------------------------+
| ``{world_size}`` | The world size, as returned by |
| | :func:`~composer.utils.dist.get_world_size`. |
+------------------------+-------------------------------------------------------+
| ``{local_world_size}`` | The local world size, as returned by |
| | :func:`~composer.utils.dist.get_local_world_size`. |
+------------------------+-------------------------------------------------------+
| ``{node_rank}`` | The node rank, as returned by |
| | :func:`~composer.utils.dist.get_node_rank`. |
+------------------------+-------------------------------------------------------+
Leading slashes (``'/'``) will be stripped.
Consider the following example, which subfolders the remote files by their rank:
.. testsetup:: composer.loggers.remote_uploader_downloader.RemoteUploaderDownloader.__init__.file_path_format_string
import os
os.makedirs('path/to', exist_ok=True)
with open('path/to/file.txt', 'w+') as f:
f.write('hi')
.. doctest:: composer.loggers.remote_uploader_downloader.RemoteUploaderDownloader.__init__.file_path_format_string
>>> remote_uploader_downloader = RemoteUploaderDownloader(..., file_path_format_string='rank_{rank}/{remote_file_name}')
>>> trainer = Trainer(..., save_latest_filename=None, run_name='foo', loggers=[remote_uploader_downloader])
>>> trainer.logger.upload_file(
... remote_file_name='bar.txt',
... file_path='path/to/file.txt',
... )
.. testcleanup:: composer.loggers.remote_uploader_downloader.RemoteUploaderDownloader.__init__.file_path_format_string
# Shut down the uploader
remote_uploader_downloader._check_workers()
remote_uploader_downloader.post_close()
Assuming that the process's rank is ``0``, the remote backend would store the contents of
``'path/to/file.txt'`` at ``'rank_0/bar.txt'``.
Default: ``'{remote_file_name}'``
num_concurrent_uploads (int, optional): Maximum number of concurrent uploads. Defaults to 1.
upload_staging_folder (str, optional): A folder to use for staging uploads.
If not specified, defaults to using a :func:`~tempfile.TemporaryDirectory`.
use_procs (bool, optional): Whether to perform file uploads in background processes (as opposed to threads).
Defaults to True.
num_attempts (int, optional): For operations that fail with a transient error, the number of attempts to make.
Defaults to 3.
"""
def __init__(self,
bucket_uri: str,
backend_kwargs: Optional[Dict[str, Any]] = None,
file_path_format_string: str = '{remote_file_name}',
num_concurrent_uploads: int = 1,
upload_staging_folder: Optional[str] = None,
use_procs: bool = True,
num_attempts: int = 3) -> None:
parsed_remote_bucket = urlparse(bucket_uri)
self.remote_backend_name, self.remote_bucket_name = parsed_remote_bucket.scheme, parsed_remote_bucket.netloc
self.backend_kwargs = backend_kwargs if backend_kwargs is not None else {}
if self.remote_backend_name in ['s3', 'oci'] and 'bucket' not in self.backend_kwargs:
self.backend_kwargs['bucket'] = self.remote_bucket_name
elif self.remote_backend_name == 'sftp' and 'host' not in self.backend_kwargs:
self.backend_kwargs['host'] = f'sftp://{self.remote_bucket_name}'
elif self.remote_backend_name == 'libcloud' and 'container' not in self.backend_kwargs:
self.backend_kwargs['container'] = self.remote_bucket_name
self.file_path_format_string = file_path_format_string
self.num_attempts = num_attempts
self._run_name = None
if upload_staging_folder is None:
self._tempdir = tempfile.TemporaryDirectory()
self._upload_staging_folder = self._tempdir.name
else:
self._tempdir = None
self._upload_staging_folder = upload_staging_folder
if num_concurrent_uploads < 1:
raise ValueError('num_concurrent_uploads must be >= 1. Blocking uploads are not supported.')
self._num_concurrent_uploads = num_concurrent_uploads
# There could be multiple upload workers uploading to the same object.
# If multiple workers are uploading to the same object simultaneously (e.g. the checkpoint latest symlink file), then
# the object store might keep the earlier file rather than the later file as the "latest" version.
# To work around this, each object name can appear at most once in `self._file_upload_queue`.
# The main thread separately keeps track of {formatted_remote_file_name: (tempfile_path, overwrite)} for each API call to self.upload_file
# and then periodically transfers items from this dictionary onto the file upload queue
# Lock for modifying `logged_objects` or `enqueued_objects`
# These objects are used by threads on the main process only
self._object_lock = threading.Lock()
# Files that were logged but yet to be enqueued. Mapping of the object name to the (tempfile path, overwrite) for that object
self._logged_objects: Dict[str, Tuple[str, bool]] = {}
# Set of enqueued objects. This should keep track of everything in self._file_upload_queue with O(1) lookup
self._enqueued_objects: Set[str] = set()
# Thread that runs `self._enqueue_uploads`
self._enqueue_thread = None
# Event to signal the enqueue thread to shut down.
self._enqueue_thread_flag = None
if use_procs:
mp_ctx = multiprocessing.get_context('spawn')
self._file_upload_queue: Union[queue.Queue[Tuple[str, str, bool]],
multiprocessing.JoinableQueue[Tuple[str, str,
bool]],] = mp_ctx.JoinableQueue()
self._completed_queue: Union[queue.Queue[str], multiprocessing.JoinableQueue[str],] = mp_ctx.JoinableQueue()
self._exception_queue: Union[queue.Queue[Exception],
multiprocessing.JoinableQueue[Exception],] = mp_ctx.JoinableQueue()
self._finished_cls: Union[Callable[[], multiprocessing._EventType], Type[threading.Event]] = mp_ctx.Event
self._proc_class = mp_ctx.Process
else:
self._file_upload_queue = queue.Queue()
self._completed_queue = queue.Queue()
self._exception_queue = queue.Queue()
self._finished_cls = threading.Event
self._proc_class = threading.Thread
self._worker_flag: Optional[Union[multiprocessing._EventType, threading.Event]] = None
self._workers: List[Union[SpawnProcess, threading.Thread]] = []
# the object store instance for the main thread. Deferring the construction of the object_store to first use.
self._remote_backend = None
@property
def remote_backend(self) -> ObjectStore:
"""The :class:`.ObjectStore` instance for the main thread."""
if self._remote_backend is None:
self._remote_backend = _build_remote_backend(self.remote_backend_name, self.backend_kwargs)
return self._remote_backend
def init(self, state: State, logger: Logger) -> None:
del logger # unused
if self._worker_flag is not None:
raise RuntimeError('The RemoteUploaderDownloader is already initialized.')
self._worker_flag = self._finished_cls()
self._run_name = state.run_name
file_name_to_test = self._remote_file_name('.credentials_validated_successfully')
# Create the enqueue thread
self._enqueue_thread_flag = self._finished_cls()
self._enqueue_thread = threading.Thread(target=self._enqueue_uploads, daemon=True)
self._enqueue_thread.start()
if dist.get_global_rank() == 0:
retry(ObjectStoreTransientError,
self.num_attempts)(lambda: _validate_credentials(self.remote_backend, file_name_to_test))()
assert len(self._workers) == 0, 'workers should be empty if self._worker_flag was None'
for _ in range(self._num_concurrent_uploads):
worker = self._proc_class(
target=_upload_worker,
kwargs={
'file_queue': self._file_upload_queue,
'is_finished': self._worker_flag,
'remote_backend_name': self.remote_backend_name,
'backend_kwargs': self.backend_kwargs,
'num_attempts': self.num_attempts,
'completed_queue': self._completed_queue,
'exception_queue': self._exception_queue,
},
# The worker threads are joined in the shutdown procedure, so it is OK to set the daemon status
# Setting daemon status prevents the process from hanging if close was never called (e.g. in doctests)
daemon=True,
)
worker.start()
self._workers.append(worker)
def batch_end(self, state: State, logger: Logger) -> None:
del state, logger # unused
self._check_workers()
def epoch_end(self, state: State, logger: Logger) -> None:
del state, logger # unused
self._check_workers()
@property
def _all_workers_alive(self):
"""Whether all workers are alive."""
return all(worker.is_alive() for worker in self._workers)
def _check_workers(self):
# Periodically check to see if any of the upload workers crashed
# They would crash if:
# a) A file could not be uploaded, and the retry counter failed, or
# b) overwrite=False, but the file already exists.
if not self._all_workers_alive:
if not self._exception_queue.empty():
exception = self._exception_queue.get_nowait()
raise exception
else:
raise RuntimeError('Upload worker crashed. Please check the logs.')
def upload_file(
self,
state: State,
remote_file_name: str,
file_path: pathlib.Path,
*,
overwrite: bool,
):
copied_path = os.path.join(self._upload_staging_folder, str(uuid.uuid4()))
os.makedirs(self._upload_staging_folder, exist_ok=True)
shutil.copy2(file_path, copied_path)
formatted_remote_file_name = self._remote_file_name(remote_file_name)
with self._object_lock:
if formatted_remote_file_name in self._logged_objects and not overwrite:
raise FileExistsError(
f'Object {formatted_remote_file_name} was already enqueued to be uploaded, but overwrite=False.')
self._logged_objects[formatted_remote_file_name] = (copied_path, overwrite)
def can_upload_files(self) -> bool:
"""Whether the logger supports uploading files."""
return True
def _enqueue_uploads(self):
"""Worker thread to enqueue uploads.
This thread does two things:
1. It enqueues objects from ``self._logged_objects`` onto ``self._file_upload_queue``.
2. It keeps ``self._enqueued_objects`` in sync with ``self._file_upload_queue`` by listening to ``self._completed_uploads``.
"""
assert self._enqueue_thread_flag is not None
while True:
with self._object_lock:
# Remove all objects from self._enqueued_objects that have been successfully uploaded
while True:
try:
object_name = self._completed_queue.get_nowait()
except queue.Empty:
break
self._enqueued_objects.remove(object_name)
self._completed_queue.task_done()
# Enqueue all objects that are in self._logged_objects but not in self._file_upload_queue
objects_to_delete = []
for object_name, (copied_path, overwrite) in self._logged_objects.items():
if object_name in self._enqueued_objects:
continue
self._file_upload_queue.put_nowait((copied_path, object_name, overwrite))
objects_to_delete.append(object_name)
self._enqueued_objects.add(object_name)
for object_name in objects_to_delete:
del self._logged_objects[object_name]
# Shutdown if the enqueue thread flag is set, which means that no more objects will be added to
# self._logged_objects
if self._enqueue_thread_flag.is_set():
if self._all_workers_alive:
if len(self._logged_objects) == 0:
# If finished (i.e. no more objects to be added to self._logged_objects) and all logged objects are
# enqueued, then break
break
else:
# If any worker died, then it's impossible to recover since the file was already popped off of the queue,
# so break. Some files may not be uploaded.
break
time.sleep(0.2) # Yield lock for `self.upload_file`
def download_file(
self,
remote_file_name: str,
destination: str,
overwrite: bool = False,
progress_bar: bool = True,
):
get_file(path=remote_file_name,
destination=destination,
object_store=self.remote_backend,
overwrite=overwrite,
progress_bar=progress_bar)
def fit_end(self, state: State, logger: Logger):
self.wait_for_workers()
def eval_end(self, state: State, logger: Logger):
self.wait_for_workers()
def predict_end(self, state: State, logger: Logger):
self.wait_for_workers()
def wait_for_workers(self):
"""Wait for all tasks to be completed.
This is called after fit/eval/predict. If we don't wait, then a worker might not schedule
an upload before the interpreter shuts down and garbage collection begins. While
post_close logic ensures existing uploads are completed, trying to schedule new uploads
during this time will error.
"""
# Verify enqueue thread has processed all tasks unless a worker threw an exception
while self._exception_queue.empty():
with self._object_lock:
if len(self._logged_objects) == 0:
break
time.sleep(0.2) # Yield lock for enqueue thread
# Verify all tasks have been completed unless a worker threw an exception
while not self._file_upload_queue.empty() and self._exception_queue.empty():
time.sleep(0.2)
if not self._exception_queue.empty():
e = self._exception_queue.get_nowait()
raise e
def post_close(self):
# Shutdown logic:
# 1. Signal to the enqueue thread that all uploads are enqueued. Specifically,
# set a flag indicating that no more objects will be added to self._logged_objects.
# 2. Wait for the enqueue thread to shut down. It will only shut down once all objects are added to
# self._file_upload_queue. This will mean that self._logged_objects is empty.
# 3. Send a flag to the workers that all uploads are enqueued in self._file_upload_queue.
# 4. Wait for the workers to shut down. This means that all files have been uploaded
if self._enqueue_thread_flag is not None:
self._enqueue_thread_flag.set()
if self._enqueue_thread is not None:
self._enqueue_thread.join()
if self._worker_flag is not None:
self._worker_flag.set()
# Then, ensure all workers have finished all uploads
for worker in self._workers:
worker.join()
# Clean up the tempdir
if self._tempdir is not None:
self._tempdir.cleanup()
# Empty the completed queue
# This cleanup will not be done by the enqueue_thread anymore, as that thread has been shut down
while True:
try:
object_name = self._completed_queue.get_nowait()
except queue.Empty:
break
self._enqueued_objects.remove(object_name)
self._completed_queue.task_done()
if len(self._enqueued_objects) > 0 or len(self._logged_objects) > 0:
# Warn on all objects that have not been uploaded
object_names = list(self._enqueued_objects)
object_names.extend(self._logged_objects.keys())
warnings.warn(
RuntimeWarning('The following objects may not have been uploaded, likely due to a worker crash: ' +
', '.join(object_names)))
# Reset all variables
self._logged_objects.clear()
self._enqueued_objects.clear()
self._enqueue_thread = None
self._tempdir = None
self._worker_flag = None
self._enqueue_thread_flag = None
self._workers.clear()
def get_uri_for_file(self, remote_file_name: str) -> str:
"""Get the object store provider uri for a remote file.
Args:
remote_file_name (str): The name of a remote file.
Returns:
str: The uri corresponding to the uploaded location of the remote file.
"""
formatted_remote_file_name = self._remote_file_name(remote_file_name)
return self.remote_backend.get_uri(formatted_remote_file_name.lstrip('/'))
def _remote_file_name(self, remote_file_name: str):
"""Format the ``remote_file_name`` according to the ``file_path_format_string``."""
if self._run_name is None:
raise RuntimeError('The run name is not set. It should have been set on Event.INIT.')
key_name = format_name_with_dist(
self.file_path_format_string,
run_name=self._run_name,
remote_file_name=remote_file_name,
)
key_name = key_name.lstrip('/')
return key_name
def _validate_credentials(
remote_backend: ObjectStore,
remote_file_name_to_test: str,
) -> None:
# Validates the credentials by attempting to touch a file in the bucket
# raises an error if there was a credentials failure.
with tempfile.NamedTemporaryFile('wb') as f:
f.write(b'credentials_validated_successfully')
remote_backend.upload_object(
object_name=remote_file_name_to_test,
filename=f.name,
)
def _upload_worker(
file_queue: Union[queue.Queue[Tuple[str, str, bool]], multiprocessing.JoinableQueue[Tuple[str, str, bool]]],
completed_queue: Union[queue.Queue[str], multiprocessing.JoinableQueue[str]],
exception_queue: Union[queue.Queue[Exception], multiprocessing.JoinableQueue[Exception]],
is_finished: Union[multiprocessing._EventType, threading.Event],
remote_backend_name: str,
backend_kwargs: Dict[str, Any],
num_attempts: int,
):
"""A long-running function to handle uploading files to the object store.
The worker will continuously poll ``file_queue`` for files to upload. Once ``is_finished`` is set, the worker will
exit once ``file_queue`` is empty.
"""
remote_backend = _build_remote_backend(remote_backend_name, backend_kwargs)
while True:
try:
file_path_to_upload, remote_file_name, overwrite = file_queue.get(block=True, timeout=0.5)
except queue.Empty:
if is_finished.is_set():
break
else:
continue
uri = remote_backend.get_uri(remote_file_name)
# defining as a function-in-function to use decorator notation with num_attempts as an argument
@retry(ObjectStoreTransientError, num_attempts=num_attempts)
def upload_file():
if not overwrite:
try:
remote_backend.get_object_size(remote_file_name)
except FileNotFoundError:
# Good! It shouldn't exist.
pass
else:
# Exceptions will be detected on the next batch_end or epoch_end event
e = FileExistsError(f'Object {uri} already exists, but allow_overwrite was set to False.')
exception_queue.put_nowait(e)
raise e
log.info('Uploading file %s to %s', file_path_to_upload, uri)
try:
remote_backend.upload_object(
object_name=remote_file_name,
filename=file_path_to_upload,
)
except Exception as e:
exception_queue.put_nowait(e)
raise e
os.remove(file_path_to_upload)
file_queue.task_done()
completed_queue.put_nowait(remote_file_name)
upload_file()
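
# Illustrative example (hypothetical values) of the remote-name formatting performed by
# ``RemoteUploaderDownloader._remote_file_name`` above: with
# ``file_path_format_string='rank_{rank}/{remote_file_name}'`` and ``run_name='my-run'``,
# calling ``_remote_file_name('checkpoints/ep1.pt')`` on global rank 0 yields
# ``'rank_0/checkpoints/ep1.pt'``; any leading ``'/'`` in the result is stripped before upload.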
# End of file: composer/loggers/remote_uploader_downloader.py (repo: composer-dev)
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Logs to a file."""
from __future__ import annotations
import os
import queue
import sys
import textwrap
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, TextIO
from composer.loggers.logger import Logger, format_log_data_value
from composer.loggers.logger_destination import LoggerDestination
from composer.utils import FORMAT_NAME_WITH_DIST_TABLE, format_name_with_dist
if TYPE_CHECKING:
from composer.core import State
__all__ = ['FileLogger']
class FileLogger(LoggerDestination): # noqa: D101
__doc__ = f"""Log data to a file.
Example usage:
.. testcode::
from composer.loggers import FileLogger
from composer.trainer import Trainer
file_logger = FileLogger(
filename="{{run_name}}/logs-rank{{rank}}.txt",
buffer_size=1,
flush_interval=50
)
trainer = Trainer(
...,
loggers=[file_logger]
)
.. testcleanup::
import os
trainer.engine.close()
path = os.path.join(trainer.state.run_name, "logs-rank0.txt")
try:
os.remove(file_logger.filename)
except FileNotFoundError as e:
pass
Example output::
[FIT][step=2]: {{ "logged_metric": "logged_value", }}
[EPOCH][step=2]: {{ "logged_metric": "logged_value", }}
[BATCH][step=2]: {{ "logged_metric": "logged_value", }}
[EPOCH][step=3]: {{ "logged_metric": "logged_value", }}
Args:
filename (str, optional): Format string for the filename.
The following format variables are available:
{textwrap.indent(FORMAT_NAME_WITH_DIST_TABLE, prefix=' ')}
.. note::
When training with multiple devices (i.e. GPUs), ensure that ``'{{rank}}'`` appears in the format.
Otherwise, multiple processes may attempt to write to the same file.
Consider the following example when using default value of '{{run_name}}/logs-rank{{rank}}.txt':
>>> file_logger = FileLogger(filename='{{run_name}}/logs-rank{{rank}}.txt')
>>> trainer = Trainer(loggers=[file_logger], run_name='my-awesome-run')
>>> file_logger.filename
'my-awesome-run/logs-rank0.txt'
Default: `'{{run_name}}/logs-rank{{rank}}.txt'`
remote_file_name (str, optional): Format string for the logfile's name.
The logfile will be periodically logged (according to the ``flush_interval``) as a file.
The file name will be determined by this format string.
.. seealso:: :doc:`Uploading Files</trainer/file_uploading>` for notes for file uploading.
The same format variables for ``filename`` are available. Setting this parameter to ``None``
(the default) will use the same format string as ``filename``. It is sometimes helpful to deviate
from this default. For example, when ``filename`` contains an absolute path, it is recommended to
set this parameter explicitly, so the absolute path does not appear in any remote file stores.
Leading slashes (``'/'``) will be stripped.
Default: ``None`` (which uses the same format string as ``filename``)
capture_stdout (bool, optional): Whether to include ``stdout`` output in ``filename``. (default: ``True``)
capture_stderr (bool, optional): Whether to include ``stderr`` output in ``filename``. (default: ``True``)
buffer_size (int, optional): Buffer size. See :py:func:`open`.
Default: ``1`` for line buffering.
log_traces (bool, optional): Whether to log algorithm traces. See :class:`~.Engine` for more detail.
flush_interval (int, optional): How frequently to flush the log to the file, in batches.
Default: ``100``.
overwrite (bool, optional): Whether to overwrite an existing logfile. (default: ``False``)
"""
def __init__(
self,
filename: str = '{run_name}/logs-rank{rank}.txt',
remote_file_name: Optional[str] = None,
*,
capture_stdout: bool = True,
capture_stderr: bool = True,
buffer_size: int = 1,
log_traces: bool = True,
flush_interval: int = 100,
overwrite: bool = False,
) -> None:
self.filename_format = filename
if remote_file_name is None:
remote_file_name = filename.replace(os.path.sep, '/')
self.remote_file_name_format = remote_file_name
self.buffer_size = buffer_size
self.should_log_traces = log_traces
self.flush_interval = flush_interval
self.is_batch_interval = False
self.is_epoch_interval = False
self.file: Optional[TextIO] = None
self.overwrite = overwrite
self._queue: queue.Queue[str] = queue.Queue()
self._run_name = None
# Track whether the next line is on a newline
# (and if so, then the prefix should be appended)
self._is_newline = True
self._closed = False
if capture_stdout:
sys.stdout.write = self._get_new_writer('[stdout]: ', sys.stdout.write)
if capture_stderr:
sys.stderr.write = self._get_new_writer('[stderr]: ', sys.stderr.write)
def _get_new_writer(self, prefix: str, original_writer: Callable[[str], int]):
"""Returns a writer that intercepts calls to the ``original_writer``."""
def new_write(s: str) -> int:
if not self._closed:
self.write(prefix, s)
return original_writer(s)
return new_write
@property
def filename(self) -> str:
"""The filename for the logfile."""
if self._run_name is None:
raise RuntimeError('The run name is not set. It should have been set on Event.INIT.')
name = format_name_with_dist(self.filename_format, run_name=self._run_name)
return name
@property
def remote_file_name(self) -> str:
"""The remote file name for the logfile."""
if self._run_name is None:
raise RuntimeError('The run name is not set. It should have been set on Event.INIT.')
name = format_name_with_dist(self.remote_file_name_format, run_name=self._run_name)
name = name.lstrip('/')
return name
def epoch_start(self, state: State, logger: Logger) -> None:
# Flush any log calls that occurred during INIT or FIT_START
self._flush_file(logger)
def log_traces(self, traces: Dict[str, Any]):
if self.should_log_traces:
for trace_name, trace in traces.items():
trace_str = format_log_data_value(trace)
self.write(
f'[trace]: {trace_name}:',
trace_str + '\n',
)
def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
for metric_name, metric in metrics.items():
metric_str = format_log_data_value(metric)
self.write(
f'[metric][batch={step}]: ',
f'{metric_name}: {metric_str} \n',
)
def log_hyperparameters(self, hyperparameters: Dict[str, Any]):
for hparam_name, hparam_value in hyperparameters.items():
hparam_str = format_log_data_value(hparam_value)
self.write(
f'[hyperparameter]: ',
f'{hparam_name}: {hparam_str} \n',
)
def init(self, state: State, logger: Logger) -> None:
del logger # unused
self._is_newline = True
self._run_name = state.run_name
if self.file is not None:
raise RuntimeError('The file logger is already initialized')
file_dirname = os.path.dirname(self.filename)
if file_dirname:
os.makedirs(file_dirname, exist_ok=True)
mode = 'w+' if self.overwrite else 'x+'
self.file = open(self.filename, mode, buffering=self.buffer_size)
self._flush_queue()
def batch_end(self, state: State, logger: Logger) -> None:
assert self.file is not None
if int(state.timestamp.batch) % self.flush_interval == 0:
self._flush_file(logger)
def eval_start(self, state: State, logger: Logger) -> None:
# Flush any log calls that occurred during INIT when using the trainer in eval-only mode
self._flush_file(logger)
def epoch_end(self, state: State, logger: Logger) -> None:
if int(state.timestamp.epoch) % self.flush_interval == 0:
self._flush_file(logger)
def write(self, prefix: str, s: str):
"""Write to the logfile.
.. note::
If the ``write`` occurs before the :attr:`.Event.INIT` event,
the write will be enqueued, as the file is not yet open.
Args:
prefix (str): A prefix for each line in the logfile.
s (str): The string to write. Each line will be prefixed with ``prefix``.
"""
formatted_lines = []
for line in s.splitlines(True):
if self._is_newline:
# Only print the prefix if it is a newline
# and the line is not empty
if line == os.linesep:
formatted_lines.append(line)
else:
formatted_lines.append(f'{prefix}{line}')
self._is_newline = False
else:
# Otherwise, append the line without the prefix
formatted_lines.append(line)
if line.endswith(os.linesep):
# if the line ends with newline, record that the next
# line should start with the prefix
self._is_newline = True
formatted_s = ''.join(formatted_lines)
if self.file is None:
self._queue.put_nowait(formatted_s)
else:
# Flush the queue, so all prints will be in order
self._flush_queue()
# Then, write to the file
print(formatted_s, file=self.file, flush=False, end='')
def _flush_queue(self):
while True:
try:
s = self._queue.get_nowait()
except queue.Empty:
break
print(s, file=self.file, flush=False, end='')
def _flush_file(self, logger: Logger) -> None:
assert self.file is not None
self._flush_queue()
self.file.flush()
os.fsync(self.file.fileno())
logger.upload_file(self.remote_file_name, self.file.name, overwrite=True)
def fit_end(self, state: State, logger: Logger) -> None:
# Flush the file on fit_end, in case it was not flushed on epoch_end and the trainer is re-used
# (which would defer when `self.close()` would be invoked)
self._flush_file(logger)
def close(self, state: State, logger: Logger) -> None:
del state # unused
self._closed = True # Stop intercepting calls to stdout/stderr
if self.file is not None:
self._flush_file(logger)
self.file.close()
self.file = None
| composer-dev | composer/loggers/file_logger.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Logs metrics to the console and without a progress bar."""
from __future__ import annotations
import sys
from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence, TextIO, Union
import numpy as np
import yaml
from composer.core.time import Time, TimeUnit
from composer.loggers.logger import Logger, format_log_data_value
from composer.loggers.logger_destination import LoggerDestination
from composer.utils import dist
if TYPE_CHECKING:
from composer.core import State
# We use deciles here, so 11 events because the deciles include 0.
NUM_EVAL_LOGGING_EVENTS = 11
class ConsoleLogger(LoggerDestination):
"""Log metrics to the console.
.. note::
This logger is automatically instantiated by the trainer via the ``log_to_console``
and ``console_stream`` options. This logger does not need to be created manually.
Args:
log_interval (int | str | Time): How frequently to log to console. (default: ``'1ep'``)
stream (str | TextIO, optional): The console stream to use. If a string, it can either be ``'stdout'`` or
``'stderr'``. (default: :attr:`sys.stderr`)
log_traces (bool): Whether to log traces or not. (default: ``False``)
"""
def __init__(self,
log_interval: Union[int, str, Time] = '1ba',
stream: Union[str, TextIO] = sys.stderr,
log_traces: bool = False) -> None:
if isinstance(log_interval, int):
log_interval = Time(log_interval, TimeUnit.EPOCH)
if isinstance(log_interval, str):
log_interval = Time.from_timestring(log_interval)
if log_interval.unit not in (TimeUnit.EPOCH, TimeUnit.BATCH):
raise ValueError('The `console_log_interval` argument must have units of EPOCH or BATCH.')
self.log_interval = log_interval
# set the stream
if isinstance(stream, str):
if stream.lower() == 'stdout':
stream = sys.stdout
elif stream.lower() == 'stderr':
stream = sys.stderr
else:
raise ValueError(f'stream must be one of ("stdout", "stderr", TextIO-like), got {stream}')
self.should_log_traces = log_traces
self.stream = stream
self.hparams: Dict[str, Any] = {}
self.hparams_already_logged_to_console: bool = False
self.logged_metrics: Dict[str, float] = {}
self.eval_batch_idxs_to_log: Sequence[int] = []
def log_traces(self, traces: Dict[str, Any]):
if self.should_log_traces:
for trace_name, trace in traces.items():
trace_str = format_log_data_value(trace)
self._log_to_console(f'[trace]: {trace_name}:' + trace_str + '\n')
def log_hyperparameters(self, hyperparameters: Dict[str, Any]):
# Lazy logging of hyperparameters.
self.hparams.update(hyperparameters)
def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
del step
# Lazy logging of metrics.
# Stores all metrics logged until they are cleared with a log_to_console call
self.logged_metrics.update(metrics)
def _log_hparams_to_console(self):
if dist.get_local_rank() == 0:
self._log_to_console('*' * 30)
self._log_to_console('Config:')
self._log_to_console(yaml.dump(self.hparams))
self._log_to_console('*' * 30)
def epoch_end(self, state: State, logger: Logger) -> None:
cur_epoch = int(state.timestamp.epoch) # epoch gets incremented right before EPOCH_END
unit = self.log_interval.unit
if unit == TimeUnit.EPOCH and (cur_epoch % int(self.log_interval) == 0 or cur_epoch == 1):
self.log_to_console(self.logged_metrics, prefix='Train ', state=state)
# Always clear logged metrics so they don't get logged in a subsequent eval call. The
# metrics will be recomputed and overridden in future batches so they can be safely
# discarded.
self.logged_metrics = {}
def batch_end(self, state: State, logger: Logger) -> None:
cur_batch = int(state.timestamp.batch)
unit = self.log_interval.unit
if unit == TimeUnit.BATCH and (cur_batch % int(self.log_interval) == 0 or cur_batch == 1):
self.log_to_console(self.logged_metrics, prefix='Train ', state=state)
# Clear logged metrics.
self.logged_metrics = {}
def eval_batch_end(self, state: State, logger: Logger) -> None:
cur_batch = int(state.eval_timestamp.batch)
if cur_batch in self.eval_batch_idxs_to_log:
self.log_to_console({}, prefix='Eval ', state=state, is_train=False)
def eval_end(self, state: State, logger: Logger) -> None:
# Log to the console at the end of eval no matter what log interval is selected.
self.log_to_console(self.logged_metrics, prefix='Eval ', state=state, is_train=False)
self.logged_metrics = {}
def fit_start(self, state: State, logger: Logger) -> None:
if not self.hparams_already_logged_to_console:
self.hparams_already_logged_to_console = True
self._log_hparams_to_console()
def predict_start(self, state: State, logger: Logger) -> None:
if not self.hparams_already_logged_to_console:
self.hparams_already_logged_to_console = True
self._log_hparams_to_console()
def eval_start(self, state: State, logger: Logger) -> None:
total_eval_batches = self._get_total_eval_batches(state)
deciles = np.linspace(0, 1, NUM_EVAL_LOGGING_EVENTS)
batch_idxs = np.arange(1, total_eval_batches + 1)
if total_eval_batches < NUM_EVAL_LOGGING_EVENTS:
self.eval_batch_idxs_to_log = list(batch_idxs)
else:
self.eval_batch_idxs_to_log = list(np.quantile(batch_idxs, deciles).round().astype(dtype=int))
# Remove index of last batch, so that we don't print progress at end of last batch and then
# at eval end.
last_batch_idx = total_eval_batches
self.eval_batch_idxs_to_log.remove(last_batch_idx)
if not self.hparams_already_logged_to_console:
self.hparams_already_logged_to_console = True
self._log_hparams_to_console()
def _get_eval_progress_string(self, state: State):
eval_batch = state.eval_timestamp.batch.value
eval_dataloader_label = state.dataloader_label
total_eval_batches = self._get_total_eval_batches(state)
curr_progress = f'[Eval batch={eval_batch}/{total_eval_batches}] Eval on {eval_dataloader_label} data'
return curr_progress
def _get_total_eval_batches(self, state: State) -> int:
cur_evaluator = [evaluator for evaluator in state.evaluators if evaluator.label == state.dataloader_label][0]
total_eval_batches = int(
state.dataloader_len) if state.dataloader_len is not None else cur_evaluator.subset_num_batches
# To please pyright. Based on _set_evaluator_interval_and_subset_num_batches, total_eval_batches can't be None
assert total_eval_batches is not None
return total_eval_batches
def _get_progress_string(self, state: State):
if state.max_duration is None:
training_progress = ''
elif state.max_duration.unit == TimeUnit.EPOCH:
cur_batch = int(state.timestamp.batch_in_epoch)
cur_epoch = int(state.timestamp.epoch)
if cur_batch == 0 and cur_epoch != 0:
cur_epoch -= 1
cur_batch = int(state.dataloader_len) if state.dataloader_len is not None else cur_batch
if state.dataloader_len is None:
curr_progress = f'[batch={cur_batch}]'
else:
total = int(state.dataloader_len)
curr_progress = f'[batch={cur_batch}/{total}]'
training_progress = f'[epoch={cur_epoch + 1}]{curr_progress}'
else:
unit = state.max_duration.unit
curr_duration = int(state.timestamp.get(unit))
total = state.max_duration.value
training_progress = f'[{unit.name.lower()}={curr_duration}/{total}]'
return training_progress
def log_to_console(self, data: Dict[str, Any], state: State, prefix: str = '', is_train=True) -> None:
# log to console
if is_train:
progress = self._get_progress_string(state)
else:
progress = self._get_eval_progress_string(state)
log_str = f'{progress}' + (':' if len(data) > 0 else '')
for data_name, data_value in data.items():
data_str = format_log_data_value(data_value)
log_str += f'\n\t {prefix}{data_name}: {data_str}'
self._log_to_console(log_str)
def _log_to_console(self, log_str: str):
"""Logs to the console, avoiding interleaving with a progress bar."""
# write directly to self.stream; no active progress bar
print(log_str, file=self.stream, flush=True)
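# Illustrative, self-contained sketch (not part of the Composer API): metrics are
# buffered in ``logged_metrics`` until the next log interval, while traces (when
# ``log_traces=True``) are written to the stream immediately. In normal use the
# trainer constructs this logger via the ``log_to_console`` option; constructing it
# directly here only demonstrates the buffering behavior.
if __name__ == '__main__':
    console_logger = ConsoleLogger(log_interval='2ba', stream='stdout', log_traces=True)
    console_logger.log_metrics({'loss/train/total': 1.23})  # buffered until the next interval
    console_logger.log_traces({'algorithm_traces/example': 'ran'})  # printed right away
    print(console_logger.logged_metrics)  # {'loss/train/total': 1.23}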
| composer-dev | composer/loggers/console_logger.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Log to `Comet <https://www.comet.com/?utm_source=mosaicml&utm_medium=partner&utm_campaign=mosaicml_comet_integration>`_."""
from __future__ import annotations
import textwrap
from typing import Any, Dict, Optional, Sequence, Union
import numpy as np
import torch
from torch import nn
from torchvision.utils import draw_segmentation_masks
from composer.core.state import State
from composer.loggers.logger import Logger
from composer.loggers.logger_destination import LoggerDestination
from composer.utils import MissingConditionalImportError, dist
__all__ = ['CometMLLogger']
class CometMLLogger(LoggerDestination):
"""Log to `Comet <https://www.comet.com/?utm_source=mosaicml&utm_medium=partner&utm_campaign=mosaicml_comet_integration>`_.
Args:
workspace (str, optional): The name of the workspace which contains the project
you want to attach your experiment to. If not specified, this will default to the
default workspace configured in your Comet account settings.
project_name (str, optional): The name of the project to categorize your experiment in.
A new project with this name will be created under the Comet workspace if one
with this name does not exist. If no project name is specified, the experiment will go
under Uncategorized Experiments.
log_code (bool): Whether to log your code in your experiment (default: ``False``).
log_graph (bool): Whether to log your computational graph in your experiment
(default: ``False``).
name (str, optional): The name of your experiment. If not specified, it will be set
to :attr:`.State.run_name`.
rank_zero_only (bool, optional): Whether to log only on the rank-zero process.
(default: ``True``).
exp_kwargs (Dict[str, Any], optional): Any additional kwargs to pass to
``comet_ml.Experiment`` (see the
`Comet documentation <https://www.comet.com/docs/v2/api-and-sdk/python-sdk/reference/Experiment/?utm_source=mosaicml&utm_medium=partner&utm_campaign=mosaicml_comet_integration>`_).
"""
def __init__(
self,
workspace: Optional[str] = None,
project_name: Optional[str] = None,
log_code: bool = False,
log_graph: bool = False,
name: Optional[str] = None,
rank_zero_only: bool = True,
exp_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
try:
from comet_ml import Experiment
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='comet_ml',
conda_package='comet_ml',
conda_channel='conda-forge') from e
self._enabled = (not rank_zero_only) or dist.get_global_rank() == 0
if exp_kwargs is None:
exp_kwargs = {}
if workspace is not None:
exp_kwargs['workspace'] = workspace
if project_name is not None:
exp_kwargs['project_name'] = project_name
exp_kwargs['log_code'] = log_code
exp_kwargs['log_graph'] = log_graph
self.name = name
self._rank_zero_only = rank_zero_only
self._exp_kwargs = exp_kwargs
self.experiment = None
if self._enabled:
self.experiment = Experiment(**self._exp_kwargs)
self.experiment.log_other('Created from', 'mosaicml-composer')
def init(self, state: State, logger: Logger) -> None:
del logger # unused
# Use the logger run name if the name is not set.
if self.name is None:
self.name = state.run_name
# Adjust name and group based on `rank_zero_only`.
if not self._rank_zero_only:
self.name += f'-rank{dist.get_global_rank()}'
if self._enabled:
assert self.experiment is not None
self.experiment.set_name(self.name)
def log_metrics(self, metrics: Dict[str, Any], step: Optional[int] = None) -> None:
if self._enabled:
assert self.experiment is not None
self.experiment.log_metrics(dic=metrics, step=step)
def log_hyperparameters(self, hyperparameters: Dict[str, Any]):
if self._enabled:
assert self.experiment is not None
self.experiment.log_parameters(hyperparameters)
def log_images(self,
images: Union[np.ndarray, torch.Tensor, Sequence[Union[np.ndarray, torch.Tensor]]],
name: str = 'Image',
channels_last: bool = False,
step: Optional[int] = None,
masks: Optional[Dict[str, Union[np.ndarray, torch.Tensor, Sequence[Union[np.ndarray,
torch.Tensor]]]]] = None,
mask_class_labels: Optional[Dict[int, str]] = None,
use_table: bool = True):
del use_table, mask_class_labels # Unused (only for wandb)
if self._enabled:
image_channels = 'last' if channels_last else 'first'
# Convert to singleton sequences if a single image or mask is specified.
if not isinstance(images, Sequence) and images.ndim <= 3:
images = [images]
# For pyright.
assert self.experiment is not None
if masks is not None:
for mask_name, mask_tensor in masks.items():
if not isinstance(mask_tensor, Sequence) and mask_tensor.ndim == 2:
masks[mask_name] = [mask_tensor]
mask_names = list(masks.keys())
for index, (image, *mask_set) in enumerate(zip(images, *masks.values())):
# Log input image
comet_image = _convert_to_comet_image(image)
self.experiment.log_image(comet_image,
name=f'{name}_{index}',
image_channels=image_channels,
step=step)
# Convert 2D index mask to one-hot boolean mask.
mask_set = [_convert_to_comet_mask(mask) for mask in mask_set]
# Log input image with mask overlay and mask by itself for each type of mask.
for mask_name, mask in zip(mask_names, mask_set):
if channels_last:
# permute to channels_first to be compatible with draw_segmentation_masks.
comet_image = image.permute(2, 0, 1)
# Log input image with mask superimposed.
im_with_mask_overlay = draw_segmentation_masks(comet_image.to(torch.uint8), mask, alpha=0.6)
self.experiment.log_image(im_with_mask_overlay,
name=f'{name}_{index} + {mask_name} mask overlaid',
image_channels='first',
step=step)
# Log mask only.
mask_only = draw_segmentation_masks(torch.zeros_like(comet_image.to(torch.uint8)), mask)
self.experiment.log_image(mask_only,
name=f'{mask_name}_{index} mask',
step=step,
image_channels='first')
else:
for index, image in enumerate(images):
comet_image = _convert_to_comet_image(image)
self.experiment.log_image(comet_image,
name=f'{name}_{index}',
image_channels=image_channels,
step=step)
def post_close(self):
if self._enabled:
assert self.experiment is not None
self.experiment.end()
def _convert_to_comet_image(image: Union[np.ndarray, torch.Tensor]) -> torch.Tensor:
if isinstance(image, torch.Tensor):
image = image.data.cpu()
elif isinstance(image, np.ndarray):
image = torch.from_numpy(image)
# Error out for empty arrays or weird arrays of dimension 0.
if np.any(np.equal(image.shape, 0)):
raise ValueError(f'Got an image (shape {image.shape}) with at least one dimension being 0! ')
image = image.squeeze()
if image.ndim > 3:
raise ValueError(
textwrap.dedent(f'''Input image must have 1, 2, or 3 dimensions, but instead got
{image.ndim} dims at shape: {image.shape}. Your input image was
interpreted as a batch of {image.ndim}-dimensional images
because you either specified a {image.ndim + 1}D image or a
list of {image.ndim}D images. Please specify either a 4D
image or a list of 3D images.'''))
return image
def _convert_to_comet_mask(mask: Union[np.ndarray, torch.Tensor]):
if isinstance(mask, np.ndarray):
mask = torch.from_numpy(mask)
mask = mask.squeeze()
if mask.ndim != 2:
raise ValueError(
textwrap.dedent(f'''Each input mask must have 2 dimensions, but instead got
{mask.ndim} dims at shape: {mask.shape}. Please specify
a sequence of 2D masks or a 3D batch of 2D masks.'''))
num_classes = int(torch.max(mask)) + 1
one_hot_mask = nn.functional.one_hot(mask, num_classes).permute(2, 0, 1).bool()
return one_hot_mask
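# Illustrative, self-contained sketch (not part of the Composer API): the helper
# above turns a 2D integer class-index mask into the boolean one-hot layout
# (num_classes, H, W) that ``draw_segmentation_masks`` expects.
if __name__ == '__main__':
    index_mask = torch.tensor([[0, 1],
                               [2, 1]])
    one_hot = _convert_to_comet_mask(index_mask)
    print(one_hot.shape)  # torch.Size([3, 2, 2])
    print(one_hot.dtype)  # torch.bool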
| composer-dev | composer/loggers/cometml_logger.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Base module for callbacks."""
from __future__ import annotations
import abc
from typing import TYPE_CHECKING, Any
from composer.core.serializable import Serializable
if TYPE_CHECKING:
from composer import Event, State
from composer.loggers import Logger
__all__ = ['Callback']
class Callback(Serializable, abc.ABC):
"""Base class for callbacks.
Callbacks provide hooks that can run at each training loop :class:`.Event`. A callback is similar to
an :class:`.Algorithm` in that they are run on specific events, but it differs from an :class:`.Algorithm`
in that it should not modify the training of the model. By convention, callbacks should not modify the
:class:`.State`. They are typically used for non-essential recording functions such as logging or timing.
Callbacks can be implemented in two ways:
#. Override the individual methods named for each :class:`.Event`.
For example,
.. doctest::
>>> class MyCallback(Callback):
... def epoch_start(self, state: State, logger: Logger):
... print(f'Epoch: {int(state.timestamp.epoch)}')
>>> # construct trainer object with your callback
>>> trainer = Trainer(
... model=model,
... train_dataloader=train_dataloader,
... eval_dataloader=eval_dataloader,
... optimizers=optimizer,
... max_duration="1ep",
... callbacks=[MyCallback()],
... )
>>> # trainer will run MyCallback whenever the EPOCH_START
>>> # is triggered, like this:
>>> _ = trainer.engine.run_event(Event.EPOCH_START)
Epoch: 0
#. Override :meth:`run_event` if you want a single method to handle all events. If this method is overridden, then
the individual methods corresponding to each event name (such as :meth:`epoch_start`) will no longer be
automatically invoked. For example, if you override :meth:`run_event`, then :meth:`epoch_start` will not be called
on the :attr:`.Event.EPOCH_START` event, :meth:`batch_start` will not be called on the
:attr:`.Event.BATCH_START`, etc. However, you can invoke :meth:`epoch_start`, :meth:`batch_start`, etc. in your
overriding implementation of :meth:`run_event`.
For example,
.. doctest::
>>> class MyCallback(Callback):
... def run_event(self, event: Event, state: State, logger: Logger):
... if event == Event.EPOCH_START:
... print(f'Epoch: {int(state.timestamp.epoch)}')
>>> # construct trainer object with your callback
>>> trainer = Trainer(
... model=model,
... train_dataloader=train_dataloader,
... eval_dataloader=eval_dataloader,
... optimizers=optimizer,
... max_duration="1ep",
... callbacks=[MyCallback()],
... )
>>> # trainer will run MyCallback whenever the EPOCH_START
>>> # is triggered, like this:
>>> _ = trainer.engine.run_event(Event.EPOCH_START)
Epoch: 0
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
# Stub signature for pyright
del args, kwargs # unused
pass
def run_event(self, event: Event, state: State, logger: Logger) -> None:
"""Called by the engine on each event.
Args:
event (Event): The event.
state (State): The state.
logger (Logger): The logger.
"""
event_cb = getattr(self, event.value)
return event_cb(state, logger)
def init(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.INIT` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def after_load(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.AFTER_LOAD` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def fit_start(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.FIT_START` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def epoch_start(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.EPOCH_START` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def before_dataloader(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.BEFORE_DATALOADER` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def after_dataloader(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.AFTER_DATALOADER` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def batch_start(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.BATCH_START` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def before_train_batch(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.BEFORE_TRAIN_BATCH` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def before_forward(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.BEFORE_FORWARD` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def after_forward(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.AFTER_FORWARD` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def before_loss(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.BEFORE_LOSS` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def after_loss(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.AFTER_LOSS` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def before_backward(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.BEFORE_BACKWARD` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def after_backward(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.AFTER_BACKWARD` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def after_train_batch(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.AFTER_TRAIN_BATCH` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def batch_end(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.BATCH_END` event.
.. note::
The following :attr:`.State.timestamp` member variables are
incremented immediately before the :attr:`.Event.BATCH_END` event.
+------------------------------------+
| :attr:`.Timestamp.batch` |
+------------------------------------+
| :attr:`.Timestamp.batch_in_epoch` |
+------------------------------------+
| :attr:`.Timestamp.sample` |
+------------------------------------+
| :attr:`.Timestamp.sample_in_epoch` |
+------------------------------------+
| :attr:`.Timestamp.token` |
+------------------------------------+
| :attr:`.Timestamp.token_in_epoch` |
+------------------------------------+
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def batch_checkpoint(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.BATCH_CHECKPOINT` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def epoch_end(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.EPOCH_END` event.
.. note::
:attr:`.State.timestamp` member variable :attr:`.Timestamp.epoch`
is incremented immediately before :attr:`.Event.EPOCH_END`.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def epoch_checkpoint(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.EPOCH_CHECKPOINT` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def predict_start(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.PREDICT_START` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def predict_batch_start(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.PREDICT_BATCH_START` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def predict_before_forward(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.PREDICT_BATCH_FORWARD` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def predict_after_forward(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.PREDICT_AFTER_FORWARD` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def predict_batch_end(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.PREDICT_BATCH_END` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def predict_end(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.PREDICT_END` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def eval_start(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.EVAL_START` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def eval_batch_start(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.EVAL_BATCH_START` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def eval_before_forward(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.EVAL_BATCH_FORWARD` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def eval_after_forward(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.EVAL_AFTER_FORWARD` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def eval_batch_end(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.EVAL_BATCH_END` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def eval_end(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.EVAL_END` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def fit_end(self, state: State, logger: Logger) -> None:
"""Called on the :attr:`.Event.FIT_END` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
del state, logger # unused
pass
def close(self, state: State, logger: Logger) -> None:
"""Called whenever the trainer finishes training, even when there is an exception.
It should be used for clean up tasks such as flushing I/O streams and/or
closing any files that may have been opened during the :attr:`.Event.INIT` event.
Args:
state (State): The training state.
logger (Logger): The logger.
"""
pass
def post_close(self) -> None:
"""Called after :meth:`close` has been invoked for each callback.
Very few callbacks should need to implement :meth:`post_close`.
This callback can be used to back up any data that may have
been written by other callbacks during :meth:`close`.
"""
pass
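# Illustrative sketch (not part of the Composer API): a callback that opens a file
# during INIT, records each finished epoch, and releases the file in ``close`` so
# cleanup happens even if training raises. The filename is arbitrary.
class EpochRecorder(Callback):
    def __init__(self, filename: str = 'epochs.txt') -> None:
        self.filename = filename
        self.file = None
    def init(self, state: State, logger: Logger) -> None:
        del state, logger  # unused
        self.file = open(self.filename, 'w')
    def epoch_end(self, state: State, logger: Logger) -> None:
        del logger  # unused
        assert self.file is not None
        self.file.write(f'finished epoch {int(state.timestamp.epoch)}\n')
    def close(self, state: State, logger: Logger) -> None:
        del state, logger  # unused
        if self.file is not None:
            self.file.close()
            self.file = None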
| composer-dev | composer/core/callback.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Base class for algorithms that improve a model's quality or efficiency."""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Dict, Optional
from composer.core.serializable import Serializable
if TYPE_CHECKING:
from composer.core import Event, State
from composer.loggers import Logger
__all__ = ['Algorithm']
class Algorithm(Serializable, ABC):
"""Base class for algorithms.
Algorithms are pieces of code which run at specific events (see :class:`.Event`) in the training loop.
Algorithms modify the trainer's :class:`.State`, generally with the effect of improving the model's quality
or increasing the efficiency and throughput of the training loop.
Algorithms must implement the following two methods:
+----------------+-------------------------------------------------------------------------------+
| Method | Description |
+================+===============================================================================+
| :func:`match` | returns whether the algorithm should be run given the current |
| | :class:`.Event` and :class:`.State`. |
+----------------+-------------------------------------------------------------------------------+
| :func:`apply` | Executes the algorithm's code and makes an in-place change |
| | to the :class:`.State`. |
+----------------+-------------------------------------------------------------------------------+
"""
def __init__(self, *args, **kwargs): # Stub signature for PyRight
del args, kwargs # unused
pass
@property
def find_unused_parameters(self) -> bool:
"""Indicates whether this algorithm may cause some model parameters to be unused. Defaults to False.
For example, it is used to tell :class:`torch.nn.parallel.DistributedDataParallel` (DDP) that some parameters
will be frozen during training, and hence it should not expect gradients from them. All algorithms which do any
kind of parameter freezing should override this function to return ``True``.
.. note::
DeepSpeed integration with this function returning True is not tested. It may not work as expected.
"""
return False
@property
def backwards_create_graph(self) -> bool:
"""Return ``True`` to indicate this algorithm requires a second derivative to be computed. Defaults to ``False``.
If it returns ``True``, ``create_graph=True`` will be passed to :meth:`torch.Tensor.backward` which will result in
the graph of the gradient also being constructed. This allows the computation of second order derivatives.
"""
return False
@staticmethod
def required_on_load() -> bool:
"""Return `True` to indicate this algorithm is required when loading from a checkpoint which used it."""
return False
def state_dict(self) -> Dict[str, Any]:
return {'repr': self.__repr__()}
@abstractmethod
def match(self, event: Event, state: State) -> bool:
"""Determines whether this algorithm should run given the current :class:`.Event` and :class:`.State`.
Examples:
To only run on a specific event (e.g., on :attr:`.Event.BEFORE_LOSS`), override match as shown below:
>>> class MyAlgorithm:
... def match(self, event, state):
... return event == Event.BEFORE_LOSS
>>> MyAlgorithm().match(Event.BEFORE_LOSS, state)
True
To run based on some value of a :class:`.State` attribute, override match as shown below:
>>> class MyAlgorithm:
... def match(self, event, state):
... return state.timestamp.epoch > 30
>>> MyAlgorithm().match(Event.BEFORE_LOSS, state)
False
See :class:`.State` for accessible attributes.
Args:
event (Event): The current event.
state (State): The current state.
Returns:
bool: True if this algorithm should run now.
"""
raise NotImplementedError(f'implement match() required for {self.__class__.__name__}')
@abstractmethod
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
"""Applies the algorithm to make an in-place change to the :class:`.State`.
Can optionally return an exit code to be stored in a :class:`.Trace`.
This exit code is made accessible for debugging.
Args:
event (Event): The current event.
state (State): The current state.
logger (Logger): A logger to use for logging algorithm-specific metrics.
Returns:
int or None: exit code that will be stored in :class:`.Trace` and made accessible for debugging.
"""
raise NotImplementedError(f'implement apply() required for {self.__class__.__name__}')
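# Illustrative sketch (not a built-in Composer algorithm): halve every optimizer
# learning rate once, at the start of epoch 5. ``match`` gates on the event and the
# current epoch; ``apply`` mutates the optimizers held on the state in place. This
# assumes ``state.optimizers`` holds the torch optimizers, as Composer's State does.
class HalveLRAtEpochFive(Algorithm):
    def match(self, event: Event, state: State) -> bool:
        from composer.core import Event  # runtime import; the module-level import above is TYPE_CHECKING only
        return event == Event.EPOCH_START and int(state.timestamp.epoch) == 5
    def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
        del event, logger  # unused
        for optimizer in state.optimizers:
            for group in optimizer.param_groups:
                group['lr'] *= 0.5
        return None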
| composer-dev | composer/core/algorithm.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Utilities to track training progress in terms of epochs, batches, samples, and tokens.
Callbacks, algorithms, and schedulers can use the current training time to fire at certain points in the training
process.
The :class:`~.time.Timestamp` class tracks the total number of epochs, batches, samples, and tokens. The trainer is
responsible for updating it at the end of every epoch and batch. There is only one instance of the
:class:`~.time.Timestamp`, which is attached to the :class:`~.state.State`.
The :class:`~.time.Time` class represents static durations of training time or points in the training process in terms
of a specific :class:`~.time.TimeUnit` enum. This class supports comparisons, arithmetic, and conversions.
See the :doc:`Time Guide </trainer/time>` for more details on tracking time during training.
"""
from __future__ import annotations
import datetime
import re
from typing import Any, Dict, Generic, Optional, TypeVar, Union, cast
from composer.core.serializable import Serializable
from composer.utils import StringEnum
__all__ = ['TimeUnit', 'Time', 'Timestamp', 'ensure_time']
class TimeUnit(StringEnum):
"""Enum class to represent units of time for the training process.
Attributes:
EPOCH (str): Epochs.
BATCH (str): Batches (i.e. number of optimization steps)
SAMPLE (str): Samples.
TOKEN (str): Tokens. Applicable for natural language processing (NLP) models.
DURATION (str): Fraction of the training process complete, on ``[0.0, 1.0)``
"""
EPOCH = 'ep'
BATCH = 'ba'
SAMPLE = 'sp'
TOKEN = 'tok'
DURATION = 'dur'
# regex for parsing integers / decimals / scientific notation
_NUM_REGEX = r'-?[\d.]+(?:e-?\d+)?'
# regex for parsing a time string.
_TIME_STR_REGEX = re.compile(r'^(?:' + r'|'.join(fr'(?:({_NUM_REGEX})({time_unit.value}))' for time_unit in TimeUnit) +
r')$',
flags=re.IGNORECASE)
TValue = TypeVar('TValue', int, float)
class Time(Generic[TValue], Serializable):
"""Time represents static durations of training time in terms of a :class:`TimeUnit` enum.
See the :doc:`Time Guide </trainer/time>` for more details on tracking time during training.
To construct an instance of :class:`Time`, you can either:
#. Use a value followed by a :class:`TimeUnit` enum or string. For example,
>>> Time(5, TimeUnit.EPOCH) # describes 5 epochs.
Time(5, TimeUnit.EPOCH)
>>> Time(30_000, "tok") # describes 30,000 tokens.
Time(30000, TimeUnit.TOKEN)
>>> Time(0.5, "dur") # describes 50% of the training process.
Time(0.5, TimeUnit.DURATION)
#. Use one of the helper methods. See:
- :meth:`Time.from_epoch`
- :meth:`Time.from_batch`
- :meth:`Time.from_sample`
- :meth:`Time.from_token`
- :meth:`Time.from_duration`
- :meth:`Time.from_timestring`.
:class:`Time` supports addition and subtraction with other :class:`Time` instances that share the same
:class:`TimeUnit`. For example:
>>> Time(1, TimeUnit.EPOCH) + Time(2, TimeUnit.EPOCH)
Time(3, TimeUnit.EPOCH)
:class:`Time` supports multiplication. The multiplier must be either a number or have units of
:attr:`TimeUnit.DURATION`. The multiplicand is scaled, and its units are kept.
>>> Time(2, TimeUnit.EPOCH) * 0.5
Time(1, TimeUnit.EPOCH)
>>> Time(2, TimeUnit.EPOCH) * Time(0.5, TimeUnit.DURATION)
Time(1, TimeUnit.EPOCH)
:class:`Time` supports division. If the divisor is an instance of :class:`Time`, then it
must have the same units as the dividend, and the result has units of :attr:`TimeUnit.DURATION`.
For example:
>>> Time(4, TimeUnit.EPOCH) / Time(2, TimeUnit.EPOCH)
Time(2.0, TimeUnit.DURATION)
If the divisor is a number, then the dividend is scaled and keeps its units. For example:
>>> Time(4, TimeUnit.EPOCH) / 2
Time(2, TimeUnit.EPOCH)
Args:
value (int | float): The amount of time.
unit (str | TimeUnit): The :class:`TimeUnit` for ``value``.
"""
def __init__(
self,
value: TValue,
unit: Union[str, TimeUnit],
):
unit = TimeUnit(unit)
if unit == TimeUnit.DURATION:
value = cast(TValue, float(value))
else:
if not isinstance(value, int):
raise TypeError(f'value {value} is of type {type(value)}. Units {unit} require integer values.')
self._value, self._unit = value, TimeUnit(unit)
@classmethod
def from_epoch(cls, epoch: int) -> Time:
"""Create a :class:`Time` with units of :attr:`TimeUnit.EPOCH`.
Equivalent to ``Time(epoch, TimeUnit.EPOCH)``.
Args:
epoch (int): Number of epochs.
Returns:
Time: :class:`Time` instance, in epochs.
"""
return cls(epoch, TimeUnit.EPOCH)
@classmethod
def from_batch(cls, batch: int) -> Time:
"""Create a :class:`Time` with units of :attr:`TimeUnit.BATCH`.
Equivalent to ``Time(batch, TimeUnit.BATCH)``.
Args:
batch (int): Number of batches.
Returns:
Time: :class:`Time` instance, in batches.
"""
return cls(batch, TimeUnit.BATCH)
@classmethod
def from_sample(cls, sample: int) -> Time:
"""Create a :class:`Time` with units of :attr:`TimeUnit.SAMPLE`.
Equivalent to ``Time(sample, TimeUnit.SAMPLE)``.
Args:
sample (int): Number of samples.
Returns:
Time: :class:`Time` instance, in samples.
"""
return cls(sample, TimeUnit.SAMPLE)
@classmethod
def from_token(cls, token: int) -> Time:
"""Create a :class:`Time` with units of :attr:`TimeUnit.TOKEN`.
Equivalent to ``Time(token, TimeUnit.TOKEN)``.
Args:
token (int): Number of tokens.
Returns:
Time: :class:`Time` instance, in tokens.
"""
return cls(token, TimeUnit.TOKEN)
@classmethod
def from_duration(cls, duration: float) -> Time:
"""Create a :class:`Time` with units of :attr:`TimeUnit.DURATION`.
Equivalent to ``Time(duration, TimeUnit.DURATION)``.
Args:
duration (float): Duration of the training process. Should be on ``[0, 1)``
where ``0`` represents the beginning of the training process and ``1``
represents a completed training process.
Returns:
Time: :class:`Time` instance, in duration.
"""
return cls(duration, TimeUnit.DURATION)
@property
def value(self) -> TValue:
"""The value of the time, as a number."""
return self._value
@property
def unit(self) -> TimeUnit:
"""The unit of the time."""
return self._unit
def __repr__(self) -> str:
return f'{self.__class__.__name__}({self.value}, {self.unit})'
def __str__(self) -> str:
return f'{self.value}{self.unit.value}'
def to_timestring(self):
"""Get the time-string representation.
For example:
>>> Time(5, TimeUnit.EPOCH).to_timestring()
'5ep'
Returns:
str: The time-string representation.
"""
return str(self)
def _parse(self, other: object) -> Time:
# parse ``other`` into a Time object
if isinstance(other, Time):
return other
if isinstance(other, int):
return Time(other, self.unit)
if isinstance(other, str):
other_parsed = Time.from_timestring(other)
return other_parsed
raise TypeError(f'Cannot convert type {other} to {self.__class__.__name__}')
def _cmp(self, other: Union[int, float, Time, str]) -> int:
# When doing comparisons, if other is an integer (or float), we can safely infer
# the unit from self.unit
# E.g. calls like this should be allowed: if batch < 42: do_something()
# This eliminates the need to call .value everywhere
if not isinstance(other, (int, float, Time, str)):
return NotImplemented
if isinstance(other, (int, float)):
other = type(self)(other, self.unit)
other = self._parse(other)
if self.unit != other.unit:
raise RuntimeError(f'Cannot compare {self} to {other} since they have different units.')
if self.value < other.value:
return -1
if self.value == other.value:
return 0
assert self.value > other.value
return 1
def __eq__(self, other: Union[int, float, Time, str]):
return self._cmp(other) == 0
def __ne__(self, other: Union[int, float, Time, str]):
return self._cmp(other) != 0
def __lt__(self, other: Union[int, float, Time, str]):
return self._cmp(other) < 0
def __le__(self, other: Union[int, float, Time, str]):
return self._cmp(other) <= 0
def __gt__(self, other: Union[int, float, Time, str]):
return self._cmp(other) > 0
def __ge__(self, other: Union[int, float, Time, str]):
return self._cmp(other) >= 0
def __add__(self, other: Union[int, float, Time, str]) -> Time[TValue]:
other = self._parse(other)
if self.unit != other.unit:
raise RuntimeError(f'Cannot add {self} to {other} since they have different units.')
return Time(self.value + other.value, self.unit)
def __radd__(self, other: Union[int, float, Time, str]) -> Time[TValue]:
return self + other
def __sub__(self, other: Union[int, float, Time, str]) -> Time[TValue]:
other = self._parse(other)
if self.unit != other.unit:
raise RuntimeError(f'Cannot subtract {other} from {self} since they have different units.')
return Time(self.value - other.value, self.unit)
def __rsub__(self, other: Union[int, float, Time, str]) -> Time[TValue]:
return (-self) + other
def __neg__(self) -> Time[TValue]:
return Time(cast(TValue, -self.value), self.unit)
def __pos__(self) -> Time[TValue]:
return Time(self.value, self.unit)
def __int__(self):
return int(self.value)
def __float__(self):
return float(self.value)
def __truediv__(self, other: object) -> Time[float]:
if isinstance(other, (float, int)):
return Time(type(self.value)(self.value / other), self.unit)
other = self._parse(other)
if self.unit != other.unit:
raise RuntimeError(f'Cannot divide {self} by {other} since they have different units.')
return Time(self.value / other.value, TimeUnit.DURATION)
def __mul__(self, other: object):
if isinstance(other, (float, int)):
# Scale by the value.
return Time(type(self.value)(self.value * other), self.unit)
other = self._parse(other)
if other.unit != TimeUnit.DURATION and self.unit != TimeUnit.DURATION:
raise RuntimeError(f'Multiplication is supported only if one of the units is Duration')
real_unit = self.unit if other.unit == TimeUnit.DURATION else other.unit
real_type = float if real_unit == TimeUnit.DURATION else int
return Time(real_type(self.value * other.value), real_unit)
def __rmul__(self, other: object):
return self * other
def __hash__(self):
return hash((self.value, self.unit))
@classmethod
def from_timestring(cls, timestring: str) -> Time:
"""Parse a time string into a :class:`Time` instance.
A time string is a numerical value followed by the value of a :class:`TimeUnit` enum. For example:
>>> Time.from_timestring("5ep") # describes 5 epochs.
Time(5, TimeUnit.EPOCH)
>>> Time.from_timestring("3e4tok") # describes 30,000 tokens.
Time(30000, TimeUnit.TOKEN)
>>> Time.from_timestring("0.5dur") # describes 50% of the training process.
Time(0.5, TimeUnit.DURATION)
Returns:
Time: An instance of :class:`Time`.
"""
match = _TIME_STR_REGEX.findall(timestring)
if len(match) != 1:
raise ValueError(f'Invalid time string: {timestring}')
match = match[0]
match = [x for x in match if x != '']
assert len(match) == 2, 'each match should have a number followed by the key'
value = match[0]
unit = TimeUnit(match[1])
value = float(value) # always parsing first as float b/c it could be scientific notation
if unit != TimeUnit.DURATION:
if int(value) != value:
raise TypeError(f'value {value} is not an integer. Units {unit} require integer values.')
value = int(value)
return cls(value, unit)
class Timestamp(Serializable):
"""Timestamp represents a snapshot of the current training progress.
The timestamp measures training progress in terms of epochs, batches, samples, tokens, and wall clock time.
Timestamps are not updated in-place.
See the :doc:`Time Guide </trainer/time>` for more details on tracking time during training.
Args:
epoch (int | Time[int], optional): The epoch.
batch (int | Time[int], optional): the batch.
sample (int | Time[int], optional): The sample.
token (int | Time[int], optional): The token.
batch_in_epoch (int | Time[int], optional): The batch in the epoch.
sample_in_epoch (int | Time[int], optional): The sample in the epoch.
token_in_epoch (int | Time[int], optional): The token in the epoch.
total_wct (datetime.timedelta, optional): The total wall-clock duration.
epoch_wct (datetime.timedelta, optional): The wall-clock duration of the last epoch.
batch_wct (datetime.timedelta, optional): The wall-clock duration of the last batch.
"""
def __init__(
self,
epoch: Union[int, Time[int]] = 0,
batch: Union[int, Time[int]] = 0,
sample: Union[int, Time[int]] = 0,
token: Union[int, Time[int]] = 0,
batch_in_epoch: Union[int, Time[int]] = 0,
sample_in_epoch: Union[int, Time[int]] = 0,
token_in_epoch: Union[int, Time[int]] = 0,
total_wct: Optional[datetime.timedelta] = None,
epoch_wct: Optional[datetime.timedelta] = None,
batch_wct: Optional[datetime.timedelta] = None,
):
epoch = ensure_time(epoch, TimeUnit.EPOCH)
if epoch.unit != TimeUnit.EPOCH:
raise ValueError(f'The `epoch` argument has units of {epoch.unit}; not {TimeUnit.EPOCH}.')
self._epoch = epoch
batch = ensure_time(batch, TimeUnit.BATCH)
if batch.unit != TimeUnit.BATCH:
raise ValueError(f'The `batch` argument has units of {batch.unit}; not {TimeUnit.BATCH}.')
self._batch = batch
sample = ensure_time(sample, TimeUnit.SAMPLE)
if sample.unit != TimeUnit.SAMPLE:
raise ValueError(f'The `sample` argument has units of {sample.unit}; not {TimeUnit.SAMPLE}.')
self._sample = sample
token = ensure_time(token, TimeUnit.TOKEN)
if token.unit != TimeUnit.TOKEN:
raise ValueError(f'The `token` argument has units of {token.unit}; not {TimeUnit.TOKEN}.')
self._token = token
batch_in_epoch = ensure_time(batch_in_epoch, TimeUnit.BATCH)
if batch_in_epoch.unit != TimeUnit.BATCH:
raise ValueError((f'The `batch_in_epoch` argument has units of {batch_in_epoch.unit}; '
f'not {TimeUnit.BATCH}.'))
self._batch_in_epoch = batch_in_epoch
sample_in_epoch = ensure_time(sample_in_epoch, TimeUnit.SAMPLE)
if sample_in_epoch.unit != TimeUnit.SAMPLE:
raise ValueError((f'The `sample_in_epoch` argument has units of {sample_in_epoch.unit}; '
f'not {TimeUnit.SAMPLE}.'))
self._sample_in_epoch = sample_in_epoch
token_in_epoch = ensure_time(token_in_epoch, TimeUnit.TOKEN)
if token_in_epoch.unit != TimeUnit.TOKEN:
raise ValueError((f'The `token_in_epoch` argument has units of {token_in_epoch.unit}; '
f'not {TimeUnit.TOKEN}.'))
self._token_in_epoch = token_in_epoch
if total_wct is None:
total_wct = datetime.timedelta(seconds=0)
self._total_wct = total_wct
if epoch_wct is None:
epoch_wct = datetime.timedelta(seconds=0)
self._epoch_wct = epoch_wct
if batch_wct is None:
batch_wct = datetime.timedelta(seconds=0)
self._batch_wct = batch_wct
def state_dict(self) -> Dict[str, Any]:
return {
'epoch': self.epoch.value,
'batch': self.batch.value,
'sample': self.sample.value,
'token': self.token.value,
'batch_in_epoch': self.batch_in_epoch.value,
'sample_in_epoch': self.sample_in_epoch.value,
'token_in_epoch': self.token_in_epoch.value,
'total_wct': self.total_wct,
'epoch_wct': self.epoch_wct,
'batch_wct': self.batch_wct,
}
def get_state(self) -> Dict[str, Union[Time[int], datetime.timedelta]]:
"""Returns all values of the timestamp object in a dictionary.
Returns:
Dict[str, Union[Time[int], datetime.timedelta]]: All values of the timestamp object.
"""
return {
'epoch': self.epoch,
'batch': self.batch,
'sample': self.sample,
'token': self.token,
'batch_in_epoch': self.batch_in_epoch,
'sample_in_epoch': self.sample_in_epoch,
'token_in_epoch': self.token_in_epoch,
'total_wct': self.total_wct,
'epoch_wct': self.epoch_wct,
'batch_wct': self.batch_wct,
}
def load_state_dict(self, state: Dict[str, Any]) -> None:
self._epoch = Time(state['epoch'], TimeUnit.EPOCH)
self._batch = Time(state['batch'], TimeUnit.BATCH)
self._sample = Time(state['sample'], TimeUnit.SAMPLE)
self._token = Time(state['token'], TimeUnit.TOKEN)
self._batch_in_epoch = Time(state['batch_in_epoch'], TimeUnit.BATCH)
self._sample_in_epoch = Time(state['sample_in_epoch'], TimeUnit.SAMPLE)
self._token_in_epoch = Time(state['token_in_epoch'], TimeUnit.TOKEN)
# Wall clock time tracking was added in composer v0.7.0
# Using conditional checks as not to break old checkpoints
if 'total_wct' in state:
self._total_wct = state['total_wct']
if 'epoch_wct' in state:
self._epoch_wct = state['epoch_wct']
if 'batch_wct' in state:
self._batch_wct = state['batch_wct']
@property
def epoch(self) -> Time[int]:
"""The total epoch count."""
return self._epoch
@property
def batch(self) -> Time[int]:
"""The total batch count."""
return self._batch
@property
def sample(self) -> Time[int]:
"""The total sample count."""
return self._sample
@property
def token(self) -> Time[int]:
"""The total token count."""
return self._token
@property
def batch_in_epoch(self) -> Time[int]:
"""The batch count in the current epoch (resets at 0 at the beginning of every epoch)."""
return self._batch_in_epoch
@property
def sample_in_epoch(self) -> Time[int]:
"""The sample count in the current epoch (resets at 0 at the beginning of every epoch)."""
return self._sample_in_epoch
@property
def token_in_epoch(self) -> Time[int]:
"""The token count in the current epoch (resets at 0 at the beginning of every epoch)."""
return self._token_in_epoch
@property
def total_wct(self) -> datetime.timedelta:
"""The wall-clock duration (in seconds) from the beginning of training."""
return self._total_wct
@property
def epoch_wct(self) -> datetime.timedelta:
"""The wall-clock duration (in seconds) for the current epoch."""
return self._epoch_wct
@property
def batch_wct(self) -> datetime.timedelta:
"""The wall-clock duration (in seconds) for the last batch."""
return self._batch_wct
def get(self, unit: Union[str, TimeUnit]) -> Time[int]:
"""Returns the current time in the specified unit.
Args:
unit (str | TimeUnit): The desired unit.
Returns:
Time: The current time, in the specified unit.
"""
unit = TimeUnit(unit)
if unit == TimeUnit.EPOCH:
return self.epoch
if unit == TimeUnit.BATCH:
return self.batch
if unit == TimeUnit.SAMPLE:
return self.sample
if unit == TimeUnit.TOKEN:
return self.token
raise ValueError(f'Invalid unit: {unit}')
def _parse(self, other: object) -> Time:
# parse ``other`` into a Time object
if isinstance(other, Time):
return other
if isinstance(other, str):
other_parsed = Time.from_timestring(other)
return other_parsed
raise TypeError(f'Cannot convert type {other} to {self.__class__.__name__}')
def __eq__(self, other: object):
if not isinstance(other, (Time, Timestamp, str)):
return NotImplemented
if isinstance(other, Timestamp):
return self.state_dict() == other.state_dict()
other = self._parse(other)
self_counter = self.get(other.unit)
return self_counter == other
def __ne__(self, other: object):
if not isinstance(other, (Time, Timestamp, str)):
return NotImplemented
if isinstance(other, Timestamp):
return self.state_dict() != other.state_dict()
other = self._parse(other)
self_counter = self.get(other.unit)
return self_counter != other
def __lt__(self, other: object):
if not isinstance(other, (Time, str)):
return NotImplemented
other = self._parse(other)
self_counter = self.get(other.unit)
return self_counter < other
def __le__(self, other: object):
if not isinstance(other, (Time, str)):
return NotImplemented
other = self._parse(other)
self_counter = self.get(other.unit)
return self_counter <= other
def __gt__(self, other: object):
if not isinstance(other, (Time, str)):
return NotImplemented
other = self._parse(other)
self_counter = self.get(other.unit)
return self_counter > other
def __ge__(self, other: object):
if not isinstance(other, (Time, str)):
return NotImplemented
other = self._parse(other)
self_counter = self.get(other.unit)
return self_counter >= other
def to_next_batch(
self,
samples: Union[int, Time] = 0,
tokens: Union[int, Time] = 0,
duration: Optional[datetime.timedelta] = None,
):
"""Create a new :class:`.Timestamp`, advanced to the next batch.
Equivalent to:
.. testsetup::
from composer.core.time import Timestamp
import datetime
timestamp = Timestamp()
samples = 1
tokens = 2
duration = datetime.timedelta(seconds=0)
.. doctest::
>>> timestamp.copy(
... batch=timestamp.batch + 1,
... batch_in_epoch=timestamp.batch_in_epoch + 1,
... sample=timestamp.sample + samples,
... sample_in_epoch=timestamp.sample_in_epoch + samples,
... token = timestamp.token + tokens,
... token_in_epoch=timestamp.token_in_epoch + tokens,
... total_wct=timestamp.total_wct + duration,
... epoch_wct=timestamp.epoch_wct + duration,
... batch_wct=duration,
... )
Timestamp(...)
.. note::
For accurate time tracking, when doing distributed training, the ``samples`` and ``tokens`` should be
the total across all ranks for the given batch. This method will not accumulate these counts automatically.
If per-rank sample and token counts are provided, these counts will differ across ranks, which could lead
to inconsistent behavior in :class:`.Algorithm` or :class:`.Callback` instances that use these counts.
Args:
samples (int | Time, optional): The number of samples trained in the batch. Defaults to 0.
tokens (int | Time, optional): The number of tokens trained in the batch. Defaults to 0.
duration (datetime.timedelta, optional): The duration to train the batch.
"""
if duration is None:
duration = datetime.timedelta(seconds=0)
return self.copy(
batch=self.batch + 1,
batch_in_epoch=self.batch_in_epoch + 1,
sample=self.sample + samples,
sample_in_epoch=self.sample_in_epoch + samples,
token=self.token + tokens,
token_in_epoch=self.token_in_epoch + tokens,
total_wct=self.total_wct + duration,
epoch_wct=self.epoch_wct + duration,
batch_wct=duration,
)
def to_next_epoch(self):
"""Create a new :class:`.Timestamp`, advanced to the next epoch.
Equivalent to:
.. testsetup::
from composer.core.time import Timestamp
import datetime
timestamp = Timestamp()
.. doctest::
>>> timestamp.copy(
... epoch=timestamp.epoch+1,
... batch_in_epoch=0,
... sample_in_epoch=0,
... token_in_epoch=0,
... epoch_wct=datetime.timedelta(seconds=0),
... batch_wct=datetime.timedelta(seconds=0),
... )
Timestamp(...)
"""
return self.copy(
epoch=self.epoch + 1,
batch_in_epoch=0,
sample_in_epoch=0,
token_in_epoch=0,
epoch_wct=datetime.timedelta(seconds=0),
batch_wct=datetime.timedelta(seconds=0),
)
def copy(
self,
epoch: Optional[Union[int, Time[int]]] = None,
batch: Optional[Union[int, Time[int]]] = None,
sample: Optional[Union[int, Time[int]]] = None,
token: Optional[Union[int, Time[int]]] = None,
batch_in_epoch: Optional[Union[int, Time[int]]] = None,
sample_in_epoch: Optional[Union[int, Time[int]]] = None,
token_in_epoch: Optional[Union[int, Time[int]]] = None,
total_wct: Optional[datetime.timedelta] = None,
epoch_wct: Optional[datetime.timedelta] = None,
batch_wct: Optional[datetime.timedelta] = None,
) -> Timestamp:
"""Create a copy of the timestamp.
Any specified values will override the existing values in the returned copy.
Args:
epoch (int | Time[int], optional): The epoch.
batch (int | Time[int], optional): the batch.
sample (int | Time[int], optional): The sample.
token (int | Time[int], optional): The token.
batch_in_epoch (int | Time[int], optional): The batch in the epoch.
sample_in_epoch (int | Time[int], optional): The sample in the epoch.
token_in_epoch (int | Time[int], optional): The token in the epoch.
total_wct (datetime.timedelta, optional): The elapsed duration from the beginning of training.
epoch_wct (datetime.timedelta, optional): The wall-clock duration of the current epoch.
batch_wct (datetime.timedelta, optional): The wall-clock duration of the last batch.
Returns:
Timestamp: A new timestamp instance, created from a copy, but with any specified values
overriding the existing values.
"""
return Timestamp(
epoch=epoch if epoch is not None else self.epoch,
batch=batch if batch is not None else self.batch,
sample=sample if sample is not None else self.sample,
token=token if token is not None else self.token,
batch_in_epoch=batch_in_epoch if batch_in_epoch is not None else self.batch_in_epoch,
sample_in_epoch=sample_in_epoch if sample_in_epoch is not None else self.sample_in_epoch,
token_in_epoch=token_in_epoch if token_in_epoch is not None else self.token_in_epoch,
total_wct=total_wct if total_wct is not None else self.total_wct,
epoch_wct=epoch_wct if epoch_wct is not None else self.epoch_wct,
batch_wct=batch_wct if batch_wct is not None else self.batch_wct,
)
def __repr__(self) -> str:
return (f'Timestamp('
f'epoch={int(self.epoch)}, '
f'batch={int(self.batch)}, '
f'sample={int(self.sample)}, '
f'token={int(self.token)}, '
f'batch_in_epoch={int(self.batch_in_epoch)}, '
f'sample_in_epoch={int(self.sample_in_epoch)}, '
f'token_in_epoch={int(self.token_in_epoch)}, '
f'total_wct={repr(self.total_wct)}, '
f'epoch_wct={repr(self.epoch_wct)}, '
f'batch_wct={repr(self.batch_wct)}'
')')
def ensure_time(maybe_time: Union[Time, str, int], int_unit: Union[TimeUnit, str]) -> Time:
"""Ensure ``maybe_time`` is an instance of :class:`.Time`.
Args:
maybe_time (Time | str): A time string, integer, or instance of :class:`.Time`.
int_unit (TimeUnit | str): The unit to use if ``maybe_time`` is an integer
Returns:
Time: An instance of :class:`.Time`.
"""
if isinstance(maybe_time, str):
return Time.from_timestring(maybe_time)
if isinstance(maybe_time, int):
return Time(maybe_time, int_unit)
if isinstance(maybe_time, Time):
return maybe_time
raise TypeError(f'Unsupported type for ensure_time: {type(maybe_time)}')
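# Illustrative, self-contained sketch (not part of the Composer API): ``ensure_time``
# normalizes strings and bare integers into ``Time`` instances, and ``Timestamp``
# advances immutably via ``to_next_batch`` / ``to_next_epoch``.
if __name__ == '__main__':
    assert ensure_time('100ba', TimeUnit.EPOCH) == Time(100, TimeUnit.BATCH)
    assert ensure_time(3, TimeUnit.EPOCH) == Time(3, TimeUnit.EPOCH)
    timestamp = Timestamp()
    timestamp = timestamp.to_next_batch(samples=32, duration=datetime.timedelta(seconds=0.5))
    print(int(timestamp.batch), int(timestamp.sample))  # 1 32
    timestamp = timestamp.to_next_epoch()
    print(int(timestamp.epoch), int(timestamp.batch_in_epoch))  # 1 0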
| composer-dev | composer/core/time.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Training Loop Events."""
from composer.utils import StringEnum
__all__ = ['Event']
class Event(StringEnum):
"""Enum to represent training loop events.
Events mark specific points in the training loop where an :class:`~.core.Algorithm` and :class:`~.core.Callback`
can run.
The following pseudocode shows where each event fires in the training loop:
.. code-block:: python
# <INIT>
# <AFTER_LOAD>
# <FIT_START>
for epoch in range(NUM_EPOCHS):
# <EPOCH_START>
while True:
# <BEFORE_DATALOADER>
batch = next(dataloader)
if batch is None:
break
# <AFTER_DATALOADER>
# <BATCH_START>
# <BEFORE_TRAIN_BATCH>
for microbatch in batch.split(device_train_microbatch_size):
# <BEFORE_FORWARD>
outputs = model(batch)
# <AFTER_FORWARD>
# <BEFORE_LOSS>
loss = model.loss(outputs, batch)
# <AFTER_LOSS>
# <BEFORE_BACKWARD>
loss.backward()
# <AFTER_BACKWARD>
# Un-scale and clip gradients
# <AFTER_TRAIN_BATCH>
optimizer.step()
# <BATCH_END>
if should_eval(batch=True):
for eval_dataloader in eval_dataloaders:
# <EVAL_START>
for batch in eval_dataloader:
# <EVAL_BATCH_START>
# <EVAL_BEFORE_FORWARD>
outputs, targets = model(batch)
# <EVAL_AFTER_FORWARD>
metrics.update(outputs, targets)
# <EVAL_BATCH_END>
# <EVAL_END>
# <BATCH_CHECKPOINT>
# <EPOCH_END>
if should_eval(batch=False):
for eval_dataloader in eval_dataloaders:
# <EVAL_START>
for batch in eval_dataloader:
# <EVAL_BATCH_START>
# <EVAL_BEFORE_FORWARD>
outputs, targets = model(batch)
# <EVAL_AFTER_FORWARD>
metrics.update(outputs, targets)
# <EVAL_BATCH_END>
# <EVAL_END>
# <EPOCH_CHECKPOINT>
# <FIT_END>
Attributes:
INIT: Invoked in the constructor of :class:`~.trainer.Trainer`. Model surgery (see
:mod:`~composer.utils.module_surgery`) typically occurs here.
AFTER_LOAD: Immediately after checkpoint is loaded in constructor of :class:`~.trainer.Trainer`.
FIT_START: Invoked at the beginning of each call to :meth:`.Trainer.fit`. Dataset transformations typically
occur here.
EPOCH_START: Start of an epoch.
BEFORE_DATALOADER: Immediately before the dataloader is called.
AFTER_DATALOADER: Immediately after the dataloader is called. Typically used for on-GPU dataloader transforms.
BATCH_START: Start of a batch.
BEFORE_TRAIN_BATCH: Before the forward-loss-backward computation for a training batch. When using gradient
accumulation, this is still called only once.
BEFORE_FORWARD: Before the call to ``model.forward()``.
This is called multiple times per batch when using gradient accumulation.
AFTER_FORWARD: After the call to ``model.forward()``.
This is called multiple times per batch when using gradient accumulation.
BEFORE_LOSS: Before the call to ``model.loss()``.
This is called multiple times per batch when using gradient accumulation.
AFTER_LOSS: After the call to ``model.loss()``.
This is called multiple times per batch when using gradient accumulation.
BEFORE_BACKWARD: Before the call to ``loss.backward()``.
This is called multiple times per batch when using gradient accumulation.
AFTER_BACKWARD: After the call to ``loss.backward()``.
This is called multiple times per batch when using gradient accumulation.
AFTER_TRAIN_BATCH: After the forward-loss-backward computation for a training batch. When using gradient
accumulation, this event still fires only once.
BATCH_END: End of a batch, which occurs after the optimizer step and any gradient scaling.
BATCH_CHECKPOINT: After :attr:`.Event.BATCH_END` and any batch-wise evaluation. Saving checkpoints at this
event allows the checkpoint saver to use the results from any batch-wise evaluation to determine whether
a checkpoint should be saved.
EPOCH_END: End of an epoch.
EPOCH_CHECKPOINT: After :attr:`.Event.EPOCH_END` and any epoch-wise evaluation. Saving checkpoints at this
event allows the checkpoint saver to use the results from any epoch-wise evaluation to determine whether
            a checkpoint should be saved.
FIT_END: Invoked at the end of each call to :meth:`.Trainer.fit`. This event exists primarily for logging information
and flushing callbacks. Algorithms should not transform the training state on this event, as any changes will not
be preserved in checkpoints.
EVAL_START: Start of evaluation through the validation dataset.
        EVAL_BATCH_START: Start of an evaluation batch, before the call to ``model.eval_forward(batch)``.
        EVAL_BEFORE_FORWARD: Before the call to ``model.eval_forward(batch)``.
        EVAL_AFTER_FORWARD: After the call to ``model.eval_forward(batch)``.
        EVAL_BATCH_END: End of an evaluation batch, after the call to ``model.eval_forward(batch)``.
        EVAL_END: End of evaluation through the validation dataset.
        PREDICT_START: Start of the prediction loop.
        PREDICT_BATCH_START: Start of a prediction batch.
        PREDICT_BEFORE_FORWARD: Before the model forward pass during prediction.
        PREDICT_AFTER_FORWARD: After the model forward pass during prediction.
        PREDICT_BATCH_END: End of a prediction batch.
        PREDICT_END: End of the prediction loop.
"""
INIT = 'init'
AFTER_LOAD = 'after_load'
FIT_START = 'fit_start'
EPOCH_START = 'epoch_start'
BEFORE_DATALOADER = 'before_dataloader'
AFTER_DATALOADER = 'after_dataloader'
BATCH_START = 'batch_start'
BEFORE_TRAIN_BATCH = 'before_train_batch'
BEFORE_FORWARD = 'before_forward'
AFTER_FORWARD = 'after_forward'
BEFORE_LOSS = 'before_loss'
AFTER_LOSS = 'after_loss'
BEFORE_BACKWARD = 'before_backward'
AFTER_BACKWARD = 'after_backward'
AFTER_TRAIN_BATCH = 'after_train_batch'
BATCH_END = 'batch_end'
BATCH_CHECKPOINT = 'batch_checkpoint'
EPOCH_END = 'epoch_end'
EPOCH_CHECKPOINT = 'epoch_checkpoint'
FIT_END = 'fit_end'
EVAL_START = 'eval_start'
EVAL_BATCH_START = 'eval_batch_start'
EVAL_BEFORE_FORWARD = 'eval_before_forward'
EVAL_AFTER_FORWARD = 'eval_after_forward'
EVAL_BATCH_END = 'eval_batch_end'
EVAL_END = 'eval_end'
PREDICT_START = 'predict_start'
PREDICT_BATCH_START = 'predict_batch_start'
PREDICT_BEFORE_FORWARD = 'predict_before_forward'
PREDICT_AFTER_FORWARD = 'predict_after_forward'
PREDICT_BATCH_END = 'predict_batch_end'
PREDICT_END = 'predict_end'
@property
def is_before_event(self) -> bool:
"""Whether the event is an "before" event.
An "before" event (e.g., :attr:`~Event.BEFORE_LOSS`) has a corresponding "after" event
(.e.g., :attr:`~Event.AFTER_LOSS`).
"""
return self in _BEFORE_EVENTS
@property
def is_after_event(self) -> bool:
"""Whether the event is an "after" event.
An "after" event (e.g., :attr:`~Event.AFTER_LOSS`) has a corresponding "before" event
        (e.g., :attr:`~Event.BEFORE_LOSS`).
"""
return self in _AFTER_EVENTS
@property
def canonical_name(self) -> str:
"""The name of the event, without before/after markers.
Events that have a corresponding "before" or "after" event share the same canonical name.
Example:
>>> Event.EPOCH_START.canonical_name
'epoch'
>>> Event.EPOCH_END.canonical_name
'epoch'
Returns:
str: The canonical name of the event.
"""
name: str = self.value
name = name.replace('before_', '')
name = name.replace('after_', '')
name = name.replace('_start', '')
name = name.replace('_end', '')
return name
@property
def is_predict(self) -> bool:
"""Whether the event is during the predict loop."""
return self.value.startswith('predict')
@property
def is_eval(self) -> bool:
"""Whether the event is during the eval loop."""
return self.value.startswith('eval')
_BEFORE_EVENTS = (Event.FIT_START, Event.EPOCH_START, Event.BEFORE_DATALOADER, Event.BATCH_START,
Event.BEFORE_TRAIN_BATCH, Event.BEFORE_FORWARD, Event.BEFORE_LOSS, Event.BEFORE_BACKWARD,
Event.EVAL_START, Event.EVAL_BATCH_START, Event.EVAL_BEFORE_FORWARD, Event.PREDICT_START,
Event.PREDICT_BATCH_START, Event.PREDICT_BEFORE_FORWARD)
_AFTER_EVENTS = (Event.EPOCH_END, Event.BATCH_END, Event.AFTER_DATALOADER, Event.AFTER_TRAIN_BATCH, Event.AFTER_FORWARD,
Event.AFTER_LOSS, Event.AFTER_BACKWARD, Event.EVAL_END, Event.EVAL_BATCH_END, Event.EVAL_AFTER_FORWARD,
Event.FIT_END, Event.PREDICT_END, Event.PREDICT_BATCH_END, Event.PREDICT_AFTER_FORWARD)
| composer-dev | composer/core/event.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Serialization interface used by checkpointing."""
from __future__ import annotations
from typing import Any, Dict
__all__ = ['Serializable']
class Serializable:
"""Interface for serialization; used by checkpointing."""
def state_dict(self) -> Dict[str, Any]:
"""Returns a dictionary representing the internal state.
        The returned dictionary must be pickle-able via :func:`torch.save`.
Returns:
Dict[str, Any]: The state of the object.
"""
return {}
def load_state_dict(self, state: Dict[str, Any]) -> None:
"""Restores the state of the object.
Args:
state (Dict[str, Any]): The state of the object, as previously returned by :meth:`.state_dict`.
"""
pass
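# An illustrative sketch (not part of the library API) of how a stateful component might
# implement this interface so that checkpointing can capture and restore it:
#
#     class RunningCounter(Serializable):
#         def __init__(self) -> None:
#             self.count = 0
#
#         def state_dict(self) -> Dict[str, Any]:
#             return {'count': self.count}
#
#         def load_state_dict(self, state: Dict[str, Any]) -> None:
#             self.count = state['count']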
| composer-dev | composer/core/serializable.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Central components used by other modules.
Central parts of Composer, such as :class:`~.engine.Engine`, base classes for critical components such as
:class:`~.algorithm.Algorithm` and :class:`~.callback.Callback`, and other useful functionality such as
:class:`~.time.Timestamp`, are implemented under core.
"""
from composer.core.algorithm import Algorithm
from composer.core.callback import Callback
from composer.core.data_spec import DataSpec, ensure_data_spec
from composer.core.engine import Engine, Trace
from composer.core.evaluator import Evaluator, ensure_evaluator, validate_eval_automicrobatching
from composer.core.event import Event
from composer.core.passes import AlgorithmPass
from composer.core.precision import Precision, get_precision_context
from composer.core.serializable import Serializable
from composer.core.state import State
from composer.core.time import Time, Timestamp, TimeUnit, ensure_time
from composer.core.types import JSON, Batch, BreakEpochException, Dataset, MemoryFormat, PyTorchScheduler, TrainerMode
__all__ = [
'Algorithm',
'AlgorithmPass',
'Callback',
'DataSpec',
'Dataset',
'ensure_data_spec',
'Engine',
'Trace',
'Evaluator',
'Event',
'Precision',
'get_precision_context',
'Serializable',
'State',
'Time',
'Timestamp',
'TimeUnit',
'ensure_time',
'ensure_evaluator',
'Batch',
'PyTorchScheduler',
'JSON',
'MemoryFormat',
'TrainerMode',
'BreakEpochException',
'validate_eval_automicrobatching',
]
| composer-dev | composer/core/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Specifications for operating and training on data."""
from __future__ import annotations
import collections.abc
import math
import textwrap
import warnings
from typing import TYPE_CHECKING, Any, Callable, Iterable, List, Mapping, Optional, Sequence, Tuple, Union
import torch
import torch.utils.data
from torch.utils.data.distributed import DistributedSampler
from composer.utils import dist, ensure_tuple
if TYPE_CHECKING:
from composer.core.types import Batch
__all__ = ['DataSpec', 'ensure_data_spec']
def _split_list(l, microbatch_size: int):
if len(l) < microbatch_size:
        warnings.warn(f'Cannot split list of length {len(l)} into microbatches of size {microbatch_size}. '
                      'As it is smaller, no splitting will be done. This may happen on the last batch '
                      'of a dataset if it is smaller than the microbatch size.')
microbatch_size = len(l)
num_microbatches = math.ceil(len(l) / microbatch_size)
# Note: this is to match the behavior of tensor.chunk, which is used in _split_tensor
chunked_microbatch_size = math.ceil(len(l) / num_microbatches)
return [l[start:start + chunked_microbatch_size] for start in range(0, len(l), chunked_microbatch_size)]
def _split_tensor(t, microbatch_size: int):
if len(t) < microbatch_size:
        warnings.warn(f'Cannot split tensor of length {len(t)} into microbatches of size {microbatch_size}. '
                      'As it is smaller, no splitting will be done. This may happen on the last batch '
                      'of a dataset if it is smaller than the microbatch size.')
microbatch_size = len(t)
num_microbatches = math.ceil(len(t) / microbatch_size)
return t.chunk(num_microbatches)
def _split_mapping(m, microbatch_size: int):
chunked = {}
for k, v in m.items():
if isinstance(v, torch.Tensor):
chunked[k] = _split_tensor(v, microbatch_size)
elif isinstance(v, (List, Tuple)):
chunked[k] = _split_list(v, microbatch_size)
elif isinstance(v, Mapping):
chunked[k] = _split_mapping(v, microbatch_size)
elif isinstance(v, (int, float, str, bool)):
# Defer broadcasting primitives until we know num_chunks
pass
else:
raise ValueError(f'Unsupported batch type: {type(v)}.')
    num_chunks = 1  # Default to 1 chunk if there are no tensors or everything is a primitive
if len(chunked.keys()) != 0:
num_chunks = len(list(chunked.values())[0])
# Broadcast primitives to all chunks
for k, v in m.items():
if isinstance(v, (int, float, str, bool)):
chunked[k] = [v] * num_chunks
return [{k: v[idx] for k, v in chunked.items()} for idx in range(num_chunks)]
def _check_list_is_primitives(l):
"""Checks if all elements in a list are the same primitive type."""
if len(l) == 0:
return True
first_type = type(l[0])
if not isinstance(l[0], (int, float, str, bool)):
return False
for item in l:
if type(item) != first_type:
return False
return True
def _default_split_batch(batch: Any, microbatch_size: int) -> Sequence:
"""Splits batch into chunks of size `microbatch_size` for gradient accumulation.
Works with tensors, dictionaries of tensors, (x, y) tuples, and lists where ``batch`` is the 2nd dimension.
Args:
batch (Any): output from the dataloader.
        microbatch_size (int): Size of the microbatches to split the batch into.
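    Example:
        .. code-block:: python
            # An illustrative sketch: an ``(x, y)`` tuple batch of 8 samples split with a
            # microbatch size of 3 yields chunks of 3, 3, and 2 samples.
            x, y = torch.randn(8, 16), torch.randint(0, 2, (8,))
            microbatches = _default_split_batch((x, y), microbatch_size=3)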
"""
if isinstance(batch, torch.Tensor): # check for a single stack of tensors
return _split_tensor(batch, microbatch_size)
elif isinstance(batch, Mapping): # check for dictionary (hf style)
return _split_mapping(batch, microbatch_size)
elif isinstance(batch, (Tuple, list)) and _check_list_is_primitives(batch): # check for list of primitives
return _split_list(batch, microbatch_size)
elif isinstance(batch, (Tuple, List)): # check for batch on 2nd dimension
result = []
for item in batch:
if isinstance(item, torch.Tensor):
result.append(_split_tensor(item, microbatch_size))
elif isinstance(item, (List, Tuple)):
result.append(_split_list(item, microbatch_size))
else:
raise ValueError(f'Unsupported batch type: {type(item)}.')
return list(zip(*result))
raise NotImplementedError(
textwrap.dedent("""\
The default `split_fn` is unable to split the output of this dataloader. To enable microbatching,
            please specify a `DataSpec` with `split_batch` for your dataset."""))
class DataSpec:
"""Specifications for operating and training on data.
An example of constructing a :class:`DataSpec` object with a ``device_transforms``
callable (:class:`.NormalizationFn`) and then using it with :class:`~.Trainer`:
.. doctest::
>>> # In this case, we apply NormalizationFn
>>> # Construct DataSpec as shown below to apply this transformation
>>> from composer.datasets.utils import NormalizationFn
>>> CHANNEL_MEAN = (0.485 * 255, 0.456 * 255, 0.406 * 255)
>>> CHANNEL_STD = (0.229 * 255, 0.224 * 255, 0.225 * 255)
>>> device_transform_fn = NormalizationFn(mean=CHANNEL_MEAN, std=CHANNEL_STD)
>>> train_dspec = DataSpec(train_dataloader, device_transforms=device_transform_fn)
>>> # The same function can be used for eval dataloader as well
>>> eval_dspec = DataSpec(eval_dataloader, device_transforms=device_transform_fn)
>>> # Use this DataSpec object to construct trainer
>>> trainer = Trainer(
... model=model,
... train_dataloader=train_dspec,
... eval_dataloader=eval_dspec,
... optimizers=optimizer,
... max_duration="1ep",
... )
Args:
dataloader (Union[Iterable, torch.utils.data.DataLoader]): The dataloader, which can be any iterable that yields batches.
num_samples (int, optional): The total number of samples in an epoch, across all ranks. This field is used by
the :class:`.Timestamp` (training progress tracker). If not specified, then ``len(dataloader.dataset)`` is
used (if this property is available). Otherwise, the dataset is assumed to be unsized.
num_tokens (int, optional): The total number of tokens in an epoch. This field is used by the
:class:`.Timestamp` (training progress tracker).
device_transforms ((Batch) -> Batch, optional): Function called by the :class:`.Trainer` to modify the
batch once it has been moved onto the device. For example, this function can be used for GPU-based
normalization. It can modify the batch in-place, and it should return the modified batch. If not specified,
the batch is not modified.
split_batch ((Batch, int) -> Sequence[Batch], optional): Function called by the :class:`.Trainer` to
split a batch (the first parameter) into microbatches of a given size (the second parameter). If
the ``dataloader`` yields batches not of type :class:`torch.Tensor`, Mapping, Tuple, or List, then
this function must be specified.
get_num_samples_in_batch ((Batch) -> int, optional): Function that is called by the :class:`.Trainer`
to get the number of samples in the provided batch.
By default, if the batch contains tensors that all have the same 0th dim, then the value of the 0th dim will
            be returned. If the batch contains tensors whose 0th dims differ, then this function must be specified.
get_num_tokens_in_batch ((Batch) -> int, optional): Function that is called by the :class:`.Trainer` to
get the number of tokens in the provided batch.
            By default, it returns 0, meaning that the number of tokens processed will not be tracked as part of
            training progress tracking. This function must be specified to track the number of tokens processed
            during training.
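    A custom ``get_num_samples_in_batch`` is useful when batches are not plain tensors. An illustrative
    sketch (assuming each batch is a dict whose ``'input_ids'`` entry is a list of variable-length sequences):
    .. code-block:: python
        def my_num_samples(batch):
            return len(batch['input_ids'])
        train_dspec = DataSpec(train_dataloader, get_num_samples_in_batch=my_num_samples)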
"""
def __init__(
self,
dataloader: Union[Iterable, torch.utils.data.DataLoader],
num_samples: Optional[int] = None,
num_tokens: Optional[int] = None,
device_transforms: Optional[Callable[[Batch], Batch]] = None,
split_batch: Optional[Callable[[Batch, int], Sequence[Batch]]] = None,
get_num_samples_in_batch: Optional[Callable[[Batch], int]] = None,
get_num_tokens_in_batch: Optional[Callable[[Batch], int]] = None,
) -> None:
self.dataloader: Union[Iterable, torch.utils.data.DataLoader] = dataloader
self.num_tokens = num_tokens
self.device_transforms = self._default_device_transforms if device_transforms is None else device_transforms
self.split_batch = _default_split_batch if split_batch is None else split_batch
self.get_num_samples_in_batch = self._default_get_num_samples_in_batch if get_num_samples_in_batch is None else get_num_samples_in_batch
self.get_num_tokens_in_batch = self._default_get_num_tokens_in_batch if get_num_tokens_in_batch is None else get_num_tokens_in_batch
if num_samples is not None:
self.num_samples = num_samples
else:
if isinstance(dataloader, torch.utils.data.DataLoader) and isinstance(dataloader.dataset,
collections.abc.Sized):
try:
self.num_samples = len(dataloader.dataset)
except (TypeError, NotImplementedError):
self.num_samples = None
else:
self.num_samples = None
if isinstance(dataloader, torch.utils.data.DataLoader):
if dataloader._iterator is not None:
raise ValueError(
('The dataloader has an active iterator. This could occur '
'if `persistent_workers=True` and the dataloader has already been iterated, '
'or if the dataloader is mid-epoch. It is required that the training dataloader '
'does not have an active iterator, so CPU dataset augmentations can be '
'correctly inserted. To fix, please do not iterate over the dataloader before passing it into '
'the Trainer.'))
world_size = dist.get_world_size()
# Check for Distributed Sampler if not using IterableDataset on more than 1 GPU
if world_size > 1 and not isinstance(dataloader.dataset, torch.utils.data.IterableDataset):
is_sampler_distributed = dataloader.sampler is not None and isinstance(
dataloader.sampler, DistributedSampler)
is_batch_sampler_distributed = dataloader.batch_sampler is not None and isinstance(
dataloader.batch_sampler, DistributedSampler)
if not is_sampler_distributed and not is_batch_sampler_distributed:
raise ValueError(
f'The world_size({world_size}) > 1 but dataloader does not use '
'DistributedSampler. This will cause all ranks to train on the same '
'data, removing any benefit from multi-GPU training. To resolve this, '
'create a Dataloader with DistributedSampler. For example, '
'DataLoader(..., sampler=composer.utils.dist.get_sampler(...)).'
'Alternatively, the process group can be instantiated with '
'composer.utils.dist.instantiate_dist(...) and DistributedSampler can '
'directly be created with DataLoader(..., sampler=DistributedSampler(...)). '
'For more information, see https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler.'
)
def _default_device_transforms(self, batch: Batch):
return batch
def _default_get_num_samples_in_batch(self, batch: Batch) -> int:
if isinstance(batch, torch.Tensor):
return batch.shape[0]
dim0_sizes = []
if isinstance(batch, (list, tuple)):
for tensors in batch:
for t in ensure_tuple(tensors):
if not hasattr(t, 'shape'):
                        raise ValueError('Unable to determine the batch size, batch contains '
                                         f'an element of type {type(t)}, which does not have a '
                                         'shape. Please use a DataSpec and provide a '
                                         '`get_num_samples_in_batch(your_batch) -> int` method.')
dim0_sizes.append(t.shape[0])
elif isinstance(batch, dict):
dim0_sizes = [t.shape[0] for t in batch.values()]
if len(set(dim0_sizes)) == 1:
return dim0_sizes[0]
else:
raise NotImplementedError(
textwrap.dedent(f"""\
Cannot determine the batch size, as multiple Tensors of
different lengths were found in the batch: sizes in batch: {dim0_sizes}.
Please use a DataSpec and specify `get_num_samples_in_batch`."""))
def _default_get_num_tokens_in_batch(self, batch: Batch) -> int:
del batch # unused
return 0
def ensure_data_spec(dataloader: Union[DataSpec, Iterable, dict]) -> DataSpec:
"""Ensures that the ``dataloader`` is a :class:`.DataSpec`.
Args:
dataloader (DataSpec | Iterable | dict): A DataSpec, DataLoader, or Dict of DataSpec kwargs.
Returns:
DataSpec: A DataSpec
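    Example:
        .. code-block:: python
            # An illustrative sketch (``train_dataloader`` is a stand-in for an existing DataLoader):
            # both calls below produce a DataSpec wrapping the same dataloader.
            spec = ensure_data_spec(train_dataloader)
            spec = ensure_data_spec({'dataloader': train_dataloader, 'num_samples': 50000})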
"""
if isinstance(dataloader, dict):
# treat as kwargs for DataSpec
dataloader = DataSpec(**dataloader)
if not isinstance(dataloader, DataSpec):
dataloader = DataSpec(dataloader)
return dataloader
| composer-dev | composer/core/data_spec.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Reference for common types used throughout the composer library.
Attributes:
Batch (Any): Alias to type Any.
A batch of data can be represented in several formats, depending on the application.
PyTorchScheduler (torch.optim.lr_scheduler._LRScheduler): Alias for base class of learning rate schedulers such
as :class:`torch.optim.lr_scheduler.ConstantLR`.
JSON (str | float | int | None | List['JSON'] | Dict[str, 'JSON']): JSON Data.
Dataset (torch.utils.data.Dataset[Batch]): Alias for :class:`torch.utils.data.Dataset`.
"""
from __future__ import annotations
from typing import Any, Dict, List, Union
import torch
import torch.utils.data
from composer.utils import StringEnum
__all__ = ['Batch', 'PyTorchScheduler', 'JSON', 'MemoryFormat', 'TrainerMode', 'BreakEpochException']
Batch = Any
Dataset = torch.utils.data.Dataset[Batch]
PyTorchScheduler = torch.optim.lr_scheduler._LRScheduler
JSON = Union[str, float, int, None, List['JSON'], Dict[str, 'JSON']]
class BreakEpochException(Exception):
"""Raising this exception will immediately end the current epoch.
If you're wondering whether you should use this, the answer is no.
"""
pass
class TrainerMode(StringEnum):
"""Enum to represent which mode the Trainer is in.
Attributes:
TRAIN: In training mode.
EVAL: In evaluation mode.
PREDICT: In predict mode.
"""
TRAIN = 'train'
EVAL = 'eval'
PREDICT = 'predict'
class MemoryFormat(StringEnum):
"""Enum class to represent different memory formats.
    See :class:`torch.memory_format` for more details.
Attributes:
        CONTIGUOUS_FORMAT: Default PyTorch memory format representing a tensor allocated with consecutive dimensions
sequential in allocated memory.
CHANNELS_LAST: This is also known as NHWC. Typically used for images with 2 spatial dimensions (i.e., Height and
Width) where channels next to each other in indexing are next to each other in allocated memory. For example, if
C[0] is at memory location M_0 then C[1] is at memory location M_1, etc.
CHANNELS_LAST_3D: This can also be referred to as NTHWC. Same as :attr:`CHANNELS_LAST` but for videos with 3
spatial dimensions (i.e., Time, Height and Width).
PRESERVE_FORMAT: A way to tell operations to make the output tensor to have the same memory format as the input
tensor.
"""
CONTIGUOUS_FORMAT = 'contiguous_format'
CHANNELS_LAST = 'channels_last'
CHANNELS_LAST_3D = 'channels_last_3d'
PRESERVE_FORMAT = 'preserve_format'
| composer-dev | composer/core/types.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Enum class for the numerical precision to be used by the model."""
import contextlib
import os
import textwrap
from typing import Generator, Union
import torch
from composer.utils import StringEnum
try:
import transformer_engine.pytorch as te
te_installed = True
except ImportError:
te_installed = False
__all__ = ['Precision', 'get_precision_context']
class Precision(StringEnum):
"""Enum class for the numerical precision to be used by the model.
Attributes:
FP32: Use 32-bit floating-point precision. Compatible with CPUs and GPUs.
AMP_FP16: Use :mod:`torch.cuda.amp` with 16-bit floating-point precision. Only compatible
with GPUs.
AMP_BF16: Use :mod:`torch.cuda.amp` with 16-bit BFloat precision.
        AMP_FP8: Use :mod:`transformer_engine.pytorch.fp8_autocast` with 8-bit FP8 precision.
"""
FP32 = 'fp32'
AMP_FP16 = 'amp_fp16'
AMP_BF16 = 'amp_bf16'
AMP_FP8 = 'amp_fp8'
@contextlib.contextmanager
def get_precision_context(precision: Union[str, Precision]) -> Generator[None, None, None]:
"""Returns a context manager to automatically cast to a specific precision.
Args:
precision (str | Precision): Precision for the context
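    Example:
        .. code-block:: python
            # An illustrative sketch (``model`` and ``batch`` are stand-ins): run the forward
            # pass under bf16 autocast on a CUDA device.
            with get_precision_context('amp_bf16'):
                outputs = model(batch)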
"""
precision = Precision(precision)
if precision == Precision.FP32:
if torch.cuda.is_available():
with torch.cuda.amp.autocast(False):
yield
else:
# Yield here to avoid warnings about cuda not being available
yield
elif precision == Precision.AMP_FP16:
# Retain compatibility with PyTorch < 1.10
with torch.cuda.amp.autocast(True):
yield
elif precision == Precision.AMP_BF16:
if torch.cuda.is_available():
with torch.cuda.amp.autocast(enabled=True, dtype=torch.bfloat16):
yield
else:
os.environ['XLA_USE_BF16'] = '1'
yield
elif precision == Precision.AMP_FP8:
if te_installed and torch.cuda.get_device_capability()[0] > 8:
from transformer_engine.common.recipe import DelayedScaling, Format
# These default values for fp8_recipe are taken from NVidia's docs. We may want to change
# these once we get a chance to do more convergence experiments.
# https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/examples/fp8_primer.html#id1
fp8_format = Format.HYBRID # E4M3 during forward pass, E5M2 during backward pass
fp8_recipe = DelayedScaling(fp8_format=fp8_format, amax_history_len=16, amax_compute_algo='max')
with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
yield
else:
if te_installed:
raise RuntimeError('AMP_FP8 precision is used but current device does not support it.')
else:
raise ImportError(
textwrap.dedent("""\
AMP_FP8 precision is used but TransformerEngine is not installed.
After making sure torch is already installed, please install it using
pip install --upgrade git+https://github.com/NVIDIA/TransformerEngine.git@stable"""))
else:
raise ValueError(f'Unsupported precision: {precision}')
| composer-dev | composer/core/precision.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Engine is a coordinator for running algorithms and resolving ordering conflicts among them for composition.
.. currentmodule:: composer
The order in which algorithms are run matters significantly during composition. For example, the
:class:`.SelectiveBackprop` algorithm runs on the :attr:`.Event.AFTER_DATALOADER` event and must run before
any data augmentations. :class:`.Engine` runs re-ordering passes to resolve such ordering issues or conflicts.
These orderings are enforced by algorithm passes. The default passes registered to the Engine are found in
:mod:`composer.core.passes`. To register a new pass, use :meth:`.Engine.register_pass`, e.g.
.. testsetup::
# dummy algorithm
MyAlgorithm = None
.. doctest::
from composer import Engine, Algorithm, Event
from typing import Sequence
def run_last(algorithms: Sequence[Algorithm], event: Event) -> Sequence[Algorithm]:
        return sorted(algorithms, key=lambda x: isinstance(x, MyAlgorithm))
engine = Engine(algorithm_passes=run_last)
.. note::
* An instance of :class:`.Engine` is automatically constructed by the :class:`.Trainer`
constructor. A user need not instantiate the :class:`.Engine` class. Instead, they should
specify algorithm_passes to the :class:`.Trainer` constructor, which will be passed to the
:class:`.Engine` constructor.
.. note::
* The design of :class:`.Engine` is subject to change in future releases
to accommodate more complexity as we investigate composition of algorithms.
To generate verbose debug logs for the engine, set the environment variable ``ENGINE_DEBUG=1``.
Trace
~~~~~
Traces record whether an algorithm ran at a particular step and event combination and also the order of such executions.
These are logged with the key ``<algorithm_name>/<event>``.
For example, the algorithm :class:`.LayerFreezing`, which runs at the end of every epoch on :attr:`.Event.EPOCH_END`,
will emit a series of traces:
.. code-block::
[STEP=0][layer_freezing/INIT=0]
[STEP=1][layer_freezing/EPOCH_START=0]
[STEP=1][layer_freezing/BATCH_START=0]
...
[STEP=2][layer_freezing/BATCH_START=0]
...
[STEP=3][layer_freezing/BATCH_START=0]
...
[STEP=3][layer_freezing/EPOCH_END=1] # <-- layer freezing ran on step 3 here!
"""
from __future__ import annotations
import atexit
import contextlib
import logging
import os
import textwrap
import weakref
from collections import OrderedDict
from dataclasses import dataclass
from typing import Callable, ContextManager, Dict, List, Optional, Sequence, Tuple, TypeVar, Union, cast
from composer.core import passes
from composer.core.algorithm import Algorithm
from composer.core.callback import Callback
from composer.core.event import Event
from composer.core.state import State
from composer.loggers import Logger, LoggerDestination
from composer.profiler import ProfilerAction
from composer.utils import ensure_tuple
log = logging.getLogger(__name__)
__all__ = ['Trace', 'Engine', 'Traces']
T = TypeVar('T')
_ALWAYS_RECORD_EVENTS = [Event.INIT, Event.FIT_START, Event.EPOCH_START, Event.EPOCH_END]
#: The traces of an entire run are stored in an OrderedDict.
#: The keys are of format ``<algorithm_name>/<event>`` (e.g., ``Blurpool/INIT``) and values are an instance of
#: :class:`Trace`.
Traces = Dict[str, 'Trace']
# Track whether atexit triggered _close(), which indicates whether the python process is shutting down
# If so, do not run close() again via __del__(), as Python machinery (e.g. the ability to do conditional
# imports) are destroyed between close() and __del__().
# Using a global variable instead of an instance variable as _close() is not bound to the instance
_did_atexit_run = False
def _set_atexit_ran():
global _did_atexit_run
_did_atexit_run = True
# Since atexit calls hooks in LIFO order, this hook will always be invoked after all atexit-triggered
# _close() calls are invoked
atexit.register(_set_atexit_ran)
def _get_default_passes():
return [
passes.sort_selective_backprop_first,
passes.sort_fused_layernorm_last,
passes.sort_low_precision_layernorm_last,
passes.set_filo_order,
passes.warn_if_multiple_loss_interpolation,
]
@dataclass
class Trace():
"""Record of an algorithm's execution.
Attributes:
name (str): The name of the algorithm.
event (Event): The current event.
exit_code (int | None): Optional return value from an algorithm. Default: None.
order (int | None): Order in which the algorithm was executed
in the list of algorithms. None means algorithm was not run.
run (bool): Whether the algorithm was run. Default: False
"""
name: str = ''
event: Optional[Event] = None
exit_code: Optional[int] = None
order: Optional[int] = None
run: bool = False
def _setup_trace(algorithms: Sequence[Algorithm], event: Event) -> Traces:
"""The default traces of an entire run is an OrderedDict.
The keys are of format ``<algorithm_name>/<event>`` (e.g., ``Blurpool/INIT``) and values are an instance of
:class:`Trace`.
"""
return OrderedDict([(f'{algo}/{event}', Trace(name=algo.__class__.__name__)) for algo in algorithms])
# Track which callbacks are already open, so it is possible to error and instruct the user to call
# previous_trainer.close() if necessary before attempting to reuse a callback
_OPEN_CALLBACKS = weakref.WeakSet()
class Engine():
"""Coordinator for running algorithms and resolving ordering conflicts among them for composition.
Args:
state (State): The initial :class:`.State` of the trainer. ``state`` will be modified in-place.
logger (Logger): A :class:`.Logger` instance to be used for logging algorithm and callback
specific metrics.
        algorithm_passes (AlgorithmPass | Tuple[AlgorithmPass, int] | Sequence[AlgorithmPass | Tuple[AlgorithmPass, int]], optional):
Optional list of passes to change order in which algorithms are applied. These passes are merged with the
default passes specified in :class:`.Engine`. If ``None``, then no additional passes will be used.
"""
def __init__(
self,
state: State,
logger: Logger,
algorithm_passes: Optional[Union[passes.AlgorithmPass, Tuple[passes.AlgorithmPass, int],
Sequence[Union[passes.AlgorithmPass, Tuple[passes.AlgorithmPass,
int]]]]] = None,
):
self.logger = logger
self.state = state
self._is_closed = False
self.algorithm_passes: List[passes.AlgorithmPass] = _get_default_passes()
if algorithm_passes is not None:
# Wrap in list if not already a list or if it's a length 2 list specifying a single
# call to register_pass with type [AlgorithmPass, int]
if not isinstance(algorithm_passes, list) or (len(algorithm_passes) == 2 and
isinstance(algorithm_passes[1], int)):
algorithm_passes = [algorithm_passes] # type: ignore wrapping list
algo_passes = algorithm_passes if isinstance(algorithm_passes, list) else [algorithm_passes]
for algo_pass in algo_passes:
algo_pass = ensure_tuple(algo_pass)
if len(algo_pass) == 1 and isinstance(algo_pass[0], Callable):
self.register_pass(algo_pass[0])
elif len(algo_pass) == 2 and isinstance(algo_pass[0], Callable) and isinstance(algo_pass[1], int):
self.register_pass(algo_pass[0], algo_pass[1])
else:
raise ValueError(
textwrap.dedent('Received invalid algorithm_pass. Expected either a single AlgorithmPass '
f'or a tuple of (AlgorithmPass, int), but received {algo_pass}.'))
atexit.register(self._close, state, logger)
def run_event(
self,
event: Union[Event, str],
) -> Traces:
"""Runs the sequence of algorithms and callbacks (see :class:`.Callback`).
Filters algorithms by calling each one's :meth:`.Algorithm.match` method, internally checks for conflicting
algorithms, then runs each algorithm's :meth:`.Algorithm.apply` method to make in-place changes to the
``state``.
The default order of execution for algorithms is determined by the provided list. However, :class:`.Engine` makes
changes to this order internally to resolve ordering conflicts.
Returns :data:`.Traces` of the execution, a dictionary with keys formatted as ``<algorithm_name>/<event>`` (e.g.,
        ``Blurpool/INIT``), whose values are instances of :class:`.Trace`.
Callbacks are always run after algorithms and do not return a trace.
This method can be called with either the :class:`.Event` enum member values or a string of the event name.
Examples:
>>> engine = Engine(state, logger)
>>> engine.run_event(Event.BEFORE_LOSS)
OrderedDict()
>>> # calling with a string of the event name also works
>>> engine.run_event('before_loss')
OrderedDict()
Args:
event (Event | str): The current :class:`.Event`. It can be the enum member values or a
string with the event value.
Returns:
traces (Traces): Ordered dictionary of trace for each algorithm.
"""
duration_marker = None
event = Event(event)
self._debug_log(event, 'Running event')
if self._is_closed:
raise RuntimeError(('The engine was already closed and therefore cannot be used again. '
'To fix, please create a new Engine (or Trainer)'))
if self.state.profiler is not None:
name = f'event/{event.canonical_name}'
if (event.is_before_event or event.is_after_event):
# if not part of an event pair (e.g. init), then don't record an event here
if event in _ALWAYS_RECORD_EVENTS:
actions = [ProfilerAction.ACTIVE, ProfilerAction.WARMUP, ProfilerAction.SKIP]
else:
actions = [ProfilerAction.ACTIVE, ProfilerAction.WARMUP]
duration_marker = self.state.profiler.marker(name, actions=actions)
if event.is_after_event and duration_marker is not None:
duration_marker.finish()
self._assert_dataloader_and_duration_set(self.state, event)
if event == Event.INIT:
# For the INIT event, run the callbacks first to initialize the loggers
# For other events, run the algorithms first, so the callbacks have the state
# after algorithms modify it
self._check_for_still_open_callbacks()
# Run loggers first, so they can be initialized before any callbacks that may
# use them.
self._run_loggers(event)
self._run_nonlogger_callbacks(event)
traces = self._run_algorithms(event)
else:
traces = self._run_algorithms(event)
# Run callbacks first, so any log calls from a callback that are executed lazily
# get registered before they are flushed by the logger itself.
self._run_nonlogger_callbacks(event)
self._run_loggers(event)
if event.is_before_event and duration_marker is not None:
duration_marker.start()
return traces
def run_marker_only_event(
self,
event: Union[Event, str],
) -> None:
"""Runs the marker for an event if the profiler is enabled.
This is primarily used to complete the dataloader marker at the end of the dataloader. In
this scenario, the dataloader marker has started from Event.BEFORE_DATALOADER, but
Event.AFTER_DATALOADER cannot be called as no batch was yielded from the dataloader.
Args:
event (Event | str): The current :class:`.Event`. It can be the enum member values or a
string with the event value.
"""
duration_marker = None
event = Event(event)
if self._is_closed:
raise RuntimeError(('The engine was already closed and therefore cannot be used again. '
'To fix, please create a new Engine (or Trainer)'))
if self.state.profiler is not None:
name = f'event/{event.canonical_name}'
if (event.is_before_event or event.is_after_event):
# if not part of an event pair (e.g. init), then don't record an event here
if event in _ALWAYS_RECORD_EVENTS:
actions = [ProfilerAction.ACTIVE, ProfilerAction.WARMUP, ProfilerAction.SKIP]
else:
actions = [ProfilerAction.ACTIVE, ProfilerAction.WARMUP]
duration_marker = self.state.profiler.marker(name, actions=actions)
if event.is_after_event and duration_marker is not None:
duration_marker.finish()
if event.is_before_event and duration_marker is not None:
duration_marker.start()
def register_pass(self, algorithm_pass: passes.AlgorithmPass, index: int = -1):
"""Registers an algorithm pass with the Engine.
Args:
algorithm_pass (passes.AlgorithmPass): A method that maps a list of
algorithms to a list of algorithms.
index (int, optional): The index to insert into the list of passes.
                If ``-1`` (default), the pass will be inserted at the end of the list.
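        Example:
            .. code-block:: python
                # An illustrative sketch (``MyAlgorithm`` is a stand-in for your own algorithm class):
                # register a pass that forces instances of MyAlgorithm to run last.
                def run_my_algorithm_last(algorithms, event):
                    return sorted(algorithms, key=lambda x: isinstance(x, MyAlgorithm))
                engine.register_pass(run_my_algorithm_last)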
"""
if index == -1:
index = len(self.algorithm_passes)
self.algorithm_passes.insert(index, algorithm_pass)
@staticmethod
def _assert_dataloader_and_duration_set(state: State, event: Event):
# correctness checks that dataloader and max duration need to be set for certain events
        if event != Event.INIT and event != Event.AFTER_LOAD:  # dataloader should be set on all events except INIT/AFTER_LOAD
assert state.dataloader is not None, f'The trainer should have set state.dataloader for event {event}.'
if event != Event.INIT and event != Event.AFTER_LOAD and not event.is_predict and not event.is_eval:
assert state.max_duration is not None, f'The trainer should have set state.max_duration for event {event}.'
def _run_algorithms(
self,
event: Event,
) -> Traces:
algorithms_to_run = [algo for algo in self.state.algorithms if algo.match(event, self.state)]
# apply algorithm passes
algorithms_to_run = self._compile(algorithms_to_run, event)
trace = _setup_trace(algorithms_to_run, event)
for order, algorithm in enumerate(algorithms_to_run):
marker = None
if self.state.profiler is not None:
marker = self.state.profiler.marker(f'algorithm/{algorithm.__class__.__name__}/event/{event.value}',
categories=[
event.value,
algorithm.__class__.__name__,
])
ctx = cast(ContextManager, contextlib.nullcontext()) if marker is None else marker
with ctx:
self._debug_log(event, f'Running algorithm {type(algorithm).__name__}')
exit_code = algorithm.apply(event, self.state, self.logger)
trace_key = f'{algorithm}/{event}'
trace[trace_key] = Trace(name=algorithm.__class__.__name__,
event=event,
exit_code=exit_code,
order=order,
run=True)
if self.logger is not None:
if len(trace) > 0:
self.logger.log_traces(
{f'algorithm_traces/{tr.name}/{tr.event}': 1 if tr.run else 0 for _, tr in trace.items()})
return trace
def _compile(
self,
algorithms_to_run: Sequence[Algorithm],
event: Event,
) -> Sequence[Algorithm]:
"""Runs compilation passes that modify the order and content of a list of algorithms.
Currently, runs the algorithms in a FILO queue for the ``before_`` and ``after_`` events. For example,
algorithms will run in order ABCD during ``before_loss``, and in DCBA during ``after_loss``. The motivation
here is that algorithms can 'undo' their effects upon the exit of an event. Note that events that
have the pattern ``_start`` or ``_end`` will still run with ABCD order.
The intent of this method is to eventually store and handle other algorithms' collisions and ordering
requirements.
Args:
algorithms_to_run(Sequence[Algorithm]): Sequence of algorithms.
event (Event): The current event.
Returns:
Sequence[Algorithm]: Modified sequence of algorithms.
"""
# run reordering passes on the algorithms
        for algorithm_pass in self.algorithm_passes:
            algorithms_to_run = algorithm_pass(algorithms_to_run, event)
return algorithms_to_run
def _check_for_still_open_callbacks(self):
# Some callbacks may be open from a previous training run
# If so, error and instruct the user that they must call `trainer.close()`
# so callbacks can clean up and reset their state properly
for cb in self.state.callbacks:
# If it's not in the set, then the callback is new, so it's closed by definition
if cb in _OPEN_CALLBACKS:
raise RuntimeError(
('Cannot create a new trainer with an open callback or logger from a previous trainer. '
'To fix, call trainer.close() before creating this new trainer to ensure that all '
'callbacks or loggers shut down properly.'))
_OPEN_CALLBACKS.add(cb)
def _run_callbacks(
self,
event: Union[Event, str],
callbacks: Optional[Sequence[Callback]] = None,
):
"""Runs a sequence of callbacks by calling the function for an event.
Args:
event (Event | str): The current :class:`.Event`.
callbacks (Callback | Sequence[Callback], optional): The callbacks to run.
                If ``None``, all of the callbacks in ``self.state`` are used.
"""
event = Event(event)
callbacks = self.state.callbacks if callbacks is None else callbacks
for cb in callbacks:
marker = None
if self.state.profiler is not None:
marker = self.state.profiler.marker(f'callback/{cb.__class__.__name__}/event/{event.value}',
categories=[
event.value,
cb.__class__.__name__,
])
ctx = cast(ContextManager, contextlib.nullcontext()) if marker is None else marker
with ctx:
self._debug_log(event, f'Running callback {type(cb).__name__}')
cb.run_event(event, self.state, self.logger)
def _run_loggers(self, event: Union[Event, str]):
loggers = [callback for callback in self.state.callbacks if isinstance(callback, LoggerDestination)]
self._run_callbacks(event, loggers)
def _run_nonlogger_callbacks(self, event: Union[Event, str]):
callbacks = [callback for callback in self.state.callbacks if not isinstance(callback, LoggerDestination)]
self._run_callbacks(event, callbacks)
def __del__(self):
global _did_atexit_run
if _did_atexit_run or self._is_closed:
# Do not attempt to shutdown again, since close() already ran via __atexit__ or was already invoked
return
self.close()
atexit.unregister(_set_atexit_ran)
atexit.unregister(self._close)
def _debug_log(self, event: Event, msg: str):
"""Helper to include timestamp and event info in log messages."""
timestamp = f'[ep={int(self.state.timestamp.epoch)}][ba={int(self.state.timestamp.batch)}]'
        # for eval or predict events, also include the loop-specific batch count
if event.is_eval:
timestamp += f'[eval_ba={int(self.state.eval_timestamp.batch)}]'
if event.is_predict:
timestamp += f'[predict_ba={int(self.state.predict_timestamp.batch)}]'
timestamp += f'[event={event.name}]'
if os.environ.get('ENGINE_DEBUG', None):
log.debug(f'{timestamp}: {msg}')
def close(self) -> None:
"""Shutdown the engine.
As part of the shutdown procedure, :meth:`.Callback.close` and :meth:`.Callback.post_close` are invoked
for each callback. Note that :meth:`.Callback.post_close` is invoked only for callbacks that did not raise
an exception during :meth:`.Callback.close`.
This method does not re-raise any exceptions from :meth:`.Callback.close` and :meth:`.Callback.post_close`.
Instead, these exceptions are logged as errors.
"""
self._close(self.state, self.logger)
# The self._is_closed flag would not be set if `_close` is called via atexit
# However, in these cases, the engine would never be used again, as Python is shutting
# down. It is only required to set the flag if the user manually calls `close()` and still holds
# a reference to the engine.
self._is_closed = True
@staticmethod
def _close(state: State, logger: Logger):
"""The actual shutdown logic, as a static method, so the underlying engine can still be garbage collected."""
log.debug('Closing the engine')
callback_to_has_exception: Dict[Callback, bool] = {}
for callback in state.callbacks:
try:
log.debug('Closing callback %s', type(callback).__name__)
callback.close(state, logger)
except Exception as e:
log.error(
f'Error running {callback.__class__.__name__}.close(). Skipping {callback.__class__.__name__}.post_close().',
exc_info=e,
stack_info=True)
callback_to_has_exception[callback] = True
else:
callback_to_has_exception[callback] = False
for callback in state.callbacks:
if callback_to_has_exception[callback] is False:
try:
log.debug('Post-closing callback %s', type(callback).__name__)
callback.post_close()
except Exception as e:
log.error(f'Error running {callback.__class__.__name__}.post_close().', exc_info=e, stack_info=True)
else:
_OPEN_CALLBACKS.discard(callback)
# Try to shut down any persistent workers
try:
state.train_dataloader._iterator._shutdown_workers() # type: ignore [reportGeneralTypeIssues]
except:
pass
| composer-dev | composer/core/engine.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Algorithm Passes reorder or modify the execution of algorithms by the Engine.
The order in which algorithms are run matters significantly during composition. For example, the
:class:`.SelectiveBackprop` algorithm runs on the :attr:`.Event.AFTER_DATALOADER` event and must run before
any data augmentations. :class:`.Engine` runs re-ordering passes to resolve such ordering issues or conflicts.
These modifications are represented as algorithm passes, which are functions that modify a list of algorithms.
For example, an algorithm pass that ensures a certain algorithm runs last, would be implemented as:
.. code-block:: python
def run_last(algorithms: Sequence[Algorithm], event: Event) -> Sequence[Algorithm]:
        return sorted(algorithms, key=lambda x: isinstance(x, MyAlgorithm))
The passes in this module are registered by default into :class:`.Engine`.
"""
import warnings
from typing import Any, Callable, Sequence, TypeVar
from composer.core.algorithm import Algorithm
from composer.core.event import Event
T = TypeVar('T')
AlgorithmPass = Callable[[Sequence[Algorithm], Event], Sequence[Algorithm]]
def sort_to_front(list_to_sort: Sequence[T], cls: Any) -> Sequence[T]:
"""Helper function to sort instances of a provided class to the front.
Example:
.. testsetup::
from composer.core.passes import sort_to_front
.. doctest::
>>> sort_to_front([1, 'b', 3], str)
['b', 1, 3]
Args:
list_to_sort: list of objects to sort
cls: sorts all objects of this class to the front
Returns:
sorted_list: Sorted List
"""
return sorted(list_to_sort, key=lambda x: not isinstance(x, cls))
def sort_to_back(list_to_sort: Sequence[T], cls: Any) -> Sequence[T]:
"""Helper function to sort instances of a provided class to the back.
Example:
.. testsetup::
from composer.core.passes import sort_to_back
.. doctest::
>>> sort_to_back([1, 'b', 3], str)
[1, 3, 'b']
Args:
list_to_sort: list of objects to sort
cls: sorts all objects of this class to the back
Returns:
sorted_list: Sorted List
"""
return sorted(list_to_sort, key=lambda x: isinstance(x, cls))
def sort_selective_backprop_first(algorithms: Sequence[Algorithm], event: Event) -> Sequence[Algorithm]:
"""Selective Backprop should run before any algorithms modify the loss.
:class:`.SelectiveBackprop` runs after the dataloader returns the batch and executes an extra forward pass to rank
and prune the examples in the batch by loss. To ensure a clean estimate of loss, :class:`.SelectiveBackprop` should
run before any other data augmentations (e.g., :class:`.MixUp`) on the :attr:`.Event.AFTER_DATALOADER` event.
"""
from composer.algorithms import SelectiveBackprop
return sort_to_front(algorithms, cls=SelectiveBackprop)
def sort_fused_layernorm_last(algorithms: Sequence[Algorithm], event: Event) -> Sequence[Algorithm]: #noqa: D403
"""FusedLayerNorm should run after other algorithms that add LayerNorms (e.g. GatedLinearUnits).
This ensures that all LayerNorms are converted to optimized fused versions.
"""
from composer.algorithms import FusedLayerNorm
return sort_to_back(algorithms, cls=FusedLayerNorm)
def sort_low_precision_layernorm_last(algorithms: Sequence[Algorithm],
event: Event) -> Sequence[Algorithm]: #noqa: D403
"""LowPrecisionLayerNorm should run after other algorithms that add LayerNorms (e.g. GatedLinearUnits).
This ensures that all LayerNorms are converted to the intended precision.
"""
from composer.algorithms import LowPrecisionLayerNorm
return sort_to_back(algorithms, cls=LowPrecisionLayerNorm)
def set_filo_order(algorithms: Sequence[Algorithm], event: Event) -> Sequence[Algorithm]:
"""Establish a FILO order of algorithms ``before_`` and ``after_`` events.
For the events that follow the ``before_*`` and ``after_*`` pattern (e.g., :attr:`.Event.BEFORE_LOSS`
and :attr:`.Event.AFTER_LOSS), the ordering of algorithms is reversed for the ``after_*`` events.
For example, four given algorithms ``A``, ``B``, ``C``, and ``D`` will run in ``ABCD`` ordering on
the ``before_*`` event while ``DCBA`` ordering on the ``after_*`` event.
This allows algorithms to "clean up" their changes. For example, :class:`.LabelSmoothing` will smooth the labels
upon the :attr:`.Event.BEFORE_LOSS` event and then restore the original unsmoothed labels on the
:attr:`.Event.AFTER_LOSS` event.
Events with the pattern ``_start`` or ``_end`` will not be affected.
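    Example:
        .. code-block:: python
            # An illustrative sketch (``algo_a``, ``algo_b``, ``algo_c`` are stand-in algorithm instances):
            set_filo_order([algo_a, algo_b, algo_c], Event.BEFORE_LOSS)  # -> [algo_a, algo_b, algo_c]
            set_filo_order([algo_a, algo_b, algo_c], Event.AFTER_LOSS)  # -> [algo_c, algo_b, algo_a]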
"""
if event.name.startswith('AFTER_'):
return list(reversed(algorithms))
return algorithms
def warn_if_multiple_loss_interpolation(algorithms: Sequence[Algorithm], event: Event) -> Sequence[Algorithm]:
"""Multiple algorithms that interpolate the loss may have unexpected behavior."""
from composer.algorithms.warnings import NotIntendedUseWarning
is_interpolate = [a for a in algorithms if hasattr(a, 'interpolate_loss') and a.interpolate_loss] # type: ignore
if len(is_interpolate) > 1:
warnings.warn(
NotIntendedUseWarning(
f'Multiple algorithms interpolating the loss can lead to unexpected behavior: {is_interpolate}'))
return algorithms
| composer-dev | composer/core/passes.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""A wrapper for a dataloader to include metrics that apply to a specific dataset."""
from __future__ import annotations
import math
import textwrap
import warnings
from typing import Any, Callable, Dict, Iterable, List, Optional, Union
from composer.core.data_spec import DataSpec, ensure_data_spec
from composer.core.event import Event
from composer.core.state import State
from composer.core.time import Time, TimeUnit
from composer.devices import Device, DeviceGPU
__all__ = ['Evaluator', 'evaluate_periodically', 'ensure_evaluator', 'validate_eval_automicrobatching']
def evaluate_periodically(eval_interval: Union[str, Time, int], eval_at_fit_end: bool = True):
"""Helper function to generate an evaluation interval callable.
Args:
eval_interval (str | Time | int): A :class:`.Time` instance or time string, or integer in epochs,
representing how often to evaluate. Set to ``0`` to disable evaluation.
eval_at_fit_end (bool): Whether to evaluate at the end of training, regardless of `eval_interval`.
Default: True
Returns:
(State, Event) -> bool: A callable for the ``eval_interval`` argument of an
:class:`.Evaluator`.
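    Example:
        .. code-block:: python
            # An illustrative sketch (``eval_dataloader`` is a stand-in for an existing dataloader):
            # evaluate every 2 epochs and once more at the end of training.
            should_eval = evaluate_periodically('2ep')
            evaluator = Evaluator(label='eval', dataloader=eval_dataloader, eval_interval=should_eval)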
"""
if isinstance(eval_interval, int):
eval_interval = Time(eval_interval, TimeUnit.EPOCH)
if isinstance(eval_interval, str):
eval_interval = Time.from_timestring(eval_interval)
if eval_interval.unit not in (TimeUnit.EPOCH, TimeUnit.BATCH, TimeUnit.DURATION):
raise ValueError('The `eval_interval` must have units of EPOCH, BATCH, DURATION or be a function.')
last_batch_seen = -1
def should_eval(state: State, event: Event):
# `TimeUnit.Duration` value is a float from `[0.0, 1.0)`
if not eval_interval.unit == TimeUnit.DURATION and int(eval_interval) <= 0:
return False
nonlocal last_batch_seen # required to use the last_batch_seen from the outer function scope
# if requested, evaluate at the end of training, as long as the length of training is specified.
if eval_at_fit_end and event == Event.FIT_END and state.timestamp.batch != last_batch_seen:
return True
if eval_interval.unit == TimeUnit.EPOCH and int(
state.timestamp.epoch) % int(eval_interval) == 0 and event == Event.EPOCH_END:
last_batch_seen = state.timestamp.batch
return True
if eval_interval.unit == TimeUnit.BATCH and int(
state.timestamp.batch) % int(eval_interval) == 0 and event == Event.BATCH_END:
last_batch_seen = state.timestamp.batch
return True
if eval_interval.unit == TimeUnit.DURATION:
assert state.max_duration is not None, 'max_duration should not be None'
if state.dataloader_len is None:
raise RuntimeError(
f'Evaluation interval of type `dur` or {TimeUnit.DURATION} requires the dataloader to be sized.')
if state.max_duration.unit == TimeUnit.EPOCH and int(
state.timestamp.batch) % math.ceil(state.max_duration.value * float(eval_interval) *
state.dataloader_len) == 0 and event == Event.BATCH_END:
last_batch_seen = state.timestamp.batch
return True
elif state.max_duration.unit == TimeUnit.BATCH and int(state.timestamp.batch) % math.ceil(
state.max_duration.value * eval_interval.value) == 0 and event == Event.BATCH_END:
last_batch_seen = state.timestamp.batch
return True
elif state.max_duration.unit == TimeUnit.SAMPLE and event == Event.BATCH_END:
# If last sample in batch is not evenly divisible by eval_interval, perform evaluation in next batch
if int(state.timestamp.batch) > 0:
samples_in_a_batch = int(state.timestamp.sample) // int(state.timestamp.batch)
if int(state.timestamp.sample) // math.ceil(state.max_duration.value * eval_interval) != int(
state.timestamp.sample - samples_in_a_batch) // math.ceil(
state.max_duration.value * eval_interval):
last_batch_seen = state.timestamp.batch
return True
elif state.max_duration.unit == TimeUnit.TOKEN and event == Event.BATCH_END:
raise ValueError(f'Evaluation interval of type `dur` is not supported yet for max_duration as `tok`')
return False
return should_eval
class Evaluator:
"""A wrapper for a dataloader to include metrics that apply to a specific dataset.
For example, :class:`.CrossEntropyLoss` metric for NLP models.
.. doctest::
>>> eval_evaluator = Evaluator(
... label='myEvaluator',
... dataloader=eval_dataloader,
... metric_names=['MulticlassAccuracy']
... )
>>> trainer = Trainer(
... model=model,
... train_dataloader=train_dataloader,
... eval_dataloader=eval_evaluator,
... optimizers=optimizer,
... max_duration='1ep',
... )
Args:
label (str): Name of the Evaluator.
dataloader (DataSpec | Iterable | Dict[str, Any]): Iterable that yields batches, a :class:`.DataSpec`
for evaluation, or a Dict of :class:`.DataSpec` kwargs.
metric_names: The list of metric names to compute.
Each value in this list can be a regex string (e.g. "MulticlassAccuracy", "f1" for "BinaryF1Score",
"Top-." for "Top-1", "Top-2", etc). Each regex string will be matched against the keys of the dictionary returned
by ``model.get_metrics()``. All matching metrics will be evaluated.
By default, if left blank, then all metrics returned by ``model.get_metrics()`` will be used.
subset_num_batches (int, optional): The maximum number of batches to use for each evaluation. Defaults to ``None``,
which means that the ``eval_subset_num_batches`` parameter from the :class:`.Trainer` will be used.
Set to ``-1`` to evaluate the entire ``dataloader``.
eval_interval (Time | int | str | (State, Event) -> bool, optional): An integer,
which will be interpreted to be epochs, a str (e.g. ``1ep``, or ``10ba``), a :class:`.Time` object, or a callable.
Defaults to ``None``, which means that the ``eval_interval`` parameter from the :class:`.Trainer` will be used.
If an integer (in epochs), :class:`.Time` string, or :class:`.Time` instance, the evaluator will be run
with this frequency. :class:`.Time` strings or :class:`.Time` instances must have units of
:attr:`.TimeUnit.BATCH` or :attr:`.TimeUnit.EPOCH`.
Set to ``0`` to disable evaluation.
If a callable, it should take two arguments (:class:`.State`, :class:`.Event`) and return a bool
representing whether the evaluator should be invoked. The event will be either :attr:`.Event.BATCH_END`
or :attr:`.Event.EPOCH_END`.
When specifying ``eval_interval``, the evaluator(s) are also run at the ``Event.FIT_END`` if it doesn't
evenly divide the training duration.
        device_eval_microbatch_size (int | str, optional): The number of samples to use for each microbatch when evaluating.
            If set to ``'auto'``, dynamically decreases ``device_eval_microbatch_size`` if the microbatch is too large for the GPU.
            If ``None``, sets ``device_eval_microbatch_size`` to the per-rank batch size. (default: ``None``)
"""
def __init__(
self,
*,
label: str,
dataloader: Union[DataSpec, Iterable, Dict[str, Any]],
metric_names: Optional[List[str]] = None,
subset_num_batches: Optional[int] = None,
eval_interval: Optional[Union[int, str, Time, Callable[[State, Event], bool]]] = None,
device_eval_microbatch_size: Optional[Union[int, str]] = None,
):
self.label = label
self.dataloader = ensure_data_spec(dataloader)
self.metric_names = []
if metric_names is not None:
if not isinstance(metric_names, list):
raise ValueError(f'``metric_names`` should be a list of strings, not a {type(metric_names)}')
self.metric_names = metric_names
self.subset_num_batches = subset_num_batches
self._eval_interval = None
self.eval_interval = eval_interval
self.auto_microbatching = _is_auto_microbatching(device_eval_microbatch_size)
self.device_eval_microbatch_size = _get_initial_device_eval_microbatch_size(
device_eval_microbatch_size,
self.auto_microbatching,
self.dataloader.dataloader,
)
@property
def eval_interval(self):
return self._eval_interval
@eval_interval.setter
def eval_interval(self, eval_interval: Optional[Union[int, str, Time, Callable[[State, Event], bool]]]):
if eval_interval is None:
self._eval_interval = None
elif not callable(eval_interval):
self._eval_interval = evaluate_periodically(eval_interval)
else:
self._eval_interval = eval_interval
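# Editor's note: a minimal, hedged sketch of the callable ``eval_interval`` documented in the
# class above; ``my_eval_dataloader`` mentioned below stands in for any user-provided
# evaluation dataloader.
def _example_eval_every_100_batches(state: State, event: Event) -> bool:
    """Example predicate: evaluate at the end of every 100th batch."""
    return event == Event.BATCH_END and int(state.timestamp.batch) > 0 and int(state.timestamp.batch) % 100 == 0
# It could then be passed directly when constructing an Evaluator, e.g.
# Evaluator(label='my_eval', dataloader=my_eval_dataloader,
#           metric_names=['MulticlassAccuracy'], eval_interval=_example_eval_every_100_batches)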
def ensure_evaluator(evaluator: Union[Evaluator, DataSpec, Iterable, Dict[str, Any]], default_metric_names: List[str]):
"""Ensure that ``evaluator`` is an :class:`.Evaluator`.
Args:
evaluator (Evaluator | DataSpec | Iterable | Dict[str, Any]): A dataloader,
:class:`.DataSpec` instance, dictionary of :class:`.DataSpec` kwargs, or existing evaluator.
default_metric_names (List[str]): The names of the metrics for the ``evaluator``,
if a dataloader was specified.
Returns:
Evaluator: An evaluator.
"""
if isinstance(evaluator, Evaluator):
return evaluator
else:
return Evaluator(
label='eval',
dataloader=evaluator,
metric_names=default_metric_names,
)
def validate_eval_automicrobatching(auto_microbatching: bool, device: Device):
"""Ensure automicrobatching is only on GPU.
Unlike `device_train_microbatch_size`, this validation must be done separately from the
`_is_auto_microbatching` check because `device` is not available during `Evaluator`
initialization.
"""
if auto_microbatching and not isinstance(device, DeviceGPU):
raise ValueError(
'Can only use adaptive device_eval_microbatch_size on GPU. Please set device_eval_microbatch_size >= 1.')
def _is_auto_microbatching(device_eval_microbatch_size: Optional[Union[int, str]]):
if device_eval_microbatch_size == 'auto':
warnings.warn(("Setting `device_eval_microbatch_size='auto'` is an experimental feature which may cause "
                       'uncaught CUDA Out of Memory errors. In this case, please manually '
'set device_eval_microbatch_size explicitly to an integer instead.'))
return True
else:
return False
def _get_initial_device_eval_microbatch_size(device_eval_microbatch_size: Optional[Union[int, str]],
auto_microbatching: bool, dataloader: Iterable) -> int:
"""Sets initial value of device_eval_microbatch_size.
If auto_microbatching, sets initial `device_eval_microbatch_size` to per rank batch size.
"""
if auto_microbatching or device_eval_microbatch_size is None:
try:
batch_size = getattr(dataloader, 'batch_size')
except AttributeError as e:
if auto_microbatching:
raise AttributeError(
"`device_eval_microbatch_size='auto'` requires the `dataloader` to have a `batch_size` attribute."
) from e
else:
raise AttributeError(
textwrap.dedent(
'`device_eval_microbatch_size` is not set and `dataloader` does not have a `batch_size` attribute. '
'Please either set `device_eval_microbatch_size` or `dataloader.batch_size`.')) from e
return batch_size
elif isinstance(device_eval_microbatch_size, int):
return device_eval_microbatch_size
else:
raise ValueError("device_eval_microbatch_size must be an int or ``'auto'``")
| composer-dev | composer/core/evaluator.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The state of the trainer."""
from __future__ import annotations
import collections.abc
import logging
import textwrap
import warnings
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Sequence, Union, cast
import torch
import torch.nn.modules.utils
from packaging import version
from torch.nn.parallel import DistributedDataParallel
from torch.optim import Optimizer
from torch.utils.data import DataLoader, Dataset
from torchmetrics import Metric
from torchmetrics.metric import jit_distributed_available
from composer.core.data_spec import DataSpec
from composer.core.event import Event
from composer.core.precision import Precision
from composer.core.serializable import Serializable
from composer.core.time import Time, Timestamp, TimeUnit
from composer.devices import Device
from composer.utils import batch_get, batch_set, dist, ensure_tuple, get_composer_env_dict, is_model_deepspeed
if TYPE_CHECKING:
import deepspeed
import composer.core.types as types
from composer.core.algorithm import Algorithm
from composer.core.callback import Callback
from composer.core.evaluator import Evaluator
from composer.core.passes import AlgorithmPass
from composer.loggers import Logger
from composer.profiler import Profiler
__all__ = ['State']
log = logging.getLogger(__name__)
@contextmanager
def fsdp_state_dict_type_context(module: torch.nn.Module, state_dict_type: str = 'full'):
"""Context manager for materializing or loading an fsdp module's state dict.
Args:
module (torch.nn.Module): The torch module that you want to call `state_dict()`
or `load_state_dict()` on.
state_dict_type (str, optional): which of the three state dict types you want to use.
choices are ['full', 'sharded', 'local']. Defaults to 'full'.
* 'full': the full, unsharded state dict materialized only on rank 0 with cpu_offload if necessary
* 'local': the sharded, flattened state_dict, where each rank only gets a single shard.
* 'sharded': the sharded, unflattened state_dict, where each rank only gets a single shard.
See torch.distributed.fsdp.StateDictType for more info.
Raises:
RuntimeError: if your torch version is earlier than 1.13.0 because FSDP is not available for those versions.
NotImplementedError: if you specify a state_dict_type not in ['full', 'sharded', 'local'].
"""
if version.parse(torch.__version__) < version.parse('1.13.0'):
raise RuntimeError('To use FSDP with Composer, you must use torch>=1.13.0.')
from torch.distributed.fsdp import FullStateDictConfig
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import LocalStateDictConfig, StateDictType
# torch forgot to put ShardedStateDictConfig in torch/distributed/fsdp/__init__.py, so we
# have to import it this way.
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardedStateDictConfig
# Full is the full monolithic state dict materialized in memory on just rank 0
# with offloading to cpu if necessary
if state_dict_type == 'full':
state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
fsdp_state_dict_type = StateDictType.FULL_STATE_DICT
# Sharded is sharded state dict, but unflattened parameters (not useful for FSDP, but
# useful if you plan to use the state dict outside of FSDP).
elif state_dict_type == 'sharded':
state_dict_config = ShardedStateDictConfig()
fsdp_state_dict_type = StateDictType.SHARDED_STATE_DICT
# Local is the FSDP standard sharded, flattened parameters. This is what the parameters
# are formatted to for a single rank's FSDP module.
elif state_dict_type == 'local':
state_dict_config = LocalStateDictConfig()
fsdp_state_dict_type = StateDictType.LOCAL_STATE_DICT
else:
raise NotImplementedError(f'No valid FSDP state_dict_type for {state_dict_type}')
with FSDP.state_dict_type(module, state_dict_type=fsdp_state_dict_type, state_dict_config=state_dict_config):
yield
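# Editor's note: a minimal, hedged sketch of the context manager above; ``fsdp_model``
# stands in for a module that has already been wrapped with FullyShardedDataParallel.
def _example_materialize_full_state_dict(fsdp_model: torch.nn.Module) -> Dict[str, Any]:
    # Within the context, ``state_dict()`` follows the 'full' configuration set above:
    # the unsharded state dict is materialized on rank 0 (with CPU offload).
    with fsdp_state_dict_type_context(fsdp_model, state_dict_type='full'):
        return fsdp_model.state_dict()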
def fsdp_get_optim_state_dict(model: torch.nn.Module,
optim: torch.optim.Optimizer,
state_dict_type: str = 'full') -> Dict[str, Any]:
"""Materializes a given model's optimizer's state_dict.
Args:
model (torch.nn.Module): The model that the optimizer corresponds to.
optim (torch.optim.Optimizer): The optimizer that you want a state dict for.
state_dict_type (str, optional): which of the three state dict types you want to use.
choices are ['full', 'sharded', 'local']. Defaults to 'full'.
* 'full': the full, unsharded state dict materialized only on rank 0
* 'local': the sharded, flattened state_dict, where each rank only gets a single shard.
* 'sharded': the sharded, unflattened state_dict, where each rank only gets a single shard.
Raises:
RuntimeError: if your torch version is earlier than 1.13.0 because FSDP is not available for those versions.
NotImplementedError: if you specify a state_dict_type not in ['full', 'sharded', 'local'].
Returns:
Dict[str, Any]: The state_dict for the given optimizer.
"""
if version.parse(torch.__version__) < version.parse('1.13.0'):
raise RuntimeError('To use FSDP with Composer, you must use torch>=1.13.0.')
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
if state_dict_type == 'full':
# Converts local state dict to full.
return FSDP.full_optim_state_dict(model=model, optim=optim)
elif state_dict_type == 'sharded':
# Converts local state dict to sharded.
return FSDP.sharded_optim_state_dict(model=model, optim=optim)
elif state_dict_type == 'local':
# State dict is already local, so just return state dict.
return optim.state_dict()
else:
raise NotImplementedError(f'No valid FSDP state_dict_type for {state_dict_type}')
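# Editor's note: a minimal, hedged sketch of the helper above; ``fsdp_model`` and ``optim``
# stand in for a hypothetical FSDP-wrapped model and its optimizer.
def _example_full_optim_state_dict(fsdp_model: torch.nn.Module, optim: Optimizer) -> Dict[str, Any]:
    # 'full' gathers the unsharded optimizer state (materialized on rank 0, see above).
    return fsdp_get_optim_state_dict(fsdp_model, optim, state_dict_type='full')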
def get_fsdp_sharded_optim_state_dict(full_optim_state_dict: Dict[str, Any], model: torch.nn.Module):
if version.parse(torch.__version__) < version.parse('1.13.0'):
raise RuntimeError('To use FSDP with Composer, you must use torch>=1.13.0.')
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
log.debug(
f'Scattering optimizer state dict with keys {full_optim_state_dict.keys()} and model of type {type(model)}')
return FSDP.scatter_full_optim_state_dict(full_optim_state_dict=full_optim_state_dict, model=model)
def get_fsdp_full_optim_state_dict(model: torch.nn.Module, optim: torch.optim.Optimizer, rank0_only: bool = True):
if version.parse(torch.__version__) < version.parse('1.13.0'):
raise RuntimeError('To use FSDP with Composer, you must use torch>=1.13.0.')
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
return FSDP.full_optim_state_dict(model=model, optim=optim, rank0_only=rank0_only)
def _ensure_backwards_compatible_checkpointing(state_dict: Dict[str, Any]):
# v0.4.1 removed the leading underscores for the keys in the state_dict
# It also renamed _is_model_ddp_wrapped to is_model_ddp
state = {}
for attribute_name, serialized_value in state_dict.items():
if attribute_name == '_is_model_ddp_wrapped':
attribute_name = 'is_model_ddp'
if attribute_name.startswith('_'):
attribute_name = attribute_name[1:]
# Torchmetrics adds a new attribute as of 0.11 which must be added to deserialized metrics
if attribute_name == 'train_metrics':
for metric_name in serialized_value.keys():
metric = serialized_value[metric_name]
if not hasattr(metric, 'distributed_available_fn'):
metric.distributed_available_fn = jit_distributed_available
serialized_value[metric_name] = metric
elif attribute_name == 'eval_metrics':
for evaluator_name, eval_metrics in serialized_value.items():
for metric_name in eval_metrics.keys():
metric = eval_metrics[metric_name]
if not hasattr(metric, 'distributed_available_fn'):
metric.distributed_available_fn = jit_distributed_available
serialized_value[evaluator_name][metric_name] = metric
state[attribute_name] = serialized_value
return state
_STATE_DICT_SERIALIZED_ATTRIBUTES = [
# List of attributes that are serialized with state_dict
# Only the attributes listed in state.serialized_attributes will actually be saved.
'model',
'optimizers',
'schedulers',
'algorithms',
'callbacks',
'scaler',
'timestamp',
]
class State(Serializable):
"""The state of the trainer.
Contains variables that the trainer tracks throughout the training loop. Note that all the necessary parts (i.e.,
:attr:`serialized_attributes`) of state are serialized when the trainer is checkpointed so that it can be used to
restore the trainer and continue training from a checkpoint. :mod:`~composer.algorithms` are able to modify an
instance of this class in-place.
.. note::
An instance of this class is automatically constructed by the :class:`~.Trainer` constructor. A user need
not instantiate this class.
Args:
model (torch.nn.Module): The model, typically as a subclass of :class:`~.ComposerModel`.
rank_zero_seed (int): The seed used on the rank zero process. It is assumed that each rank's seed is
``rank_zero_seed + dist.get_global_rank()``.
run_name (str): The name for this training run.
device (Device): The device used by this process. The trainer moves the model and loaded data to this device.
device_train_microbatch_size (int, optional): The microbatch size for each device during training.
auto_microbatching (bool, optional): Whether automatic microbatching is enabled.
train_dataloader (Iterable, optional): Dataloader used for training
evaluators (Evaluator | Evaluators, optional): :class:`.Evaluator` used for evaluation.
dataloader (Iterable, optional): The active DataLoader.
dataloader_len (int | Time[int], optional): The number of batches per dataloader iteration (e.g. epoch).
The trainer will yield the first ``dataloader_len`` batches per iteration. If ``-1`` (the default),
the entire dataloader will be iterated over.
dataloader_label (str, optional): The name for the dataloader. Required if ``dataloader`` is specified.
(default: ``None``)
By convention, the training dataloader is called ``'train'``. The evaluator dataloader is called
``'eval'``, or when multiple evaluators are used, the name of the evaluator.
dataset_state (Dict[str, Any], optional): Mapping of dataset split to its iteration state for resumption.
dataset_resumption (Dict[str, Any], optional): Mapping of dataset split to whether resumption is used.
max_duration (str | Time, optional): The maximum duration to train for. (default: ``None``)
precision (str | Precision): The numerical precision to use for training. See :class:`~.Precision` for
the supported precisions.
optimizers (torch.optim.Optimizer | Sequence[torch.optim.Optimizer], optional): The optimizer being used to
train the model. Multiple optimizers are not currently supported.
schedulers (types.PyTorchScheduler | Sequence[types.PyTorchScheduler], optional):
The learning rate scheduler (can also be a list or tuple of schedulers).
scaler (torch.cuda.amp.GradScaler, optional): The gradient scaler in use for mixed precision training.
algorithms (Algorithm | Sequence[Algorithm], optional): The algorithms used for training.
callbacks (Callback | Sequence[Callback], optional): The callbacks used for training.
deepspeed_config (Dict[str, Any], optional): The configuration dictionary for deepspeed.
fsdp_config (Dict[str, Any], optional): The configuration dictionary for FSDP.
Attributes:
batch (types.Batch): The batch. This will be the entire batch during the :attr:`.Event.AFTER_DATALOADER`, or a
microbatch between :attr:`.Event.BATCH_START` and :attr:`.Event.BATCH_END`.
device (Device): The device used by this process. The trainer moves the model and loaded data to this device. This
can be used in callbacks and algorithms to move data onto the correct device.
train_metrics (Dict[str, Metric]): The current train metrics, organized by metric name. ``train_metrics`` will be deep-copied to
ensure that each evaluator updates only its ``train_metrics``.
For example:
>>> trainer = Trainer(
... ...,
... train_dataloader=train_dataloader,
... eval_dataloader=eval_dataloader,
... )
>>> trainer.fit()
>>> trainer.state.train_metrics
{'MulticlassAccuracy': MulticlassAccuracy()}
eval_metrics (Dict[str, Dict[str, Metric]]): The current evaluation metrics, organized
by dataloader label and then by metric name. If not using an :class:`.Evaluator`,
the eval dataloader is labeled ``'eval'``. Otherwise, in the case of having multiple evaluation datasets,
the evaluator label is used. See the `Multiple Datasets Documentation <https://docs.mosaicml.com/en/stable/trainer/evaluation.html#multiple-datasets>`_
for more information. ``eval_metrics`` will be deep-copied to ensure that each evaluator updates only its ``eval_metrics``.
For example:
>>> from composer.metrics import CrossEntropy
>>> trainer = Trainer(
... ...,
... train_dataloader=train_dataloader,
... eval_dataloader=eval_dataloader,
... )
>>> trainer.fit()
>>> trainer.state.eval_metrics
{'eval': {'CrossEntropy': CrossEntropy(), 'MulticlassAccuracy': MulticlassAccuracy()}}
Or, when using an :class:`.Evaluator` for multiple evaluation datasets:
.. testsetup::
eval_1_dl = eval_dataloader
eval_2_dl = eval_dataloader
>>> from composer.core import Evaluator
>>> trainer = Trainer(
... ...,
... train_dataloader=train_dataloader,
... eval_dataloader=[
... Evaluator(label='eval1', dataloader=eval_1_dl, metric_names=['MulticlassAccuracy']),
... Evaluator(label='eval2', dataloader=eval_2_dl, metric_names=['MulticlassAccuracy']),
... ],
... )
>>> trainer.fit()
>>> trainer.state.eval_metrics
{'eval1': {'MulticlassAccuracy': MulticlassAccuracy()}, 'eval2': {'MulticlassAccuracy': MulticlassAccuracy()}}
eval_timestamp (Timestamp): The timestamp for the current evaluation dataloader. This timestamp is reset
before the dataloader is evaluated. The :attr:`~Timestamp.epoch` attribute for this timestamp is always
``0``.
device_train_microbatch_size (int): The size of each train microbatch per device.
loss (torch.Tensor | Sequence[torch.Tensor]): The most recently computed loss.
model (torch.nn.Module): The training model.
.. note::
When using DeepSpeed or multi-rank training, the model will be wrapped with
:class:`~deepspeed.DeepSpeedEngine` or :class:`~torch.nn.parallel.DistributedDataParallel`,
respectively.
outputs (torch.Tensor | Sequence[torch.Tensor]): The most recently computed output from the model's forward
pass.
predict_timestamp (Timestamp): The timestamp for the current prediction dataloader. This timestamp is reset
before the dataloader is used. The :attr:`~Timestamp.epoch` attribute for this timestamp is always
``0``.
profiler (Profiler): The profiler (if profiling is enabled), or ``None`` if not profiling.
rank_zero_seed (int): The seed of the rank zero process.
run_name (str): The name for this training run.
scaler (torch.cuda.amp.GradScaler): The gradient scaler if using mixed-precision training, or
``None`` if not using mixed-precision training.
        serialized_attributes (List[str]): The names of the attributes which are serialized in a checkpoint.
By default, the following attributes are serialized:
+-----------------------+-------------------------------------------------------------+
| Attribute | Description |
+=======================+=============================================================+
| model | The model under training. |
+-----------------------+-------------------------------------------------------------+
| optimizers | The optimizers being used to train the model. |
+-----------------------+-------------------------------------------------------------+
| schedulers | The learning rate schedulers. |
+-----------------------+-------------------------------------------------------------+
| algorithms | The algorithms used for training. |
+-----------------------+-------------------------------------------------------------+
| callbacks | The callbacks used for training. |
+-----------------------+-------------------------------------------------------------+
| scaler | The gradient scaler in use for mixed precision training. |
+-----------------------+-------------------------------------------------------------+
| timestamp | The timestamp that tracks training loop progress. |
+-----------------------+-------------------------------------------------------------+
| rank_zero_seed | The seed of the rank zero process. |
+-----------------------+-------------------------------------------------------------+
| train_metrics | The current training metrics |
+-----------------------+-------------------------------------------------------------+
| eval_metrics | The current evaluation metrics |
+-----------------------+-------------------------------------------------------------+
| run_name | The run name for training. |
+-----------------------+-------------------------------------------------------------+
| dataset_state | The dataset iteration state. |
+-----------------------+-------------------------------------------------------------+
timestamp (Timestamp): The current training timestamp.
"""
def __init__(
self,
# model
model: torch.nn.Module,
# determinism
rank_zero_seed: int,
# run_name
run_name: str,
# device
device: Device,
# stopping conditions
max_duration: Optional[Union[str, Time[int]]] = None,
# data configurations
device_train_microbatch_size: Optional[int] = None,
auto_microbatching: bool = False,
# dataloaders
train_dataloader: Optional[Iterable] = None,
evaluators: Optional[Union[Evaluator, Sequence[Evaluator]]] = None,
# these track the current 'active' dataloader
# depending on train, eval, or others
dataloader: Optional[Iterable] = None,
dataloader_label: Optional[str] = None,
dataloader_len: Union[int, Time[int]] = -1,
dataset_state: Optional[Dict[str, Any]] = None,
dataset_resumption: Optional[Dict[str, Any]] = None,
# precision
precision: Union[str, Precision] = Precision.FP32,
# optimizers
optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None,
# scaler
scaler: Optional[torch.cuda.amp.grad_scaler.GradScaler] = None,
# algorithms and callbacks
algorithms: Optional[Union[Algorithm, Sequence[Algorithm]]] = None,
callbacks: Optional[Union[Callback, Sequence[Callback]]] = None,
# Distributed training configs
deepspeed_config: Optional[Dict[str, Any]] = None,
fsdp_config: Optional[Dict[str, Any]] = None,
):
self.rank_zero_seed = rank_zero_seed
self.model = model
self.run_name = run_name
self.device = device
self.device_train_microbatch_size = device_train_microbatch_size
self.auto_microbatching = auto_microbatching
self._dataloader_len = None
self._dataloader = None
self._dataloader_label = None
self.set_dataloader(dataloader, dataloader_label, dataloader_len)
self.dataset_state = dataset_state
self.dataset_resumption = dataset_resumption or {}
self._max_duration = None
self.max_duration = max_duration
self._train_dataloader = train_dataloader
self._evaluators = list(ensure_tuple(evaluators))
self.timestamp = Timestamp()
self.eval_timestamp = Timestamp()
self.predict_timestamp = Timestamp()
self._precision = Precision(precision)
if optimizers is None:
self._optimizers = []
else:
self._optimizers = list(ensure_tuple(optimizers))
self._schedulers = []
self.scaler = scaler
self._algorithms = list(ensure_tuple(algorithms))
self._callbacks = list(ensure_tuple(callbacks))
self.profiler: Optional[Profiler] = None
self.deepspeed_config = deepspeed_config
self.fsdp_config = fsdp_config
self.fsdp_state_dict_type: Optional[str] = None
if self.fsdp_enabled:
if self.fsdp_config is not None:
self.fsdp_state_dict_type = self.fsdp_config.get('state_dict_type', 'full')
else:
self.fsdp_state_dict_type = 'full'
# Set defaults for transient variables (to make pyright happy)
self.batch: Any = None
self.loss: Union[torch.Tensor, Sequence[torch.Tensor]] = torch.Tensor()
self.outputs: Union[torch.Tensor, Sequence[torch.Tensor]] = torch.Tensor()
# These attributes will be serialized using .state_dict(), and loaded with .load_state_dict()
# All other attributes will not be serialized.
# For simplicity, omit the leading underscore for private attributes.
# For example, even though the optimizers are stored on the state
# as the "_optimizers" attribute, here we specify just "optimizers"
self.serialized_attributes = [
'model',
'optimizers',
'schedulers',
'algorithms',
'callbacks',
'scaler',
'timestamp',
'rank_zero_seed',
'train_metrics',
'eval_metrics',
'run_name',
'dataset_state',
]
self.train_metrics: Dict[str, Metric] = {}
self.eval_metrics: Dict[str, Dict[str, Metric]] = {}
self.train_metric_values: Dict[str, float] = {}
self.eval_metric_values: Dict[str, float] = {}
self.total_loss_dict: Dict[str, float] = {}
def _dataset_of(self, dataloader: Optional[Union[Evaluator, DataSpec, DataLoader, Iterable]]) -> Optional[Dataset]:
"""Get the dataset contained by the given dataloader-like object.
Args:
dataloader (Evaluator | DataSpec | DataLoader | Iterable, optional): The dataloader, wrapped dataloader, or
generic python iterable to get the dataset of, if applicable.
Returns:
Dataset: Its dataset, if there is one.
"""
from composer.core.evaluator import Evaluator
# If it's None, no dataset for you.
if dataloader is None:
return None
# An Evaluator is a dataloader wrapped with metrics. Unwrap its dataloader.
if isinstance(dataloader, Evaluator):
dataloader = dataloader.dataloader
# A DataSpec is a dataloader wrapped with an on-device transform. Unwrap its dataloader.
if isinstance(dataloader, DataSpec):
dataloader = dataloader.dataloader
# If what we now have is an actual DataLoader, return its dataset. If not, return None.
if isinstance(dataloader, DataLoader):
return dataloader.dataset
else:
return None
@property
def train_dataloader(self) -> Optional[Union[Iterable, DataLoader]]:
"""Get the train dataloader.
Returns:
Iterable | DataLoader, optional: The dataloader.
"""
return self._train_dataloader
@train_dataloader.setter
def train_dataloader(self, train_dataloader: Optional[Union[Iterable, DataLoader]]):
"""Set the train dataloader.
Args:
train_dataloader (Iterable | DataLoader, optional): The dataloader.
"""
self._train_dataloader = train_dataloader
# Load dataset state from checkpoint when train_dataloader is set
if self.dataset_state:
dataset = self._dataset_of(self._train_dataloader)
if hasattr(dataset, 'load_state_dict'):
dataset.load_state_dict(self.dataset_state['train']) # pyright: ignore
self.dataset_resumption['train'] = True
self.dataset_state['train'] = None
@property
def seed(self):
"""The seed for the current rank."""
return self.rank_zero_seed + dist.get_global_rank()
@property
def max_duration(self):
"""The maximum training duration."""
return self._max_duration
@max_duration.setter
def max_duration(self, max_duration: Optional[Union[str, Time[int]]]):
if max_duration is None:
self._max_duration = None
return
if isinstance(max_duration, str):
max_duration = cast(Time[int], Time.from_timestring(max_duration))
if max_duration.unit == TimeUnit.DURATION:
raise ValueError('TimeUnit.DURATION is not allowed as a unit for max_duration')
self._max_duration = max_duration
def get_elapsed_duration(self) -> Optional[Time[float]]:
"""Get the elapsed training duration.
Returns:
Optional[Time[float]]: The elapsed duration, in :attr:`TimeUnit.DURATION`.
``Time(0.0, TimeUnit.DURATION)`` represents the beginning of training and ``Time(1.0, TimeUnit.DURATION)``
represents a completed training process. Returns ``None`` if ``max_duration`` is None.
"""
if self.max_duration is None:
return None
return self.timestamp.get(self.max_duration.unit) / self.max_duration
def stop_training(self):
"""Gracefully stop training.
        The current batch of training will finish, and any scheduled evaluation and
        logging for that batch, as well as any epoch end events, will still run
        before training stops.
"""
self.max_duration = self.timestamp.batch
@property
def optimizers(self):
"""The optimizers."""
return self._optimizers
@optimizers.setter
def optimizers(self, optimizers: Union[Optimizer, Sequence[Optimizer]]):
self._optimizers[:] = ensure_tuple(optimizers)
@property
def schedulers(self):
"""The schedulers."""
return self._schedulers
@schedulers.setter
def schedulers(self, schedulers: Union[types.PyTorchScheduler, Sequence[types.PyTorchScheduler]]):
self._schedulers[:] = ensure_tuple(schedulers)
def batch_get_item(self, key: Union[str, int, Callable, Any]) -> Any:
"""Gets element from batch either specified by key or user-specified function.
See batch_get in `utils/batch_helpers.py` for examples.
Args:
key (str | int | Tuple[Callable, Callable] | Any, optional): A key to index into the batch or a
user-specified function to do the extracting. A pair of callables is also
supported for cases where a get and set function pair are both passed
(like in Algorithms). The getter is assumed to be the first of the pair.
Returns:
The part of the batch specified by the key. This could be any type
depending on what the batch is composed of.
"""
return batch_get(self.batch, key)
def batch_set_item(self, key: Union[str, int, Callable, Any], value: Any):
"""Sets the element specified by the key of the set_fn to the specified value.
This is not an in-place operation, as for tuple-typed batches, a new batch object
must be created to modify them.
See batch_set in `utils/batch_helpers.py` for examples.
Args:
key (str | int | Tuple[Callable, Callable] | Any, optional): A key to index into the batch or a user-specified
function to do the setting. A pair of callables is also supported for
cases where a get and set function pair are both passed (like in
Algorithms). The setter is assumed to be the second of the pair.
value (Any): The value that batch[key] or batch.key gets set to or that the
user-defined set function sets a part of the batch to.
Returns:
batch (Any): The updated batch with value set at key.
"""
self.batch = batch_set(self.batch, key=key, value=value)
@property
def callbacks(self):
"""The callbacks."""
return self._callbacks
@callbacks.setter
def callbacks(self, callbacks: Sequence[Callback]):
self._callbacks[:] = callbacks
@property
def algorithms(self):
"""The algorithms."""
return self._algorithms
@algorithms.setter
def algorithms(self, algorithms: Sequence[Algorithm]):
self._algorithms[:] = algorithms
@property
def evaluators(self):
"""The evaluators."""
return self._evaluators
@evaluators.setter
def evaluators(self, evaluators: Union[Evaluator, Sequence[Evaluator]]):
self._evaluators[:] = list(ensure_tuple(evaluators))
# Load dataset state from checkpoint when evaluators are set
if self.dataset_state:
state = self.dataset_state['eval']
for evaluator in self._evaluators:
dataset = self._dataset_of(evaluator)
if hasattr(dataset, 'load_state_dict') and evaluator.label in state:
dataset.load_state_dict(state[evaluator.label]) # pyright: ignore
del self.dataset_state['eval']
@property
def deepspeed_enabled(self):
"""Indicates if deepspeed is enabled."""
return self.deepspeed_config is not None
@property
def fsdp_enabled(self):
"""Indicates if FSDP is enabled."""
if version.parse(torch.__version__) < version.parse('1.13.0'):
return False
from torch.distributed.fsdp import FullyShardedDataParallel
for module in self.model.modules():
if isinstance(module, FullyShardedDataParallel):
return True
return False
@property
def fsdp_sharded_state_dict_enabled(self):
if self.fsdp_config is None:
return False
return (self.fsdp_enabled and self.fsdp_state_dict_type in ['sharded', 'local'])
def _get_integrations_state_dict(self) -> Dict[str, Any]:
"""Gets a dictionary of information about integrations to store in the state dict.
This metadata is used for loading things from state dict that need to be done outside
of the normal Composer load path (e.g. HuggingFace model/tokenizer).
"""
from composer.models import HuggingFaceModel
integrations = {}
if isinstance(self.model, HuggingFaceModel):
integrations['huggingface'] = self.model.get_metadata()
return integrations
def _get_state_metadata(self) -> Dict[str, Any]:
"""Gets a dictionary of metadata to store in the state dict.
This metadata is used for checking compatibility between the current environment/setup
and the environment/setup that was used for the checkpoint that is being loaded in
"""
metadata_dict = {}
metadata_dict['composer_env_info'] = get_composer_env_dict()
metadata_dict['device'] = self.device.name
metadata_dict['precision'] = self.precision.value
metadata_dict['world_size'] = dist.get_world_size()
metadata_dict['device_train_microbatch_size'] = self.device_train_microbatch_size
if self._train_dataloader is not None and hasattr(self._train_dataloader, 'batch_size'):
metadata_dict['train_dataloader_batch_size'] = self._train_dataloader.batch_size # type: ignore
return metadata_dict
def _dataset_state_dict(self) -> Dict[str, Any]:
"""Collect the state dict(s) of our train and eval dataset(s).
Returns:
Dict[str, Any]: The state dict(s).
"""
obj = {
'train': None,
'eval': {},
}
dataset = self._dataset_of(self.train_dataloader)
if hasattr(dataset, 'state_dict'):
num_samples = int(self.timestamp.sample_in_epoch.value)
obj['train'] = dataset.state_dict(num_samples, True) # pyright: ignore
for evaluator in self.evaluators:
dataset = self._dataset_of(evaluator)
if hasattr(dataset, 'state_dict'):
# Don't save eval sample because we do not checkpoint during eval.
obj['eval'][evaluator.label] = dataset.state_dict(0, True) # pyright: ignore
return obj
def state_dict(self) -> Dict[str, Any]:
"""Collect the state dicts of our serializable attributes.
Returns:
Dict[str, Any]: The state dict.
"""
state_dict = {}
for attribute_name in self.serialized_attributes:
attribute_value = getattr(self, attribute_name)
if attribute_name == 'dataset_state':
serialized_value = self._dataset_state_dict()
elif attribute_name == 'model':
# Save model directly instead of by class name, since model may be wrapped by DistributedDataParallel
# If it is DDP wrapped, do not save the `module.` prefix, as that is an implementation detail
if self.fsdp_enabled and self.fsdp_state_dict_type is not None:
with fsdp_state_dict_type_context(attribute_value, state_dict_type=self.fsdp_state_dict_type):
model_state = attribute_value.state_dict()
else:
model_state = attribute_value.state_dict()
if self.is_model_ddp:
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(model_state, 'module.')
serialized_value = model_state
elif attribute_name == 'optimizers':
optimizer = ensure_tuple(attribute_value)[
0] # Let's stop pretending. We don't support more than one optimizer.
if self.fsdp_enabled and self.fsdp_state_dict_type is not None:
optim_state_dict = {
type(optimizer).__qualname__:
fsdp_get_optim_state_dict(self.model, optimizer, state_dict_type=self.fsdp_state_dict_type)
}
else:
optim_state_dict = {type(optimizer).__qualname__: optimizer.state_dict()}
serialized_value = optim_state_dict
elif attribute_name == 'algorithms':
# Store as list to preserve order in which algorithms were applied
serialized_value = [(type(obj).__qualname__, obj.state_dict()) for obj in ensure_tuple(attribute_value)]
elif attribute_name in _STATE_DICT_SERIALIZED_ATTRIBUTES:
serialized_value = {type(obj).__qualname__: obj.state_dict() for obj in ensure_tuple(attribute_value)}
else:
serialized_value = attribute_value
state_dict[attribute_name] = serialized_value
state_dict['integrations'] = self._get_integrations_state_dict()
state_dict['metadata'] = self._get_state_metadata()
return state_dict
def _apply_required_algorithms(
self,
state_dict: Dict[str, Any],
logger: Logger,
exclude_algorithms: Optional[List[str]] = None,
algorithm_passes: Optional[List[AlgorithmPass]] = None,
):
"""Applies required algorithms which haven't been specified and aren't in the exclude list.
Args:
state_dict (Dict[str, Any]): State from checkpoint.
logger (Logger): Logger to use.
exclude_algorithms (List[str], optional): List of algorithm names to exclude. (default: ``None``)
algorithm_passes (List[AlgorithmPass], optional): A list of algorithm passes to apply to autoloaded algorithms
to sort them into the correct order. (default: ``None``)
"""
# Don't try to autoload on old checkpoints
if not isinstance(state_dict['algorithms'], list):
return
import composer.algorithms as algorithms # type: ignore imports used in `eval(representation)`
# Get repr of existing algorithms
current_algos = {}
for algo in self.algorithms:
if algo.required_on_load():
if type(algo) not in current_algos:
current_algos[type(algo)] = []
current_algos[type(algo)].append(algo.__repr__())
# Gather algorithms to apply
missing_algos = set()
missing_algo_names = []
missing_algo_reprs = []
for algo_name, serialized_value in state_dict['algorithms']:
# Check if required algorithm
if hasattr(algorithms, algo_name) and getattr(algorithms, algo_name).required_on_load():
# Check that algorithm is not explicitly excluded by user
if exclude_algorithms is None or algo_name not in exclude_algorithms:
try:
algo = eval(f"algorithms.{serialized_value['repr']}")
except:
warnings.warn(
textwrap.dedent(
f"required_on_load algorithm {serialized_value['repr']} was enabled when training the "
f'loaded checkpoint. Attempted to check its presence but recreating the algorithm '
"failed. This may be due to a change in the algorithm's API. If this required_on_load "
'algorithm is not properly specified, it may lead to unexpected behavior, including '
'failing to load weights for some layers.'))
continue
# Raise warning if we are unable to safely autoapply
if type(algo) in current_algos and not serialized_value['repr'] in current_algos[type(algo)]:
warnings.warn(
textwrap.dedent(
f"required_on_load algorithm {serialized_value['repr']} was enabled when training the "
f"loaded checkpoint but is now specified in the following forms: {', '.join(current_algos[type(algo)])}."
'Potential parameter discrepancies for this required_on_load algorithm may lead to '
'unexpected behavior, including failing to load weights for some layers.'))
# Otherwise, queue algorithm to be autoapplied
elif type(algo) not in current_algos:
missing_algos.add(algo)
missing_algo_names.append(algo_name)
missing_algo_reprs.append(serialized_value['repr'])
self.algorithms.append(algo)
# Reorder algorithms based on algorithm_passes from engine
algo_list = self.algorithms
if algorithm_passes is not None:
for algo_pass in algorithm_passes:
algo_list = algo_pass(algo_list, Event.INIT)
# Raise ValueError if algorithm_passes order any checkpoint algorithm before an already
# applied user specified algorithm
encountered_ckpt_algo = False
for algo in algo_list:
if algo in missing_algos:
encountered_ckpt_algo = True
elif encountered_ckpt_algo:
raise ValueError(
textwrap.dedent('The following algorithms were enabled when training this checkpoint '
f'and are required to successfully load it: {missing_algo_reprs}. '
'Attempted to autocreate and apply required algorithms, but at least one '
'of the loaded algorithms was ordered before a user specified algorithm '
'which has already been applied, preventing automatic application of '
'algorithms. If you wish to use pretrained weights and reinitialize '
'layers which have undergone surgery, the following algorithms may be '
'excluded using `load_exclude_algorithms`, e.g. '
f'`load_exclude_algorithms=[{missing_algo_names}]`.'))
try:
for algo in missing_algos: # TODO: use compiled algorithm order
if algo.match(Event.INIT, self):
algo.apply(Event.INIT, self, logger)
warnings.warn(
textwrap.dedent(
f'Automatically adding required_on_load algorithm {repr(algo)} to trainer, which was enabled '
'when training the loaded checkpoint. If you wish to use pretrained weights and ignore '
f'required_on_load algorithms, which may result in some weights failing to load, include {type(algo).__qualname__} '
f"in `load_exclude_algorithms`, e.g. `load_exclude_algorithms=['{type(algo).__qualname__}']`."))
except Exception as e:
raise ValueError(
textwrap.dedent(
'The following algorithms were enabled when training this checkpoint '
f'and are required to successfully load it: {missing_algo_reprs}. '
'Attempted to autocreate and apply required algorithms but an exception was '
'encountered. If you wish to use pretrained weights and reinitialize layers which '
'have undergone surgery, the following algorithms may be excluded using '
f'`load_exclude_algorithms`, e.g. `load_exclude_algorithms=[{missing_algo_names}]`.')) from e
def load_model_state(
self,
state_dict: Dict[str, Any],
logger: Logger,
strict: bool,
exclude_algorithms: Optional[List[str]] = None,
algorithm_passes: Optional[List[AlgorithmPass]] = None,
):
"""Loads the model's state from a ``state_dict``.
Args:
state_dict (Dict[str, Any]): The state dict, generated from a previous call to :meth:`state_dict`.
logger (Logger): The logger.
strict (bool): Whether the keys (i.e., model parameter names) in the model state dict should
perfectly match the keys in the model instance.
exclude_algorithms (List[str], optional): List of algorithm names to exclude from autoloading. (default: ``None``)
algorithm_passes (List[AlgorithmPass], optional): A list of algorithm passes to apply to autoloaded algorithms
to sort them into the correct order. (default: ``None``)
"""
if 'algorithms' in state_dict:
self._apply_required_algorithms(state_dict, logger, exclude_algorithms, algorithm_passes)
if state_dict.get('is_model_ddp', False) and not self.is_model_ddp:
# This check is for backwards compatibility, as pre-v0.6.0 checkpoints serialized the state
# with the `module.` prefix
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(state_dict['model'], 'module.')
if self.fsdp_enabled and self.fsdp_state_dict_type is not None:
with fsdp_state_dict_type_context(self.model, state_dict_type=self.fsdp_state_dict_type):
missing_keys, unexpected_keys = self.model.load_state_dict(state_dict['model'], strict=strict)
else:
missing_keys, unexpected_keys = self.model.load_state_dict(state_dict['model'], strict=strict)
if len(missing_keys) > 0:
log.warning(f"Found these missing keys in the checkpoint: {', '.join(missing_keys)}")
if len(unexpected_keys) > 0:
log.warning(f"Found these unexpected keys in the checkpoint: {', '.join(unexpected_keys)}")
def load_optim_state(self, state_dict: Dict[str, Any]):
"""Load the optimizer state.
Args:
state_dict (Dict[str, Any]): The state to load.
"""
serialized_value = state_dict['optimizers']
for optimizer in ensure_tuple(self.optimizers):
if type(optimizer).__qualname__ not in serialized_value:
warnings.warn(
f'{type(optimizer).__qualname__} is not in the state_dict. Its state will not be restored.',
category=UserWarning)
continue
optim_state_dict = serialized_value[type(optimizer).__qualname__]
if self.fsdp_enabled:
log.debug(f'Loading FSDP optimizer with fsdp_state_dict_type={self.fsdp_state_dict_type}')
if self.fsdp_state_dict_type == 'sharded':
if version.parse(torch.__version__) < version.parse('1.13.0'):
raise RuntimeError('To use FSDP with Composer, you must use torch>=1.13.0.')
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
# Optimizer and optimizer state dict are already sharded, but not
# flattened, so we flatten the state dict then load it.
flattened_optim_state_dict = FSDP.flatten_sharded_optim_state_dict(
sharded_optim_state_dict=optim_state_dict, model=self.model, optim=optimizer)
optimizer.load_state_dict(flattened_optim_state_dict)
elif self.fsdp_state_dict_type == 'local':
# Optimizer and optimizer state dict are already sharded and flattened,
# so just load the state_dict.
optimizer.load_state_dict(optim_state_dict)
else: # fsdp_state_dict_type == 'full'
# FSDP enabled, but fsdp_state_dict is set to 'full', so the state dict
# is a full state dict and we must shard and flatten it first before loading it.
sharded_optim_state_dict = get_fsdp_sharded_optim_state_dict(full_optim_state_dict=optim_state_dict,
model=self.model)
log.debug(f'optimizer.load_state_dict call with fsdp_state_dict_type=full')
optimizer.load_state_dict(sharded_optim_state_dict)
# No FSDP, so just load the optim state dict.
else:
log.debug(f'Loading optimizer state dict')
optimizer.load_state_dict(optim_state_dict)
def _load_dataset_state(self, obj: Dict[str, Any]) -> None:
"""Load the dataset state.
Args:
obj (Dict[str, Any]): The state to load.
"""
self.dataset_state = obj
dataset = self._dataset_of(self.train_dataloader)
if hasattr(dataset, 'load_state_dict'):
dataset.load_state_dict(obj['train']) # pyright: ignore
obj['train'] = None
self.dataset_resumption['train'] = True
for evaluator in self.evaluators:
dataset = self._dataset_of(evaluator)
if hasattr(dataset, 'load_state_dict') and evaluator.label in obj['eval']:
dataset.load_state_dict(obj['eval'][evaluator.label]) # pyright: ignore
del obj['eval'][evaluator.label]
if 'eval' not in self.dataset_resumption:
self.dataset_resumption['eval'] = {}
# Note: We currently disable setting dataset_resumption for eval datasets,
# which means they have one sample fetched in _spin_dataloaders before training
            # starts. This avoids "CUDA error: initialization error" -- it's not clear why.
# self.dataset_resumption['eval'][evaluator.label] = True
def load_state_dict(
self,
state: Dict[str, Any],
logger: Logger,
strict: bool = False,
exclude_algorithms: Optional[List[str]] = None,
algorithm_passes: Optional[List[AlgorithmPass]] = None,
):
"""Loads the state.
Args:
state (Dict[str, Any]): object returned from call to :meth:`state_dict`.
logger (Logger): The logger.
strict (bool): whether the keys in the ``state["model"]`` should perfectly match the keys in the
``self.model``. Defaults to False.
exclude_algorithms (List[str], optional): List of algorithm names to exclude from autoloading. (default: ``None``)
algorithm_passes (List[AlgorithmPass], optional): A list of algorithm passes to apply to autoloaded algorithms
to sort them into the correct order. (default: ``None``)
"""
state = _ensure_backwards_compatible_checkpointing(state)
# Call load_model_state since it applies required algorithms
if 'model' in state:
self.load_model_state(
state,
logger,
strict=strict,
exclude_algorithms=exclude_algorithms,
algorithm_passes=algorithm_passes,
)
for attribute_name, serialized_value in state.items():
# Skip removed attributes as well as algorithms and model, which was already loaded
if attribute_name not in self.serialized_attributes or attribute_name == 'model':
continue
# Integrations are extra information about other libraries (e.g. huggingface) and not attributes to be loaded here
if attribute_name == 'integrations':
continue
# Skip metadata, which is not an attribute on State
if attribute_name == 'metadata':
continue
log.debug(f'Loading {attribute_name} into state.')
# Restructure algorithms serialized_value from list to dict
if attribute_name == 'algorithms' and isinstance(serialized_value, list):
serialized_value = {algo_name: algo_serialized for algo_name, algo_serialized in serialized_value}
if attribute_name == 'dataset_state':
self._load_dataset_state(serialized_value)
elif attribute_name == 'optimizers':
self.load_optim_state(state)
elif attribute_name == 'train_metrics':
state_field_value = getattr(self, attribute_name)
for metric_name, metric in serialized_value.items():
metric._device = self.device._device
state_field_value[metric_name] = metric
elif attribute_name == 'eval_metrics':
state_field_value = getattr(self, attribute_name)
for eval_key, eval_metrics in serialized_value.items():
for metric_name, metric in eval_metrics.items():
metric._device = self.device._device
state_field_value[eval_key][metric_name] = metric
elif attribute_name in _STATE_DICT_SERIALIZED_ATTRIBUTES:
state_field_value = getattr(self, attribute_name)
for target in ensure_tuple(state_field_value):
if type(target).__qualname__ not in serialized_value:
warnings.warn(
f'{type(target).__qualname__} is not in the state_dict. Its state will not be restored.',
category=UserWarning)
continue
source = serialized_value[type(target).__qualname__]
target.load_state_dict(source)
else:
# direct serialization
try:
setattr(self, attribute_name, serialized_value)
except AttributeError:
# ignore AttributeError for properties that have getters but not setters.
pass
@property
def dataloader(self):
"""The active dataloader."""
return self._dataloader
@property
def dataloader_label(self):
"""The dataloader label for the active dataloader.
By default, the training dataloader is called ``'train'``. The evaluator dataloader
is called ``'eval'``, or when multiple evaluators are used, the name of the evaluator.
However, the dataloader label can be explicitly specified in :meth:`.Trainer.fit`
and :meth:`.Trainer.eval`.
Returns:
Optional[str]: The dataloader label, or None if no dataloader is set.
"""
return self._dataloader_label
def set_dataloader(
self,
dataloader: Optional[Iterable] = None,
dataloader_label: Optional[str] = None,
dataloader_len: Union[int, Time[int]] = -1,
):
"""Update the active dataloader and dataloader label.
Args:
dataloader (Iterable, optional): The dataloader. Defaults to None.
dataloader_label (str, optional): The dataloader label. Must be ``None`` if and only if
``dataloader`` is None. Defaults to None.
            dataloader_len (int | Time[int]): The number of batches per dataloader iteration (e.g. epoch), as used by the trainer.
Set to ``-1`` to iterate over the entire dataset. (Default: ``-1``.)
"""
if dataloader is None:
dataloader_label = None
else:
if dataloader_label is None:
raise ValueError('If the `dataloader` is specified, then `dataloader_label` must not be None.')
self._dataloader = dataloader
self._dataloader_label = dataloader_label
if dataloader is not None:
self.dataloader_len = dataloader_len # setting it to -1 will do a failsafe read of len(dataloader)
else:
self._dataloader_len = None
@property
def dataloader_len(self):
"""The number of batches per dataloader iteration (e.g. epoch), as used by the trainer.
.. note::
If not explicitly specified, this value is an approximation, as it depends on ``len(self.dataloader)``.
See the :doc:`PyTorch DataLoader Documentation <torch:data>` for more information.
Returns:
Optional[Time[int]]: The number of batches per dataloader iteration (e.g. epoch), or None if no dataloader
is defined or if the dataloader has an unknown length (e.g. streaming dataloaders).
"""
return self._dataloader_len
@dataloader_len.setter
def dataloader_len(self, num_batches: Union[int, Time[int]]):
if isinstance(num_batches, int):
num_batches = Time(num_batches, TimeUnit.BATCH)
if self._dataloader is None:
raise RuntimeError('`State.dataloader_len` cannot be set if the dataloader is not defined.')
try:
if isinstance(self._dataloader, collections.abc.Sized):
dataloader_len = len(self._dataloader)
else:
dataloader_len = None
except (TypeError, NotImplementedError):
dataloader_len = None
if dataloader_len is not None and num_batches >= 0 and int(num_batches) > dataloader_len:
warnings.warn((f'DataloaderNumBatchesWarning: The dataloader_len ({int(num_batches)}) '
f'is greater than the length (i.e. number of batches) of the dataloader, which is '
f'{dataloader_len}. State.dataloader_len is thus being set to {dataloader_len}.'))
self._dataloader_len = Time(dataloader_len, TimeUnit.BATCH)
return
if num_batches < 0:
if dataloader_len is not None:
# len(dataloader) is an approximation -- see https://pytorch.org/docs/stable/data.html.
# However, in the worst case where additional last batches are dropped, this calculation should be
# an over-estimate, leading to the entire dataloader still being iterated over.
self._dataloader_len = Time(dataloader_len, TimeUnit.BATCH)
else:
# The dataloader length is unknown.
self._dataloader_len = None
return
self._dataloader_len = num_batches
@property
def precision(self):
"""The numerical precision to use for training.
See :class:`~.Precision` for the supported precisions.
"""
return self._precision
@precision.setter
def precision(self, precision: Union[str, Precision]):
self._precision = Precision(precision)
@property
def is_model_ddp(self):
"""Whether :attr:`model` is an instance of a :class:`.DistributedDataParallel`."""
return isinstance(self.model, DistributedDataParallel)
@property
def deepspeed_model(self) -> deepspeed.DeepSpeedEngine:
"""Cast :attr:`model` to :class:`~deepspeed.DeepSpeedEngine`."""
if is_model_deepspeed(self.model):
return cast('deepspeed.DeepSpeedEngine', self.model)
raise TypeError('state.model is not a DeepSpeed model')
| composer-dev | composer/core/state.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""A collection of custom loss functions and loss function related utilities."""
from composer.loss.loss import DiceLoss, binary_cross_entropy_with_logits, loss_registry, soft_cross_entropy
__all__ = [
'DiceLoss',
'binary_cross_entropy_with_logits',
'loss_registry',
'soft_cross_entropy',
]
| composer-dev | composer/loss/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Custom loss functions."""
from __future__ import annotations
import warnings
from typing import Optional
import torch
from torch import Tensor
from torch.nn import functional as F
from torch.nn.modules.loss import _Loss
from composer.loss.utils import ensure_targets_one_hot, infer_target_type
__all__ = ['binary_cross_entropy_with_logits', 'loss_registry', 'soft_cross_entropy']
def binary_cross_entropy_with_logits(
input: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
reduction: str = 'sum',
pos_weight: Optional[Tensor] = None,
scale_by_batch_size: Optional[bool] = True,
) -> torch.Tensor:
r"""Replacement for :class:`~F.binary_cross_entropy_with_logits` that handles class indices or one-hot labels.
:class:`~torch.nn.functional.binary_cross_entropy_with_logits` with ``reduction =
    'mean'`` will typically result in very small loss values (on the order of 1e-3), which
    necessitates scaling the learning rate by 1e3 to allow the model to learn. This
    implementation avoids this by using ``reduction = 'sum'`` and scaling the loss inversely
proportionally to the batch size.
Args:
input (torch.Tensor) : :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)`
in case of 2D Loss, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K \geq 1`
in the case of K-dimensional loss. `input` is expected to contain unnormalized scores
(often referred to as logits).
target (torch.Tensor) : If containing class indices, shape :math:`(N)` where each value is
:math:`0 \leq \text{targets}[i] \leq C-1`, or :math:`(N, d_1, d_2, ..., d_K)` with
:math:`K \geq 1` in the case of K-dimensional loss. If containing class probabilities,
same shape as the input.
weight (torch.Tensor, optional): a manual rescaling weight given to each
class. If given, has to be a Tensor of size `C`. Default: ``None``.
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default:
``'sum'``
pos_weight (Tensor, optional): a weight of positive examples.
Must be a vector with length equal to the number of classes.
scale_by_batch_size (bool, optional): Whether to scale the loss by the batch size
(i.e. input.shape[0]). Default: ``True``.
"""
target = ensure_targets_one_hot(input, target)
bce = F.binary_cross_entropy_with_logits(input=input,
target=target,
weight=weight,
reduction=reduction,
pos_weight=pos_weight)
if scale_by_batch_size:
bce /= torch.tensor(input.shape[0])
return bce
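# Editor's note: a minimal, hedged usage sketch of the function above with class-index
# targets; the shapes and values below are illustrative only.
def _example_binary_cross_entropy_with_logits() -> torch.Tensor:
    logits = torch.randn(8, 5)           # 8 samples, 5 classes (unnormalized scores)
    targets = torch.randint(0, 5, (8,))  # class indices; converted to one-hot internally
    # With the defaults (reduction='sum', scale_by_batch_size=True), the summed loss is
    # divided by the batch size of 8.
    return binary_cross_entropy_with_logits(logits, targets)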
def soft_cross_entropy(input: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
ignore_index: int = -100,
reduction: str = 'mean'):
r"""Drop-in replacement for :class:`~.F.cross_entropy` that handles class indices or one-hot labels.
.. note::
This function will be obsolete with `this update <https://github.com/pytorch/pytorch/pull/61044>`_.
Args:
input (torch.Tensor) : :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)`
in case of 2D Loss, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K \geq 1`
in the case of K-dimensional loss. `input` is expected to contain unnormalized scores
(often referred to as logits).
target (torch.Tensor) : If containing class indices, shape :math:`(N)` where each value is
:math:`0 \leq \text{targets}[i] \leq C-1`, or :math:`(N, d_1, d_2, ..., d_K)` with
:math:`K \geq 1` in the case of K-dimensional loss. If containing class probabilities,
same shape as the input.
weight (torch.Tensor, optional): a manual rescaling weight given to each
class. If given, has to be a Tensor of size `C`. Default: ``None``.
ignore_index (int, optional): Specifies a target value that is ignored
and does not contribute to the input gradient. When ``size_average`` is
``True``, the loss is averaged over non-ignored targets. Note that
``ignore_index`` is only applicable when the target contains class indices.
Default: ``-100``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
"""
target_type = infer_target_type(input, target)
if target_type == 'indices':
return F.cross_entropy(input=input,
target=target,
weight=weight,
ignore_index=ignore_index,
reduction=reduction)
elif target_type == 'one_hot':
if reduction not in ['sum', 'mean', 'none']:
raise ValueError(f'{reduction} reduction not supported.')
if ignore_index != -100:
warnings.warn('ignore_index not supported when using dense labels. Ignoring targets with 0 probability.')
xentropy = -(target * F.log_softmax(input, dim=1))
if weight is not None:
# Ugly dimension shuffle to make multiplication work.
xentropy = torch.movedim(xentropy, 1, -1)
xentropy *= weight # PyTorch doesn't normalize weights
xentropy = torch.movedim(xentropy, -1, 1)
xentropy = xentropy.sum(dim=1)
num_examples = torch.numel(xentropy)
if reduction == 'sum':
xentropy = xentropy.sum()
elif reduction == 'mean':
xentropy = xentropy.mean()
# Re-weight loss to account for examples with less than 1 total probability (ignored examples)
total_prob = target.sum()
if total_prob <= 0:
raise ValueError('No targets have nonzero probability')
if total_prob < num_examples:
warnings.warn('Some targets have less than 1 total probability.')
xentropy *= num_examples / total_prob
return xentropy
else:
raise ValueError(f'Unrecognized target type {target_type}')
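# --- Illustrative usage sketch (editor-added): the same call works for hard class indices
# (dispatched to F.cross_entropy) and for one-hot/soft labels (dispatched to the dense
# path above). Shapes are arbitrary example values.
def _example_soft_cross_entropy():
    import torch
    import torch.nn.functional as F
    logits = torch.randn(4, 10)                     # (N, C)
    hard = torch.randint(0, 10, (4,))               # class indices
    soft = F.one_hot(hard, num_classes=10).float()  # equivalent one-hot labels
    return soft_cross_entropy(logits, hard), soft_cross_entropy(logits, soft)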
class DiceLoss(_Loss):
"""Criterion that computes the dice loss between input and target.
The implementation is derived from MONAI: `<https://docs.monai.io/en/stable/losses.html#diceloss>`_.
For more information about the dice loss see the original paper on dice loss:
`<https://arxiv.org/abs/1606.04797>`_.
Args:
sigmoid (bool): If true, apply a sigmoid function to the input. Default: ``False``
softmax (bool): If true, apply a softmax function to the input. Default: ``False``
squared_pred (bool): If true, square the inputs and targets when calculating the
class unions. Default: ``False``
jaccard (bool): If true, compute the jaccard index (soft IoU) instead of dice.
Default: ``False``
batch (bool): If true, sum the intersection and union areas over the batch
dimension before dividing the two quantities. If false, a dice loss value is
computed independently for each sample in the batch before the reduction.
ignore_absent_classes (bool): If true, remove classes that are not present in
the target from the loss calculation. Classes not present in the target do
not contribute to the gradient, but can decrease the weight of present classes,
slowing optimization. This should have no effect if all classes are present in
each sample. Default: ``False``
reduction (str): Specifies the reduction to apply to the output: ``'none'`` |
``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``:
the weighted mean of the output is taken, ``'sum'``: the output will be summed.
Default: ``'mean'``
"""
def __init__(self,
sigmoid: bool = False,
softmax: bool = False,
squared_pred: bool = False,
jaccard: bool = False,
batch: bool = False,
ignore_absent_classes: bool = False,
reduction: str = 'mean'):
super().__init__(reduction=reduction)
if sigmoid and softmax:
raise ValueError('Both sigmoid and softmax should not be true.')
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(f'reduction was {reduction}, but must be one of ["none", "mean", "sum"]')
self.sigmoid = sigmoid
self.softmax = softmax
self.squared_pred = squared_pred
self.jaccard = jaccard
self.reduction = reduction
self.batch = batch
self.ignore_absent_classes = ignore_absent_classes
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
# If target is not one-hot, convert to one-hot
target = ensure_targets_one_hot(input, target)
# Get mask of pixels with a target
target_mask = target.sum(dim=1, keepdim=True) != 0
if input.shape != target.shape:
raise AssertionError(f'ground truth has different shape ({target.shape}) from input ({input.shape})')
if self.sigmoid:
input = torch.sigmoid(input)
n_pred_ch = input.shape[1]
if self.softmax:
if n_pred_ch == 1:
warnings.warn('single channel prediction, `softmax=True` ignored.')
else:
input = torch.softmax(input, 1)
reduce_axis = torch.arange(2, len(input.shape)).tolist()
if self.batch:
# reducing spatial dimensions and batch
reduce_axis = [0] + reduce_axis
intersection = torch.sum(target * input, dim=reduce_axis)
if self.squared_pred:
target = torch.pow(target, 2)
input = torch.pow(input, 2)
# Zero out pixels which do not have a target
input = target_mask * input
ground_o = torch.sum(target, dim=reduce_axis)
pred_o = torch.sum(input, dim=reduce_axis)
union = ground_o + pred_o
if self.jaccard:
union = 2.0 * (union - intersection)
epsilon = 1e-5
ious = 1.0 - (2.0 * intersection + epsilon) / (union + epsilon)
if self.ignore_absent_classes:
if self.batch:
ious = ious[ground_o > 0]
else:
ious = ious[:, (ground_o.sum(dim=0) > 0)]
if self.reduction == 'mean':
iou = torch.mean(ious) # the batch and channel average
elif self.reduction == 'sum':
iou = torch.sum(ious) # sum over the batch and channel dims
elif self.reduction == 'none':
# If we are not computing voxelwise loss components at least
# make sure a none reduction maintains a broadcastable shape
iou = ious
else:
raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')
return iou
loss_registry = {
'binary_cross_entropy_with_logits': binary_cross_entropy_with_logits,
'soft_cross_entropy': soft_cross_entropy
}
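# --- Illustrative usage sketch (editor-added): a segmentation-style DiceLoss call with
# index targets, plus a lookup from the registry above. Shapes and the class count are
# arbitrary example values.
def _example_dice_loss():
    import torch
    criterion = DiceLoss(softmax=True)           # softmax over the class dimension
    logits = torch.randn(2, 3, 16, 16)           # (N, C, H, W) unnormalized scores
    target = torch.randint(0, 3, (2, 16, 16))    # (N, H, W) class indices
    dice = criterion(logits, target)
    # Functional losses can also be fetched by name from the registry.
    bce_fn = loss_registry['binary_cross_entropy_with_logits']
    return dice, bce_fn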
| composer-dev | composer/loss/loss.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Loss-related utilities."""
from __future__ import annotations
import warnings
from typing import Optional
import torch
__all__ = ['infer_target_type', 'ensure_targets_one_hot', 'check_for_index_targets']
def infer_target_type(input: torch.Tensor, targets: torch.Tensor) -> str:
"""Infers whether the target is in indices format or one_hot format.
Example indices format: ``[1, 4, 7]``. Example one_hot format: ``[[0, 1, 0], [1, 0, 0], ...]``.
"""
if input.shape == targets.shape:
return 'one_hot'
elif input.ndim == targets.ndim + 1:
return 'indices'
else:
raise RuntimeError(f'Unable to infer indices or one_hot. Targets has shape {targets.shape}'
f' and the input to cross entropy has shape {input.shape}. For one_hot, '
'expect targets.shape == inputs.shape. For indices, expect '
'inputs.ndim == targets.ndim + 1')
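# --- Illustrative sketch (editor-added): the two target layouts this helper distinguishes.
def _example_infer_target_type():
    import torch
    logits = torch.randn(4, 3)
    assert infer_target_type(logits, torch.tensor([0, 2, 1, 0])) == 'indices'  # (N,) labels
    assert infer_target_type(logits, torch.zeros(4, 3)) == 'one_hot'           # same shape as input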
def ensure_targets_one_hot(input: torch.Tensor,
targets: torch.Tensor,
num_classes: Optional[int] = None) -> torch.Tensor:
r"""Ensures that the targets are in a one-hot format rather than an index format.
Args:
input (torch.Tensor): :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)`
in case of 2D Loss, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K \geq 1`
in the case of K-dimensional loss. `input` is expected to contain unnormalized scores
(often referred to as logits).
targets (torch.Tensor) : If containing class indices, shape :math:`(N)` where each value is
:math:`0 \leq \text{targets}[i] \leq C-1`, or :math:`(N, d_1, d_2, ..., d_K)` with
:math:`K \geq 1` in the case of K-dimensional loss. If containing class probabilities,
same shape as the input.
num_classes (int, optional): Number of classes. If not specified, this will be inferred
from input. Default: ``None``
"""
if infer_target_type(input, targets) == 'indices':
# If the number of classes isn't specified, attempt to infer it from the input
if num_classes is None:
num_classes = input.shape[1]
# Convert to one-hot tensor
targets = _one_hot(targets, num_classes=num_classes, dim=1)
return targets.float()
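# --- Illustrative sketch (editor-added): index labels become float one-hot labels, with
# the number of classes inferred from the input's second dimension.
def _example_ensure_targets_one_hot():
    import torch
    logits = torch.randn(4, 3)
    one_hot = ensure_targets_one_hot(logits, torch.tensor([0, 2, 1, 0]))
    assert one_hot.shape == (4, 3) and one_hot.dtype == torch.float32
    return one_hot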
def check_for_index_targets(targets: torch.Tensor) -> bool:
"""Checks if a given set of targets are indices by looking at the type."""
index_dtypes = [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]
return targets.dtype in index_dtypes
def _one_hot(tensor: torch.Tensor, num_classes: int = -1, dim: int = -1) -> torch.Tensor:
"""Converts a tensor of index class labels to a tensor of one-hot class labels.
Implementation is based on MONAI one-hot conversion function:
`<https://github.com/Project-MONAI/MONAI/blob/b390b0956334325edc0e5000afb58e2be7cbe550/monai/networks/utils.py#L49>`_.
Args:
tensor (torch.Tensor): Tensor containing index class labels.
num_classes (int): Size of the class dimension for the output one-hot tensor. If set to -1,
the number of classes will be inferred to be one greater than the largest value in ``tensor``.
dim (int): Location of the new class dimension of size ``num_classes``.
Returns:
torch.Tensor: One-hot class labels, i.e., the same shape as ``tensor`` except with an
extra dimension of size ``num_classes`` inserted at ``dim``.
"""
if not check_for_index_targets(tensor):
raise ValueError(f'tensor must be integer type, current type: {tensor.dtype}')
max_index = tensor.max() + 1
if num_classes == -1:
num_classes = int(max_index)
if num_classes < max_index:
raise ValueError(f'num_classes must be greater than or equal to tensor.max() + 1: {num_classes} < {max_index}')
# Remove negative indices
neg_indices = tensor.min() < 0
if neg_indices:
warnings.warn('Negative label indices are being ignored in conversion to one-hot labels')
tensor = tensor.clone().long()
tensor[tensor < 0] = num_classes
num_classes += 1 # Add extra class for negative indices
# Assume class dimension is inserted after the first dimension
tensor = tensor.unsqueeze(dim)
tensor_shape = list(tensor.shape)
tensor_shape[dim] = num_classes
# Convert to one-hot
one_hot_tensor = torch.zeros(size=tensor_shape, dtype=tensor.dtype, device=tensor.device)
one_hot_tensor.scatter_(dim=dim, index=tensor, value=1)
# Remove negative indices
if neg_indices:
one_hot_tensor = one_hot_tensor[:, 0:-1]
return one_hot_tensor
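# --- Illustrative sketch (editor-added): negative indices are ignored during conversion
# (their one-hot rows are all zeros) and a warning is emitted, as handled above.
def _example_one_hot_negative_indices():
    import torch
    labels = torch.tensor([0, 2, -1])
    one_hot = _one_hot(labels, num_classes=3, dim=1)
    assert one_hot[2].sum() == 0  # the negative-index entry carries no class
    return one_hot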
| composer-dev | composer/loss/utils.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Estimate total time of training."""
from __future__ import annotations
import time
import warnings
from typing import Dict, List, Optional
from composer.core import Callback, State, TimeUnit
from composer.loggers import Logger
__all__ = ['RuntimeEstimator']
class RuntimeEstimator(Callback):
"""Estimates total training time.
The training time is computed by taking the time elapsed for the current duration and multiplying
out to the full extended length of the training run.
This callback provides a best-effort estimate. The estimate may be inaccurate if throughput
changes during training or if other significant changes are made to the model or dataloader.
Example:
.. doctest::
>>> from composer import Trainer
>>> from composer.callbacks import RuntimeEstimator
>>> # constructing trainer object with this callback
>>> trainer = Trainer(
... model=model,
... train_dataloader=train_dataloader,
... eval_dataloader=eval_dataloader,
... optimizers=optimizer,
... max_duration='1ep',
... callbacks=[RuntimeEstimator()],
... )
The runtime estimate is logged by the :class:`.Logger` to the following key as described below.
+-----------------------------------+---------------------------------------------------------+
| Key | Logged data |
+===================================+=========================================================+
| `wall_clock/remaining_estimate` | Estimated time to completion |
+-----------------------------------+---------------------------------------------------------+
Args:
skip_batches (int, optional): Number of batches to skip before starting clock to estimate
remaining time. Typically, the first few batches are slower due to dataloader initialization, cache
warming, and other startup overhead. Defaults to 1.
time_unit (str, optional): Time unit to use for `wall_clock` logging. Can be one of
'seconds', 'minutes', 'hours', or 'days'. Defaults to 'hours'.
"""
def __init__(self, skip_batches: int = 1, time_unit: str = 'hours') -> None:
self._enabled = True
self.batches_left_to_skip = skip_batches
self.start_time = None
self.start_dur = None
self.divider = 1
if time_unit == 'seconds':
self.divider = 1
elif time_unit == 'minutes':
self.divider = 60
elif time_unit == 'hours':
self.divider = 60 * 60
elif time_unit == 'days':
self.divider = 60 * 60 * 24
else:
raise ValueError(
f'Invalid time_unit: {time_unit}. Must be one of "seconds", "minutes", "hours", or "days".')
# Keep track of time spent evaluating
self.total_eval_wct = 0.0
self.eval_wct_per_label: Dict[str, List[float]] = {}
# How often eval is called as fraction of total training time
self.eval_frequency_per_label: Dict[str, float] = {}
self.last_elapsed_fraction: float = 0.0
def _get_elapsed_duration(self, state: State) -> Optional[float]:
"""Get the elapsed duration.
Unlike `state.get_elapsed_duration`, this method computes fractional progress within an epoch
(provided at least one epoch has passed) by tracking how many batches were in each epoch.
"""
if state.max_duration is None:
return None
if state.max_duration.unit == TimeUnit('ep'):
if state.timestamp.epoch.value >= 1:
batches_per_epoch = (state.timestamp.batch -
state.timestamp.batch_in_epoch).value / state.timestamp.epoch.value
return state.timestamp.get('ba').value / (state.max_duration.value * batches_per_epoch)
elif state.dataloader_len is not None:
return state.timestamp.get('ba').value / (state.max_duration.value * state.dataloader_len.value)
elapsed_dur = state.get_elapsed_duration()
if elapsed_dur is not None:
return elapsed_dur.value
return None
def batch_start(self, state: State, logger: Logger) -> None:
if self._enabled and self.start_time is None and self.batches_left_to_skip == 0:
self.start_time = time.time()
self.start_dur = self._get_elapsed_duration(state)
if self.start_dur is None:
warnings.warn('`max_duration` is not set. Cannot estimate remaining time.')
self._enabled = False
def batch_end(self, state: State, logger: Logger) -> None:
if not self._enabled:
return
if self.batches_left_to_skip > 0:
self.batches_left_to_skip -= 1
return
elapsed_dur = self._get_elapsed_duration(state)
assert elapsed_dur is not None, 'max_duration checked as non-None on batch_start if enabled'
assert self.start_dur is not None
assert self.start_time is not None
if elapsed_dur > self.start_dur:
elapsed_time = time.time() - self.start_time
elapsed_time -= self.total_eval_wct # Subtract time spent evaluating
rate = elapsed_time / (elapsed_dur - self.start_dur)
remaining_time = rate * (1 - elapsed_dur)
# Add remaining time from each evaluator using known frequencies. We explicitly compute
# frequency instead of using time interpolation to avoid saw tooth pattern in estimates
for dataloader_label, eval_wcts in self.eval_wct_per_label.items():
# Discard first eval_wct if possible as it is often slower due to dataset downloading
eval_wct_avg = None
num_evals_finished = len(eval_wcts)
if num_evals_finished > 1:
eval_wct_avg = sum(eval_wcts[1:]) / (num_evals_finished - 1)
else:
eval_wct_avg = sum(eval_wcts) / num_evals_finished
eval_rate = self.eval_frequency_per_label[dataloader_label]
num_total_evals = 1 / eval_rate
remaining_calls = num_total_evals - num_evals_finished
remaining_time += eval_wct_avg * remaining_calls
logger.log_metrics({'wall_clock/remaining_estimate': remaining_time / self.divider})
def eval_end(self, state: State, logger: Logger) -> None:
# If eval is called before training starts, ignore it
if not self._enabled or self.start_time is None:
return
self.total_eval_wct += state.eval_timestamp.total_wct.total_seconds()
# state.dataloader_label should always be non-None unless user explicitly sets evaluator
# label to None, ignoring type hints
assert state.dataloader_label is not None, 'evaluator label must not be None'
if state.dataloader_label not in self.eval_wct_per_label:
self.eval_wct_per_label[state.dataloader_label] = []
self.eval_wct_per_label[state.dataloader_label].append(state.eval_timestamp.total_wct.total_seconds())
elapsed_dur = self._get_elapsed_duration(state)
assert elapsed_dur is not None, 'max_duration checked as non-None on batch_start if enabled'
assert self.start_dur is not None, 'start_dur is set on batch_start if enabled'
elapsed_fraction = elapsed_dur - self.start_dur
num_evals_finished = len(self.eval_wct_per_label[state.dataloader_label])
self.eval_frequency_per_label[state.dataloader_label] = elapsed_fraction / num_evals_finished
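# --- Illustrative sketch (editor-added): the core extrapolation used in batch_end above,
# reproduced standalone with made-up numbers (10 minutes of training to reach 25% of the
# run implies roughly 30 minutes remaining, before accounting for evaluation time).
def _example_runtime_extrapolation():
    elapsed_seconds = 600.0   # wall-clock training time since the clock started
    start_fraction = 0.0      # fraction of the run completed when the clock started
    current_fraction = 0.25   # fraction of the run completed now
    rate = elapsed_seconds / (current_fraction - start_fraction)
    return rate * (1 - current_fraction)  # 1800.0 seconds remaining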
| composer-dev | composer/callbacks/runtime_estimator.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Create a submission for MLPerf Training benchmark."""
import json
import logging
import os
import platform
import subprocess
import sys
import warnings
from typing import Any, Dict, Iterable, Optional
import torch
from torch.utils.data import DataLoader, IterableDataset
import composer
from composer.core import Callback, State
from composer.loggers import Logger
from composer.utils import dist
try:
import cpuinfo
import psutil
from mlperf_logging import mllog
from mlperf_logging.mllog import constants
mlperf_available = True
except ImportError:
mlperf_available = False
# this callback only supports the following options:
BENCHMARKS = ('resnet', 'bert')
DIVISIONS = ('open',)
STATUS = ('onprem', 'cloud', 'preview')
__all__ = ['MLPerfCallback', 'get_system_description']
def _global_rank_zero() -> bool:
return dist.get_global_rank() == 0
def _local_rank_zero() -> bool:
return dist.get_local_rank() == 0
def _require_mlperf_logging():
if not mlperf_available:
raise ImportError("""Please install with `pip install 'mosaicml[mlperf]'` and also
install the logging library from: https://github.com/mlcommons/logging""")
class MLPerfCallback(Callback):
"""Create compliant results file for MLPerf Training benchmark.
A submission folder structure will be created with the ``root_folder``
as the base and the following directories::
root_folder/
results/
[system_name]/
[benchmark]/
results_0.txt
results_1.txt
...
systems/
[system_name].json
A required systems description will be automatically generated,
with a best effort made to populate the fields, but it should be manually
checked prior to submission.
Currently, only open division submissions are supported with this Callback.
Example:
.. code-block:: python
from composer.callbacks import MLPerfCallback
callback = MLPerfCallback(
root_folder='/submission',
index=0,
metric_name='MulticlassAccuracy',
metric_label='eval',
target='0.759',
)
During training, the metric found in ``state.eval_metrics[metric_label][metric_name]``
will be compared against the target criterion.
.. note::
This is currently an experimental logger that has not been used (yet)
to submit an actual result to MLPerf. Please use with caution.
.. note::
MLPerf submissions require clearing the system cache prior to any training run.
By default, this callback does not clear the cache, as that is a system specific
operation. To enable cache clearing, and thus pass the mlperf compliance checker,
provide a ``cache_clear_cmd`` that will be executed with ``os.system``.
Args:
root_folder (str): The root submission folder
index (int): The repetition index of this run. The filename created will be
``result_[index].txt``.
benchmark (str, optional): Benchmark name. Currently ``'resnet'`` and ``'bert'`` are supported.
Default: ``'resnet'``.
target (float, optional): The target metric before the mllogger marks the stop
of the timing run. Default: ``0.759`` (resnet benchmark).
division (str, optional): Division of submission. Currently only ``open`` division supported.
Default: ``'open'``.
metric_name (str, optional): name of the metric to compare against the target.
Default: ``MulticlassAccuracy``.
metric_label (str, optional): The label name. The metric will be accessed via
``state.eval_metrics[metric_label][metric_name]``.
submitter (str, optional): Submitting organization. Default: ``"MosaicML"``.
system_name (str, optional): Name of the system (e.g. 8xA100_composer). If
not provided, system name will default to ``[world_size]x[device_name]_composer``,
e.g. ``8xNVIDIA_A100_80GB_composer``.
status (str, optional): Submission status. One of (onprem, cloud, or preview).
Default: ``"onprem"``.
cache_clear_cmd (str, optional): Command to invoke during the cache clear. This callback
will call ``os.system(cache_clear_cmd)``. Default is disabled (None)
host_processors_per_node (int, optional): Total number of host processors per node. Default: ``None``.
exit_at_target (bool, optional): Whether to exit training when target metric is met. Default: ``False``.
"""
def __init__(
self,
root_folder: str,
index: int,
benchmark: str = 'resnet',
target: float = 0.759,
division: str = 'open',
metric_name: str = 'MulticlassAccuracy',
metric_label: str = 'eval',
submitter: str = 'MosaicML',
system_name: Optional[str] = None,
status: str = 'onprem',
cache_clear_cmd: Optional[str] = None,
host_processors_per_node: Optional[int] = None,
exit_at_target: bool = False,
) -> None:
_require_mlperf_logging()
if benchmark not in BENCHMARKS:
raise ValueError(f'benchmark: {benchmark} must be one of {BENCHMARKS}')
if division not in DIVISIONS:
raise ValueError(f'division: {division} must be one of {DIVISIONS}')
if status not in STATUS:
raise ValueError(f'status: {status} must be one of {STATUS}')
self.mllogger = mllog.get_mllogger()
self.target = target
self.benchmark = benchmark
self.division = division
self.submitter = submitter
self.status = status
self.cache_clear_cmd = cache_clear_cmd
self.root_folder = root_folder
self.metric_name = metric_name
self.metric_label = metric_label
self.exit_at_target = exit_at_target
self._file_handler = None
self.system_desc = get_system_description(submitter, division, status, system_name, host_processors_per_node)
if system_name is None:
system_name = self.system_desc['system_name']
self.system_name = system_name
# file paths to save the systems file, results file
self.systems_path = os.path.join(root_folder, 'systems', f'{system_name}.json')
self.filename = os.path.join(root_folder, 'results', system_name, benchmark, f'result_{index}.txt')
# upload names for object store logging
self.upload_name = '{run_name}' + f'/results/{system_name}/{benchmark}/result_{index}.txt'
self.system_desc_upload_name = '{run_name}' + f'/systems/{system_name}.json'
self.success = False
def init(self, state: State, logger: Logger) -> None:
# setup here requires access to rank, which is only available after
# the trainer is initialized
if _local_rank_zero():
self._create_submission_folders(self.root_folder, self.system_name, self.benchmark)
with open(self.systems_path, 'w') as f:
json.dump(self.system_desc, f, indent=4)
if os.path.exists(self.filename):
raise FileExistsError(f'{self.filename} already exists.')
dist.barrier()
self._file_handler = logging.FileHandler(self.filename)
self._file_handler.setLevel(logging.INFO)
self.mllogger.logger.addHandler(self._file_handler)
if self.cache_clear_cmd is not None:
if _local_rank_zero():
subprocess.run(self.cache_clear_cmd.split(), check=True, text=True)
self.mllogger.start(key=mllog.constants.CACHE_CLEAR)
else:
warnings.warn('cache_clear_cmd was not provided. For a valid submission, please provide the command.')
dist.barrier()
if _local_rank_zero():
self.mllogger.start(key=mllog.constants.INIT_START)
if _global_rank_zero():
self._log_dict({
constants.SUBMISSION_BENCHMARK: self.benchmark,
constants.SUBMISSION_DIVISION: self.division,
constants.SUBMISSION_ORG: self.submitter,
constants.SUBMISSION_PLATFORM: self.system_name,
constants.SUBMISSION_STATUS: self.status,
})
# optionally, upload the system description file
logger.upload_file(self.system_desc_upload_name, self.systems_path)
def _create_submission_folders(self, root_folder: str, system_name: str, benchmark: str):
os.makedirs(root_folder, exist_ok=True)
results_folder = os.path.join(root_folder, 'results')
log_folder = os.path.join(root_folder, 'results', system_name)
benchmark_folder = os.path.join(log_folder, benchmark)
systems_folder = os.path.join(root_folder, 'systems')
os.makedirs(results_folder, exist_ok=True)
os.makedirs(log_folder, exist_ok=True)
os.makedirs(benchmark_folder, exist_ok=True)
os.makedirs(systems_folder, exist_ok=True)
def _log_dict(self, data: Dict[str, Any]):
for key, value in data.items():
self.mllogger.event(key=key, value=value)
def _get_accuracy(self, state: State) -> float:
if self.metric_name not in state.eval_metrics[self.metric_label]:
raise ValueError(f'{self.metric_name} must be a validation metric.')
metric = state.eval_metrics[self.metric_label][self.metric_name].compute()
return float(metric)
def _get_time(self, state: State) -> int:
"""Different benchmarks log different units of time."""
benchmark_time = {
'resnet': state.timestamp.epoch.value,
'bert': state.timestamp.sample.value,
}
return benchmark_time[self.benchmark]
def _get_dataloader_stats(self, dataloader: Iterable):
"""Returns a tuple of ``(batch_size, num_samples)``."""
if isinstance(dataloader, DataLoader):
num_samples = len(dataloader.dataset) # type: ignore
if isinstance(dataloader.dataset, IterableDataset):
num_samples *= dist.get_world_size()
return (dataloader.batch_size, num_samples)
try:
# attempt to import ffcv and test if it's an ffcv loader.
import ffcv # type: ignore
if isinstance(dataloader, ffcv.loader.Loader):
# Use the length of the traversal order to compute the number of samples
return (
dataloader.batch_size, # type: ignore
len(dataloader.next_traversal_order()) * dist.get_world_size() # type: ignore
)
except ImportError:
pass
raise TypeError('torch dataloader or ffcv dataloader required (and ffcv installed)')
def fit_start(self, state: State, logger: Logger) -> None:
if _global_rank_zero():
if len(state.evaluators) > 1:
raise ValueError('Only one evaluator is supported for the MLPerfCallback.')
if state.train_dataloader is None:
raise ValueError('Train dataloader needs to be provided')
batch_size, num_samples = self._get_dataloader_stats(state.train_dataloader)
_, eval_num_samples = self._get_dataloader_stats(state.evaluators[0].dataloader.dataloader)
if batch_size is None:
raise ValueError('Batch size is required to be set for dataloader.')
self._log_dict({
constants.SEED: state.seed,
constants.GLOBAL_BATCH_SIZE: batch_size * dist.get_world_size(),
constants.DEVICE_TRAIN_MICROBATCH_SIZE: state.device_train_microbatch_size,
constants.TRAIN_SAMPLES: num_samples,
constants.EVAL_SAMPLES: eval_num_samples,
})
if _local_rank_zero():
self.mllogger.event(key=constants.INIT_STOP)
dist.barrier()
if _global_rank_zero():
self.mllogger.event(key=constants.RUN_START)
def epoch_start(self, state: State, logger: Logger) -> None:
if _global_rank_zero():
self.mllogger.event(key=constants.EPOCH_START, metadata={'epoch_num': self._get_time(state)})
self.mllogger.event(key=constants.BLOCK_START,
metadata={
'first_epoch_num': self._get_time(state),
'epoch_count': 1
})
def epoch_end(self, state: State, logger: Logger) -> None:
if _global_rank_zero():
self.mllogger.event(key=constants.EPOCH_STOP, metadata={'epoch_num': self._get_time(state)})
logger.upload_file(remote_file_name=self.upload_name, file_path=self.filename)
def eval_start(self, state: State, logger: Logger) -> None:
if _global_rank_zero():
self.mllogger.event(key=constants.EVAL_START, metadata={'epoch_num': self._get_time(state)})
def eval_end(self, state: State, logger: Logger) -> None:
accuracy = self._get_accuracy(state)
if _global_rank_zero():
self.mllogger.event(key=constants.EVAL_STOP, metadata={'epoch_num': self._get_time(state)})
self.mllogger.event(key=constants.EVAL_ACCURACY,
value=accuracy,
metadata={'epoch_num': self._get_time(state)})
self.mllogger.event(key=constants.BLOCK_STOP, metadata={'first_epoch_num': self._get_time(state)})
if accuracy > self.target and not self.success:
self.mllogger.event(key=constants.RUN_STOP, metadata={'status': 'success'})
self.mllogger.logger.removeHandler(self._file_handler)
self.success = True # only log once
# upload to object store after eval complete
logger.upload_file(remote_file_name=self.upload_name, file_path=self.filename)
if accuracy > self.target and self.exit_at_target:
# stop training
state.stop_training()
def close(self, state: State, logger: Logger) -> None:
if self._file_handler is not None:
self._file_handler.close()
def get_system_description(
submitter: str,
division: str,
status: str,
system_name: Optional[str] = None,
host_processors_per_node: Optional[int] = None,
) -> Dict[str, str]:
"""Generates a valid system description.
Makes a best effort to auto-populate some of the fields, but should
be manually checked prior to submission. The system name is
auto-generated as ``"[world_size]x[device_name]_composer"``, e.g.
``"8xNVIDIA_A100_80GB_composer"``.
Args:
submitter (str): Name of the submitting organization.
division (str): Submission division (open, closed).
status (str): System status (cloud, onprem, preview).
system_name (str, optional): System name. Default: ``None``.
host_processors_per_node (int, optional): Total number of host processors per node. Default: ``None``.
Returns:
Dict[str, str]: System description as a dictionary.
"""
is_cuda = torch.cuda.is_available()
cpu_info = cpuinfo.get_cpu_info()
system_desc = {
'submitter': submitter,
'division': division,
'status': status,
'number_of_nodes': dist.get_world_size() / dist.get_local_world_size(),
'host_processors_per_node': str(host_processors_per_node) if host_processors_per_node else '',
'host_processor_model_name': str(cpu_info.get('brand_raw', 'CPU')),
'host_processor_core_count': str(psutil.cpu_count(logical=False)),
'host_processor_vcpu_count': '',
'host_processor_frequency': '',
'host_processor_caches': '',
'host_processor_interconnect': '',
'host_memory_capacity': '',
'host_storage_type': '',
'host_storage_capacity': '',
'host_networking': '',
'host_networking_topology': '',
'host_memory_configuration': '',
'accelerators_per_node': str(dist.get_local_world_size()) if is_cuda else '0',
'accelerator_model_name': str(torch.cuda.get_device_name(None)) if is_cuda else '',
'accelerator_host_interconnect': '',
'accelerator_frequency': '',
'accelerator_on-chip_memories': '',
'accelerator_memory_configuration': '',
'accelerator_memory_capacity': '',
'accelerator_interconnect': '',
'accelerator_interconnect_topology': '',
'cooling': '',
'hw_notes': '',
'framework':
f'PyTorch v{torch.__version__} and MosaicML composer v{composer.__version__}', # type: ignore (third-party missing stub)
'other_software_stack': {
'cuda_version': torch.version.cuda if is_cuda else '', # type: ignore (third-party missing stub)
'composer_version': composer.__version__,
'python_version': sys.version,
},
'operating_system': f'{platform.system()} {platform.release()}',
'sw_notes': '',
}
if system_desc['number_of_nodes'] != 1:
warnings.warn('Number of nodes > 1 not tested, proceed with caution.')
if system_name is None:
world_size = dist.get_world_size()
if is_cuda:
device_name = system_desc['accelerator_model_name']
else:
device_name = system_desc['host_processor_model_name']
device_name = device_name.replace(' ', '_')
system_name = f'{world_size}x{device_name}_composer'
# default to system name as "[world_size]x[device_name]"
# e.g. 8xNVIDIA_A100_80GB
system_desc['system_name'] = system_name
return system_desc
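# --- Illustrative usage sketch (editor-added): requires the optional mlperf dependencies
# (cpuinfo, psutil, mlperf_logging) to be installed. The generated fields are best-effort
# and should be reviewed by hand before an actual submission.
def _example_system_description():
    desc = get_system_description(submitter='MosaicML', division='open', status='onprem')
    return desc['system_name'], desc['framework']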
| composer-dev | composer/callbacks/mlperf.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Monitor throughput during training."""
from __future__ import annotations
import warnings
from collections import deque
from typing import Any, Callable, Deque, Dict, Optional, Union
import torch
from composer.core import Callback, State
from composer.loggers import Logger
from composer.models.base import ComposerModel
from composer.utils import dist
__all__ = ['SpeedMonitor']
GPU_AVAILABLE_FLOPS = {
# source: https://resources.nvidia.com/en-us-tensor-core/nvidia-tensor-core-gpu-datasheet
# nvidia publishes spec sheet with a 2x sparsity factor
'h100-sxm': {
'fp64': 67e12,
'fp32': 67e12,
'tf32': 989e12 / 2,
'fp16': 1.979e15 / 2,
'amp_fp16': 1.979e15 / 2,
'bf16': 1.979e15 / 2,
'amp_bf16': 1.979e15 / 2,
'fp8': 3.958e15 / 2,
'amp_fp8': 3.958e15 / 2,
'int8': 3.958e15 / 2,
},
'h100-pcie': {
'fp64': 51e12,
'fp32': 51e12,
'tf32': 756e12 / 2,
'fp16': 1.513e15 / 2,
'amp_fp16': 1.513e15 / 2,
'bf16': 1.513e15 / 2,
'amp_bf16': 1.513e15 / 2,
'fp8': 3.026e15 / 2,
'amp_fp8': 3.026e15 / 2,
'int8': 3.026e15 / 2,
},
# source: https://www.nvidia.com/content/dam/en-zz/Solutions/Data-Center/a100/pdf/nvidia-a100-datasheet-us-nvidia-1758950-r4-web.pdf
# sxm and pcie have same flop counts
'a100': {
'fp64': 19.5e12,
'fp32': 19.5e12,
'tf32': 156e12,
'fp16': 312e12,
'amp_fp16': 312e12,
'bf16': 312e12,
'amp_bf16': 312e12,
},
# source: https://images.nvidia.com/content/technologies/volta/pdf/volta-v100-datasheet-update-us-1165301-r5.pdf
'v100-sxm': {
'fp64': 7.8e12,
'fp32': 15.7e12,
'fp16': 125e12,
'amp_fp16': 125e12,
},
'v100-pcie': {
'fp64': 7e12,
'fp32': 14e12,
'fp16': 112e12,
'amp_fp16': 112e12,
},
'v100s-pcie': {
'fp64': 8.2e12,
'fp32': 16.4e12,
'fp16': 130e12,
'amp_fp16': 130e12,
},
# source: https://www.nvidia.com/content/dam/en-zz/Solutions/Data-Center/tesla-t4/t4-tensor-core-datasheet-951643.pdf
# sxm and pcie have same flop counts
't4': {
'fp32': 8.1e12,
'fp16': 65e12,
'amp_fp16': 65e12,
'int8': 130e12,
'int4': 260e12,
},
}
def get_gpu_flops_available(state: State):
gpu_flops_available = None
# Return 0 if no CUDA device (e.g., when running with CPU only)
if not torch.cuda.is_available():
return 0
# torch.cuda.get_device_name() ex output: 'NVIDIA A100-SXM4-40GB'
device_name = torch.cuda.get_device_name().lower()
if 'h100-sxm' in device_name:
device_name = 'h100-sxm'
elif 'h100-pcie' in device_name:
device_name = 'h100-pcie'
elif 'a100' in device_name:
device_name = 'a100'
elif 'v100-sxm' in device_name:
device_name = 'v100-sxm'
elif 'v100-pcie' in device_name:
device_name = 'v100-pcie'
elif 't4' in device_name:
device_name = 't4'
else:
device_name = None
if device_name is not None:
try:
gpu_flops_available = int(GPU_AVAILABLE_FLOPS[device_name][state.precision.value])
except Exception:
gpu_flops_available = None
if gpu_flops_available is None:
warnings.warn(
f'gpu_flop count not found for {device_name} with precision: {state.precision.value}; ' +\
f'MFU cannot be calculated and reported. gpu_flops_available can be manually ' +\
f'overridden by setting gpu_flops_available in SpeedMonitor.'
)
# Setting to 0 will disable MFU computation and prevent
# the speed monitor from running this helper every batch
gpu_flops_available = 0
return gpu_flops_available
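# --- Illustrative sketch (editor-added): how a peak-flops entry from the table above turns
# an achieved flops/sec number into MFU. The achieved value is a made-up example.
def _example_mfu_from_table():
    peak = GPU_AVAILABLE_FLOPS['a100']['amp_bf16']  # 312e12 flops/sec per device
    achieved_per_device = 156e12                    # hypothetical measured flops/sec per device
    return achieved_per_device / peak               # 0.5 -> 50% utilization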
class SpeedMonitor(Callback):
"""Logs the training throughput and utilization.
The training throughput is logged on the :attr:`.Event.BATCH_END` event once we have reached
the `window_size` threshold. If a model has a `flops_per_batch` attribute, then flops per second
is also logged. If running on a known GPU type or if `gpu_flops_available` is set, then MFU is
also logged. All metrics are also logged per device by dividing by the world size.
To compute `flops_per_sec`, the model attribute `flops_per_batch` should be set to a callable
which accepts a batch and returns the number of flops for that batch. Typically, this should
be flops per sample times the batch size unless pad tokens are used.
The wall clock time is logged on every :attr:`.Event.BATCH_END` event.
Example:
.. doctest::
>>> from composer import Trainer
>>> from composer.callbacks import SpeedMonitor
>>> # constructing trainer object with this callback
>>> trainer = Trainer(
... model=model,
... train_dataloader=train_dataloader,
... eval_dataloader=eval_dataloader,
... optimizers=optimizer,
... max_duration='1ep',
... callbacks=[SpeedMonitor(window_size=100)],
... )
The training throughput is logged by the :class:`.Logger` to the following keys as
described below.
+-------------------------------------+-----------------------------------------------------------+
| Key | Logged data |
+=====================================+===========================================================+
| | Rolling average (over `window_size` most recent |
| `throughput/batches_per_sec` | batches) of the number of batches processed per second |
| | |
+-------------------------------------+-----------------------------------------------------------+
| | Rolling average (over `window_size` most recent |
| `throughput/samples_per_sec` | batches) of the number of samples processed per second |
| | |
+-------------------------------------+-----------------------------------------------------------+
| | Rolling average (over `window_size` most recent |
| `throughput/tokens_per_sec` | batches) of the number of tokens processed per second. |
| | Only logged when dataloader.dataset has `max_seq_len`. |
| | This may include padding depending on dataset |
+-------------------------------------+-----------------------------------------------------------+
| | Estimates flops by `flops_per_batch * batches_per_sec` |
| `throughput/flops_per_sec` | if model has attribute `flops_per_batch` |
| | |
+-------------------------------------+-----------------------------------------------------------+
| `throughput/device/batches_per_sec` | `throughput/batches_per_sec` divided by world size |
+-------------------------------------+-----------------------------------------------------------+
| `throughput/device/samples_per_sec` | `throughput/samples_per_sec` divided by world size |
+-------------------------------------+-----------------------------------------------------------+
| | `throughput/tokens_per_sec` divided by world size. Only |
| `throughput/device/tokens_per_sec` | logged when dataloader.dataset has `max_seq_len`. This |
| | may include pad tokens depending on dataset |
+-------------------------------------+-----------------------------------------------------------+
| | `throughput/flops_per_sec` divided by world size. Only |
| `throughput/device/flops_per_sec` | logged when model has attribute `flops_per_batch` |
| | |
+-------------------------------------+-----------------------------------------------------------+
| | `throughput/device/flops_per_sec` divided by world size. |
| `throughput/device/mfu` | Only logged when model has attribute `flops_per_batch` |
| | and `gpu_flops_available`, which can be passed as an |
| | argument if not automatically determined by SpeedMonitor |
+-------------------------------------+-----------------------------------------------------------+
| `wall_clock/train` | Total elapsed training time |
+-------------------------------------+-----------------------------------------------------------+
| `wall_clock/val` | Total elapsed validation time |
+-------------------------------------+-----------------------------------------------------------+
| `wall_clock/total` | Total elapsed time (wall_clock/train + wall_clock/val) |
+-------------------------------------+-----------------------------------------------------------+
Args:
window_size (int, optional): Number of batches to use for a rolling average of throughput.
Defaults to 100.
gpu_flops_available (float, optional): Number of flops available on the GPU. If not set,
SpeedMonitor will attempt to determine this automatically. Defaults to None.
time_unit (str, optional): Time unit to use for `wall_clock` logging. Can be one of
'seconds', 'minutes', 'hours', or 'days'. Defaults to 'hours'.
"""
def __init__(
self,
window_size: int = 100,
gpu_flops_available: Optional[Union[float, int]] = None,
time_unit: str = 'hours',
):
# Track the batch num samples and wct to compute throughput over a window of batches
self.history_samples: Deque[int] = deque(maxlen=window_size + 1)
self.history_wct: Deque[float] = deque(maxlen=window_size + 1)
self.history_flops: Deque[float] = deque(maxlen=window_size + 1)
self.gpu_flops_available = gpu_flops_available
self.divider = 1
if time_unit == 'seconds':
self.divider = 1
elif time_unit == 'minutes':
self.divider = 60
elif time_unit == 'hours':
self.divider = 60 * 60
elif time_unit == 'days':
self.divider = 60 * 60 * 24
else:
raise ValueError(
f'Invalid time_unit: {time_unit}. Must be one of "seconds", "minutes", "hours", or "days".')
# Keep track of time spent evaluating
self.total_eval_wct = 0.0
def state_dict(self) -> Dict[str, Any]:
return {
'total_eval_wct': self.total_eval_wct,
}
def load_state_dict(self, state: Dict[str, Any]) -> None:
self.total_eval_wct = state['total_eval_wct']
def init(self, state: State, logger: Logger) -> None:
del logger # unused
if self.gpu_flops_available is None:
self.gpu_flops_available = get_gpu_flops_available(state)
def batch_end(self, state: State, logger: Logger):
# Add the new element
self.history_samples.append(state.timestamp.sample.value)
self.history_wct.append(state.timestamp.total_wct.total_seconds())
# Log the throughput
if len(self.history_wct) == self.history_wct.maxlen:
world_size = dist.get_world_size()
elapsed_batches = len(self.history_samples) - 1
elapsed_samples = int(self.history_samples[-1]) - int(self.history_samples[0])
elapsed_wct = self.history_wct[-1] - self.history_wct[0]
batches_per_sec = elapsed_batches / elapsed_wct
samples_per_sec = elapsed_samples / elapsed_wct
dev_batches_per_sec = batches_per_sec / world_size
dev_samples_per_sec = samples_per_sec / world_size
logger.log_metrics({'throughput/batches_per_sec': batches_per_sec})
logger.log_metrics({'throughput/samples_per_sec': samples_per_sec})
logger.log_metrics({'throughput/device/batches_per_sec': dev_batches_per_sec})
logger.log_metrics({'throughput/device/samples_per_sec': dev_samples_per_sec})
# Compute token stats if dataloader.dataset has max_seq_len. Assumes no padding.
try:
max_seq_len = state.dataloader.dataset.max_seq_len # type: ignore
# Only applicable to seq data / models
logger.log_metrics({'throughput/tokens_per_sec': samples_per_sec * max_seq_len})
logger.log_metrics({'throughput/device/tokens_per_sec': dev_samples_per_sec * max_seq_len})
except AttributeError:
pass
# Compute flops stats if model has flops_per_batch
composer_model = state.model
if not isinstance(composer_model, ComposerModel):
composer_model = composer_model.module # Pass through DDP wrapping
if hasattr(composer_model, 'flops_per_batch'):
model_flops_per_batch = composer_model.flops_per_batch # type: ignore
if not isinstance(model_flops_per_batch, Callable):
raise TypeError('flops_per_batch must be a callable accepting a batch and '
f'returning an int or float. Instead, got {type(model_flops_per_batch)}.')
device_flops_per_batch = model_flops_per_batch(state.batch)
# Sum flops across all ranks since each rank computes the flops for its own batch
flops_per_batch_tensor = state.device.tensor_to_device(
torch.tensor(device_flops_per_batch, dtype=torch.float))
dist.all_reduce(flops_per_batch_tensor, reduce_operation='SUM')
flops_per_batch = flops_per_batch_tensor.item()
self.history_flops.append(flops_per_batch)
# Log the flops throughput
if len(self.history_flops) == self.history_flops.maxlen:
world_size = dist.get_world_size()
elapsed_flops = sum(self.history_flops) - self.history_flops[0]
elapsed_wct = self.history_wct[-1] - self.history_wct[0]
flops_per_sec = elapsed_flops / elapsed_wct
device_flops_per_sec = flops_per_sec / world_size
logger.log_metrics({'throughput/flops_per_sec': flops_per_sec})
logger.log_metrics({'throughput/device/flops_per_sec': device_flops_per_sec})
if self.gpu_flops_available:
mfu = device_flops_per_sec / self.gpu_flops_available
logger.log_metrics({'throughput/device/mfu': mfu})
# Log the time
# `state.timestamp` excludes any time spent in evaluation
train_wct = state.timestamp.total_wct.total_seconds()
logger.log_metrics({
'wall_clock/train': train_wct / self.divider,
'wall_clock/val': self.total_eval_wct / self.divider,
'wall_clock/total': (train_wct + self.total_eval_wct) / self.divider,
})
def eval_end(self, state: State, logger: Logger):
del logger # unused
self.total_eval_wct += state.eval_timestamp.total_wct.total_seconds()
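# --- Illustrative sketch (editor-added): SpeedMonitor only reports flops/sec and MFU when
# the model exposes a ``flops_per_batch`` callable. The ``6 * params * tokens`` estimate
# below is a common transformer approximation, and the dict-style batch with an
# ``input_ids`` key is an assumption; both are used purely as an example.
def _example_attach_flops_per_batch(model, n_params: int, max_seq_len: int):
    def flops_per_batch(batch):
        batch_size = batch['input_ids'].shape[0]
        return 6 * n_params * batch_size * max_seq_len
    model.flops_per_batch = flops_per_batch
    return model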
| composer-dev | composer/callbacks/speed_monitor.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Threshold stopping callback."""
from typing import Any, Callable, Optional, Union
import torch
from composer.core import Callback, State
from composer.loggers import Logger
class ThresholdStopper(Callback):
"""Halt training when a metric value reaches a certain threshold.
Example:
.. doctest::
>>> from composer import Evaluator, Trainer
>>> from composer.callbacks.threshold_stopper import ThresholdStopper
>>> # constructing trainer object with this callback
>>> threshold_stopper = ThresholdStopper('MulticlassAccuracy', 'my_evaluator', 0.7)
>>> evaluator = Evaluator(
... dataloader = eval_dataloader,
... label = 'my_evaluator',
... metric_names = ['MulticlassAccuracy']
... )
>>> trainer = Trainer(
... model=model,
... train_dataloader=train_dataloader,
... eval_dataloader=evaluator,
... optimizers=optimizer,
... max_duration="1ep",
... callbacks=[threshold_stopper],
... )
Args:
monitor (str): The name of the metric to monitor.
dataloader_label (str): The label of the dataloader or evaluator associated with the tracked metric. If
monitor is in an Evaluator, the dataloader_label field should be set to the Evaluator's label. If
monitor is a training metric or an ordinary evaluation metric not in an Evaluator, dataloader_label
should be set to 'train' or 'eval' respectively. If dataloader_label is set to 'train', then the
callback will stop training in the middle of the epoch.
threshold (float): The threshold that dictates when to halt training. Whether training stops if the metric
exceeds or falls below the threshold depends on the comparison operator.
comp (Callable[[Any, Any], Any], optional): A comparison operator to measure change of the monitored metric. The comparison
operator will be called ``comp(current_value, prev_best)``. For metrics where the optimal value is low
(error, loss, perplexity), use a less than operator and for metrics like accuracy where the optimal value
is higher, use a greater than operator. Defaults to :func:`torch.less` if loss, error, or perplexity are substrings
of the monitored metric, otherwise defaults to :func:`torch.greater`
stop_on_batch (bool, optional): A bool that indicates whether to stop training in the middle of an epoch if
the training metrics satisfy the threshold comparison. Defaults to False.
"""
def __init__(self,
monitor: str,
dataloader_label: str,
threshold: float,
*,
comp: Optional[Union[str, Callable[[
Any,
Any,
], Any]]] = None,
stop_on_batch: bool = False):
self.monitor = monitor
self.threshold = threshold
self.dataloader_label = dataloader_label
self.stop_on_batch = stop_on_batch
if callable(comp):
self.comp_func = comp
if isinstance(comp, str):
if comp.lower() in ('greater', 'gt'):
self.comp_func = torch.greater
elif comp.lower() in ('less', 'lt'):
self.comp_func = torch.less
else:
raise ValueError(
"Unrecognized comp string. Use the strings 'gt', 'greater', 'lt' or 'less' or a callable comparison operator"
)
if comp is None:
if any(substr in monitor.lower() for substr in ['loss', 'error', 'perplexity']):
self.comp_func = torch.less
else:
self.comp_func = torch.greater
def _get_monitored_metric(self, state: State):
if self.dataloader_label == 'train':
if self.monitor in state.train_metrics:
return state.train_metrics[self.monitor].compute()
else:
if self.monitor in state.eval_metrics[self.dataloader_label]:
return state.eval_metrics[self.dataloader_label][self.monitor].compute()
raise ValueError(f"Couldn't find the metric {self.monitor} with the dataloader label {self.dataloader_label}."
"Check that the dataloader_label is set to 'eval', 'train' or the evaluator name.")
def _compare_metric_and_update_state(self, state: State):
metric_val = self._get_monitored_metric(state)
if not torch.is_tensor(metric_val):
metric_val = torch.tensor(metric_val)
if self.comp_func(metric_val, self.threshold):
state.stop_training()
def eval_end(self, state: State, logger: Logger) -> None:
if self.dataloader_label == state.dataloader_label:
# if the monitored metric is an eval metric or in an evaluator
self._compare_metric_and_update_state(state)
def epoch_end(self, state: State, logger: Logger) -> None:
if self.dataloader_label == state.dataloader_label:
# if the monitored metric is not an eval metric, the right logic is run on EPOCH_END
self._compare_metric_and_update_state(state)
def batch_end(self, state: State, logger: Logger) -> None:
if self.stop_on_batch and self.dataloader_label == state.dataloader_label:
self._compare_metric_and_update_state(state)
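# --- Illustrative sketch (editor-added): how the default comparison direction is chosen
# and applied, mirroring the logic in __init__ above with made-up metric values.
def _example_threshold_comparison():
    import torch
    monitor, threshold = 'CrossEntropyLoss', 0.5
    comp = torch.less if any(s in monitor.lower() for s in ('loss', 'error', 'perplexity')) else torch.greater
    return bool(comp(torch.tensor(0.4), threshold))  # True -> training would be stopped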
| composer-dev | composer/callbacks/threshold_stopper.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Callbacks that run at each training loop :class:`.Event`.
Each callback inherits from the :class:`.Callback` base class. See detailed description and
examples for writing your own callbacks at the :class:`.Callback` base class.
"""
from composer.callbacks.checkpoint_saver import CheckpointSaver
from composer.callbacks.early_stopper import EarlyStopper
from composer.callbacks.export_for_inference import ExportForInferenceCallback
from composer.callbacks.health_checker import HealthChecker
from composer.callbacks.image_visualizer import ImageVisualizer
from composer.callbacks.lr_monitor import LRMonitor
from composer.callbacks.memory_monitor import MemoryMonitor
from composer.callbacks.mlperf import MLPerfCallback
from composer.callbacks.optimizer_monitor import OptimizerMonitor
from composer.callbacks.runtime_estimator import RuntimeEstimator
from composer.callbacks.speed_monitor import SpeedMonitor
from composer.callbacks.threshold_stopper import ThresholdStopper
__all__ = [
'OptimizerMonitor',
'LRMonitor',
'MemoryMonitor',
'SpeedMonitor',
'CheckpointSaver',
'MLPerfCallback',
'EarlyStopper',
'ExportForInferenceCallback',
'ThresholdStopper',
'ImageVisualizer',
'HealthChecker',
'RuntimeEstimator',
]
| composer-dev | composer/callbacks/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Monitor learning rate during training."""
from composer.core import Callback, State
from composer.loggers import Logger
__all__ = ['LRMonitor']
class LRMonitor(Callback):
"""Logs the learning rate.
This callback iterates over all optimizers and their parameter groups to log
learning rate under the ``lr-{OPTIMIZER_NAME}/group{GROUP_NUMBER}`` key.
Example:
.. doctest::
>>> from composer import Trainer
>>> from composer.callbacks import LRMonitor
>>> # constructing trainer object with this callback
>>> trainer = Trainer(
... model=model,
... train_dataloader=train_dataloader,
... eval_dataloader=eval_dataloader,
... optimizers=optimizer,
... max_duration="1ep",
... callbacks=[LRMonitor()],
... )
The learning rate is logged by the :class:`.Logger` to the following key as described
below.
+---------------------------------------------+---------------------------------------+
| Key | Logged data |
+=============================================+=======================================+
| | Learning rate for each optimizer and |
| ``lr-{OPTIMIZER_NAME}/group{GROUP_NUMBER}`` | parameter group for that optimizer is |
| | logged to a separate key. |
+---------------------------------------------+---------------------------------------+
"""
def __init__(self) -> None:
pass
def batch_end(self, state: State, logger: Logger):
assert state.optimizers is not None, 'optimizers must be defined'
for optimizer in state.optimizers:
lrs = [group['lr'] for group in optimizer.param_groups]
name = optimizer.__class__.__name__
for idx, lr in enumerate(lrs):
logger.log_metrics({f'lr-{name}/group{idx}': lr})
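# --- Illustrative sketch (editor-added): the metric keys this callback produces for one
# SGD optimizer with two parameter groups (the optimizer and values are example data).
def _example_lr_keys():
    import torch
    params = [torch.nn.Parameter(torch.zeros(1)), torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.SGD([{'params': [params[0]]}, {'params': [params[1]], 'lr': 0.01}], lr=0.1)
    name = optimizer.__class__.__name__
    # -> {'lr-SGD/group0': 0.1, 'lr-SGD/group1': 0.01}
    return {f'lr-{name}/group{idx}': group['lr'] for idx, group in enumerate(optimizer.param_groups)}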
| composer-dev | composer/callbacks/lr_monitor.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Callback to save checkpoints during training."""
from __future__ import annotations
import logging
import os
import pathlib
import tempfile
import textwrap
from typing import Callable, List, Optional, Union
from composer.core import Callback, Event, State, Time, TimeUnit
from composer.loggers import Logger
from composer.utils import (FORMAT_NAME_WITH_DIST_AND_TIME_TABLE, FORMAT_NAME_WITH_DIST_TABLE, PartialFilePath,
checkpoint, create_symlink_file, dist, ensure_folder_has_no_conflicting_files,
format_name_with_dist, is_model_deepspeed, reproducibility)
log = logging.getLogger(__name__)
__all__ = ['CheckpointSaver', 'checkpoint_periodically']
def checkpoint_periodically(interval: Union[str, int, Time]) -> Callable[[State, Event], bool]:
r"""Helper function to create a checkpoint scheduler according to a specified interval.
Args:
interval (Union[str, int, :class:`.Time`]): The interval describing how often checkpoints should be
saved. If an integer, it will be assumed to be in :attr:`.TimeUnit.EPOCH`\s.
Otherwise, the unit must be either :attr:`.TimeUnit.EPOCH` or :attr:`.TimeUnit.BATCH`.
Checkpoints will be saved every ``n`` batches or epochs (depending on the unit),
and at the end of training.
Returns:
Callable[[State, Event], bool]: A function that can be passed as the ``save_interval``
argument into the :class:`.CheckpointSaver`.
"""
if isinstance(interval, str):
interval = Time.from_timestring(interval)
if isinstance(interval, int):
interval = Time(interval, TimeUnit.EPOCH)
if interval.unit == TimeUnit.EPOCH:
save_event = Event.EPOCH_CHECKPOINT
elif interval.unit == TimeUnit.BATCH:
save_event = Event.BATCH_CHECKPOINT
else:
raise NotImplementedError(
f'Unknown checkpointing interval: {interval.unit}. Must be TimeUnit.EPOCH or TimeUnit.BATCH.')
def save_interval(state: State, event: Event):
elapsed_duration = state.get_elapsed_duration()
assert elapsed_duration is not None, 'elapsed_duration is set on the BATCH_CHECKPOINT and EPOCH_CHECKPOINT events'
# Always checkpoint at end of training
if elapsed_duration >= 1.0:
return True
if save_event == Event.EPOCH_CHECKPOINT:
count = state.timestamp.epoch
elif save_event == Event.BATCH_CHECKPOINT:
count = state.timestamp.batch
else:
raise RuntimeError(f'Invalid save_event: {save_event}')
if event == save_event and int(count) % int(interval) == 0:
return True
return False
return save_interval
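# --- Illustrative usage sketch (editor-added): an interval string becomes a
# ``(State, Event) -> bool`` predicate that can be passed as ``save_interval`` to the
# CheckpointSaver below.
def _example_checkpoint_interval():
    save_interval = checkpoint_periodically('2ep')  # also accepts ints (epochs) or Time objects
    return save_interval  # call as save_interval(state, event) on checkpoint events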
class CheckpointSaver(Callback): # noqa: D101
__doc__ = f"""Callback to save checkpoints.
.. note::
If the ``folder`` argument is specified when constructing the :class:`.Trainer`, then the :class:`.CheckpointSaver`
callback need not be constructed manually. However, for advanced checkpointing use cases
(such as saving a weights-only checkpoint at one interval and the full training state
at another interval), instance(s) of this :class:`.CheckpointSaver` callback can be specified in the
``callbacks`` argument of the :class:`.Trainer`, as shown in the example below.
Example
.. testsetup::
from composer.callbacks.checkpoint_saver import CheckpointSaver
.. doctest::
>>> trainer = Trainer(..., callbacks=[
... CheckpointSaver(
... folder='{{run_name}}/checkpoints',
... filename="ep{{epoch}}-ba{{batch}}-rank{{rank}}",
... latest_filename="latest-rank{{rank}}",
... save_interval="1ep",
... weights_only=False,
... )
... ])
Args:
folder (str, optional): Format string for the save_folder where checkpoints will be saved.
Default: ``'{{run_name}}/checkpoints'``.
The following format variables are available:
{textwrap.indent(FORMAT_NAME_WITH_DIST_TABLE, prefix=' ')}
.. note::
When training with multiple devices (i.e. GPUs), ensure that ``'{{rank}}'`` appears in the format.
Otherwise, multiple processes may attempt to write to the same file.
filename (str, optional): A format string describing how to name checkpoints.
Default: ``'ep{{epoch}}-ba{{batch}}-rank{{rank}}.pt'``.
Checkpoints will be saved approximately to ``{{folder}}/{{filename.format(...)}}``.
The following format variables are available:
{textwrap.indent(FORMAT_NAME_WITH_DIST_AND_TIME_TABLE, prefix=' ')}
.. note::
* By default, only the rank zero process will save a checkpoint file.
* When using DeepSpeed, each rank will save a checkpoint file in tarball format. DeepSpeed
requires tarball format, as it saves model and optimizer states in separate files.
Ensure that ``'{{rank}}'`` appears within the ``filename``. Otherwise, multiple ranks
may attempt to write to the same file(s), leading to corrupted checkpoints. If no tarball file
extension is specified, ``'.tar'`` will be used.
* To use compression (regardless of whether DeepSpeed is enabled), set the file extension
to ``'.tar.gz'``, ``'.tgz'``, ``'.tar.bzip'``, or ``'.tar.lzma'`` (depending on the desired
compression algorithm).
.. warning::
Using compression will block the training loop while checkpoints are being compressed. As such, we
recommend saving checkpoints without compression.
Consider the following scenario where:
* The :attr:`~.State.run_name` is ``'awesome-training-run'``
* The default ``folder='{{run_name}}/checkpoints'`` is used.
* The default ``name='ep{{epoch}}-ba{{batch}}-rank{{rank}}'`` is used.
* The current epoch count is ``1``.
* The current batch count is ``42``.
When DeepSpeed is not being used, the rank zero process will save the checkpoint to
``"awesome-training-run/checkpoints/ep1-ba42-rank0"``.
When DeepSpeed is being used, each rank (process) will save checkpoints to::
awesome-training-run/checkpoints/ep1-ba42-rank0.tar
awesome-training-run/checkpoints/ep1-ba42-rank1.tar
awesome-training-run/checkpoints/ep1-ba42-rank2.tar
...
remote_file_name (str, optional): Format string for the checkpoint's remote file name.
Default: ``"{{run_name}}/checkpoints/ep{{epoch}}-ba{{batch}}-rank{{rank}}"``.
After the checkpoint is saved, it will be periodically uploaded.
The remote file name will be determined by this format string.
.. seealso:: :doc:`Uploading Files</trainer/file_uploading>` for notes for file uploading.
The same format variables for ``filename`` are available.
Leading slashes (``'/'``) will be stripped.
To disable uploading checkpoints, set this parameter to ``None``.
latest_filename (str, optional): A format string for a symlink which points to the last saved checkpoint.
Default: ``'latest-rank{{rank}}.pt'``.
Symlinks will be created approximately at ``{{folder}}/{{latest_filename.format(...)}}``.
            The same format variables as for ``filename`` are available.
To disable symlinks, set this parameter to ``None``.
Consider the following scenario, where:
* The :attr:`~.State.run_name` is 'awesome-training-run'
* The default ``folder='{{run_name}}/checkpoints'`` is used.
* The default ``name='ep{{epoch}}-ba{{batch}}-rank{{rank}}'`` is used.
* The default ``latest_filename='latest-rank{{rank}}'`` is used.
* The current epoch count is ``1``.
* The current batch count is ``42``.
When DeepSpeed is not being used, the rank zero process will save the checkpoint to
``'awesome-training-run/checkpoints/ep1-ba42-rank0'``,
and a symlink will be created at
``'awesome-training-run/checkpoints/latest-rank0' -> 'awesome-training-run/checkpoints/ep1-ba42-rank0'``
When DeepSpeed is being used, each rank (process) will save checkpoints to::
awesome-training-run/checkpoints/ep1-ba42-rank0.tar
awesome-training-run/checkpoints/ep1-ba42-rank1.tar
awesome-training-run/checkpoints/ep1-ba42-rank2.tar
...
Corresponding symlinks will be created at::
awesome-training-run/checkpoints/latest-rank0.tar -> awesome-training-run/checkpoints/ep1-ba42-rank0.tar
awesome-training-run/checkpoints/latest-rank1.tar -> awesome-training-run/checkpoints/ep1-ba42-rank1.tar
awesome-training-run/checkpoints/latest-rank2.tar -> awesome-training-run/checkpoints/ep1-ba42-rank2.tar
...
latest_remote_file_name (str, optional): Format string for the checkpoint's latest symlink remote file name.
            Default: ``'{{run_name}}/checkpoints/latest-rank{{rank}}.pt'``.
Whenever a new checkpoint is saved, a symlink is created or updated to point to the latest checkpoint's ``remote_file_name``.
The remote file name will be determined by this format string. This parameter has no effect if ``latest_filename`` or ``remote_file_name`` is ``None``.
.. seealso:: :doc:`Uploading Files</trainer/file_uploading>` for notes for file uploading.
The same format variables for ``filename`` are available.
Leading slashes (``'/'``) will be stripped.
To disable symlinks in logger, set this parameter to ``None``.
overwrite (bool, optional): Whether existing checkpoints should be overridden.
If ``False`` (the default), then the ``folder`` must not exist or must not contain checkpoints which may conflict
with the current run. Default: ``False``.
save_interval (Time | str | int | (State, Event) -> bool): A :class:`.Time`, time-string, integer (in epochs),
or a function that takes (state, event) and returns a boolean whether a checkpoint should be saved.
If an integer, checkpoints will be saved every n epochs.
If :class:`.Time` or a time-string, checkpoints will be saved according to this interval.
.. seealso:: :func:`.checkpoint_periodically`
If a function, then this function should take two arguments (:class:`.State`, :class:`.Event`).
The first argument will be the current state of the trainer, and the second argument will be
            :attr:`.Event.BATCH_CHECKPOINT` or :attr:`.Event.EPOCH_CHECKPOINT` (depending on the current training
progress). It should return ``True`` if a checkpoint should be saved given the current state and
event.
weights_only (bool): If ``True``, save only the model weights instead of the entire training state.
This parameter must be ``False`` when using DeepSpeed. Default: ``False``.
num_checkpoints_to_keep (int, optional): The number of checkpoints to keep locally. The oldest checkpoints
are removed first. Set to ``-1`` to keep all checkpoints locally. Default: ``-1``.
Checkpoints will be removed after they have been uploaded. For example, when this callback
is used in conjunction with the :class:`.RemoteUploaderDownloader`, set this
parameter to ``0`` to immediately delete checkpoints from the local disk after they have been uploaded to
the object store.
This parameter only controls how many checkpoints are kept locally; checkpoints are not deleted from
remote file systems.
Attributes:
        saved_checkpoints (List[str]): The filepaths of saved checkpoints, ordered from oldest to newest.
            This list will have at most ``num_checkpoints_to_keep`` entries. The latest checkpoint
            will be at the end.
            .. note::
                When not using DeepSpeed, only the rank zero process saves checkpoints, so this list
                contains only the files written by rank zero. When using DeepSpeed, each rank saves its own
                checkpoint file, and each filepath is valid only on that rank's node.
"""
def __init__(
self,
folder: Union[str, pathlib.Path] = '{run_name}/checkpoints',
filename: Union[str, pathlib.Path] = 'ep{epoch}-ba{batch}-rank{rank}.pt',
remote_file_name: Optional[Union[str,
pathlib.Path]] = '{run_name}/checkpoints/ep{epoch}-ba{batch}-rank{rank}.pt',
latest_filename: Optional[Union[str, pathlib.Path]] = 'latest-rank{rank}.pt',
latest_remote_file_name: Optional[Union[str, pathlib.Path]] = '{run_name}/checkpoints/latest-rank{rank}.pt',
save_interval: Union[Time, str, int, Callable[[State, Event], bool]] = '1ep',
*,
overwrite: bool = False,
num_checkpoints_to_keep: int = -1,
weights_only: bool = False,
):
folder = str(folder)
filename = str(filename)
remote_file_name = str(remote_file_name) if remote_file_name is not None else None
latest_filename = str(latest_filename) if latest_filename is not None else None
latest_remote_file_name = str(latest_remote_file_name) if latest_remote_file_name is not None else None
if not callable(save_interval):
save_interval = checkpoint_periodically(save_interval)
self.save_interval = save_interval
self.last_checkpoint_batch: Optional[Time] = None
self.folder = folder
self.filename = PartialFilePath(filename.lstrip('/'), folder)
self.latest_filename = PartialFilePath(latest_filename.lstrip('/'), folder) if latest_filename else None
self.remote_file_name = PartialFilePath(remote_file_name) if remote_file_name else None
self.latest_remote_file_name = PartialFilePath(latest_remote_file_name) if latest_remote_file_name else None
self.overwrite = overwrite
self.saved_checkpoints: List[str] = []
self.num_checkpoints_to_keep = num_checkpoints_to_keep
self.weights_only = weights_only
def init(self, state: State, logger: Logger) -> None:
folder = format_name_with_dist(self.folder, state.run_name)
os.makedirs(folder, exist_ok=True)
def fit_start(self, state: State, logger: Logger) -> None:
if not self.overwrite:
# checks that save_folder contains no files with a timestamp after the current timestamp,
# which has potential for future conflicts.
folder = format_name_with_dist(self.folder, state.run_name)
ensure_folder_has_no_conflicting_files(folder, self.filename.filename, state.timestamp)
dist.barrier() # holds all ranks until folder check is done
if is_model_deepspeed(state.model) and self.weights_only:
raise NotImplementedError('weights_only=True is not supported when using DeepSpeed.')
def batch_checkpoint(self, state: State, logger: Logger):
if self.save_interval(state, Event.BATCH_CHECKPOINT) and self.last_checkpoint_batch != state.timestamp.batch:
self._save_checkpoint(
state,
logger,
)
def epoch_checkpoint(self, state: State, logger: Logger):
if self.save_interval(state, Event.EPOCH_CHECKPOINT) and self.last_checkpoint_batch != state.timestamp.batch:
self._save_checkpoint(
state,
logger,
)
def get_state_dict(self, state):
return {
'state': state.state_dict(),
'rng': reproducibility.get_rng_state(),
}
def _save_checkpoint(self, state: State, logger: Logger):
self.last_checkpoint_batch = state.timestamp.batch
is_deepspeed = is_model_deepspeed(state.model)
if is_deepspeed and '{rank}' not in self.filename.filename:
raise ValueError(f'Save filename {self.filename.filename} must have {{rank}} for deepspeed.')
# save the checkpoint to the filename
filename = self.filename.format(state, is_deepspeed)
saved_path = checkpoint.save_checkpoint(
state=state,
filename=filename,
weights_only=self.weights_only,
)
if not saved_path: # not all ranks save
return
if self.latest_filename is not None:
symlink = self.latest_filename.format(state, is_deepspeed)
os.makedirs(os.path.dirname(symlink), exist_ok=True)
try:
os.remove(symlink)
except FileNotFoundError:
pass
os.symlink(os.path.relpath(filename, os.path.dirname(symlink)), symlink)
# if remote file name provided, upload the checkpoint
if self.remote_file_name is not None:
remote_file_name = self.remote_file_name.format(
state,
is_deepspeed,
).lstrip('/')
logger.upload_file(remote_file_name=remote_file_name, file_path=filename, overwrite=self.overwrite)
if self.latest_remote_file_name is not None:
symlink_name = self.latest_remote_file_name.format(
state,
is_deepspeed,
).lstrip('/') + '.symlink'
# create and upload a symlink file
with tempfile.TemporaryDirectory() as tmpdir:
symlink_filename = os.path.join(tmpdir, 'latest.symlink')
create_symlink_file(remote_file_name, symlink_filename)
logger.upload_file(
remote_file_name=symlink_name,
file_path=symlink_filename,
overwrite=True,
)
self.saved_checkpoints.append(filename)
if self.num_checkpoints_to_keep >= 0:
self._rotate_checkpoints()
def _rotate_checkpoints(self):
while len(self.saved_checkpoints) > self.num_checkpoints_to_keep:
checkpoint = self.saved_checkpoints.pop(0)
os.remove(checkpoint)
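# Hypothetical sketch of the "advanced" use case described in the class docstring: full training state
# once per epoch plus weights-only checkpoints via a custom (State, Event) -> bool callable. The helper
# names, filenames, and the 500-batch cadence are illustrative assumptions, not defaults.
def _example_weights_only_interval(state: State, event: Event) -> bool:
    return event == Event.BATCH_CHECKPOINT and int(state.timestamp.batch) % 500 == 0
def _example_checkpoint_savers():
    return [
        CheckpointSaver(filename='ep{epoch}-rank{rank}.pt', save_interval='1ep'),
        CheckpointSaver(filename='ba{batch}-rank{rank}-weights.pt',
                        latest_filename=None,  # avoid clashing with the full-state saver's symlink
                        save_interval=_example_weights_only_interval,
                        weights_only=True),
    ]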
| composer-dev | composer/callbacks/checkpoint_saver.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Callback to export model for inference."""
from __future__ import annotations
import logging
from copy import deepcopy
from typing import Any, Optional, Sequence, Union
import torch.nn as nn
from composer.core import Callback, State
from composer.loggers import Logger
from composer.utils import ExportFormat, ObjectStore, Transform, export_with_logger
log = logging.getLogger(__name__)
__all__ = ['ExportForInferenceCallback']
class ExportForInferenceCallback(Callback):
"""Callback to export model for inference.
Example:
.. doctest::
>>> from composer import Trainer
>>> from composer.callbacks import ExportForInferenceCallback
>>> # constructing trainer object with this callback
>>> trainer = Trainer(
... model=model,
... train_dataloader=train_dataloader,
... eval_dataloader=eval_dataloader,
... optimizers=optimizer,
... max_duration="1ep",
... callbacks=[ExportForInferenceCallback(save_format='torchscript',save_path='/tmp/model.pth')],
... )
Args:
save_format (Union[str, ExportFormat]): Format to export to. Either ``"torchscript"`` or ``"onnx"``.
save_path (str): The path for storing the exported model. It can be a path to a file on the local disk,
a URL, or if ``save_object_store`` is set, the object name
in a cloud bucket. For example, ``my_run/exported_model``.
save_object_store (ObjectStore, optional): If the ``save_path`` is in an object name in a cloud bucket
(i.e. AWS S3 or Google Cloud Storage), an instance of
:class:`~.ObjectStore` which will be used
to store the exported model. If this is set to ``None``, will save to ``save_path`` using the logger.
(default: ``None``)
        sample_input (Any, optional): Example model inputs used for tracing. This is needed for ``"onnx"`` export.
transforms (Sequence[Transform], optional): transformations (usually optimizations) that should
be applied to the model. Each Transform should be a callable that takes a model and returns a modified model.
"""
def __init__(
self,
save_format: Union[str, ExportFormat],
save_path: str,
save_object_store: Optional[ObjectStore] = None,
sample_input: Optional[Any] = None,
transforms: Optional[Sequence[Transform]] = None,
):
self.save_format = save_format
self.save_path = save_path
self.save_object_store = save_object_store
self.sample_input = sample_input
self.transforms = transforms
def after_dataloader(self, state: State, logger: Logger) -> None:
del logger
if self.sample_input is None and self.save_format == 'onnx':
self.sample_input = deepcopy(state.batch)
def fit_end(self, state: State, logger: Logger):
self.export_model(state, logger)
def export_model(self, state: State, logger: Logger):
export_model = state.model.module if state.is_model_ddp else state.model
if not isinstance(export_model, nn.Module):
raise ValueError(f'Exporting Model requires type torch.nn.Module, got {type(export_model)}')
export_with_logger(model=export_model,
save_format=self.save_format,
save_path=self.save_path,
logger=logger,
save_object_store=self.save_object_store,
sample_input=(self.sample_input, {}),
transforms=self.transforms)
| composer-dev | composer/callbacks/export_for_inference.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Monitor gradients during training."""
import torch
from composer.core import Callback, State
from composer.loggers import Logger
from composer.utils import dist
__all__ = ['OptimizerMonitor']
class OptimizerMonitor(Callback):
"""Computes and logs the L2 norm of gradients as well as any optimizer-specific metrics implemented in the optimizer's `report_per_parameter_metrics` method.
L2 norms are calculated after the reduction of gradients across GPUs. This function iterates over the parameters of
the model and may cause a reduction in throughput while training large models. In order to ensure the
correctness of the norm, this function should be called after gradient unscaling in cases where gradients are scaled.
Example:
.. doctest::
>>> from composer import Trainer
>>> from composer.callbacks import OptimizerMonitor
>>> # constructing trainer object with this callback
>>> trainer = Trainer(
... model=model,
... train_dataloader=train_dataloader,
... eval_dataloader=eval_dataloader,
... optimizers=optimizer,
... max_duration="1ep",
... callbacks=[OptimizerMonitor()],
... )
    The metrics are logged by the :class:`.Logger` to the keys described below. ``l2_norm/grad/global`` and
    ``l2_norm/grad/LAYER_NAME`` are logged in addition to any metrics produced by the optimizer's
    `report_per_parameter_metrics` method. For convenience, the metrics logged by DecoupledAdamW are listed below.
+-----------------------------------------------+-----------------------------------------------------+
| Key | Logged data |
+===============================================+=====================================================+
| | L2 norm of the gradients of all parameters in |
| ``l2_norm/grad/global`` | the model on the :attr:`.Event.AFTER_TRAIN_BATCH` |
| | event. |
+-----------------------------------------------+-----------------------------------------------------+
| | Layer-wise L2 norms |
| ``l2_norm/grad/LAYER_NAME`` | |
| | |
+-----------------------------------------------+-----------------------------------------------------+
| | Layer-wise L2 norms of Adam first moment after |
| ``l2_norm/moment/LAYER_NAME`` | calling optimizer step. |
| | |
+-----------------------------------------------+-----------------------------------------------------+
| | Layer-wise ratio of the gradient norm to the |
| ``l2_norm_ratio/moment_grad/LAYER_NAME`` | moment norm after calling optimizer step. |
| | |
+-----------------------------------------------+-----------------------------------------------------+
| | Layer-wise cosine angle between gradient and moment |
| ``cosine/moment_grad/LAYER_NAME`` | after calling optimizer step. |
| | |
+-----------------------------------------------+-----------------------------------------------------+
| | Layer-wise L2 norms of parameter weights |
| ``l2_norm/param/LAYER_NAME`` | |
| | |
+-----------------------------------------------+-----------------------------------------------------+
| | Layer-wise L2 norms of the square root |
| ``l2_norm/second_moment_sqrt/LAYER_NAME`` | of the Adam second moment is. |
| | |
+-----------------------------------------------+-----------------------------------------------------+
| | Layer-wise L2 norms of the step |
| ``l2_norm/update/LAYER_NAME`` | |
| | |
+-----------------------------------------------+-----------------------------------------------------+
| | Layer-wise cosine between the gradient and the step |
| ``cosine/update_grad/LAYER_NAME`` | |
| | |
+-----------------------------------------------+-----------------------------------------------------+
| | Layer-wise ratio between step size and parameter |
| ``l2_norm_ratio/update_param/LAYER_NAME`` | norm |
| | |
+-----------------------------------------------+-----------------------------------------------------+
"""
def __init__(self, log_optimizer_metrics: bool = True):
self.log_optimizer_metrics = log_optimizer_metrics
def batch_end(self, state: State, logger: Logger):
norm = 0.0
optimizer_metrics = {}
for name, p in state.model.named_parameters():
if p.grad is not None and p.requires_grad:
metric_reporter = getattr(state.optimizers[0], 'report_per_parameter_metrics', None)
if callable(metric_reporter) and self.log_optimizer_metrics:
optimizer_metrics = metric_reporter(p, name, optimizer_metrics)
# Always log grad norm as a default metric if it's not specified
if f'l2_norm/grad/{name}' not in optimizer_metrics:
param_grad_norm = torch.linalg.vector_norm(p.grad)
optimizer_metrics[f'l2_norm/grad/{name}'] = param_grad_norm
if state.fsdp_enabled and dist.get_world_size() > 0 and self.log_optimizer_metrics:
# If FSDP is enabled, the optimizer state lives on different ranks and must be reduced
# and combined before we can compute metrics.
# Each metric has a different way of being reduced, so the optimizer is responsible for implementing
# the reduction process.
# It occurs first via a pre-reduce, where the metric on each rank is modified and prepared
# then an all-reduce where the modified metric on each rank is combined into the correct metric across all ranks.
#
# For example, L2 norms are squared on each rank before we apply all_reduce(SUM) and take the sqrt on each rank
pre_reduce_metrics = getattr(state.optimizers[0], 'pre_reduce_metrics', None)
if callable(pre_reduce_metrics) and self.log_optimizer_metrics:
optimizer_metrics = pre_reduce_metrics(optimizer_metrics)
dist_reduce_metrics = getattr(state.optimizers[0], 'dist_reduce_metrics', None)
if callable(dist_reduce_metrics) and self.log_optimizer_metrics:
optimizer_metrics = dist_reduce_metrics(optimizer_metrics)
for metric in optimizer_metrics:
if metric.startswith('l2_norm/grad'):
norm += optimizer_metrics[metric]**2
optimizer_metrics['l2_norm/grad/global'] = norm**0.5
for metric in optimizer_metrics:
if isinstance(optimizer_metrics[metric], torch.Tensor):
optimizer_metrics[metric] = optimizer_metrics[metric].item()
logger.log_metrics(optimizer_metrics)
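# Hypothetical sketch of the duck-typed hook this callback looks for on `state.optimizers[0]`. Any
# optimizer exposing `report_per_parameter_metrics(param, name, metrics)` may add entries to the
# metrics dict; the subclass and metric key below are illustrative assumptions, not a fixed schema.
class _ExampleReportingSGD(torch.optim.SGD):
    def report_per_parameter_metrics(self, param: torch.Tensor, name: str, optimizer_metrics: dict) -> dict:
        if param.grad is not None:
            optimizer_metrics[f'l2_norm/grad/{name}'] = torch.linalg.vector_norm(param.grad)
        return optimizer_metrics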
| composer-dev | composer/callbacks/optimizer_monitor.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Check GPU Health during training."""
import logging
import os
from collections import deque
from datetime import datetime
from typing import List, Optional, Tuple
import numpy as np
import torch
from composer.core import Callback, State
from composer.core.time import Timestamp
from composer.loggers import Logger
from composer.utils import MissingConditionalImportError, dist
log = logging.getLogger(__name__)
__all__ = ['HealthChecker']
class HealthChecker(Callback):
"""Checks for GPU health.
This callback checks for GPU health by tracking and alerting for abnormal
GPU utilizations.
    For example, if the average utilizations during the observation window are
    ``[30, 30, 45]``, then the range (45-30=15) would exceed a threshold of 10%.
Args:
threshold (float, optional): Threshold of GPU utilization range to
trigger an alert. Defaults to 10.
sample_freq (int, optional): Sample frequency in seconds. Default: 5.
window_size (int, optional): Window size in seconds. HealthChecker will
check for abnormalities at this frequency. Default: 120.
wait (int, optional): Seconds to wait for starting to sample. Default: 120.
slack_webhook_url (str, optional): Slack URL to send alerts. Can also
be set with the SLACK_WEBHOOK_URL environment variable. Default: None
test_mode (bool, optional): If True, will send a test alert at the first check.
Default: False
"""
def __init__(
self,
threshold: float = 10,
sample_freq: int = 5,
window_size: int = 120,
wait: int = 120,
slack_webhook_url: Optional[str] = None,
test_mode: bool = False,
) -> None:
self.sample_freq = sample_freq
self.window_size = window_size
self.wait = wait
self.slack_webhook_url = slack_webhook_url
self.test_mode = test_mode
if not self.slack_webhook_url:
self.slack_webhook_url = os.environ.get('SLACK_WEBHOOK_URL', None)
if self.slack_webhook_url:
# fail fast if missing import
try:
import slack_sdk
del slack_sdk
except ImportError as e:
raise MissingConditionalImportError('health_checker', 'slack_sdk', None) from e
self.last_sample = 0
self.last_check = 0
self.metrics = []
if self._is_available():
self.metrics.append(GPUUtilization(threshold))
def init(self, state: State, logger: Logger) -> None:
pass
def after_train_batch(self, state: State, logger: Logger):
if not self.metrics:
return
if self._sample(state.timestamp):
for metric in self.metrics:
metric.sample()
if self._check(state.timestamp):
for metric in self.metrics:
message, alert = metric.check()
if self.test_mode and message and dist.get_global_rank() == 0:
alert = True
message = '[**THIS IS A TEST**]' + message
if alert and not metric.alerted:
self._alert(message, state)
metric.alerted = True
metric.clear()
def _sample(self, timestamp: Timestamp) -> bool:
now = timestamp.total_wct.seconds
if now < self.wait:
return False
if now - self.last_sample >= self.sample_freq:
self.last_sample = now
return True
return False
def _check(self, timestamp: Timestamp) -> bool:
now = timestamp.total_wct.seconds
if now - self.last_check >= self.window_size:
self.last_check = now
return True
return False
def _alert(self, message: str, state: State) -> None:
prefix = '[{now}][{run_name}][node_rank={node_rank}]'.format(
now=datetime.now(),
run_name=state.run_name,
node_rank=dist.get_node_rank(),
)
node_name = os.environ.get('NODENAME', None)
if node_name is not None:
prefix += f'[node={node_name}]'
message = prefix + ' : ' + message
logging.warning(message)
if self.slack_webhook_url:
from slack_sdk.webhook import WebhookClient
client = WebhookClient(url=self.slack_webhook_url)
client.send(text=message)
@staticmethod
def _is_available() -> bool:
if not torch.cuda.is_available():
return False
try:
import pynvml
pynvml.nvmlInit() # type: ignore
return True
except ImportError:
raise MissingConditionalImportError('health_checker', 'pynvml', None)
except pynvml.NVMLError_LibraryNotFound: # type: ignore
logging.warning('NVML not found, disabling GPU health checking')
except Exception as e:
logging.warning(f'Error initializing NVML: {e}')
return False
class GPUUtilization:
"""GPU Utilization Metric."""
def __init__(self, threshold=10) -> None:
self.samples = deque()
self.threshold = threshold
self.alerted = False
def sample(self) -> None:
if dist.get_local_rank() == 0:
sample = self._sample()
if sample is not None:
self.samples.append(sample)
def _sample(self) -> Optional[List]:
try:
import pynvml
except ImportError:
raise MissingConditionalImportError('health_checker', 'pynvml', None)
try:
samples = []
device_count = pynvml.nvmlDeviceGetCount()
for i in range(device_count):
handle = pynvml.nvmlDeviceGetHandleByIndex(i)
samples.append(pynvml.nvmlDeviceGetUtilizationRates(handle).gpu)
except pynvml.NVMLError:
return None
return samples
def check(self) -> Tuple[Optional[str], bool]:
if dist.get_local_rank() == 0:
average_sample = np.nanmean(list(self.samples), axis=0)
if np.nanmax(average_sample) - np.nanmin(average_sample) > self.threshold:
message = f'Abnormal GPU utilizations: {average_sample}'
return message, True
else:
message = f':+1: Normal GPU utilizations: {average_sample}'
return message, False
return None, False
def clear(self) -> None:
self.samples.clear()
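# Minimal numeric sketch of the alerting rule used by GPUUtilization.check: alert when the spread
# between the busiest and idlest GPU over the window exceeds the threshold. The helper name and the
# example inputs are illustrative assumptions.
def _example_utilization_alert(average_utilizations: List[float], threshold: float = 10.0) -> bool:
    return float(np.nanmax(average_utilizations)) - float(np.nanmin(average_utilizations)) > threshold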
| composer-dev | composer/callbacks/health_checker.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Early stopping callback."""
from __future__ import annotations
import logging
from typing import Any, Callable, Optional, Union
import torch
from composer.core import Callback, State, Time, TimeUnit
from composer.loggers import Logger
log = logging.getLogger(__name__)
__all__ = ['EarlyStopper']
class EarlyStopper(Callback):
"""Track a metric and halt training if it does not improve within a given interval.
Example:
.. doctest::
>>> from composer import Evaluator, Trainer
>>> from composer.callbacks.early_stopper import EarlyStopper
>>> # constructing trainer object with this callback
>>> early_stopper = EarlyStopper('MulticlassAccuracy', 'my_evaluator', patience=1)
>>> evaluator = Evaluator(
... dataloader = eval_dataloader,
... label = 'my_evaluator',
... metric_names = ['MulticlassAccuracy']
... )
>>> trainer = Trainer(
... model=model,
... train_dataloader=train_dataloader,
... eval_dataloader=evaluator,
... optimizers=optimizer,
... max_duration="1ep",
... callbacks=[early_stopper],
... )
Args:
monitor (str): The name of the metric to monitor.
dataloader_label (str): The label of the dataloader or evaluator associated with the tracked metric.
If ``monitor`` is in an :class:`.Evaluator`, the ``dataloader_label`` field should be set to the label of the
:class:`.Evaluator`.
If monitor is a training metric or an ordinary evaluation metric not in an :class:`.Evaluator`,
the ``dataloader_label`` should be set to the dataloader label, which defaults to ``'train'`` or
``'eval'``, respectively.
comp (str | (Any, Any) -> Any, optional): A comparison operator to measure change of the monitored metric.
The comparison operator will be called ``comp(current_value, prev_best)``. For metrics where the optimal value is low
(error, loss, perplexity), use a less than operator, and for metrics like accuracy where the optimal value
is higher, use a greater than operator. Defaults to :func:`torch.less` if loss, error, or perplexity are substrings
of the monitored metric, otherwise defaults to :func:`torch.greater`.
min_delta (float, optional): An optional float that requires a new value to exceed the best value by at least that amount.
Default: ``0.0``.
        patience (Time | int | str, optional): The interval of time the monitored metric may fail to improve
            before training is halted. Default: 1 epoch. If patience is an integer, it is interpreted as the number of epochs.
"""
def __init__(
self,
monitor: str,
dataloader_label: str,
comp: Optional[Union[str, Callable[[
Any,
Any,
], Any]]] = None,
min_delta: float = 0.0,
patience: Union[int, str, Time] = 1,
):
self.monitor = monitor
self.dataloader_label = dataloader_label
self.min_delta = abs(min_delta)
if callable(comp):
self.comp_func = comp
if isinstance(comp, str):
if comp.lower() in ('greater', 'gt'):
self.comp_func = torch.greater
elif comp.lower() in ('less', 'lt'):
self.comp_func = torch.less
else:
raise ValueError(
"Unrecognized comp string. Use the strings 'gt', 'greater', 'lt' or 'less' or a callable comparison operator"
)
if comp is None:
if any(substr in monitor.lower() for substr in ['loss', 'error', 'perplexity']):
self.comp_func = torch.less
else:
self.comp_func = torch.greater
self.best = None
self.best_occurred = None
if isinstance(patience, str):
self.patience = Time.from_timestring(patience)
elif isinstance(patience, int):
self.patience = Time(patience, TimeUnit.EPOCH)
else:
self.patience = patience
if self.patience.unit not in (TimeUnit.EPOCH, TimeUnit.BATCH):
raise ValueError('If `patience` is an instance of Time, it must have units of EPOCH or BATCH.')
def _get_monitored_metric(self, state: State):
if self.dataloader_label == 'train':
if self.monitor in state.train_metrics:
return state.train_metrics[self.monitor].compute()
else:
if self.monitor in state.eval_metrics[self.dataloader_label]:
return state.eval_metrics[self.dataloader_label][self.monitor].compute()
raise ValueError(f"Couldn't find the metric {self.monitor} with the dataloader label {self.dataloader_label}."
"Check that the dataloader_label is set to 'eval', 'train' or the evaluator name.")
def _update_stopper_state(self, state: State):
metric_val = self._get_monitored_metric(state)
if not torch.is_tensor(metric_val):
metric_val = torch.tensor(metric_val)
if self.best is None:
self.best = metric_val
self.best_occurred = state.timestamp
elif self.comp_func(metric_val, self.best) and torch.abs(metric_val - self.best) > self.min_delta:
self.best = metric_val
self.best_occurred = state.timestamp
assert self.best_occurred is not None
if self.patience.unit == TimeUnit.EPOCH:
if state.timestamp.epoch - self.best_occurred.epoch > self.patience:
state.stop_training()
elif self.patience.unit == TimeUnit.BATCH:
if state.timestamp.batch - self.best_occurred.batch > self.patience:
state.stop_training()
else:
raise ValueError(f'The units of `patience` should be EPOCH or BATCH.')
def eval_end(self, state: State, logger: Logger) -> None:
if self.dataloader_label == state.dataloader_label:
# if the monitored metric is an eval metric or in an evaluator
self._update_stopper_state(state)
def epoch_end(self, state: State, logger: Logger) -> None:
if self.dataloader_label == state.dataloader_label:
# if the monitored metric is not an eval metric, the right logic is run on EPOCH_END
self._update_stopper_state(state)
def batch_end(self, state: State, logger: Logger) -> None:
if self.patience.unit == TimeUnit.BATCH and self.dataloader_label == state.dataloader_label:
self._update_stopper_state(state)
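# Hypothetical sketch: stop training once an eval loss has failed to improve by at least 0.01 for 200
# batches. Because 'loss' appears in the metric name, `comp` defaults to torch.less. The metric and
# dataloader labels are placeholders for whatever the run actually reports.
def _example_loss_early_stopper() -> EarlyStopper:
    return EarlyStopper(monitor='CrossEntropyLoss', dataloader_label='eval', min_delta=0.01, patience='200ba')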
| composer-dev | composer/callbacks/early_stopper.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Monitor train and eval images."""
from typing import Any, Callable, Sequence, Tuple, Union
import torch
from composer.core import Callback, State, Time, TimeUnit
from composer.loggers import Logger
from composer.loss.utils import infer_target_type
from composer.utils import MissingConditionalImportError
__all__ = ['ImageVisualizer']
class ImageVisualizer(Callback):
"""Logs image inputs and optionally outputs.
    This callback triggers at a user-defined interval, and logs a sample of input (optionally also segmentation masks)
    images under the ``Images/Train`` and ``Images/Eval`` keys.
Example:
.. doctest::
>>> from composer import Trainer
>>> from composer.callbacks import ImageVisualizer
>>> # constructing trainer object with this callback
>>> trainer = Trainer(
... model=model,
... train_dataloader=train_dataloader,
... eval_dataloader=eval_dataloader,
... optimizers=optimizer,
... max_duration="1ep",
... callbacks=[ImageVisualizer()],
... )
The images are logged by the :class:`.Logger` to the following key(s) as described
below.
+---------------------------------------------+---------------------------------------+
| Key | Logged data |
+=============================================+=======================================+
| ``Images/Train`` | Sampled examples of train images |
+---------------------------------------------+---------------------------------------+
| ``Images/Eval`` | Sampled examples of eval images |
+---------------------------------------------+---------------------------------------+
.. note::
This callback only works with wandb logging for now.
Args:
interval (str | Time, optional): Time string specifying how often to log train images. For example, ``interval='1ep'``
means images are logged once every epoch, while ``interval='100ba'`` means images are logged once every 100
batches. Eval images are logged once at the start of each eval. Default: ``"100ba"``.
mode (str, optional): How to log the image labels. Valid values are ``"input"`` (input only)
and "segmentation" which also saves segmentation masks. Default: ``"input"``.
        num_images (int, optional): Number of images to log. Should be less than or equal to the microbatch size.
If there are not enough images in the microbatch, all the images in the microbatch will be logged.
Default: ``8``.
channels_last (bool, optional): Whether the image channel dimension is the last dimension. Default: ``False``.
input_key (str | int | Tuple[Callable, Callable] | Any, optional): A key that indexes to the input
from the batch. Can also be a pair of get and set functions, where the getter
is assumed to be first in the pair. The default is 0, which corresponds to any sequence, where the first
element is the input. Default: ``0``.
target_key (str | int | Tuple[Callable, Callable] | Any, optional): A key that indexes to the target
from the batch. Can also be a pair of get and set functions, where the getter
is assumed to be first in the pair. The default is 1, which corresponds to any sequence, where the second
element is the target. Default: ``1``.
"""
def __init__(self,
interval: Union[int, str, Time] = '100ba',
mode: str = 'input',
num_images: int = 8,
channels_last: bool = False,
input_key: Union[str, int, Tuple[Callable, Callable], Any] = 0,
target_key: Union[str, int, Tuple[Callable, Callable], Any] = 1):
self.mode = mode
self.num_images = num_images
self.channels_last = channels_last
self.input_key = input_key
self.target_key = target_key
# TODO(Evan): Generalize as part of the logger refactor
try:
import wandb
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='wandb',
conda_package='wandb',
conda_channel='conda-forge') from e
del wandb # unused
# Check that the output mode is valid
if self.mode.lower() not in ['input', 'segmentation']:
raise ValueError(f'Invalid mode: {mode}')
        # Check that the interval timestring is parsable and convert it into a Time object
        if isinstance(interval, int):
            self.interval = Time(interval, TimeUnit.BATCH)
        elif isinstance(interval, str):
            self.interval = Time.from_timestring(interval)
        else:
            self.interval = interval
# Verify that the interval has supported units
if self.interval.unit not in [TimeUnit.BATCH, TimeUnit.EPOCH]:
raise ValueError(f'Invalid time unit for parameter interval: '
f'{self.interval.unit}')
self.last_train_time_value_logged = -1
def _log_inputs(self, state: State, logger: Logger, data_name: str):
inputs = state.batch_get_item(key=self.input_key)
# Verify inputs is a valid shape for conversion to an image
if _check_for_image_format(inputs):
inputs = _make_input_images(inputs, self.num_images)
logger.log_images(inputs, name=data_name, use_table=True, channels_last=self.channels_last)
def _log_segmented_inputs(self, state: State, logger: Logger, data_name: str):
inputs = state.batch_get_item(key=self.input_key)
targets = state.batch_get_item(key=self.target_key)
outputs = state.outputs
assert isinstance(outputs, torch.Tensor)
images, masks = _make_segmentation_images(inputs, targets, outputs, self.num_images, self.channels_last)
logger.log_images(images, masks=masks, name=data_name, channels_last=self.channels_last, use_table=True)
def before_forward(self, state: State, logger: Logger):
current_time_value = state.timestamp.get(self.interval.unit).value
if self.mode.lower(
) == 'input' and current_time_value % self.interval.value == 0 and current_time_value != self.last_train_time_value_logged:
self.last_train_time_value_logged = current_time_value
self._log_inputs(state, logger, 'Images/Train')
def eval_before_forward(self, state: State, logger: Logger):
if self.mode.lower() == 'input' and state.eval_timestamp.get(TimeUnit.BATCH).value == 0:
self._log_inputs(state, logger, 'Images/Eval')
def before_loss(self, state: State, logger: Logger):
current_time_value = state.timestamp.get(self.interval.unit).value
if self.mode.lower(
) == 'segmentation' and current_time_value % self.interval.value == 0 and current_time_value != self.last_train_time_value_logged:
self.last_train_time_value_logged = current_time_value
self._log_segmented_inputs(state, logger, 'Images/Train')
def eval_after_forward(self, state: State, logger: Logger):
if self.mode.lower() == 'segmentation' and state.eval_timestamp.get(TimeUnit.BATCH).value == 0:
self._log_segmented_inputs(state, logger, 'Images/Eval')
def _make_input_images(inputs: torch.Tensor, num_images: int):
if inputs.shape[0] < num_images:
num_images = inputs.shape[0]
images = inputs[0:num_images].data.cpu().numpy()
return images
def _make_segmentation_images(inputs: torch.Tensor,
targets: torch.Tensor,
outputs: Union[torch.Tensor, Sequence[torch.Tensor]],
num_images: int,
channels_last: bool = False):
if isinstance(outputs, Sequence):
outputs = torch.stack(list(outputs))
if min([inputs.shape[0], targets.shape[0], outputs.shape[0]]) < num_images:
num_images = min([inputs.shape[0], targets.shape[0], outputs.shape[0]])
images = inputs[0:num_images].data.cpu().numpy()
targets = targets[0:num_images]
outputs = outputs[0:num_images]
# Ensure the targets are in the expected format
if infer_target_type(outputs, targets) == 'one_hot':
if channels_last:
targets = targets.argmax(dim=-1).data.cpu().numpy()
else:
targets = targets.argmax(dim=1).data.cpu().numpy()
else:
targets = targets.data.cpu().numpy()
# Convert the outputs to the expected format
if channels_last:
num_classes = outputs.shape[-1]
outputs = outputs.argmax(dim=-1).cpu().numpy()
else:
num_classes = outputs.shape[1]
outputs = outputs.argmax(dim=1).cpu().numpy()
# Adjust targets such that negative values are mapped to one higher than the maximum class
targets[targets < 0] = num_classes
return images, {'prediction': outputs, 'ground_truth': targets}
def _check_for_image_format(data: torch.Tensor) -> bool:
return data.ndim in [3, 4] and data.numel() > data.shape[0]
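# Hypothetical sketch: visualize segmentation predictions and ground-truth masks every 250 batches.
# Assumes the batch is an (input, target) pair, the model outputs per-pixel class scores, and a wandb
# logger is attached to the Trainer; the 250-batch cadence is an illustrative choice.
def _example_segmentation_visualizer() -> ImageVisualizer:
    return ImageVisualizer(interval='250ba', mode='segmentation', num_images=4)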
| composer-dev | composer/callbacks/image_visualizer.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Log memory usage during training."""
import logging
import math
import warnings
from typing import Dict, Optional, Union
import torch.cuda
from composer.core import Callback, State
from composer.loggers import Logger
log = logging.getLogger(__name__)
__all__ = ['MemoryMonitor']
class MemoryMonitor(Callback):
"""Logs the memory usage of the model.
This callback calls the torch memory stats API for CUDA (see :func:`torch.cuda.memory_stats`)
on the :attr:`.Event.AFTER_TRAIN_BATCH` and reports different memory statistics.
Example:
.. doctest::
>>> from composer import Trainer
>>> from composer.callbacks import MemoryMonitor
>>> # constructing trainer object with this callback
>>> trainer = Trainer(
... model=model,
... train_dataloader=train_dataloader,
... eval_dataloader=eval_dataloader,
... optimizers=optimizer,
... max_duration="1ep",
... callbacks=[MemoryMonitor()],
... )
The memory statistics are logged by the :class:`.Logger` to the following keys as
described below.
+--------------------------+-------------------------------------------------------------+
| Key | Logged data |
+==========================+=============================================================+
| | Several memory usage statistics |
| ``memory/{statistic}`` | are logged on |
| | :attr:`.Event.AFTER_TRAIN_BATCH` event. |
+--------------------------+-------------------------------------------------------------+
The following statistics are recorded:
+----------------+-----------------------------------------------------------------------------------+
| Statistic | Description |
+================+===================================================================================+
| allocated_mem | Amount of allocated memory in gigabytes. |
+----------------+-----------------------------------------------------------------------------------+
| active_mem | Amount of active memory in gigabytes at the time of recording. |
+----------------+-----------------------------------------------------------------------------------+
| inactive_mem | Amount of inactive, non-releaseable memory in gigabytes at the time of recording. |
+----------------+-----------------------------------------------------------------------------------+
| reserved_mem | Amount of reserved memory in gigabytes at the time of recording. |
+----------------+-----------------------------------------------------------------------------------+
| alloc_retries | Number of failed cudaMalloc calls that result in a cache flush and retry. |
+----------------+-----------------------------------------------------------------------------------+
.. note::
Memory usage monitoring is only supported for GPU devices.
Args:
memory_keys (Dict[str, str], optional): A dict specifying memory statistics to log. Keys
are the names of memory statistics to log from `torch.cuda.memory_stats()`, and values
are the names they will be logged under. If not provided, the above statistics are
logged. Defaults to None.
"""
def __init__(self, memory_keys: Optional[Dict[str, str]] = None) -> None:
self.memory_keys = memory_keys
def init(self, state: State, logger: Logger) -> None:
# Not relying on `torch.cuda.is_available()` since the model could be on CPU.
model_device = next(state.model.parameters()).device
if model_device.type != 'cuda':
warnings.warn(f'The memory monitor only works on CUDA devices, but the model is on {model_device.type}.')
def after_train_batch(self, state: State, logger: Logger):
memory_report = {}
model_device = next(state.model.parameters()).device
if model_device.type != 'cuda':
return
memory_report = _get_memory_report(self.memory_keys)
logger.log_metrics({f'memory/{mem_stat}': val for (mem_stat, val) in memory_report.items()})
_MEMORY_KEYS = {
'allocated_bytes.all.current': 'allocated_mem',
'active_bytes.all.current': 'active_mem',
'inactive_split_bytes.all.current': 'inactive_mem',
'reserved_bytes.all.current': 'reserved_mem',
'num_alloc_retries': 'alloc_retries',
}
def _get_memory_report(memory_keys: Optional[Dict[str, str]] = None) -> Dict[str, Union[int, float]]:
memory_stats = torch.cuda.memory_stats()
memory_keys = memory_keys or _MEMORY_KEYS
# simplify and reformat the memory_stats
memory_report = {}
for (torch_name, name) in memory_keys.items():
if torch_name in memory_stats:
# Convert to gigabytes
if 'bytes' in torch_name:
gigabytes = memory_stats[torch_name] / 1.0e9
# Round to preserve 5 significant digits
if gigabytes != 0:
order_of_magnitude = int(math.floor(math.log10(abs(gigabytes))))
gigabytes = round(gigabytes, -order_of_magnitude + 4)
memory_report[name.replace('bytes', 'gigabytes')] = gigabytes
else:
memory_report[name] = memory_stats[torch_name]
return memory_report
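# Hypothetical sketch: log only allocated and reserved memory under the module's naming convention.
# The left-hand keys must exist in `torch.cuda.memory_stats()`; the right-hand names are arbitrary labels.
def _example_memory_monitor() -> MemoryMonitor:
    return MemoryMonitor(memory_keys={
        'allocated_bytes.all.current': 'allocated_mem',
        'reserved_bytes.all.current': 'reserved_mem',
    })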
| composer-dev | composer/callbacks/memory_monitor.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""C4 (Colossal Cleaned Common Crawl) dataset.
This dataset is a colossal, cleaned version of Common Crawl's web crawl corpus and it is based on the `Common Crawl
<https://commoncrawl.org>`_ dataset.
"""
import logging
from typing import Any, Dict, Optional
from torch.utils.data import DataLoader
from composer.core import DataSpec
from composer.utils import MissingConditionalImportError, dist
log = logging.getLogger(__name__)
__all__ = ['build_streaming_c4_dataloader']
def build_streaming_c4_dataloader(
global_batch_size: int,
remote: str = 's3://mosaicml-internal-dataset-c4/mds/2/',
local: str = '/tmp/mds-cache/mds-c4/',
split: str = 'train',
shuffle: bool = True,
drop_last: bool = True,
tokenizer_name: str = 'bert-base-uncased',
max_seq_len: int = 512,
group_method: str = 'truncate',
mlm: bool = False,
mlm_probability: float = 0.15,
predownload: Optional[int] = 100_000,
keep_zip: Optional[bool] = None,
download_retry: int = 2,
download_timeout: float = 60,
validate_hash: Optional[str] = None,
shuffle_seed: Optional[int] = None,
num_canonical_nodes: Optional[int] = None,
**dataloader_kwargs: Dict[str, Any],
):
"""Builds a :class:`.DataSpec` for the StreamingC4 (Colossal Cleaned Common Crawl) dataset.
Args:
global_batch_size (int): Global batch size.
remote (str): Remote directory (S3 or local filesystem) where dataset is stored.
Default: ``'s3://mosaicml-internal-dataset-c4/mds/2/'``
local (str): Local filesystem directory where dataset is cached during operation.
Default: ``'/tmp/mds-cache/mds-c4/'``
split (str): What split of the dataset to use. Either ``'train'`` or ``'val'``.
Default: ``'train'``.
shuffle (bool): whether to shuffle the dataset. Default: ``True``.
drop_last (bool): whether to drop last samples. Default: ``True``.
tokenizer_name (str): The name of the HuggingFace tokenizer to preprocess text with. Default:
``'bert-base-uncased'``.
max_seq_len (int): The max sequence length of each token sample. Default: ``512``.
group_method (str): How to group text samples into token samples. Currently only `truncate` is supported.
mlm (bool): Whether or not to use masked language modeling. Default: ``False``.
mlm_probability (float): If ``mlm==True``, the probability that tokens are masked. Default: ``0.15``.
predownload (int, optional): Target number of samples ahead to download the shards of while
iterating. Defaults to ``100_000``.
keep_zip (bool, optional): Whether to keep or delete the compressed file when
decompressing downloaded shards. If set to None, keep iff remote is local. Defaults to
``None``.
download_retry (int): Number of download re-attempts before giving up. Defaults to ``2``.
download_timeout (float): Number of seconds to wait for a shard to download before raising
an exception. Defaults to ``60``.
validate_hash (str, optional): Optional hash or checksum algorithm to use to validate
shards. Defaults to ``None``.
shuffle_seed (int, optional): Seed for shuffling, or ``None`` for random seed. Defaults to
``None``.
num_canonical_nodes (int, optional): Canonical number of nodes for shuffling with resumption.
Defaults to ``None``, which is interpreted as the number of nodes of the initial run.
**dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
"""
try:
import transformers
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='transformers') from e
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
batch_size = global_batch_size // dist.get_world_size()
try:
from streaming.text import StreamingC4
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='streaming', conda_package='mosaicml-streaming') from e
dataset = StreamingC4(
tokenizer_name=tokenizer_name,
max_seq_len=max_seq_len,
group_method=group_method,
local=local,
remote=remote,
split=split,
shuffle=shuffle,
predownload=predownload,
keep_zip=keep_zip,
download_retry=download_retry,
download_timeout=download_timeout,
validate_hash=validate_hash,
shuffle_seed=shuffle_seed,
num_canonical_nodes=num_canonical_nodes,
batch_size=batch_size,
)
collate_fn = transformers.DataCollatorForLanguageModeling(
tokenizer=dataset.tokenizer,
mlm=mlm,
mlm_probability=mlm_probability,
)
dataloader = DataLoader(
dataset=dataset,
batch_size=batch_size,
drop_last=drop_last,
collate_fn=collate_fn,
**dataloader_kwargs,
)
return DataSpec(dataloader=dataloader)
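# Hypothetical sketch: build a per-device masked-language-modeling dataloader. The batch size and
# worker count are illustrative; extra kwargs such as `num_workers` are forwarded to the DataLoader,
# and `global_batch_size` must be divisible by the world size.
def _example_c4_dataspec() -> DataSpec:
    return build_streaming_c4_dataloader(
        global_batch_size=512,
        split='train',
        mlm=True,
        mlm_probability=0.15,
        num_workers=8,
    )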
| composer-dev | composer/datasets/c4.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Synthetic datasets used for testing, profiling, and debugging."""
from __future__ import annotations
from typing import Callable, Optional, Sequence, Union
import torch
import torch.utils.data
from PIL import Image
from torchvision.datasets import VisionDataset
from composer.core import MemoryFormat
from composer.utils import StringEnum
__all__ = ['SyntheticDataType', 'SyntheticDataLabelType', 'SyntheticBatchPairDataset', 'SyntheticPILDataset']
class SyntheticDataType(StringEnum):
"""Defines the distribution of the synthetic data.
Attributes:
GAUSSIAN: Standard Gaussian distribution.
SEPARABLE: Gaussian distributed, but classes will be mean-shifted for
separability.
"""
GAUSSIAN = 'gaussian'
SEPARABLE = 'separable'
class SyntheticDataLabelType(StringEnum):
"""Defines the class label type of the synthetic data.
Attributes:
CLASSIFICATION_INT: Class labels are ints.
CLASSIFICATION_ONE_HOT: Class labels are one-hot vectors.
"""
CLASSIFICATION_INT = 'classification_int'
CLASSIFICATION_ONE_HOT = 'classification_one_hot'
class SyntheticBatchPairDataset(torch.utils.data.Dataset):
"""Emulates a dataset of provided size and shape.
Args:
total_dataset_size (int): The total size of the dataset to emulate.
data_shape (List[int]): Shape of the tensor for input samples.
num_unique_samples_to_create (int): The number of unique samples to allocate memory for.
        data_type (str or SyntheticDataType, optional): Type of synthetic data to create.
            Default: ``SyntheticDataType.GAUSSIAN``.
        label_type (str or SyntheticDataLabelType, optional): Type of synthetic data to
            create. Default: ``SyntheticDataLabelType.CLASSIFICATION_INT``.
num_classes (int, optional): Number of classes to use. Required if
``SyntheticDataLabelType`` is ``CLASSIFICATION_INT``
            or ``CLASSIFICATION_ONE_HOT``. Default: ``None``.
label_shape (List[int], optional): Shape of the tensor for each sample label.
Default: ``None``.
device (str): Device to store the sample pool. Set to ``'cuda'`` to store samples
on the GPU and eliminate PCI-e bandwidth with the dataloader. Set to ``'cpu'``
to move data between host memory and the gpu on every batch. Default:
``'cpu'``.
memory_format (:class:`composer.core.MemoryFormat`, optional): Memory format for the sample pool.
Default: `MemoryFormat.CONTIGUOUS_FORMAT`.
transform (Callable, optional): Transform(s) to apply to data. Default: ``None``.
"""
def __init__(self,
*,
total_dataset_size: int,
data_shape: Sequence[int],
num_unique_samples_to_create: int = 100,
data_type: Union[str, SyntheticDataType] = SyntheticDataType.GAUSSIAN,
label_type: Union[str, SyntheticDataLabelType] = SyntheticDataLabelType.CLASSIFICATION_INT,
num_classes: Optional[int] = None,
label_shape: Optional[Sequence[int]] = None,
device: str = 'cpu',
memory_format: Union[str, MemoryFormat] = MemoryFormat.CONTIGUOUS_FORMAT,
transform: Optional[Callable] = None):
self.total_dataset_size = total_dataset_size
self.data_shape = data_shape
self.num_unique_samples_to_create = num_unique_samples_to_create
self.data_type = SyntheticDataType(data_type)
self.label_type = SyntheticDataLabelType(label_type)
self.num_classes = num_classes
self.label_shape = label_shape
self.device = device
self.memory_format = MemoryFormat(memory_format)
self.transform = transform
self._validate_label_inputs(label_type=self.label_type,
num_classes=self.num_classes,
label_shape=self.label_shape)
# The synthetic data
self.input_data = None
self.input_target = None
def _validate_label_inputs(self, label_type: SyntheticDataLabelType, num_classes: Optional[int],
label_shape: Optional[Sequence[int]]):
if label_type == SyntheticDataLabelType.CLASSIFICATION_INT or label_type == SyntheticDataLabelType.CLASSIFICATION_ONE_HOT:
if num_classes is None or num_classes <= 0:
raise ValueError('classification label_types require num_classes > 0')
def __len__(self) -> int:
return self.total_dataset_size
def __getitem__(self, idx: int):
idx = idx % self.num_unique_samples_to_create
if self.input_data is None:
# Generating data on the first call to __getitem__ so that data is stored on the correct gpu,
# after DeviceSingleGPU calls torch.cuda.set_device
# This does mean that the first batch will be slower
# generating samples so all values for the sample are the sample index
# e.g. all(input_data[1] == 1). Helps with debugging.
assert self.input_target is None
input_data = torch.randn(self.num_unique_samples_to_create, *self.data_shape, device=self.device)
input_data = torch.clone(input_data) # allocate actual memory
input_data = input_data.contiguous(memory_format=getattr(torch, self.memory_format.value))
if self.label_type == SyntheticDataLabelType.CLASSIFICATION_ONE_HOT:
assert self.num_classes is not None
input_target = torch.zeros((self.num_unique_samples_to_create, self.num_classes), device=self.device)
input_target[:, 0] = 1.0
elif self.label_type == SyntheticDataLabelType.CLASSIFICATION_INT:
assert self.num_classes is not None
if self.label_shape:
label_batch_shape = (self.num_unique_samples_to_create, *self.label_shape)
else:
label_batch_shape = (self.num_unique_samples_to_create,)
input_target = torch.randint(0, self.num_classes, label_batch_shape, device=self.device)
else:
                raise ValueError(f'Unsupported label type {self.label_type}')
# If separable, force the positive examples to have a higher mean than the negative examples
if self.data_type == SyntheticDataType.SEPARABLE:
assert self.label_type == SyntheticDataLabelType.CLASSIFICATION_INT, \
'SyntheticDataType.SEPARABLE requires integer classes.'
assert torch.max(input_target) == 1 and torch.min(input_target) == 0, \
'SyntheticDataType.SEPARABLE only supports binary labels'
# Make positive examples have mean = 3 and negative examples have mean = -3
# so they are easier to separate with a classifier
input_data[input_target == 0] -= 3
input_data[input_target == 1] += 3
self.input_data = input_data
self.input_target = input_target
assert self.input_target is not None
if self.transform is not None:
return self.transform(self.input_data[idx]), self.input_target[idx]
else:
return self.input_data[idx], self.input_target[idx]
class SyntheticPILDataset(VisionDataset):
"""Similar to :class:`SyntheticBatchPairDataset`, but yields samples of type :class:`~PIL.Image.Image` and supports
dataset transformations.
Args:
total_dataset_size (int): The total size of the dataset to emulate.
data_shape (List[int]): Shape of the tensor for input samples.
num_unique_samples_to_create (int): The number of unique samples to allocate memory for.
        data_type (str or SyntheticDataType, optional): Type of synthetic data to create.
            Default: ``SyntheticDataType.GAUSSIAN``.
        label_type (str or SyntheticDataLabelType, optional): Type of synthetic data to
            create. Default: ``SyntheticDataLabelType.CLASSIFICATION_INT``.
num_classes (int, optional): Number of classes to use. Required if
``SyntheticDataLabelType`` is ``CLASSIFICATION_INT``
or ``CLASSIFICATION_ONE_HOT``. Default: ``None``.
label_shape (List[int], optional): Shape of the tensor for each sample label.
Default: ``None``.
transform (Callable, optional): Transform(s) to apply to data. Default: ``None``.
"""
def __init__(self,
*,
total_dataset_size: int,
data_shape: Sequence[int] = (64, 64, 3),
num_unique_samples_to_create: int = 100,
data_type: Union[str, SyntheticDataType] = SyntheticDataType.GAUSSIAN,
label_type: Union[str, SyntheticDataLabelType] = SyntheticDataLabelType.CLASSIFICATION_INT,
num_classes: Optional[int] = None,
label_shape: Optional[Sequence[int]] = None,
transform: Optional[Callable] = None):
super().__init__(root='', transform=transform)
self._dataset = SyntheticBatchPairDataset(
total_dataset_size=total_dataset_size,
data_shape=data_shape,
data_type=data_type,
num_unique_samples_to_create=num_unique_samples_to_create,
label_type=label_type,
num_classes=num_classes,
label_shape=label_shape,
)
def __len__(self) -> int:
return len(self._dataset)
def __getitem__(self, idx: int):
input_data, target = self._dataset[idx]
input_data = input_data.numpy()
# Shift and scale to be [0, 255]
input_data = (input_data - input_data.min())
input_data = (input_data * (255 / input_data.max())).astype('uint8')
sample = Image.fromarray(input_data)
if self.transform is not None:
return self.transform(sample), target
else:
return sample, target
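# Minimal sketch: a small synthetic classification dataset (10 classes, 3x32x32 inputs, integer
# labels) for smoke tests; wrapping it in a DataLoader is left to the caller. The sizes are
# illustrative assumptions.
def _example_synthetic_dataset() -> SyntheticBatchPairDataset:
    return SyntheticBatchPairDataset(
        total_dataset_size=256,
        data_shape=(3, 32, 32),
        num_classes=10,
        label_type=SyntheticDataLabelType.CLASSIFICATION_INT,
    )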
| composer-dev | composer/datasets/synthetic.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Optional
import numpy as np
from composer.core import Dataset
from composer.utils import MissingConditionalImportError
try:
import ffcv
ffcv_installed = True
except ImportError:
ffcv_installed = False
log = logging.getLogger(__name__)
__all__ = ['write_ffcv_dataset', 'ffcv_monkey_patches']
def _require_ffcv():
if not ffcv_installed:
raise MissingConditionalImportError(extra_deps_group='ffcv', conda_package='ffcv')
def ffcv_monkey_patches():
_require_ffcv()
# ffcv's __len__ is expensive because it always calls self.next_traversal_order, which performs shuffling.
# Composer calls len(dataloader) in the training loop for every batch, so this __len__ causes a ~2x slowdown.
# ffcv's __len__ is fixed in the 1.0.0 branch, but for another reason (https://github.com/libffcv/ffcv/issues/163).
def new_len(self):
if not hasattr(self, 'init_traversal_order'):
self.init_traversal_order = self.next_traversal_order()
if self.drop_last:
return len(self.init_traversal_order) // self.batch_size
else:
return int(np.ceil(len(self.init_traversal_order) / self.batch_size))
ffcv.loader.loader.Loader.__len__ = new_len
def write_ffcv_dataset(dataset: Optional[Dataset] = None,
write_path: str = '/tmp/dataset.ffcv',
max_resolution: Optional[int] = None,
num_workers: int = 16,
write_mode: str = 'raw',
compress_probability: float = 0.50,
jpeg_quality: float = 90,
chunk_size: int = 100):
"""Converts PyTorch compatible ``dataset`` into FFCV format at filepath ``write_path``.
Args:
dataset (Iterable[Sample]): A PyTorch dataset. Default: ``None``.
write_path (str): Write results to this file. Default: ``"/tmp/dataset.ffcv"``.
max_resolution (int): Limit resolution if provided. Default: ``None``.
num_workers (int): Number of workers to use. Default: ``16``.
write_mode (str): Write mode for the dataset. Default: ``'raw'``.
compress_probability (float): Probability with which image is JPEG-compressed. Default: ``0.5``.
jpeg_quality (float): Quality to use for jpeg compression. Default: ``90``.
chunk_size (int): Size of chunks processed by each worker during conversion. Default: ``100``.
"""
_require_ffcv()
if dataset is None:
raise ValueError('dataset should not be None.')
log.info(f'Writing dataset in FFCV <file>.ffcv format to {write_path}.')
writer = ffcv.writer.DatasetWriter(write_path, {
'image':
ffcv.fields.RGBImageField(write_mode=write_mode,
max_resolution=max_resolution,
compress_probability=compress_probability,
jpeg_quality=jpeg_quality),
'label':
ffcv.fields.IntField()
},
num_workers=num_workers)
writer.from_indexed_dataset(dataset, chunksize=chunk_size)
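# --- Usage sketch (illustrative; not part of the original module) ---
# Writes any indexed (image, label) dataset to FFCV format and applies the loader
# length patch. The output path is an arbitrary assumption; ffcv must be installed.
def _example_write_ffcv(dataset, out_path: str = '/tmp/example.ffcv'):
    ffcv_monkey_patches()  # only needed if an ffcv.Loader is constructed afterwards
    write_ffcv_dataset(dataset=dataset, write_path=out_path, num_workers=2, chunk_size=50)
    return out_path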
| composer-dev | composer/datasets/ffcv_utils.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""ImageNet classification streaming dataset.
The most widely used dataset for Image Classification algorithms. Please refer to the `ImageNet 2012 Classification
Dataset <http://image-net.org/>`_ for more details.
"""
import os
from typing import Any, Dict, List, Optional
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import ImageFolder
from composer.core import DataSpec, MemoryFormat
from composer.datasets.ffcv_utils import ffcv_monkey_patches, write_ffcv_dataset
from composer.datasets.synthetic import SyntheticBatchPairDataset
from composer.datasets.utils import NormalizationFn, pil_image_collate
from composer.utils import MissingConditionalImportError, dist
__all__ = [
'build_imagenet_dataloader',
'build_streaming_imagenet1k_dataloader',
'build_synthetic_imagenet_dataloader',
'write_ffcv_imagenet',
'build_ffcv_imagenet_dataloader',
]
IMAGENET_CHANNEL_MEAN = (0.485 * 255, 0.456 * 255, 0.406 * 255)
IMAGENET_CHANNEL_STD = (0.229 * 255, 0.224 * 255, 0.225 * 255)
def build_imagenet_dataloader(
datadir: str,
global_batch_size: int,
is_train: bool = True,
drop_last: bool = True,
shuffle: bool = True,
resize_size: int = -1,
crop_size: int = 224,
**dataloader_kwargs: Dict[str, Any],
) -> DataSpec:
"""Builds an ImageNet dataloader.
Args:
datadir (str): path to location of dataset.
global_batch_size (int): Global batch size.
is_train (bool): Whether to load the training data or validation data. Default:
``True``.
drop_last (bool): whether to drop last samples. Default: ``True``.
shuffle (bool): whether to shuffle the dataset. Default: ``True``.
resize_size (int, optional): The resize size to use. Use ``-1`` to not resize. Default: ``-1``.
crop_size (int): The crop size to use. Default: ``224``.
**dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
"""
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
batch_size = global_batch_size // dist.get_world_size()
if is_train:
# include fixed-size resize before RandomResizedCrop in training only
# if requested (by specifying a size > 0)
train_transforms: List[torch.nn.Module] = []
if resize_size > 0:
train_transforms.append(transforms.Resize(resize_size))
train_transforms += [
transforms.RandomResizedCrop(crop_size, scale=(0.08, 1.0), ratio=(0.75, 4.0 / 3.0)),
transforms.RandomHorizontalFlip()
]
transformation = transforms.Compose(train_transforms)
split = 'train'
else:
val_transforms: List[torch.nn.Module] = []
if resize_size > 0:
val_transforms.append(transforms.Resize(resize_size))
val_transforms.append(transforms.CenterCrop(crop_size))
transformation = transforms.Compose(val_transforms)
split = 'val'
device_transform_fn = NormalizationFn(mean=IMAGENET_CHANNEL_MEAN, std=IMAGENET_CHANNEL_STD)
dataset = ImageFolder(os.path.join(datadir, split), transformation)
sampler = dist.get_sampler(dataset, drop_last=drop_last, shuffle=shuffle)
return DataSpec(
DataLoader(
dataset=dataset,
batch_size=batch_size,
sampler=sampler,
drop_last=drop_last,
collate_fn=pil_image_collate,
**dataloader_kwargs,
),
device_transforms=device_transform_fn,
)
def build_synthetic_imagenet_dataloader(
global_batch_size: int,
num_unique_samples: int = 100,
device: str = 'cpu',
memory_format: MemoryFormat = MemoryFormat.CONTIGUOUS_FORMAT,
is_train: bool = True,
crop_size: int = 224,
drop_last: bool = True,
shuffle: bool = True,
**dataloader_kwargs: Dict[str, Any],
) -> DataSpec:
"""Builds a synthetic ImageNet dataloader.
Args:
global_batch_size (int): Global batch size.
num_unique_samples (int): number of unique samples in synthetic dataset. Default: ``100``.
device (str): device with which to load the dataset. Default: ``cpu``.
memory_format (:class:`composer.core.MemoryFormat`): memory format of the tensors. Default: ``CONTIGUOUS_FORMAT``.
is_train (bool): Whether to load the training data or validation data. Default:
``True``.
crop_size (int): The crop size to use. Default: ``224``.
drop_last (bool): whether to drop last samples. Default: ``True``.
shuffle (bool): whether to shuffle the dataset. Default: ``True``.
**dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
"""
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
batch_size = global_batch_size // dist.get_world_size()
total_dataset_size = 1_281_167 if is_train else 50_000
dataset = SyntheticBatchPairDataset(
total_dataset_size=total_dataset_size,
data_shape=[3, crop_size, crop_size],
num_classes=1000,
num_unique_samples_to_create=num_unique_samples,
device=device,
memory_format=memory_format,
)
sampler = dist.get_sampler(dataset, drop_last=drop_last, shuffle=shuffle)
return DataSpec(
DataLoader(
dataset=dataset,
batch_size=batch_size,
sampler=sampler,
drop_last=drop_last,
collate_fn=pil_image_collate,
**dataloader_kwargs,
),)
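# --- Usage sketch (illustrative; not part of the original module) ---
# Builds a tiny synthetic ImageNet DataSpec for smoke-testing a training loop.
# The batch size, sample count, and crop size are arbitrary assumptions; this sketch
# assumes a single-process run (or an already-initialized distributed environment).
def _example_synthetic_imagenet_spec():
    return build_synthetic_imagenet_dataloader(global_batch_size=8,
                                                num_unique_samples=16,
                                                crop_size=64,
                                                num_workers=0)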
def write_ffcv_imagenet(
datadir: str,
savedir: str = '/tmp/imagenet_train.ffcv',
split: str = 'train',
num_workers: int = 8,
):
"""Converts an ImageNet dataset to FFCV format.
datadir (str): Path of ImageNet dataset, in ImageFolder format.
savedir (str): Path to save the FFCV dataset. Default: ``/tmp/imagenet_train.ffcv``.
split (str): 'train' or 'val'. Default: ``train``.
num_workers (int): Number of workers to use for conversion. Default: ``8``.
"""
if dist.get_local_rank() == 0:
ds = ImageFolder(os.path.join(datadir, split))
write_ffcv_dataset(dataset=ds,
write_path=savedir,
max_resolution=500,
num_workers=num_workers,
compress_probability=0.50,
jpeg_quality=90)
# wait for rank 0 to finish conversion
dist.barrier()
def build_ffcv_imagenet_dataloader(
datadir: str,
global_batch_size: int,
is_train: bool = True,
resize_size: int = -1,
crop_size: int = 224,
cpu_only: bool = False,
drop_last: bool = True,
prefetch_factor: int = 2,
num_workers: int = 8,
):
"""Builds an FFCV ImageNet dataloader.
Args:
datadir (str): path to location of dataset.
global_batch_size (int): Global batch size.
is_train (bool): Whether to load the training data or validation data. Default:
``True``.
resize_size (int, optional): The resize size to use. Use ``-1`` to not resize. Default: ``-1``.
crop_size (int): The crop size to use. Default: ``224``.
cpu_only (bool): Only perform transforms on the CPU. Default: ``False``.
drop_last (bool): Whether to drop the last samples. Default: ``True``.
prefetch_factor (int): Number of batches to prefetch. Default: ``2``.
num_workers (int): Number of workers. Default: ``8``.
"""
try:
import ffcv
from ffcv.fields.decoders import CenterCropRGBImageDecoder, IntDecoder, RandomResizedCropRGBImageDecoder
from ffcv.pipeline.operation import Operation
except ImportError:
raise ImportError('Composer was installed without ffcv support. '
'To use ffcv with Composer, please install ffcv.')
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
batch_size = global_batch_size // dist.get_world_size()
device = torch.device(f'cuda:{dist.get_local_rank()}')
label_pipeline: List[Operation] = [
IntDecoder(),
ffcv.transforms.ToTensor(),
ffcv.transforms.Squeeze(),
ffcv.transforms.ToDevice(device, non_blocking=True)
]
image_pipeline: List[Operation] = []
if is_train:
image_pipeline.extend(
[RandomResizedCropRGBImageDecoder((crop_size, crop_size)),
ffcv.transforms.RandomHorizontalFlip()])
dtype = np.float16
else:
ratio = crop_size / resize_size if resize_size > 0 else 1.0
image_pipeline.extend([CenterCropRGBImageDecoder((crop_size, crop_size), ratio=ratio)])
dtype = np.float32
# Common transforms for train and test
if cpu_only:
image_pipeline.extend([
ffcv.transforms.NormalizeImage(np.array(IMAGENET_CHANNEL_MEAN), np.array(IMAGENET_CHANNEL_STD), dtype),
ffcv.transforms.ToTensor(),
ffcv.transforms.ToTorchImage(),
])
else:
image_pipeline.extend([
ffcv.transforms.ToTensor(),
ffcv.transforms.ToDevice(device, non_blocking=True),
ffcv.transforms.ToTorchImage(),
ffcv.transforms.NormalizeImage(np.array(IMAGENET_CHANNEL_MEAN), np.array(IMAGENET_CHANNEL_STD), dtype),
])
is_distributed = dist.get_world_size() > 1
ffcv_monkey_patches()
ordering = ffcv.loader.OrderOption.RANDOM if is_train else ffcv.loader.OrderOption.SEQUENTIAL
return ffcv.Loader(
datadir,
batch_size=batch_size,
num_workers=num_workers,
order=ordering,
distributed=is_distributed,
pipelines={
'image': image_pipeline,
'label': label_pipeline
},
batches_ahead=prefetch_factor,
drop_last=drop_last,
)
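# --- Usage sketch (illustrative; not part of the original module) ---
# Converts the ImageNet validation split to FFCV and builds a loader from the result.
# The paths are placeholders; a CUDA device is required because the loader moves data
# to ``cuda:<local_rank>``, and ffcv must be installed.
def _example_ffcv_imagenet(datadir: str = '/datasets/imagenet',
                           ffcv_path: str = '/tmp/imagenet_val.ffcv'):
    write_ffcv_imagenet(datadir=datadir, savedir=ffcv_path, split='val', num_workers=4)
    return build_ffcv_imagenet_dataloader(datadir=ffcv_path,
                                          global_batch_size=64,
                                          is_train=False,
                                          resize_size=256,
                                          crop_size=224)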
def build_streaming_imagenet1k_dataloader(
global_batch_size: int,
remote: str,
*,
local: str = '/tmp/mds-cache/mds-imagenet1k',
split: str = 'train',
drop_last: bool = True,
shuffle: bool = True,
resize_size: int = -1,
crop_size: int = 224,
predownload: Optional[int] = 100_000,
keep_zip: Optional[bool] = None,
download_retry: int = 2,
download_timeout: float = 60,
validate_hash: Optional[str] = None,
shuffle_seed: Optional[int] = None,
num_canonical_nodes: Optional[int] = None,
**dataloader_kwargs: Dict[str, Any],
) -> DataSpec:
"""Builds an imagenet1k streaming dataset
Args:
global_batch_size (int): Global batch size.
remote (str): Remote directory (S3 or local filesystem) where dataset is stored.
local (str, optional): Local filesystem directory where dataset is cached during operation.
Defaults to ``'/tmp/mds-cache/mds-imagenet1k/'``.
split (str): Which split of the dataset to use. Either ['train', 'val']. Default:
``'train'``.
drop_last (bool, optional): whether to drop last samples. Default: ``True``.
shuffle (bool, optional): whether to shuffle dataset. Defaults to ``True``.
resize_size (int, optional): The resize size to use. Use ``-1`` to not resize. Default: ``-1``.
crop_size (int): The crop size to use. Default: ``224``.
predownload (int, optional): Target number of samples ahead to download the shards of while
iterating. Defaults to ``100_000``.
keep_zip (bool, optional): Whether to keep or delete the compressed file when
decompressing downloaded shards. If set to None, keep iff remote is local. Defaults to
``None``.
download_retry (int): Number of download re-attempts before giving up. Defaults to ``2``.
download_timeout (float): Number of seconds to wait for a shard to download before raising
an exception. Defaults to ``60``.
validate_hash (str, optional): Optional hash or checksum algorithm to use to validate
shards. Defaults to ``None``.
shuffle_seed (int, optional): Seed for shuffling, or ``None`` for random seed. Defaults to
``None``.
num_canonical_nodes (int, optional): Canonical number of nodes for shuffling with resumption.
Defaults to ``None``, which is interpreted as the number of nodes of the initial run.
**dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
"""
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
batch_size = global_batch_size // dist.get_world_size()
try:
from streaming.vision import StreamingImageNet
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='streaming', conda_package='mosaicml-streaming') from e
transform = []
if split == 'train':
# include fixed-size resize before RandomResizedCrop in training only
# if requested (by specifying a size > 0)
if resize_size > 0:
transform.append(transforms.Resize(resize_size))
# always include RandomResizedCrop and RandomHorizontalFlip
transform += [
transforms.RandomResizedCrop(crop_size, scale=(0.08, 1.0), ratio=(0.75, 4.0 / 3.0)),
transforms.RandomHorizontalFlip()
]
else:
if resize_size > 0:
transform.append(transforms.Resize(resize_size))
transform.append(transforms.CenterCrop(crop_size))
transform.append(lambda image: image.convert('RGB'))
transform = transforms.Compose(transform)
dataset = StreamingImageNet(
local=local,
remote=remote,
split=split,
shuffle=shuffle,
transform=transform,
predownload=predownload,
keep_zip=keep_zip,
download_retry=download_retry,
download_timeout=download_timeout,
validate_hash=validate_hash,
shuffle_seed=shuffle_seed,
num_canonical_nodes=num_canonical_nodes,
batch_size=batch_size,
)
dataloader = DataLoader(
dataset=dataset,
batch_size=batch_size,
collate_fn=pil_image_collate,
drop_last=drop_last,
**dataloader_kwargs,
)
device_transform_fn = NormalizationFn(mean=IMAGENET_CHANNEL_MEAN, std=IMAGENET_CHANNEL_STD)
return DataSpec(dataloader=dataloader, device_transforms=device_transform_fn)
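# --- Usage sketch (illustrative; not part of the original module) ---
# Builds a streaming ImageNet-1k DataSpec from an MDS copy of the dataset. The remote
# path is a placeholder, not a real bucket; ``mosaicml-streaming`` must be installed.
def _example_streaming_imagenet(remote: str = 's3://my-bucket/mds/imagenet1k'):
    return build_streaming_imagenet1k_dataloader(global_batch_size=256,
                                                 remote=remote,
                                                 split='val',
                                                 shuffle=False,
                                                 num_workers=8)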
| composer-dev | composer/datasets/imagenet.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
# This code is based on the implementation in https://github.com/EleutherAI/lm-evaluation-harness/blob/8c048e266a22a1c85ccbdb0c209ac712e4f39989/lm_eval/base.py#L221-L330
from __future__ import annotations
import random
from typing import TYPE_CHECKING, Any, Union
import torch
import transformers
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from composer.core import DataSpec
from composer.utils import MissingConditionalImportError, dist, get_file
if TYPE_CHECKING:
import transformers
__all__ = ['InContextLearningLMTaskDataset', 'InContextLearningMultipleChoiceTaskDataset', 'get_icl_task_dataloader']
def _make_padded_input(context_enc, continuation_enc, max_seq_len, pad_tok_id, padding_side='right'):
if len(continuation_enc) + len(context_enc) > max_seq_len:
# truncate the context from the left, keeping only its most recent tokens
context_max_subseq_len = max_seq_len - len(continuation_enc)
if context_max_subseq_len < 0:
raise Exception(f'Dataset included continuation longer than the max seq len')
# can't support continuations which are longer than the max seq len
context_enc = context_enc[-(context_max_subseq_len):]
# continuation span is the _inclusive_ range of indices corresponding to the continuation
continuation_span = torch.tensor(range(len(context_enc), len(context_enc) + len(continuation_enc)))
inp = torch.tensor(
(context_enc + continuation_enc),
dtype=torch.long,
)
(inp_len,) = inp.shape
# pad length from seq to padding_length
if padding_side == 'right':
inp = torch.cat(
[
inp, # [seq]
torch.LongTensor((max_seq_len - inp_len) * [pad_tok_id]),
],
dim=0,
)
elif padding_side == 'left':
inp = torch.cat(
[
torch.LongTensor((max_seq_len - inp_len) * [pad_tok_id]),
inp, # [seq]
],
dim=0,
)
else:
raise ValueError(f"Unknown padding_side {padding_side}. padding_side must be either 'left' or 'right'")
return inp, continuation_span
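# Illustrative check (not part of the original module): with right padding the
# continuation occupies the indices immediately after the context and the remainder
# is filled with the pad token. The token ids below are arbitrary.
def _example_make_padded_input():
    inp, continuation_span = _make_padded_input(context_enc=[1, 2, 3],
                                                continuation_enc=[4, 5],
                                                max_seq_len=8,
                                                pad_tok_id=0)
    assert inp.tolist() == [1, 2, 3, 4, 5, 0, 0, 0]
    assert continuation_span.tolist() == [3, 4]
    return inp, continuation_span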
def _get_fewshot_sample_idxs(dataset_size, num_fewshot, sample_idx):
# samples without replacement. if num_fewshot exceeds the number of unique samples,
# then we will have fewer than num_fewshot examples in context
num_fewshot = min(dataset_size - 1, num_fewshot)
fewshot_idxs = set(random.sample(range(0, dataset_size), num_fewshot))
if sample_idx in fewshot_idxs:
fewshot_idxs.remove(sample_idx)
if len(fewshot_idxs) >= dataset_size - 1:
return fewshot_idxs
replacement_sample = random.choice(range(0, dataset_size))
while replacement_sample in fewshot_idxs or replacement_sample == sample_idx:
replacement_sample = random.choice(range(0, dataset_size))
fewshot_idxs.add(replacement_sample)
return fewshot_idxs
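# Illustrative check (not part of the original module): the returned fewshot indices
# never include the test sample's own index and contain at most ``num_fewshot`` entries.
def _example_fewshot_sample_idxs():
    idxs = _get_fewshot_sample_idxs(dataset_size=10, num_fewshot=3, sample_idx=7)
    assert 7 not in idxs and len(idxs) <= 3
    return idxs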
class InContextLearningQATaskDataset(Dataset):
"""A dataset that construct batches for in-context learning question answering evaluation
The input format is expected to be a jsonl file with the following fields:
- context: the question
- answer: the preferred answer to the question
- aliases: a list of aliases for the answer
Args:
dataset_uri (str): Either a local path, or a remote path beginning with ``s3://``, or another backend
supported by :meth:`composer.utils.maybe_create_object_store_from_uri`. Dataset must consist of rows of JSON data points with "context",
"answer", and "aliases". See tests/datasets/local_data/triviaqa_small.jsonl.
tokenizer (Union[transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast]): The tokenizer used to map between strings and token ids
batch_size (int): Size of a batch used for eval
max_seq_len (int): The maximum sequence length supported by the model
pad_tok_id (int): The special token reserved for padding batches
num_fewshot (int): The number of complete fewshot examples to prepend before each test example
prompt_string (str): Prompt string to put once before all fewshot examples/test examples (e.g. 'translate english to french')
example_delimiter (str): Separator that goes between individual (context, answer) pairs (e.g. '\n')
continuation_delimiter (str): Separator that goes between context and answer in each example (e.g. '\nA: ')
destination_path (str): Temporary path to store downloaded datasets
question_prelimiter (str): String to put before each question (e.g. 'Q: ')
padding_side (str): Whether to pad on the left or right side of the sequence
"""
def __init__(self, dataset_uri: str, tokenizer: Union[transformers.PreTrainedTokenizer,
transformers.PreTrainedTokenizerFast], max_seq_len: int,
pad_tok_id: int, num_fewshot: int, prompt_string: str, example_delimiter: str,
continuation_delimiter: str, destination_path: str, question_prelimiter: str, padding_side: str):
try:
from datasets import load_dataset # pyright: ignore [reportGeneralTypeIssues]
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp',
conda_package='datasets',
conda_channel='conda-forge') from e
with dist.local_rank_zero_download_and_wait(destination_path):
if dist.get_local_rank() == 0:
get_file(dataset_uri, destination_path, overwrite=True)
dataset = load_dataset('json', data_files=destination_path, split='train', streaming=False)
self.samples = list(
dataset.map(lambda examples: {
'context': examples['context'],
'answer': examples['answer'],
'aliases': examples['aliases']
}))
self.tokenizer = tokenizer
self.max_seq_len = max_seq_len
self.pad_tok_id = pad_tok_id
self.padding_side = padding_side
self.max_answer_length = 0
self.encoded_dataset = self.prep_examples(num_fewshot, prompt_string, example_delimiter, continuation_delimiter,
question_prelimiter)
def prep_examples(self, num_fewshot: int, prompt_string: str, example_delimiter: str, continuation_delimiter: str,
question_prelimiter: str):
"""Prepares a set of language modeling tasks into tokenized format with prompt and fewshot examples.
Each task consists of a context and a continuation as well as an optional prompt and optional list of
example context/continuation pairs which precede the test context/continuation pair.
Args:
num_fewshot (int): Number of examples context/continuation pairs to prepend to the test pair
prompt_string (str): The prompt to prepend to all inputs
example_delimiter (str): The delimiter used to separate each individual context/continuation pair
continuation_delimiter (str): The delimiter used to separate each context from its continuation
question_prelimiter (str): The text to prepend to each question
Returns:
dict: Contains the context, the continuation, and the preamble (prompt + fewshot examples)
"""
max_answer_length = 0
examples = []
for sample_idx in tqdm(range(len(self.samples))):
encoded_example = {}
preamble = prompt_string
if num_fewshot > 0:
fewshot_idxs = _get_fewshot_sample_idxs(len(self.samples), num_fewshot, sample_idx)
for fewshot_idx in fewshot_idxs:
ctxt, cont = self.samples[fewshot_idx]['context'], self.samples[fewshot_idx]['answer']
ctxt = f'{question_prelimiter}{ctxt}'
if len(preamble) > 0:
ctxt = f'{example_delimiter}{ctxt}'
preamble += f'{ctxt}{continuation_delimiter}{cont}'
ctxt = self.samples[sample_idx]['context']
ctxt = f'{question_prelimiter}{ctxt}'
if len(preamble) > 0:
ctxt = f'{example_delimiter}{ctxt}'
# rstrip the continuation delimiter, because the prompt ending in a space results in degenerate output
continuation_delimiter_stripped = continuation_delimiter.rstrip()
ctxt = f'{ctxt}{continuation_delimiter_stripped}'
# If the preamble is empty then this will be a 0-length list, unless the tokenizer adds special tokens to empty strings (e.g. OPT tokenizer)
encoded_example['preamble'] = self.tokenizer(preamble)
# If there is an EOS token added, we need to remove it so it is not in the middle of the prompt
if self.tokenizer.eos_token_id is not None and len(
encoded_example['preamble']
['input_ids']) > 0 and encoded_example['preamble']['input_ids'][-1] == self.tokenizer.eos_token_id:
encoded_example['preamble'] = encoded_example['preamble']['input_ids'][:-1]
encoded_example['context'] = self.tokenizer(ctxt, add_special_tokens=False)
encoded_example['aliases'] = self.samples[sample_idx]['aliases']
examples.append(encoded_example)
max_answer_length = max(
max_answer_length,
max(map(lambda x: len(self.tokenizer(x)['input_ids']), self.samples[sample_idx]['aliases'])))
self.max_answer_length = max_answer_length
return examples
def __getitem__(self, index):
return self.encoded_dataset[index]
def __len__(self):
return len(self.encoded_dataset)
def collate_fn(self, data):
inputs, answers = [], []
for sample in data:
preamble, context, aliases = (sample['preamble'], sample['context'], sample['aliases'])
context_enc = preamble['input_ids'] + context['input_ids']
inp, _ = _make_padded_input(context_enc, [],
self.max_seq_len - self.max_answer_length,
self.pad_tok_id,
padding_side=self.padding_side)
inputs.append(inp)
answers.append(aliases)
batch = {
'input_ids': torch.stack(inputs),
'mode': 'generate',
'labels': answers,
'generation_length': self.max_answer_length,
'generation_kwargs': {
'pad_token_id': self.pad_tok_id
}
}
batch['attention_mask'] = ~(batch['input_ids'] == self.pad_tok_id)
return batch
def get_num_samples_in_batch(self, batch) -> int:
return batch['input_ids'].shape[0]
class InContextLearningLMTaskDataset(Dataset):
"""A dataset that construct batches for in-context learning language modeling evaluation
Args:
dataset_uri (str): Either a local path, or a remote path beginning with ``s3://``, or another backend
supported by :meth:`composer.utils.maybe_create_object_store_from_uri`. Dataset must consist of rows of JSON data points with "context",
and "continuation". See tests/datasets/local_data/lambada_small.jsonl.
tokenizer (Union[transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast]): The tokenizer used to transform data into batches
batch_size (int): Size of a batch used for eval
max_seq_len (int): The sequence length expected by the model
pad_tok_id (int): The special token reserved for padding the ends of batches
num_fewshot (int): The number of complete fewshot examples to prepend before each test example
prompt_string (str): Prompt string to put once before all fewshot examples/test examples (e.g. 'translate english to french')
example_delimiter (str): Separator that goes between individual (context, continuation) pairs (e.g. '\n')
continuation_delimiter (str): Separator that goes between context and continuation in each example (e.g. '->')
destination_path (str): Temporary path to store downloaded datasets
"""
def __init__(
self,
dataset_uri: str,
tokenizer: Union[transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast],
max_seq_len: int,
pad_tok_id: int,
num_fewshot: int,
prompt_string: str,
example_delimiter: str,
continuation_delimiter: str,
destination_path: str,
):
try:
from datasets import load_dataset # pyright: ignore [reportGeneralTypeIssues]
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp',
conda_package='datasets',
conda_channel='conda-forge') from e
with dist.local_rank_zero_download_and_wait(destination_path):
if dist.get_local_rank() == 0:
get_file(dataset_uri, destination_path, overwrite=True)
dataset = load_dataset('json', data_files=destination_path, split='train', streaming=False)
self.samples = list(
dataset.map(lambda examples: {
'continuation': examples['continuation'],
'context': examples['context'],
}))
self.tokenizer = tokenizer
self.max_seq_len = max_seq_len
self.pad_tok_id = pad_tok_id
self.encoded_dataset = self.prep_examples(num_fewshot, prompt_string, example_delimiter, continuation_delimiter)
def prep_examples(self, num_fewshot: int, prompt_string: str, example_delimiter: str, continuation_delimiter: str):
"""Prepares a set of language modeling tasks into tokenized format with prompt and fewshot examples.
Each task consists of a context and a continuation as well as an optional prompt and optional list of
example context/continuation pairs which precede the test context/continuation pair.
Args:
num_fewshot (int): Number of examples context/continuation pairs to prepend to the test pair
prompt_string (str): The prompt to prepend to all inputs
example_delimiter (str): The delimiter used to separate each individual context/continuation pair
continuation_delimiter (str): The delimiter used to separate each context from its continuation
Returns:
dict: Contains the context, the continuation, and the preamble (prompt + fewshot examples)
"""
examples = []
for sample_idx in tqdm(range(len(self.samples))):
encoded_example = {}
preamble = prompt_string
if num_fewshot > 0:
fewshot_idxs = _get_fewshot_sample_idxs(len(self.samples), num_fewshot, sample_idx)
for fewshot_idx in fewshot_idxs:
ctxt, cont = self.samples[fewshot_idx]['context'], self.samples[fewshot_idx]['continuation']
if len(preamble) > 0:
ctxt = f'{example_delimiter}{ctxt}'
preamble += f'{ctxt}{continuation_delimiter}{cont}'
ctxt, cont = self.samples[sample_idx]['context'], self.samples[sample_idx]['continuation']
if len(preamble) > 0:
ctxt = f'{example_delimiter}{ctxt}'
cont = f'{continuation_delimiter}{cont}'
encoded_example['preamble'] = self.tokenizer(
preamble
) # if the preamble is empty then these will be 0-length lists, unless the tokenizer adds special tokens to empty strings (e.g. OPT tokenizer)
encoded_example['context'] = self.tokenizer(ctxt, add_special_tokens=False)
encoded_example['continuation'] = self.tokenizer(cont, add_special_tokens=False)
examples.append(encoded_example)
return examples
def __getitem__(self, index):
return self.encoded_dataset[index]
def __len__(self):
return len(self.encoded_dataset)
def collate_fn(self, data):
inputs = []
continuation_indices = []
for data_pair in data:
preamble, context, continuation = (data_pair['preamble'], data_pair['context'], data_pair['continuation'])
context_enc = preamble['input_ids'] + context['input_ids']
continuation_enc = continuation['input_ids']
inp, continuation_span = _make_padded_input(context_enc, continuation_enc, self.max_seq_len,
self.pad_tok_id)
inputs.append(inp)
continuation_indices.append(continuation_span)
batch = {
'input_ids': torch.stack(inputs),
'continuation_indices': continuation_indices,
'mode': 'icl_task',
'labels': torch.stack(inputs),
}
batch['attention_mask'] = ~(batch['input_ids'] == self.pad_tok_id)
return batch
def get_num_samples_in_batch(self, batch) -> int:
return batch['input_ids'].shape[0]
class InContextLearningMultipleChoiceTaskDataset(Dataset):
"""A dataset that construct batches for in-context learning multiple choice evaluation
If each question has N answer choices, we construct N distinct inputs per question. In order to ensure
consistency across multi-GPU, we set the batch size to be `min(N, batch_size)` so that all N
inputs per question can stored in the same batch.
Each batch then consists of batch_size // N distinct questions and has the following the structure
'input_ids': Input tensor batch x seqlen x # tokens
'continuation_indices': List of |batch| consisting of tensors indicating which indices in the sequence correspond to the question answer (aka continuation)
'mode': Indicates to the model that this is an ICL task and may rely on a custom code path to properly update metrics
'labels': Identical to the input, used by the model to calculate loss/metrics
'gold_indices': List of length |batch_size // N| indicating for each question, which of the answers is correct (via an integer [0, N-1])
'choice_groupings': Indicates which indices of the batch correspond to which questions
Args:
dataset_uri (str): Either a local path, or a remote path beginning with ``s3://``, or another backend
supported by :meth:`composer.utils.maybe_create_object_store_from_uri`. Dataset must consist of rows of JSON data points with "query",
"choices", and "gold" index. See tests/datasets/local_data/piqa_small.jsonl.
tokenizer (Union[transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast]): The tokenizer used to transform data into batches
batch_size (int): Size of a batch used for eval
max_seq_len (int): The sequence length expected by the model
pad_tok_id (int): The special token reserved for padding the ends of batches
num_fewshot (int): The number of complete fewshot examples to prepend before each test example
prompt_string (str): Prompt string to put once before all fewshot examples/test examples (e.g. 'translate english to french')
example_delimiter (str): Separator that goes between individual (context, continuation) pairs (e.g. '\n')
continuation_delimiter (str): Separator that goes between context and continuation in each example (e.g. '->')
destination_path (str): Temporary path to store downloaded datasets
"""
def __init__(
self,
dataset_uri: str,
tokenizer: Union[transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast],
max_seq_len: int,
pad_tok_id: int,
num_fewshot: int,
prompt_string: str,
example_delimiter: str,
continuation_delimiter: str,
destination_path: str,
):
try:
from datasets import load_dataset # pyright: ignore [reportGeneralTypeIssues]
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp',
conda_package='datasets',
conda_channel='conda-forge') from e
with dist.local_rank_zero_download_and_wait(destination_path):
if dist.get_local_rank() == 0:
get_file(dataset_uri, destination_path, overwrite=True)
dataset = load_dataset('json', data_files=destination_path, split='train', streaming=False)
self.samples = list(
dataset.map(lambda examples: {
'query': examples['query'],
'choices': examples['choices'],
'gold': examples['gold']
}))
self.num_choices = len(self.samples[0]['choices'])
self.tokenizer = tokenizer
self.max_seq_len = max_seq_len
self.pad_tok_id = pad_tok_id
self.encoded_dataset = self.prep_examples(num_fewshot, prompt_string, example_delimiter, continuation_delimiter)
def prep_examples(self, num_fewshot: int, prompt_string: str, example_delimiter: str, continuation_delimiter: str):
"""Prepares a set of multiple choice questions into tokenized format with prompt and few shot examples.
Each question consists of a query and set of answer choices, only one of which is correct. At inference time
we construct individual inference examples consisting of the query + a single choice, as well as an optional (prompt) and optional list
of example query + correct answers, which precede the test query + choice.
For multiple choice, this method provides information relaying which of the answer choices is the correct one. This
information is used for computing accuracy metrics.
Args:
num_fewshot (int): Number of examples context/continuation pairs to prepend to the test pair
prompt_string (str): The prompt to prepend to all inputs
example_delimiter (str): The delimiter used to separate each example query/answer pair
continuation_delimiter (str): The delimiter used to separate each query from its answer
Returns:
dict: Contains the query, the list of encoded potential answer choices, the preamble (prompt + fewshot examples), and
the index of the correct answer choice.
"""
examples = []
for sample_idx in tqdm(range(len(self.samples))):
preamble = prompt_string
if num_fewshot > 0:
fewshot_idxs = _get_fewshot_sample_idxs(len(self.samples), num_fewshot, sample_idx)
for fewshot_idx in fewshot_idxs:
query, choices, gold_idx = self.samples[fewshot_idx]['query'], self.samples[fewshot_idx][
'choices'], self.samples[fewshot_idx]['gold']
if len(preamble) > 0:
query = f'{example_delimiter}{query}'
preamble += f'{query}{continuation_delimiter}{choices[gold_idx]}'
encoded_example = {}
query, choices, gold_idx = self.samples[sample_idx]['query'], self.samples[sample_idx][
'choices'], self.samples[sample_idx]['gold'],
if len(preamble) > 0:
query = f'{example_delimiter}{query}'
choices = [f'{continuation_delimiter}{choice}' for choice in choices]
encoded_example['preamble'] = self.tokenizer(
preamble
) # if the preamble is empty then these will be 0-length lists, unless the tokenizer adds special tokens to empty strings (e.g. OPT tokenizer)
encoded_example['gold_idx'] = gold_idx
encoded_example['query'] = self.tokenizer(query, add_special_tokens=False)
encoded_example['choices'] = [self.tokenizer(choice, add_special_tokens=False) for choice in choices]
examples.append(encoded_example)
return examples
def __getitem__(self, index):
return self.encoded_dataset[index]
def __len__(self):
return len(self.encoded_dataset)
def collate_fn(self, data):
inputs = []
continuation_indices = []
gold_idxs = []
choice_groupings = []
for data_pair in data:
choice_start_idx = len(continuation_indices)
preamble, context, choices, gold_idx = (data_pair['preamble'], data_pair['query'], data_pair['choices'],
data_pair['gold_idx'])
for choice in choices:
context_enc = preamble['input_ids'] + context['input_ids']
continuation_enc = choice['input_ids']
inp, continuation_span = _make_padded_input(context_enc, continuation_enc, self.max_seq_len,
self.pad_tok_id)
inputs.append(inp)
continuation_indices.append(continuation_span)
gold_idxs.append(gold_idx)
choice_end_idx = len(continuation_indices)
choice_groupings.append((choice_start_idx, choice_end_idx))
# We run each distinct query + answer choice through the model separately and determine which
# answer has the lowest per-token-perplexity.
#
# If each question has N possible choices, all N must be grouped together as distinct elements of the batch
# since the batch may consist of multiple questions, the choice_groupings indicates
# which contiguous sequences of elements in the batch correspond to which question
# gold_indices indicates which of the [0, N-1] choices is the correct one for each question.
batch = {
'input_ids': torch.stack(inputs),
'continuation_indices': continuation_indices,
'mode': 'icl_task',
'labels': torch.stack(inputs),
'gold_indices': gold_idxs,
'choice_groupings': choice_groupings
}
batch['attention_mask'] = ~(batch['input_ids'] == self.pad_tok_id)
return batch
def get_num_samples_in_batch(self, batch) -> int:
return batch['input_ids'].shape[0]
def split_batch(self, batch: Any, microbatch_size: int):
if self.get_num_samples_in_batch(batch) // self.num_choices > microbatch_size:
raise Exception('Multiple choice tasks do not currently support batch splitting. Please set '
'dataloader batch size to a value less than or equal to the microbatch size. '
'Accordingly, auto microbatching does not work, so the microbatch size '
'should be manually set if using a batch size which does not fit in memory.')
return [batch]
def get_icl_task_dataloader(
icl_task_type: str,
dataset_uri: str,
tokenizer: Union[transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast],
batch_size: int,
max_seq_len: int,
pad_tok_id: int,
num_fewshot: int,
prompt_string: str, # e.g. 'translate english to french:'
example_delimiter: str, # e.g. '\n'
continuation_delimiter: str, # e.g. ''
destination_path: str,
question_prelimiter: str = '', # e.g. 'Question: '
padding_side: str = 'left',
) -> DataSpec:
"""This constructs a dataloader capable of evaluating LLMs on in-context learning language modeling tasks, for example LAMBADA. An example usage is below:
>>> dl = get_icl_task_dataloader(
... 'language_modeling',
... dataset_uri,
... tokenizer,
... batch_size=2,
... max_seq_len=2048,
... pad_tok_id=tokenizer.pad_token_id,
... num_fewshot=10,
... prompt_string='translate english to french',
... example_delimiter='\n',
... continuation_delimiter=''
)
>>> eval_evaluator = Evaluator(
... label="lambada",
... dataloader=dl,
... metric_names=['InContextLearningLMAccuracy']
... )
>>> trainer = Trainer(
... model=model,
... train_dataloader=train_dataloader,
... eval_dataloader=eval_evaluator,
... optimizers=optimizer,
... max_duration="1ep",
... )
Args:
dataset_uri (str): Either a local path, or a remote path beginning with ``s3://``, or another backend
supported by :meth:`composer.utils.maybe_create_object_store_from_uri`.
tokenizer (Union[transformers.PreTrainedTokenizer, transformers.PreTrainedTokenizerFast]): The tokenizer used to transform data into batches
batch_size (int): Size of a batch used for eval
max_seq_len (int): The sequence length expected by the model
pad_tok_id (int): The special token reserved for padding the ends of batches
num_fewshot (int): The number of complete fewshot examples to prepend before each test example
prompt_string (str): Prompt string to put once before all fewshot examples/test examples (e.g. 'translate english to french')
example_delimiter (str): Separator that goes between individual examples (e.g. '\n')
continuation_delimiter (str): Separator that goes between context and continuation in each example (e.g. '->')
Returns:
DataSpec: A DataSpec wrapping the dataloader used for performing in-context learning evaluation on the dataset provided.
"""
if icl_task_type == 'multiple_choice':
dataset = InContextLearningMultipleChoiceTaskDataset(dataset_uri,
tokenizer,
max_seq_len,
pad_tok_id,
num_fewshot,
prompt_string,
example_delimiter,
continuation_delimiter,
destination_path=destination_path)
batch_size = max(dataset.num_choices, batch_size)
effective_batchsize = batch_size // dataset.num_choices
elif icl_task_type == 'language_modeling':
dataset = InContextLearningLMTaskDataset(dataset_uri,
tokenizer,
max_seq_len,
pad_tok_id,
num_fewshot,
prompt_string,
example_delimiter,
continuation_delimiter,
destination_path=destination_path)
effective_batchsize = batch_size
elif icl_task_type == 'question_answering':
dataset = InContextLearningQATaskDataset(dataset_uri,
tokenizer,
max_seq_len,
pad_tok_id,
num_fewshot,
prompt_string,
example_delimiter,
continuation_delimiter,
destination_path=destination_path,
question_prelimiter=question_prelimiter,
padding_side=padding_side)
effective_batchsize = batch_size
else:
raise Exception(f'Unrecognized ICL task type: {icl_task_type}')
sampler = dist.get_sampler(dataset, drop_last=False, shuffle=False)
return DataSpec(
DataLoader(
dataset,
batch_size=effective_batchsize,
sampler=sampler,
collate_fn=dataset.collate_fn,
),
device_transforms=None,
get_num_samples_in_batch=dataset.get_num_samples_in_batch,
split_batch=dataset.split_batch if isinstance(dataset, InContextLearningMultipleChoiceTaskDataset) else None,
)
| composer-dev | composer/datasets/in_context_learning_evaluation.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Natively supported datasets."""
from composer.datasets.ade20k import (ADE20k, build_ade20k_dataloader, build_streaming_ade20k_dataloader,
build_synthetic_ade20k_dataloader)
from composer.datasets.brats import PytTrain, PytVal
from composer.datasets.c4 import build_streaming_c4_dataloader
from composer.datasets.cifar import (build_cifar10_dataloader, build_ffcv_cifar10_dataloader,
build_streaming_cifar10_dataloader, build_synthetic_cifar10_dataloader)
from composer.datasets.imagenet import (build_ffcv_imagenet_dataloader, build_imagenet_dataloader,
build_streaming_imagenet1k_dataloader, build_synthetic_imagenet_dataloader)
from composer.datasets.lm_dataset import build_lm_dataloader
from composer.datasets.mnist import build_mnist_dataloader, build_synthetic_mnist_dataloader
from composer.datasets.synthetic import (SyntheticBatchPairDataset, SyntheticDataLabelType, SyntheticDataType,
SyntheticPILDataset)
__all__ = [
'ADE20k',
'PytTrain',
'PytVal',
'SyntheticBatchPairDataset',
'SyntheticDataLabelType',
'SyntheticDataType',
'SyntheticPILDataset',
'build_ade20k_dataloader',
'build_streaming_ade20k_dataloader',
'build_streaming_c4_dataloader',
'build_cifar10_dataloader',
'build_streaming_cifar10_dataloader',
'build_ffcv_cifar10_dataloader',
'build_synthetic_ade20k_dataloader',
'build_synthetic_cifar10_dataloader',
'build_ffcv_imagenet_dataloader',
'build_imagenet_dataloader',
'build_streaming_imagenet1k_dataloader',
'build_synthetic_imagenet_dataloader',
'build_mnist_dataloader',
'build_synthetic_mnist_dataloader',
'build_lm_dataloader',
]
| composer-dev | composer/datasets/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""BraTS (Brain Tumor Segmentation) dataset.
Please refer to the `Brain Tumor Segmentation (BraTS) challenge <https://www.med.upenn.edu/cbica/brats2021/>`_ for more
details about this dataset.
"""
import glob
import os
import random
import numpy as np
import torch
import torch.utils.data
import torchvision
from composer.utils import MissingConditionalImportError, dist
PATCH_SIZE = [1, 192, 160]
__all__ = ['PytTrain', 'PytVal']
def build_brats_dataloader(datadir: str,
global_batch_size: int,
oversampling: float = 0.33,
is_train: bool = True,
drop_last: bool = True,
shuffle: bool = True,
**dataloader_kwargs):
"""Builds a BRaTS dataloader
Args:
global_batch_size (int): Global batch size.
**dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
"""
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
batch_size = global_batch_size // dist.get_world_size()
x_train, y_train, x_val, y_val = get_data_split(datadir)
dataset = PytTrain(x_train, y_train, oversampling) if is_train else PytVal(x_val, y_val)
collate_fn = None if is_train else _my_collate
sampler = dist.get_sampler(dataset, drop_last=drop_last, shuffle=shuffle)
return torch.utils.data.DataLoader(dataset=dataset,
batch_size=batch_size,
sampler=sampler,
drop_last=drop_last,
collate_fn=collate_fn,
**dataloader_kwargs)
def _my_collate(batch):
"""Custom collate function to handle images with different depths."""
data = [item[0] for item in batch]
target = [item[1] for item in batch]
return [torch.Tensor(data), torch.Tensor(target)]
def _coin_flip(prob):
return random.random() < prob
def _random_augmentation(probability, augmented, original):
condition = _coin_flip(probability)
neg_condition = condition ^ True
return condition * augmented + neg_condition * original
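# Illustrative check (not part of the original module): with probability 1.0 the
# augmented array is always returned; with probability 0.0 the original is kept.
def _example_random_augmentation():
    original = np.zeros((2, 2))
    augmented = np.ones((2, 2))
    assert np.allclose(_random_augmentation(1.0, augmented, original), augmented)
    assert np.allclose(_random_augmentation(0.0, augmented, original), original)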
class Crop(object):
def __call__(self, data, oversampling):
img, lbl = data['image'], data['label']
def randrange(max_range):
return 0 if max_range == 0 else random.randrange(max_range)
def get_cords(cord, idx):
return cord[idx], cord[idx] + PATCH_SIZE[idx]
def _rand_crop(image, label):
ranges = [s - p for s, p in zip(image.shape[1:], PATCH_SIZE)]
cord = [randrange(x) for x in ranges]
low_x, high_x = get_cords(cord, 0)
low_y, high_y = get_cords(cord, 1)
image = image[:, low_x:high_x, low_y:high_y]
label = label[:, low_x:high_x, low_y:high_y]
return image, label, [low_x, high_x, low_y, high_y]
def rand_foreg_cropd(image, label):
import scipy.ndimage
cl = np.random.choice(np.unique(label[label > 0]))
foreg_slices = scipy.ndimage.find_objects(scipy.ndimage.measurements.label(label == cl)[0])
foreg_slices = [x for x in foreg_slices if x is not None]
slice_volumes = [np.prod([s.stop - s.start for s in sl]) for sl in foreg_slices]
slice_idx = np.argsort(slice_volumes)[-2:]
foreg_slices = [foreg_slices[i] for i in slice_idx]
if not foreg_slices:
return _rand_crop(image, label)
foreg_slice = foreg_slices[random.randrange(len(foreg_slices))]
low_x, high_x = adjust(foreg_slice, PATCH_SIZE, label, 1)
low_y, high_y = adjust(foreg_slice, PATCH_SIZE, label, 2)
image = image[:, low_x:high_x, low_y:high_y]
label = label[:, low_x:high_x, low_y:high_y]
return image, label, [low_x, high_x, low_y, high_y]
def adjust(foreg_slice, patch_size, label, idx):
diff = patch_size[idx - 1] - (foreg_slice[idx].stop - foreg_slice[idx].start)
sign = -1 if diff < 0 else 1
diff = abs(diff)
ladj = randrange(diff)
hadj = diff - ladj
low = max(0, foreg_slice[idx].start - sign * ladj)
high = min(label.shape[idx], foreg_slice[idx].stop + sign * hadj)
diff = patch_size[idx - 1] - (high - low)
if diff > 0 and low == 0:
high += diff
elif diff > 0:
low -= diff
return low, high
if random.random() < oversampling:
img, lbl, _ = rand_foreg_cropd(img, lbl)
else:
img, lbl, _ = _rand_crop(img, lbl)
return {'image': img, 'label': lbl}
class Noise(object):
def __call__(self, data, oversampling):
img, lbl = data['image'], data['label']
std = np.random.uniform(0.0, oversampling)
noise = np.random.normal(0, scale=std, size=img.shape)
img_noised = img + noise
img = _random_augmentation(0.15, img_noised, img)
return {'image': img, 'label': lbl}
class Blur(object):
def __call__(self, data):
img, lbl = data['image'], data['label']
transf = torchvision.transforms.GaussianBlur(kernel_size=3, sigma=(0.5, 1.5))
img_blured = transf(torch.Tensor(img)).numpy()
img = _random_augmentation(0.15, img_blured, img)
return {'image': img, 'label': lbl}
class Brightness(object):
def __call__(self, data):
img, lbl = data['image'], data['label']
brightness_scale = _random_augmentation(0.15, np.random.uniform(0.7, 1.3), 1.0)
img = img * brightness_scale
return {'image': img, 'label': lbl}
class Contrast(object):
def __call__(self, data):
img, lbl = data['image'], data['label']
min_, max_ = np.min(img), np.max(img)
scale = _random_augmentation(0.15, np.random.uniform(0.65, 1.5), 1.0)
img = torch.clamp(torch.Tensor(img * scale), min_, max_).numpy()
return {'image': img, 'label': lbl}
class Flips(object):
def __call__(self, data):
img, lbl = data['image'], data['label']
axes = [1, 2]
prob = 1 / len(axes)
for axis in axes:
if random.random() < prob:
img = np.flip(img, axis=axis).copy()
lbl = np.flip(lbl, axis=axis).copy()
return {'image': img, 'label': lbl}
class Transpose(object):
def __call__(self, data):
img, lbl = data['image'], data['label']
img, lbl = img.transpose((1, 0, 2, 3)), lbl.transpose((1, 0, 2, 3))
return {'image': img, 'label': lbl}
class PytTrain(torch.utils.data.Dataset):
def __init__(self, images, labels, oversampling, transform=None):
self.images, self.labels = images, labels
self.oversampling = oversampling
self.transform = transform
self.rand_crop = Crop()
self.transpose = Transpose()
self.contrast = Contrast()
self.noise = Noise()
self.blur = Blur()
self.flips = Flips()
self.bright = Brightness()
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
data = {'image': np.load(self.images[idx]), 'label': np.load(self.labels[idx])}
data = self.rand_crop(data, self.oversampling)
data = self.flips(data)
data = self.noise(data, self.oversampling)
data = self.blur(data)
data = self.bright(data)
data = self.contrast(data)
data = self.transpose(data)
return data['image'], data['label']
class PytVal(torch.utils.data.Dataset):
def __init__(self, images, labels):
self.images, self.labels = images, labels
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
data = {'image': np.load(self.images[idx]), 'label': np.load(self.labels[idx])}
return data['image'], data['label']
def load_data(path, files_pattern):
data = sorted(glob.glob(os.path.join(path, files_pattern)))
assert len(data) > 0, f'Found no data at {path}'
return data
def get_split(data, idx):
return list(np.array(data)[idx])
def get_data_split(path: str):
try:
from sklearn.model_selection import KFold
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='unet',
conda_channel='conda-forge',
conda_package='scikit-learn') from e
kfold = KFold(n_splits=5, shuffle=True, random_state=0)
imgs = load_data(path, '*_x.npy')
lbls = load_data(path, '*_y.npy')
assert len(imgs) == len(lbls), f'Found {len(imgs)} volumes but {len(lbls)} corresponding masks'
train_imgs, train_lbls, val_imgs, val_lbls = [], [], [], []
train_idx, val_idx = list(kfold.split(imgs))[0]
train_imgs = get_split(imgs, train_idx)
train_lbls = get_split(lbls, train_idx)
val_imgs = get_split(imgs, val_idx)
val_lbls = get_split(lbls, val_idx)
return train_imgs, train_lbls, val_imgs, val_lbls
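# --- Usage sketch (illustrative; not part of the original module) ---
# Splits a directory of ``*_x.npy``/``*_y.npy`` volumes and indexes the validation
# dataset. The directory is a placeholder; scikit-learn is required for the k-fold split.
def _example_brats_val(datadir: str = '/datasets/brats'):
    _, _, val_imgs, val_lbls = get_data_split(datadir)
    val_dataset = PytVal(val_imgs, val_lbls)
    image, label = val_dataset[0]
    return image.shape, label.shape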
| composer-dev | composer/datasets/brats.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""ADE20K Semantic segmentation and scene parsing dataset.
Please refer to the `ADE20K dataset <https://groups.csail.mit.edu/vision/datasets/ADE20K/>`_ for more details about this
dataset.
"""
import os
from math import ceil
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
import torchvision.transforms.functional as TF
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from composer.core import DataSpec, MemoryFormat
from composer.datasets.synthetic import SyntheticBatchPairDataset
from composer.datasets.utils import NormalizationFn, pil_image_collate
from composer.utils import MissingConditionalImportError, dist
__all__ = [
'ADE20k', 'build_ade20k_dataloader', 'build_streaming_ade20k_dataloader', 'build_synthetic_ade20k_dataloader'
]
IMAGENET_CHANNEL_MEAN = (0.485 * 255, 0.456 * 255, 0.406 * 255)
IMAGENET_CHANNEL_STD = (0.229 * 255, 0.224 * 255, 0.225 * 255)
def build_ade20k_transformations(split,
base_size: int = 512,
min_resize_scale: float = 0.5,
max_resize_scale: float = 2.0,
final_size: int = 512):
"""Builds the transformations for the ADE20k dataset.
Args:
split (str): The dataset split to use, either 'train' or 'val'.
base_size (int): Initial size of the image and target before other augmentations. Default: ``512``.
min_resize_scale (float): The minimum value the samples can be rescaled. Default: ``0.5``.
max_resize_scale (float): The maximum value the samples can be rescaled. Default: ``2.0``.
final_size (int): The final size of the image and target. Default: ``512``.
Returns:
both_transforms (torch.nn.Module): Transformations to apply to a 2-tuple containing the input image and the
target semantic segmentation mask.
image_transforms (torch.nn.Module): Transformations to apply to the input image only.
target_transforms (torch.nn.Module): Transformations to apply to the target semantic segmentation mask only.
"""
if split == 'train':
both_transforms = torch.nn.Sequential(
RandomResizePair(
min_scale=min_resize_scale,
max_scale=max_resize_scale,
base_size=(base_size, base_size),
),
RandomCropPair(
crop_size=(final_size, final_size),
class_max_percent=0.75,
num_retry=10,
),
RandomHFlipPair(),
)
# Photometric distortion values come from mmsegmentation:
# https://github.com/open-mmlab/mmsegmentation/blob/aa50358c71fe9c4cccdd2abe42433bdf702e757b/mmseg/datasets/pipelines/transforms.py#L861
r_mean, g_mean, b_mean = IMAGENET_CHANNEL_MEAN
image_transforms = torch.nn.Sequential(
PhotometricDistoration(brightness=32. / 255, contrast=0.5, saturation=0.5, hue=18. / 255),
PadToSize(size=(final_size, final_size), fill=(int(r_mean), int(g_mean), int(b_mean))))
target_transforms = PadToSize(size=(final_size, final_size), fill=0)
else:
both_transforms = None
image_transforms = transforms.Resize(size=(final_size, final_size), interpolation=TF.InterpolationMode.BILINEAR)
target_transforms = transforms.Resize(size=(final_size, final_size), interpolation=TF.InterpolationMode.NEAREST)
return both_transforms, image_transforms, target_transforms
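# --- Usage sketch (illustrative; not part of the original module) ---
# For the validation split there are no joint transforms; the image and target each
# get their own resize. The size below mirrors the documented default.
def _example_ade20k_val_transforms():
    both, image_tf, target_tf = build_ade20k_transformations(split='val', final_size=512)
    assert both is None
    return image_tf, target_tf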
def build_ade20k_dataloader(
global_batch_size: int,
datadir: str,
*,
split: str = 'train',
drop_last: bool = True,
shuffle: bool = True,
base_size: int = 512,
min_resize_scale: float = 0.5,
max_resize_scale: float = 2.0,
final_size: int = 512,
ignore_background: bool = True,
**dataloader_kwargs,
):
"""Builds an ADE20k dataloader.
Args:
global_batch_size (int): Global batch size.
datadir (str): Path to location of dataset.
split (str): The dataset split to use, either 'train', 'val', or 'test'. Default: ``'train'``.
drop_last (bool): Whether to drop last samples. Default: ``True``.
shuffle (bool): Whether to shuffle the dataset. Default: ``True``.
base_size (int): Initial size of the image and target before other augmentations. Default: ``512``.
min_resize_scale (float): The minimum value the samples can be rescaled. Default: ``0.5``.
max_resize_scale (float): The maximum value the samples can be rescaled. Default: ``2.0``.
final_size (int): The final size of the image and target. Default: ``512``.
ignore_background (bool): If ``True``, ignore the background class when calculating the training loss.
Default: ``True``.
**dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
"""
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
batch_size = global_batch_size // dist.get_world_size()
both_transforms, image_transforms, target_transforms = build_ade20k_transformations(
split=split,
base_size=base_size,
min_resize_scale=min_resize_scale,
max_resize_scale=max_resize_scale,
final_size=final_size)
dataset = ADE20k(datadir=datadir,
split=split,
both_transforms=both_transforms,
image_transforms=image_transforms,
target_transforms=target_transforms)
sampler = dist.get_sampler(dataset, drop_last=drop_last, shuffle=shuffle)
device_transform_fn = NormalizationFn(mean=IMAGENET_CHANNEL_MEAN,
std=IMAGENET_CHANNEL_STD,
ignore_background=ignore_background)
return DataSpec(
dataloader=DataLoader(dataset=dataset,
batch_size=batch_size,
sampler=sampler,
drop_last=drop_last,
collate_fn=pil_image_collate,
**dataloader_kwargs),
device_transforms=device_transform_fn,
)
def build_streaming_ade20k_dataloader(
global_batch_size: int,
remote: str,
*,
local: str = '/tmp/mds-cache/mds-ade20k/',
split: str = 'train',
drop_last: bool = True,
shuffle: bool = True,
base_size: int = 512,
min_resize_scale: float = 0.5,
max_resize_scale: float = 2.0,
final_size: int = 512,
ignore_background: bool = True,
predownload: Optional[int] = 100_000,
keep_zip: Optional[bool] = None,
download_retry: int = 2,
download_timeout: float = 60,
validate_hash: Optional[str] = None,
shuffle_seed: Optional[int] = None,
num_canonical_nodes: Optional[int] = None,
**dataloader_kwargs: Dict[str, Any],
):
"""Build an ADE20k streaming dataset.
Args:
global_batch_size (int): Global batch size.
remote (str): Remote directory (S3 or local filesystem) where dataset is stored.
local (str): Local filesystem directory where dataset is cached during operation.
Default: ``'/tmp/mds-cache/mds-ade20k/'``.
split (str): The dataset split to use, either 'train' or 'val'. Default: ``'train'``.
base_size (int): Initial size of the image and target before other augmentations. Default: ``512``.
min_resize_scale (float): The minimum value the samples can be rescaled. Default: ``0.5``.
max_resize_scale (float): The maximum value the samples can be rescaled. Default: ``2.0``.
final_size (int): The final size of the image and target. Default: ``512``.
ignore_background (bool): If ``True``, ignore the background class when calculating the training loss.
Default: ``True``.
predownload (int, optional): Target number of samples to download ahead of the current sample while
iterating. Defaults to ``100_000``.
keep_zip (bool, optional): Whether to keep or delete the compressed file when
decompressing downloaded shards. If set to None, keep iff remote is local. Defaults to
``None``.
download_retry (int): Number of download re-attempts before giving up. Defaults to ``2``.
download_timeout (float): Number of seconds to wait for a shard to download before raising
an exception. Defaults to ``60``.
validate_hash (str, optional): Optional hash or checksum algorithm to use to validate
shards. Defaults to ``None``.
shuffle_seed (int, optional): Seed for shuffling, or ``None`` for random seed. Defaults to
``None``.
num_canonical_nodes (int, optional): Canonical number of nodes for shuffling with resumption.
Defaults to ``None``, which is interpreted as the number of nodes of the initial run.
**dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
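    Example:
        A minimal usage sketch; the ``remote`` path is a placeholder and the
        ``mosaicml-streaming`` package must be installed.
        .. code-block:: python
            from composer.datasets.ade20k import build_streaming_ade20k_dataloader
            train_dataspec = build_streaming_ade20k_dataloader(
                global_batch_size=32,
                remote='s3://my-bucket/ade20k',  # placeholder remote location
                local='/tmp/mds-cache/mds-ade20k/',
                split='train',
            )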
"""
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
batch_size = global_batch_size // dist.get_world_size()
try:
from streaming.vision import StreamingADE20K
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='streaming', conda_package='mosaicml-streaming') from e
# Build the sets of transformations for ADE20k
joint_transform, image_transform, target_transform = build_ade20k_transformations(
split=split,
base_size=base_size,
min_resize_scale=min_resize_scale,
max_resize_scale=max_resize_scale,
final_size=final_size,
)
dataset = StreamingADE20K(
local=local,
remote=remote,
split=split,
shuffle=shuffle,
joint_transform=joint_transform,
transform=image_transform,
target_transform=target_transform,
predownload=predownload,
keep_zip=keep_zip,
download_retry=download_retry,
download_timeout=download_timeout,
validate_hash=validate_hash,
shuffle_seed=shuffle_seed,
num_canonical_nodes=num_canonical_nodes,
batch_size=batch_size,
)
dataloader = DataLoader(
dataset=dataset,
batch_size=batch_size,
collate_fn=pil_image_collate,
drop_last=drop_last,
**dataloader_kwargs,
)
device_transform_fn = NormalizationFn(
mean=IMAGENET_CHANNEL_MEAN,
std=IMAGENET_CHANNEL_STD,
ignore_background=ignore_background,
)
return DataSpec(dataloader=dataloader, device_transforms=device_transform_fn)
def build_synthetic_ade20k_dataloader(
global_batch_size: int,
*,
split: str = 'train',
drop_last: bool = True,
shuffle: bool = True,
final_size: int = 512,
num_unique_samples: int = 100,
device: str = 'cpu',
memory_format: MemoryFormat = MemoryFormat.CONTIGUOUS_FORMAT,
**dataloader_kwargs: Dict[str, Any],
):
"""Builds a synthetic ADE20k dataloader.
Args:
global_batch_size (int): Global batch size.
split (str): The dataset split to use, either 'train', 'val', or 'test'. Default: ``'train'``.
drop_last (bool): Whether to drop last samples. Default: ``True``.
shuffle (bool): Whether to shuffle the dataset. Default: ``True``.
final_size (int): The final size of the image and target. Default: ``512``.
num_unique_samples (int): Number of unique samples in synthetic dataset. Default: ``100``.
device (str): Device with which to load the dataset. Default: ``cpu``.
memory_format (:class:`composer.core.MemoryFormat`): Memory format of the tensors. Default: ``CONTIGUOUS_FORMAT``.
**dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
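    Example:
        A minimal usage sketch for profiling or smoke-testing without real data.
        .. code-block:: python
            from composer.datasets.ade20k import build_synthetic_ade20k_dataloader
            synthetic_dataspec = build_synthetic_ade20k_dataloader(
                global_batch_size=8,
                split='train',
                final_size=512,
                num_unique_samples=16,
            )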
"""
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
batch_size = global_batch_size // dist.get_world_size()
if split == 'train':
total_dataset_size = 20_206
elif split == 'val':
total_dataset_size = 2_000
else:
total_dataset_size = 3_352
dataset = SyntheticBatchPairDataset(
total_dataset_size=total_dataset_size,
data_shape=[3, final_size, final_size],
label_shape=[final_size, final_size],
num_classes=150,
num_unique_samples_to_create=num_unique_samples,
device=device,
memory_format=memory_format,
)
sampler = dist.get_sampler(dataset, drop_last=drop_last, shuffle=shuffle)
return DataSpec(
DataLoader(
dataset=dataset,
sampler=sampler,
batch_size=batch_size,
drop_last=drop_last,
**dataloader_kwargs,
))
class RandomResizePair(torch.nn.Module):
"""Resize the image and target to ``base_size`` scaled by a randomly sampled value.
Args:
min_scale (float): the minimum value the samples can be rescaled.
max_scale (float): the maximum value the samples can be rescaled.
base_size (Tuple[int, int]): a specified base size (height x width) to scale to get the resized dimensions.
When this is None, use the input image size. Default: ``None``.
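    Example:
        A minimal sketch of the paired-transform interface; the sample is an ``(image, target)``
        tuple of PIL images, created synthetically here for illustration.
        .. code-block:: python
            from PIL import Image
            resize_pair = RandomResizePair(min_scale=0.5, max_scale=2.0, base_size=(512, 512))
            image = Image.new('RGB', (512, 512))
            target = Image.new('L', (512, 512))
            image, target = resize_pair((image, target))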
"""
def __init__(self, min_scale: float, max_scale: float, base_size: Optional[Tuple[int, int]] = None):
super().__init__()
self.min_scale = min_scale
self.max_scale = max_scale
self.base_size = base_size
def forward(self, sample: Tuple[Image.Image, Image.Image]):
image, target = sample
resize_scale = np.random.random_sample() * (self.max_scale - self.min_scale) + self.min_scale
base_height, base_width = self.base_size if self.base_size else (image.height, image.width)
resized_dims = (int(base_height * resize_scale), int(base_width * resize_scale))
resized_image = TF.resize(image, resized_dims, interpolation=TF.InterpolationMode.BILINEAR) # type: ignore
resized_target = TF.resize(target, resized_dims, interpolation=TF.InterpolationMode.NEAREST) # type: ignore
return resized_image, resized_target
# Based on: https://github.com/open-mmlab/mmsegmentation/blob/aa50358c71fe9c4cccdd2abe42433bdf702e757b/mmseg/datasets/pipelines/transforms.py#L584
class RandomCropPair(torch.nn.Module):
"""Crop the image and target at a randomly sampled position.
Args:
crop_size (Tuple[int, int]): the size (height x width) of the crop.
class_max_percent (float): the maximum percent of the image area a single class should occupy. Default is 1.0.
num_retry (int): the number of times to resample the crop if ``class_max_percent`` threshold is not reached.
Default is 1.
"""
def __init__(self, crop_size: Tuple[int, int], class_max_percent: float = 1.0, num_retry: int = 1):
super().__init__()
self.crop_size = crop_size
self.class_max_percent = class_max_percent
self.num_retry = num_retry
def forward(self, sample: Tuple[Image.Image, Image.Image]):
image, target = sample
# if image size is smaller than crop size, no cropping necessary
if image.height <= self.crop_size[0] and image.width <= self.crop_size[1]:
return image, target
# generate crop
crop = transforms.RandomCrop.get_params(
image, output_size=self.crop_size) # type: ignore - transform typing excludes PIL.Image
if self.class_max_percent < 1.0:
for _ in range(self.num_retry):
# Crop target
target_crop = TF.crop(target, *crop) # type: ignore - transform typing excludes PIL.Image
# count the number of each class represented in cropped target
labels, counts = np.unique(np.array(target_crop), return_counts=True)
counts = counts[labels != 0]
# if the class with the most area is within the class_max_percent threshold, stop retrying
if len(counts) > 1 and (np.max(counts) / np.sum(counts)) < self.class_max_percent:
break
crop = transforms.RandomCrop.get_params(
image, output_size=self.crop_size) # type: ignore - transform typing excludes PIL.Image
image = TF.crop(image, *crop) # type: ignore - transform typing excludes PIL.Image
target = TF.crop(target, *crop) # type: ignore - transform typing excludes PIL.Image
return image, target
class RandomHFlipPair(torch.nn.Module):
"""Flip the image and target horizontally with a specified probability.
Args:
probability (float): the probability of flipping the image and target. Default: ``0.5``.
"""
def __init__(self, probability: float = 0.5):
super().__init__()
self.probability = probability
def forward(self, sample: Tuple[Image.Image, Image.Image]):
image, target = sample
if np.random.random_sample() < self.probability:  # flip with probability ``self.probability``
image = TF.hflip(image) # type: ignore - transform typing does not include PIL.Image
target = TF.hflip(target) # type: ignore - transform typing does not include PIL.Image
return image, target
class PadToSize(torch.nn.Module):
"""Pad an image to a specified size.
Args:
size (Tuple[int, int]): the size (height x width) of the image after padding.
fill (Union[int, Tuple[int, int, int]]): the value to use for the padded pixels. Default: ``0``.
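    Example:
        A minimal sketch; the input image is created synthetically for illustration and the fill
        color is an arbitrary gray value.
        .. code-block:: python
            from PIL import Image
            pad = PadToSize(size=(512, 512), fill=(128, 128, 128))
            image = Image.new('RGB', (300, 400))
            padded = pad(image)  # padded.size == (512, 512)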
"""
def __init__(self, size: Tuple[int, int], fill: Union[int, Tuple[int, int, int]] = 0):
super().__init__()
self.size = size
self.fill = fill
def forward(self, image: Image.Image):
padding = max(self.size[0] - image.height, 0), max(self.size[1] - image.width, 0)
padding = (padding[1] // 2, padding[0] // 2, ceil(padding[1] / 2), ceil(padding[0] / 2))
image = TF.pad(image, padding, fill=self.fill) # type: ignore - transform typing does not include PIL.Image
return image
class PhotometricDistoration(torch.nn.Module):
"""Applies a combination of brightness, contrast, saturation, and hue jitters with random intensity.
This is a less severe form of PyTorch's ColorJitter used by the mmsegmentation library here:
https://github.com/open-mmlab/mmsegmentation/blob/aa50358c71fe9c4cccdd2abe42433bdf702e757b/mmseg/datasets/pipelines/transforms.py#L861
Args:
brightness (float): max and min to jitter brightness.
contrast (float): max and min to jitter contrast.
saturation (float): max and min to jitter saturation.
hue (float): max and min to jitter hue.
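    Example:
        A minimal sketch; the jitter magnitudes mirror those used in the ADE20k training
        transformations above, and the input image is synthetic.
        .. code-block:: python
            from PIL import Image
            jitter = PhotometricDistoration(brightness=32. / 255, contrast=0.5, saturation=0.5, hue=18. / 255)
            image = Image.new('RGB', (256, 256))
            image = jitter(image)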
"""
def __init__(self, brightness: float, contrast: float, saturation: float, hue: float):
super().__init__()
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
def forward(self, image: Image.Image):
if np.random.randint(2):
brightness_factor = np.random.uniform(1 - self.brightness, 1 + self.brightness)
image = TF.adjust_brightness(
image, brightness_factor) # type: ignore - transform typing does not include PIL.Image
contrast_mode = np.random.randint(2)
if contrast_mode == 1 and np.random.randint(2):
contrast_factor = np.random.uniform(1 - self.contrast, 1 + self.contrast)
image = TF.adjust_contrast(
image, # type: ignore - transform typing does not include PIL.Image
contrast_factor)
if np.random.randint(2):
saturation_factor = np.random.uniform(1 - self.saturation, 1 + self.saturation)
image = TF.adjust_saturation(
image, saturation_factor) # type: ignore - transform typing does not include PIL.Image
if np.random.randint(2):
hue_factor = np.random.uniform(-self.hue, self.hue)
image = TF.adjust_hue(image, hue_factor) # type: ignore - transform typing does not include PIL.Image
if contrast_mode == 0 and np.random.randint(2):
contrast_factor = np.random.uniform(1 - self.contrast, 1 + self.contrast)
image = TF.adjust_contrast(
image, # type: ignore - transform typing does not include PIL.Image
contrast_factor)
return image
class ADE20k(Dataset):
"""PyTorch Dataset for ADE20k.
Args:
datadir (str): the path to the ADE20k folder.
split (str): the dataset split to use, either 'training', 'validation', or 'test'. Default: ``'training'``.
both_transforms (torch.nn.Module): transformations to apply to the image and target simultaneously.
Default: ``None``.
image_transforms (torch.nn.Module): transformations to apply to the image only. Default: ``None``.
target_transforms (torch.nn.Module): transformations to apply to the target only. Default: ``None``.
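    Example:
        A minimal usage sketch; ``'/path/to/ade20k'`` is a placeholder for a local copy of the
        dataset laid out as ``images/<split>`` and ``annotations/<split>``.
        .. code-block:: python
            dataset = ADE20k(
                datadir='/path/to/ade20k',
                split='training',
                both_transforms=RandomHFlipPair(),
            )
            image, target = dataset[0]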
"""
def __init__(self,
datadir: str,
split: str = 'training',
both_transforms: Optional[torch.nn.Module] = None,
image_transforms: Optional[torch.nn.Module] = None,
target_transforms: Optional[torch.nn.Module] = None):
super().__init__()
self.datadir = datadir
self.split = split
self.both_transforms = both_transforms
self.image_transforms = image_transforms
self.target_transforms = target_transforms
# Check datadir value
if self.datadir is None:
raise ValueError('datadir must be specified')
elif not os.path.exists(self.datadir):
raise FileNotFoundError(f'datadir path does not exist: {self.datadir}')
# Check split value
if self.split not in ['training', 'validation', 'test']:
raise ValueError(f'split must be one of [`training`, `validation`, `test`] but is: {self.split}')
self.image_dir = os.path.join(self.datadir, 'images', self.split)
if not os.path.exists(self.image_dir):
raise FileNotFoundError(f'ADE20k directory structure is not as expected: {self.image_dir} does not exist')
self.image_files = os.listdir(self.image_dir)
# Filter for ADE files
self.image_files = [f for f in self.image_files if f[:3] == 'ADE']
# Remove grayscale samples
if self.split == 'training':
corrupted_samples = ['00003020', '00001701', '00013508', '00008455']
for sample in corrupted_samples:
sample_file = f'ADE_train_{sample}.jpg'
if sample_file in self.image_files:
self.image_files.remove(sample_file)
def __getitem__(self, index):
# Load image
image_file = self.image_files[index]
image_path = os.path.join(self.image_dir, image_file)
image = Image.open(image_path)
# Load annotation target if using either train or val splits
if self.split in ['training', 'validation']:
target_path = os.path.join(self.datadir, 'annotations', self.split, image_file.split('.')[0] + '.png')
target = Image.open(target_path)
if self.both_transforms:
image, target = self.both_transforms((image, target))
if self.target_transforms:
target = self.target_transforms(target)
if self.image_transforms:
image = self.image_transforms(image)
if self.split in ['training', 'validation']:
return image, target # type: ignore
else:
return image
def __len__(self):
return len(self.image_files)
| composer-dev | composer/datasets/ade20k.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import List, cast
from torch.utils.data import DataLoader, Dataset
from composer.utils import MissingConditionalImportError, dist
log = logging.getLogger(__name__)
def build_lm_dataloader(
datadir: List[str],
tokenizer_name: str,
global_batch_size: int,
*,
split: str = 'train',
shuffle: bool = True,
drop_last: bool = True,
use_masked_lm: bool = False,
num_tokens: int = 0,
mlm_probability: float = 0.15,
subsample_ratio: float = 1.0,
**dataloader_kwargs,
):
"""Builds a dataloader for a generic language modeling dataset.
Args:
datadir (list): List of paths to pre-tokenized HuggingFace dataset directories
(loadable via ``datasets.load_from_disk``).
tokenizer_name (str): The name of the HuggingFace tokenizer to
preprocess text with. See `HuggingFace documentation
<https://huggingface.co/models>`_.
global_batch_size (int): Global batch size.
split (str): The dataset split to use, either 'train', 'val', or 'test'. Default: ``'train'``.
shuffle (bool): whether to shuffle the dataset. Default: ``True``.
drop_last (bool): whether to drop last samples. Default: ``True``.
use_masked_lm (bool): Whether the dataset should be encoded with masked
language modeling or not.
num_tokens (int, optional): Number of tokens to train on. ``0``
will train on all tokens in the dataset. Default: ``0``.
mlm_probability (float, optional): If using masked language modeling, the
probability with which tokens will be masked. Default: ``0.15``.
subsample_ratio (float, optional): Proportion of the dataset to use. Default:
``1.0``.
**dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
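    Example:
        A minimal usage sketch; ``'/path/to/tokenized_dataset'`` is a placeholder for a
        pre-tokenized HuggingFace dataset saved to disk, and ``'gpt2'`` is an illustrative
        tokenizer name.
        .. code-block:: python
            from composer.datasets.lm_dataset import build_lm_dataloader
            train_dataloader = build_lm_dataloader(
                datadir=['/path/to/tokenized_dataset'],
                tokenizer_name='gpt2',
                global_batch_size=16,
                split='train',
                use_masked_lm=False,
            )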
"""
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
batch_size = global_batch_size // dist.get_world_size()
try:
import transformers
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='transformers') from e
try:
import datasets
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='datasets') from e
assert tokenizer_name is not None
tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer_name)
# loads a dataset that is assumed to be pre-tokenized
lm_datasets = [datasets.load_from_disk(i) for i in datadir] #type: ignore (thirdparty)
# merge the dataset to re-sample from
if split is None:
raise ValueError('A dataset split is required')
merged_dataset = [[d[split]] for d in lm_datasets]
# flatten merged_dataset
merged_dataset = [item for sublist in merged_dataset for item in sublist]
lm_datasets = datasets.concatenate_datasets(merged_dataset) #type: ignore (thirdparty)
total_num_samples = len(lm_datasets) # type: ignore
tokens_per_sample = len(lm_datasets[0]['input_ids']) #type: ignore (thirdparty)
total_num_tokens = total_num_samples * tokens_per_sample
# truncate the dataset to a specified size
num_samples = total_num_samples
if num_tokens > 0:
assert num_tokens <= total_num_tokens, f'Requested {num_tokens} tokens must be <= total_num_tokens={total_num_tokens}'
assert num_tokens % tokens_per_sample == 0, f'Requested {num_tokens} tokens is not divisible by tokens_per_sample={tokens_per_sample}'
num_samples = num_tokens // tokens_per_sample
subsample_ratio = num_samples / total_num_samples
elif subsample_ratio < 1.0:
num_samples = round(total_num_samples * subsample_ratio)
num_tokens = num_samples * tokens_per_sample
elif subsample_ratio == 1.0 and num_tokens == 0:
num_tokens = total_num_tokens
else:
log.warning('No subsampling going on!')
lm_datasets = lm_datasets.select(range(num_samples)) # type: ignore (thirdparty)
log.info(f'LM datasets: {lm_datasets}')
log.info(f'Subsample ratio: {subsample_ratio}')
log.info(f'Total number of samples: {num_samples:e}')
log.info(f'Total number of tokens: {num_tokens:e}')
dataset = lm_datasets
# for some tokenizers, e.g. GPT-2, they don't have padding tokens. Hence, we cannot use the LM collator.
if tokenizer.pad_token_id is None:
data_collator = transformers.default_data_collator
else:
data_collator = transformers.DataCollatorForLanguageModeling(tokenizer=tokenizer,
mlm=use_masked_lm,
mlm_probability=mlm_probability)
sampler = dist.get_sampler(
cast(Dataset, dataset), # HF datasets do not subclass torch datasets, so this cast is needed
drop_last=drop_last,
shuffle=shuffle)
return DataLoader(
dataset=dataset, # type: ignore
batch_size=batch_size,
sampler=sampler,
drop_last=drop_last,
collate_fn=data_collator,
**dataloader_kwargs)
| composer-dev | composer/datasets/lm_dataset.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Utility and helper functions for datasets."""
import logging
import textwrap
from typing import Callable, List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from torchvision import transforms
from torchvision.datasets import VisionDataset
from composer.core import Batch
__all__ = [
'add_vision_dataset_transform',
'NormalizationFn',
'pil_image_collate',
]
log = logging.getLogger(__name__)
class NormalizationFn:
"""Normalizes input data and removes the background class from target data if desired.
An instance of this class can be used as the ``device_transforms`` argument
when constructing a :class:`~composer.core.data_spec.DataSpec`. When used here,
the data will be normalized after it has been loaded onto the device (i.e., GPU).
Args:
mean (Tuple[float, float, float]): The mean pixel value for each channel (RGB) for
the dataset.
std (Tuple[float, float, float]): The standard deviation pixel value for each
channel (RGB) for the dataset.
ignore_background (bool): If ``True``, ignore the background class in the training
loss. Only used in semantic segmentation. Default: ``False``.
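    Example:
        A minimal sketch applying the transform directly to a batch of tensors; the channel
        statistics are illustrative ImageNet values on the 0-255 pixel scale.
        .. code-block:: python
            import torch
            normalize = NormalizationFn(mean=(123.68, 116.78, 103.94), std=(58.39, 57.12, 57.38))
            images = torch.randint(0, 256, (8, 3, 32, 32), dtype=torch.uint8)
            targets = torch.randint(0, 10, (8,))
            images, targets = normalize((images, targets))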
"""
def __init__(self,
mean: Tuple[float, float, float],
std: Tuple[float, float, float],
ignore_background: bool = False):
self.mean = mean
self.std = std
self.ignore_background = ignore_background
def __call__(self, batch: Batch):
xs, ys = batch
assert isinstance(xs, torch.Tensor)
assert isinstance(ys, torch.Tensor)
device = xs.device
if not isinstance(self.mean, torch.Tensor):
self.mean = torch.tensor(self.mean, device=device)
self.mean = self.mean.view(1, 3, 1, 1)
if not isinstance(self.std, torch.Tensor):
self.std = torch.tensor(self.std, device=device)
self.std = self.std.view(1, 3, 1, 1)
xs = xs.float()
xs = xs.sub_(self.mean).div_(self.std)
if self.ignore_background:
ys = ys.sub_(1)
return xs, ys
def pil_image_collate(
batch: List[Tuple[Image.Image, Union[Image.Image, np.ndarray]]],
memory_format: torch.memory_format = torch.contiguous_format) -> Tuple[torch.Tensor, torch.Tensor]:
"""Constructs a length 2 tuple of torch.Tensors from datasets that yield samples of type
:class:`PIL.Image.Image`.
This function can be used as the ``collate_fn`` argument of a :class:`torch.utils.data.DataLoader`.
Args:
batch (List[Tuple[Image.Image, Union[Image.Image, np.ndarray]]]): List of (image, target) tuples
that will be aggregated and converted into a single (:class:`~torch.Tensor`, :class:`~torch.Tensor`)
tuple.
memory_format (torch.memory_format): The memory format for the input and target tensors.
Returns:
(torch.Tensor, torch.Tensor): Tuple of (image tensor, target tensor)
The image tensor will be four-dimensional (NCHW or NHWC, depending on the ``memory_format``).
"""
imgs = [sample[0] for sample in batch]
w, h = imgs[0].size
image_tensor = torch.zeros((len(imgs), 3, h, w), dtype=torch.uint8).contiguous(memory_format=memory_format)
# Convert targets to torch tensor
targets = [sample[1] for sample in batch]
if isinstance(targets[0], Image.Image):
target_dims = (len(targets), targets[0].size[1], targets[0].size[0])
else:
target_dims = (len(targets),)
target_tensor = torch.zeros(target_dims, dtype=torch.int64).contiguous(memory_format=memory_format)
for i, img in enumerate(imgs):
nump_array = np.asarray(img, dtype=np.uint8)
if nump_array.ndim < 3:
nump_array = np.expand_dims(nump_array, axis=-1)
nump_array = np.rollaxis(nump_array, 2).copy()
if nump_array.shape[0] != 3:
assert nump_array.shape[0] == 1, 'unexpected shape'
nump_array = np.resize(nump_array, (3, h, w))
assert image_tensor.shape[1:] == nump_array.shape, 'shape mismatch'
image_tensor[i] += torch.from_numpy(nump_array)
target_tensor[i] += torch.from_numpy(np.array(targets[i], dtype=np.int64))
return image_tensor, target_tensor
def add_vision_dataset_transform(dataset: VisionDataset, transform: Callable, is_tensor_transform: bool = False):
"""Add a transform to a dataset's collection of transforms.
Args:
dataset (VisionDataset): A torchvision dataset.
transform (Callable): Function to be added to the dataset's collection of
transforms.
is_tensor_transform (bool): Whether ``transform`` acts on data of the type
:class:`~torch.Tensor`. default: ``False``.
* If ``True``, and :class:`~torchvision.transforms.ToTensor` is present in the transforms of the
``dataset``, then ``transform`` will be inserted after the
:class:`~torchvision.transforms.ToTensor` transform.
* If ``False`` and :class:`~torchvision.transforms.ToTensor` is present, the ``transform`` will be
inserted before :class:`~torchvision.transforms.ToTensor`.
* If :class:`~torchvision.transforms.ToTensor` is not present, the transform will be appended to
the end of collection of transforms.
Returns:
None: The ``dataset`` is modified in-place.
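    Example:
        A minimal sketch inserting a tensor-space transform after ``ToTensor``; the download
        location and CIFAR-10 statistics are illustrative.
        .. code-block:: python
            from torchvision import datasets, transforms
            dataset = datasets.CIFAR10('/tmp/cifar10', train=True, download=True,
                                       transform=transforms.ToTensor())
            normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))
            add_vision_dataset_transform(dataset, normalize, is_tensor_transform=True)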
"""
transform_added_logstring = textwrap.dedent(f"""\
Transform {transform} added to dataset.
Dataset now has the following transforms: {dataset.transform}""")
if dataset.transform is None:
dataset.transform = transform
log.warning(transform_added_logstring)
elif isinstance(dataset.transform, transforms.Compose):
insertion_index = len(dataset.transform.transforms)
for i, t in enumerate(dataset.transform.transforms):
if isinstance(t, transforms.ToTensor):
insertion_index = i
break
if is_tensor_transform:
insertion_index += 1
dataset.transform.transforms.insert(insertion_index, transform)
log.warning(transform_added_logstring)
else: # transform is some other basic transform, join using Compose
if isinstance(dataset.transform, transforms.ToTensor) and not is_tensor_transform:
dataset.transform = transforms.Compose([transform, dataset.transform])
log.warning(transform_added_logstring)
else:
dataset.transform = transforms.Compose([dataset.transform, transform])
log.warning(transform_added_logstring)
| composer-dev | composer/datasets/utils.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""CIFAR image classification dataset.
The CIFAR datasets are a collection of labeled 32x32 colour images. Please refer to the `CIFAR dataset
<https://www.cs.toronto.edu/~kriz/cifar.html>`_ for more details.
"""
import os
import textwrap
from typing import Any, Dict, List, Optional, Union
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from composer.core import DataSpec, MemoryFormat
from composer.datasets.ffcv_utils import write_ffcv_dataset
from composer.datasets.synthetic import SyntheticBatchPairDataset
from composer.datasets.utils import pil_image_collate
from composer.utils import MissingConditionalImportError, dist
__all__ = [
'build_cifar10_dataloader', 'build_ffcv_cifar10_dataloader', 'build_streaming_cifar10_dataloader',
'build_synthetic_cifar10_dataloader'
]
CIFAR10_CHANNEL_MEAN = 0.4914, 0.4822, 0.4465
CIFAR10_CHANNEL_STD = 0.247, 0.243, 0.261
def build_cifar10_dataloader(
datadir: str,
global_batch_size: int,
is_train: bool = True,
download: bool = True,
drop_last: bool = True,
shuffle: bool = True,
**dataloader_kwargs: Any,
) -> DataSpec:
"""Builds a CIFAR-10 dataloader with default transforms.
Args:
datadir (str): Path to the data directory
global_batch_size (int): Global batch size
is_train (bool): Whether to load the training data or validation data. Default:
``True``.
download (bool, optional): Whether to download the dataset, if needed. Default:
``True``.
drop_last (bool): Drop remainder samples. Default: ``True``.
shuffle (bool): Shuffle the dataset. Default: ``True``.
**dataloader_kwargs (Any): Additional settings for the dataloader (e.g. num_workers, etc.)
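    Example:
        A minimal usage sketch; ``'/tmp/cifar10'`` is a placeholder download location.
        .. code-block:: python
            from composer.datasets.cifar import build_cifar10_dataloader
            train_dataspec = build_cifar10_dataloader(
                datadir='/tmp/cifar10',
                global_batch_size=128,
                is_train=True,
            )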
"""
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
batch_size = global_batch_size // dist.get_world_size()
if is_train:
transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CIFAR10_CHANNEL_MEAN, CIFAR10_CHANNEL_STD),
])
else:
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CIFAR10_CHANNEL_MEAN, CIFAR10_CHANNEL_STD),
])
with dist.run_local_rank_zero_first():
dataset = datasets.CIFAR10(
datadir,
train=is_train,
download=dist.get_local_rank() == 0 and download,
transform=transform,
)
sampler = dist.get_sampler(dataset, drop_last=drop_last, shuffle=shuffle)
return DataSpec(
DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
drop_last=drop_last,
**dataloader_kwargs,
),)
def build_ffcv_cifar10_dataloader(
global_batch_size: int,
is_train: bool = True,
download: bool = True,
drop_last: bool = True,
prefetch_factor: int = 2,
num_workers: int = 8,
ffcv_dir: str = '/tmp',
ffcv_dest: str = 'cifar_train.ffcv',
ffcv_write_dataset: Union[str, bool] = False,
datadir: Union[str, None] = None,
) -> DataSpec:
"""Builds an FFCV CIFAR10 dataloader.
Args:
global_batch_size (int): Global batch size.
is_train (bool): Whether to load the training data or validation data. Default:
``True``.
download (bool, optional): Whether to download the dataset, if needed. Default:
``True``.
drop_last (bool): Whether to drop last samples. Default: ``True``.
prefetch_factor (int): Number of batches to prefetch. Default: ``2``.
num_workers (int): Number of workers to use for the dataloader. Default: ``8``.
ffcv_dir (str, optional): A directory containing train/val <file>.ffcv files. If
these files don't exist and ``ffcv_write_dataset`` is ``True``, train/val
<file>.ffcv files will be created in this dir. Default: ``"/tmp"``.
ffcv_dest (str, optional): <file>.ffcv file that has dataset samples. Default: ``"cifar_train.ffcv"``.
ffcv_write_dataset (str | bool, optional): Whether to create dataset in FFCV format (<file>.ffcv) if it doesn't exist. Default:
``False``.
datadir (str | None, optional): Path to the non-FFCV data directory.
"""
try:
import ffcv
from ffcv.fields.decoders import IntDecoder, SimpleRGBImageDecoder
from ffcv.pipeline.operation import Operation
except ImportError:
raise ImportError(
textwrap.dedent("""\
Composer was installed without ffcv support.
To use ffcv with Composer, please install ffcv in your environment."""))
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
batch_size = global_batch_size // dist.get_world_size()
dataset_filepath = os.path.join(ffcv_dir, ffcv_dest)
# always create if ffcv_write_dataset is true
if ffcv_write_dataset:
if dist.get_local_rank() == 0:
if datadir is None:
raise ValueError('datadir is required when ffcv_write_dataset is True.')
ds = datasets.CIFAR10(
datadir,
train=is_train,
download=download,
)
write_ffcv_dataset(dataset=ds, write_path=dataset_filepath)
# Wait for the local rank 0 to be done creating the dataset in ffcv format.
dist.barrier()
if not os.path.exists(dataset_filepath):
raise ValueError(
f'Dataset file containing samples not found at {dataset_filepath}. Use ffcv_dir flag to point to a dir containing {dataset_filepath}.'
)
# Please note that this mean/std is different from the mean/std used for regular PyTorch dataloader as
# ToTensor does the normalization for PyTorch dataloaders.
cifar10_mean_ffcv = [125.307, 122.961, 113.8575]
cifar10_std_ffcv = [51.5865, 50.847, 51.255]
label_pipeline: List[Operation] = [IntDecoder(), ffcv.transforms.ToTensor(), ffcv.transforms.Squeeze()]
image_pipeline: List[Operation] = [SimpleRGBImageDecoder()]
if is_train:
image_pipeline.extend([
ffcv.transforms.RandomHorizontalFlip(),
ffcv.transforms.RandomTranslate(padding=2, fill=tuple(map(int, cifar10_mean_ffcv))),
ffcv.transforms.Cutout(4, tuple(map(int, cifar10_mean_ffcv))),
])
# Common transforms for train and test
image_pipeline.extend([
ffcv.transforms.ToTensor(),
ffcv.transforms.ToTorchImage(channels_last=False, convert_back_int16=False),
ffcv.transforms.Convert(torch.float32),
transforms.Normalize(cifar10_mean_ffcv, cifar10_std_ffcv),
])
ordering = ffcv.loader.OrderOption.RANDOM if is_train else ffcv.loader.OrderOption.SEQUENTIAL
return DataSpec(
ffcv.Loader(
dataset_filepath,
batch_size=batch_size,
num_workers=num_workers,
order=ordering,
distributed=False,
pipelines={
'image': image_pipeline,
'label': label_pipeline,
},
batches_ahead=prefetch_factor,
drop_last=drop_last,
),)
def build_synthetic_cifar10_dataloader(
global_batch_size: int,
is_train: bool = True,
drop_last: bool = True,
shuffle: bool = True,
num_unique_samples: int = 100,
device: str = 'cpu',
memory_format: MemoryFormat = MemoryFormat.CONTIGUOUS_FORMAT,
**dataloader_kwargs: Any,
) -> DataSpec:
"""Builds a synthetic CIFAR-10 dataset for debugging or profiling.
Args:
global_batch_size (int): Global batch size
is_train (bool): Whether to load the training data or validation data. Default:
``True``.
drop_last (bool): Drop remainder samples. Default: ``True``.
shuffle (bool): Shuffle the dataset. Default: ``True``.
num_unique_samples (int): number of unique samples in synthetic dataset. Default: ``100``.
device (str): device with which to load the dataset. Default: ``cpu``.
memory_format (:class:`composer.core.MemoryFormat`): memory format of the tensors. Default: ``CONTIGUOUS_FORMAT``.
**dataloader_kwargs (Any): Additional settings for the dataloader (e.g. num_workers, etc.)
"""
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
batch_size = global_batch_size // dist.get_world_size()
dataset = SyntheticBatchPairDataset(
total_dataset_size=50_000 if is_train else 10_000,
data_shape=[3, 32, 32],
num_classes=10,
num_unique_samples_to_create=num_unique_samples,
device=device,
memory_format=memory_format,
)
sampler = dist.get_sampler(dataset, drop_last=drop_last, shuffle=shuffle)
return DataSpec(
DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
drop_last=drop_last,
**dataloader_kwargs,
),)
def build_streaming_cifar10_dataloader(
global_batch_size: int,
remote: str,
*,
local: str = '/tmp/mds-cache/mds-cifar10',
split: str = 'train',
drop_last: bool = True,
shuffle: bool = True,
predownload: Optional[int] = 100_000,
keep_zip: Optional[bool] = None,
download_retry: int = 2,
download_timeout: float = 60,
validate_hash: Optional[str] = None,
shuffle_seed: Optional[int] = None,
num_canonical_nodes: Optional[int] = None,
**dataloader_kwargs: Dict[str, Any],
) -> DataSpec:
"""Builds a streaming CIFAR10 dataset
Args:
global_batch_size (int): Global batch size.
remote (str): Remote directory (S3 or local filesystem) where dataset is stored.
local (str, optional): Local filesystem directory where dataset is cached during operation.
Defaults to ``'/tmp/mds-cache/mds-cifar10'``.
split (str): Which split of the dataset to use, either 'train' or 'val'. Default:
``'train'``.
drop_last (bool, optional): whether to drop last samples. Default: ``True``.
shuffle (bool, optional): whether to shuffle dataset. Defaults to ``True``.
predownload (int, optional): Target number of samples to download ahead of the current sample while
iterating. Defaults to ``100_000``.
keep_zip (bool, optional): Whether to keep or delete the compressed file when
decompressing downloaded shards. If set to None, keep iff remote is local. Defaults to
``None``.
download_retry (int): Number of download re-attempts before giving up. Defaults to ``2``.
download_timeout (float): Number of seconds to wait for a shard to download before raising
an exception. Defaults to ``60``.
validate_hash (str, optional): Optional hash or checksum algorithm to use to validate
shards. Defaults to ``None``.
shuffle_seed (int, optional): Seed for shuffling, or ``None`` for random seed. Defaults to
``None``.
num_canonical_nodes (int, optional): Canonical number of nodes for shuffling with resumption.
Defaults to ``None``, which is interpreted as the number of nodes of the initial run.
**dataloader_kwargs (Dict[str, Any]): Additional settings for the dataloader (e.g. num_workers, etc.)
"""
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
batch_size = global_batch_size // dist.get_world_size()
try:
from streaming.vision import StreamingCIFAR10
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='streaming', conda_package='mosaicml-streaming') from e
if split == 'train':
transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CIFAR10_CHANNEL_MEAN, CIFAR10_CHANNEL_STD),
])
else:
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CIFAR10_CHANNEL_MEAN, CIFAR10_CHANNEL_STD),
])
dataset = StreamingCIFAR10(
local=local,
remote=remote,
split=split,
shuffle=shuffle,
transform=transform,
predownload=predownload,
keep_zip=keep_zip,
download_retry=download_retry,
download_timeout=download_timeout,
validate_hash=validate_hash,
shuffle_seed=shuffle_seed,
num_canonical_nodes=num_canonical_nodes,
batch_size=batch_size,
)
dataloader = DataLoader(
dataset=dataset,
batch_size=batch_size,
collate_fn=pil_image_collate,
sampler=None,
drop_last=drop_last,
**dataloader_kwargs,
)
return DataSpec(dataloader=dataloader)
| composer-dev | composer/datasets/cifar.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import Any
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from composer.core import MemoryFormat
from composer.datasets.synthetic import SyntheticBatchPairDataset
from composer.utils import dist
def build_mnist_dataloader(
datadir: str,
global_batch_size: int,
is_train: bool = True,
download: bool = True,
drop_last: bool = True,
shuffle: bool = True,
**dataloader_kwargs: Any,
) -> DataLoader:
"""Builds an MNIST dataloader.
Args:
datadir (str): Path to the data directory
global_batch_size (int): Global batch size.
is_train (bool): Whether to load the training data or validation data. Default:
``True``.
download (bool, optional): Whether to download the dataset, if needed. Default:
``True``.
drop_last (bool): Drop remainder samples. Default: ``True``.
shuffle (bool): Shuffle the dataset. Default: ``True``.
**dataloader_kwargs (Any): Additional settings for the dataloader (e.g. num_workers, etc.)
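    Example:
        A minimal usage sketch; ``'/tmp/mnist'`` is a placeholder download location.
        .. code-block:: python
            from composer.datasets.mnist import build_mnist_dataloader
            train_dataloader = build_mnist_dataloader(
                datadir='/tmp/mnist',
                global_batch_size=64,
                is_train=True,
            )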
"""
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
batch_size = global_batch_size // dist.get_world_size()
transform = transforms.Compose([transforms.ToTensor()])
with dist.run_local_rank_zero_first():
dataset = datasets.MNIST(
datadir,
train=is_train,
download=dist.get_local_rank() == 0 and download,
transform=transform,
)
sampler = dist.get_sampler(dataset, drop_last=drop_last, shuffle=shuffle)
return DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
drop_last=drop_last,
**dataloader_kwargs,
)
def build_synthetic_mnist_dataloader(
global_batch_size: int,
is_train: bool = True,
drop_last: bool = True,
shuffle: bool = True,
num_unique_samples: int = 100,
device: str = 'cpu',
memory_format: MemoryFormat = MemoryFormat.CONTIGUOUS_FORMAT,
**dataloader_kwargs: Any,
) -> DataLoader:
"""Builds a synthetic MNIST dataset.
Args:
global_batch_size (int): Global batch size.
is_train (bool): Whether to load the training data or validation data. Default:
``True``.
drop_last (bool): Drop remainder samples. Default: ``True``.
shuffle (bool): Shuffle the dataset. Default: ``True``.
num_unique_samples (int): number of unique samples in synthetic dataset. Default: ``100``.
device (str): device with which to load the dataset. Default: ``cpu``.
memory_format (:class:`composer.core.MemoryFormat`): memory format of the tensors. Default: ``CONTIGUOUS_FORMAT``.
**dataloader_kwargs (Any): Additional settings for the dataloader (e.g. num_workers, etc.)
"""
if global_batch_size % dist.get_world_size() != 0:
raise ValueError(
f'global_batch_size ({global_batch_size}) must be divisible by world_size ({dist.get_world_size()}).')
batch_size = global_batch_size // dist.get_world_size()
dataset = SyntheticBatchPairDataset(
total_dataset_size=60_000 if is_train else 10_000,
data_shape=[1, 28, 28],
num_classes=10,
num_unique_samples_to_create=num_unique_samples,
device=device,
memory_format=memory_format,
)
sampler = dist.get_sampler(dataset, drop_last=drop_last, shuffle=shuffle)
return DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
drop_last=drop_last,
**dataloader_kwargs,
)
| composer-dev | composer/datasets/mnist.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
class NoEffectWarning(Warning):
"""Warns when an algorithm did not have an effect.
An algorithm should emit this warning when its application resulted in no changes to the trainer :class:`~.State`.
For example, a surgery algorithm that finds and replaces layers may find no layers to replace.
"""
pass
class NotIntendedUseWarning(Warning):
"""Warns when an algorithm is being applied outside its intended setting.
This is a catch-all generic warning to alert the user to potentially unintended applications. For example, when a
data augmentation that resizes the image is being applied to a very small image, or being applied to text data.
"""
pass
| composer-dev | composer/algorithms/warnings.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Efficiency methods for training.
Examples include :class:`.LabelSmoothing` and adding :class:`.SqueezeExcite` blocks,
among many others.
Algorithms are implemented in both a standalone functional form (see :mod:`composer.functional`)
and as subclasses of :class:`Algorithm` for integration in the Composer :class:`Trainer`.
The former are easier to integrate piecemeal into an existing codebase.
The latter are easier to compose together, since they all have the same public interface
and work automatically with the Composer :py:class:`.Trainer`.
For ease of composability, algorithms in our Trainer are based on the two-way callbacks concept from
`Howard et al, 2020 <https://arxiv.org/abs/2002.04688>`_. Each algorithm implements two methods:
* :meth:`Algorithm.match`: returns ``True`` if the algorithm should be run given the current
:class:`State` and :class:`.Event`.
* :meth:`Algorithm.apply`: performs an in-place modification of the given
:class:`State`
For example, a simple algorithm that shortens training:
.. code-block:: python
from composer import Algorithm, State, Event, Logger
class ShortenTraining(Algorithm):
def match(self, state: State, event: Event, logger: Logger) -> bool:
return event == Event.INIT
def apply(self, state: State, event: Event, logger: Logger):
state.max_duration /= 2 # cut training time in half
For more information about events, see :class:`.Event`.
"""
from composer.algorithms.alibi import Alibi
from composer.algorithms.augmix import AugmentAndMixTransform, AugMix
from composer.algorithms.blurpool import BlurPool
from composer.algorithms.channels_last import ChannelsLast
from composer.algorithms.colout import ColOut, ColOutTransform
from composer.algorithms.cutmix import CutMix
from composer.algorithms.cutout import CutOut
from composer.algorithms.ema import EMA
from composer.algorithms.factorize import Factorize
from composer.algorithms.fused_layernorm import FusedLayerNorm
from composer.algorithms.gated_linear_units import GatedLinearUnits
from composer.algorithms.ghost_batchnorm import GhostBatchNorm
from composer.algorithms.gradient_clipping import GradientClipping
from composer.algorithms.gyro_dropout import GyroDropout
from composer.algorithms.label_smoothing import LabelSmoothing
from composer.algorithms.layer_freezing import LayerFreezing
from composer.algorithms.low_precision_groupnorm import LowPrecisionGroupNorm
from composer.algorithms.low_precision_layernorm import LowPrecisionLayerNorm
from composer.algorithms.mixup import MixUp
from composer.algorithms.no_op_model import NoOpModel
from composer.algorithms.progressive_resizing import ProgressiveResizing
from composer.algorithms.randaugment import RandAugment, RandAugmentTransform
from composer.algorithms.sam import SAM
from composer.algorithms.selective_backprop import SelectiveBackprop
from composer.algorithms.seq_length_warmup import SeqLengthWarmup
from composer.algorithms.squeeze_excite import SqueezeExcite, SqueezeExcite2d, SqueezeExciteConv2d
from composer.algorithms.stochastic_depth import StochasticDepth
from composer.algorithms.swa import SWA
from composer.algorithms.weight_standardization import WeightStandardization
__all__ = [
'Alibi',
'AugmentAndMixTransform',
'AugMix',
'BlurPool',
'ChannelsLast',
'ColOut',
'ColOutTransform',
'CutMix',
'CutOut',
'EMA',
'Factorize',
'FusedLayerNorm',
'GatedLinearUnits',
'GhostBatchNorm',
'GradientClipping',
'LabelSmoothing',
'LayerFreezing',
'LowPrecisionLayerNorm',
'LowPrecisionGroupNorm',
'MixUp',
'NoOpModel',
'ProgressiveResizing',
'RandAugment',
'RandAugmentTransform',
'SAM',
'SelectiveBackprop',
'SeqLengthWarmup',
'SqueezeExcite',
'SqueezeExcite2d',
'SqueezeExciteConv2d',
'StochasticDepth',
'SWA',
'WeightStandardization',
'GyroDropout',
]
| composer-dev | composer/algorithms/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core Exponential Moving Average (EMA) classes and functions."""
from __future__ import annotations
import itertools
import logging
from typing import Any, Dict, Optional, Union
import torch
from composer.callbacks.checkpoint_saver import CheckpointSaver
from composer.core import Algorithm, Event, State, Time, TimeUnit
from composer.loggers import Logger
log = logging.getLogger(__name__)
__all__ = ['EMA', 'compute_ema']
def compute_ema(model: torch.nn.Module,
ema_model: Union[torch.nn.Module, EMAParameters],
smoothing: float = 0.99) -> None:
r"""Updates the weights of ``ema_model`` to be closer to the weights of ``model``
according to an exponential weighted average. Weights are updated according to
.. math::
W_{ema_model}^{(t+1)} = smoothing\times W_{ema_model}^{(t)}+(1-smoothing)\times W_{model}^{(t)}
The update to ``ema_model`` happens in place.
The half life of the weights for terms in the average is given by
.. math::
t_{1/2} = -\frac{\log(2)}{\log(smoothing)}
Therefore, to set smoothing to obtain a target half life, set smoothing according to
.. math::
smoothing = \exp\left[-\frac{\log(2)}{t_{1/2}}\right]
Args:
model (torch.nn.Module): the model containing the latest weights to use to update the moving average weights.
ema_model (torch.nn.Module, EMAParameters): the model containing the moving average weights to be updated.
smoothing (float, optional): the coefficient representing the degree to which older observations are kept.
Must be in the interval :math:`(0, 1)`. Default: ``0.99``.
Example:
.. testcode::
import composer.functional as cf
from torchvision import models
model = models.resnet50()
ema_model = models.resnet50()
cf.compute_ema(model, ema_model, smoothing=0.9)
"""
with torch.no_grad():
# If the ema model is a pytorch module, can just use the state_dict
if isinstance(ema_model, torch.nn.Module):
ema_params = ema_model.state_dict()
for name, param in itertools.chain(model.named_parameters(), model.named_buffers()):
if name in ema_params:
ema_params[name].copy_(ema_params[name] * smoothing + param.data * (1. - smoothing))
# Otherwise, the ema model needs to define the named_parameters and named_buffers dictionaries
# These should contain the parameters and buffers to average.
elif isinstance(ema_model, EMAParameters):
ema_parameters = ema_model.named_parameters_dict
ema_buffers = ema_model.named_buffers_dict
for name, param in itertools.chain(model.named_parameters(), model.named_buffers()):
if name in ema_parameters:
ema_parameters[name].copy_(ema_parameters[name] * smoothing + param.data * (1. - smoothing))
if name in ema_buffers:
ema_buffers[name].copy_(ema_buffers[name] * smoothing + param.data * (1. - smoothing))
else:
raise ValueError('ema_model must be a torch.nn.Module or EMAParameters')
class EMA(Algorithm):
r"""Maintains a set of weights that follow the exponential moving average of the training model weights.
Weights are updated according to
.. math::
W_{ema_model}^{(t+1)} = smoothing\times W_{ema_model}^{(t)}+(1-smoothing)\times W_{model}^{(t)}
Where the smoothing is determined from ``half_life`` according to
.. math::
smoothing = \exp\left[-\frac{\log(2)}{t_{1/2}}\right]
Model evaluation is done with the moving average weights, which can result in better generalization. Because of the
ema weights, EMA can double the model's memory consumption. Note that this does not mean that the total memory
required doubles, since stored activations and the optimizer state are not duplicated. EMA also uses a small
amount of extra compute to update the moving average weights.
See the :doc:`Method Card </method_cards/ema>` for more details.
Args:
half_life (str, optional): The time string specifying the half life for terms in the average. A longer half
life means old information is remembered longer, a shorter half life means old information is discarded
sooner. A half life of ``0`` means no averaging is done, an infinite half life means no update is done.
Currently only units of epoch ('ep') and batch ('ba') are supported. Time must be an integer value in the units
specified. Cannot be used if ``smoothing`` is also specified. Default: ``"1000ba"``.
smoothing (float, optional): The coefficient representing the degree to which older observations are kept.
Must be in the interval :math:`(0, 1)`. Cannot be used if ``half_life`` also specified. This value will
not be adjusted if ``update_interval`` is changed. Default: ``None``.
ema_start (str, optional): The time string denoting the amount of training completed before EMA begins.
Currently only units of duration ('dur'), batch ('ba') and epoch ('ep') are supported.
Default: ``'0.0dur'``.
update_interval (str, optional): The time string specifying the period at which updates are done. For example,
an ``update_interval='1ep'`` means updates are done every epoch, while ``update_interval='10ba'`` means
updates are done once every ten batches. Units must match the units used to specify ``half_life`` if not
using ``smoothing``. If not specified, ``update_interval`` will default to ``1`` in the units of
``half_life``, or ``"1ba"`` if ``smoothing`` is specified. Time must be an integer value in the units
specified. Default: ``None``.
Example:
.. testcode::
from composer.algorithms import EMA
algorithm = EMA(half_life='1000ba', update_interval='1ba')
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[algorithm],
optimizers=[optimizer]
)
"""
def __init__(self,
half_life: Optional[str] = '1000ba',
smoothing: Optional[float] = None,
ema_start: str = '0.0dur',
update_interval: Optional[str] = None):
self.ema_model = None
self.ema_weights_active = False
self.ema_started = False
self.serialized_attributes = ['ema_model', 'ema_weights_active', 'ema_started']
# Verify that either half_life or smoothing has been specified
if half_life is None and smoothing is None:
raise ValueError(f'Either half_life or smoothing must be specified')
# Verify that only one of half_life or smoothing has been specified
if half_life is not None and smoothing is not None:
raise ValueError(f'Only one of half_life or smoothing can be specified')
# Check timestrings are parsable and convert into time object
if half_life is not None:
self.half_life = Time.from_timestring(half_life)
# Convert start time to a time object
self.ema_start = Time.from_timestring(ema_start)
# Create the update interval if none is specified
if update_interval is None:
if self.half_life:
self.update_interval = Time(1, self.half_life.unit)
else:
self.update_interval = Time(1, TimeUnit.BATCH)
elif type(update_interval) is str:
self.update_interval = Time.from_timestring(update_interval)
else:
raise ValueError(f'update_interval must be None or a time string.')
# Verify that the units of half_life and update_interval are compatible if necessary
if half_life is not None and self.half_life.unit != self.update_interval.unit:
raise ValueError(f'Units of half_life and update_interval must match.')
# Verify that the time strings have supported units.
if self.update_interval.unit not in [TimeUnit.BATCH, TimeUnit.EPOCH]:
raise ValueError(f'Invalid time unit for parameter update_interval: '
f'{self.update_interval.unit}')
# Calculate the appropriate weighting for the moving average
if smoothing is None and self.half_life:
self.smoothing = 2**(-(self.update_interval.value / self.half_life.value))
else:
self.smoothing = smoothing
# Construct the appropriate matching events
self.move_device_events = [Event.EVAL_START, Event.FIT_START, Event.PREDICT_START]
self.move_param_events = [Event.BATCH_START, Event.EVAL_START, Event.EVAL_END]
self.checkpoint_events = [Event.BATCH_CHECKPOINT, Event.EPOCH_CHECKPOINT]
if self.update_interval.unit == TimeUnit.BATCH:
self.update_event = Event.BATCH_END
elif self.update_interval.unit == TimeUnit.EPOCH:
self.update_event = Event.EPOCH_END
def _should_start(self, state: State) -> bool:
if self.ema_start.unit == TimeUnit.DURATION:
current_time = state.get_elapsed_duration()
if current_time is not None:
should_start = (self.ema_start <= current_time)
else:
should_start = False
else:
current_time = state.timestamp.get(self.ema_start.unit).value
should_start = (self.ema_start.value <= current_time)
return should_start
def _ensure_training_weights_active(self, state: State):
if self.ema_weights_active is True and self.ema_model is not None:
self.ema_model.swap_params(model=state.model)
self.ema_weights_active = False
def _ensure_ema_weights_active(self, state: State):
if self.ema_weights_active is False and self.ema_model is not None:
self.ema_model.swap_params(model=state.model)
self.ema_weights_active = True
def match(self, event: Event, state: State) -> bool:
# Always run on init
if event == Event.INIT:
return True
# Check if ema should start running, and if so reinitialize models
if event == self.update_event and self.ema_started is False and self._should_start(state):
self.ema_model = EMAParameters(state.model)
self.ema_started = True
# Match on checkpointing events if a checkpoint is to be saved
if event in [Event.BATCH_CHECKPOINT, Event.EPOCH_CHECKPOINT] and self.ema_started:
checkpoint_savers = [cb for cb in state.callbacks if isinstance(cb, CheckpointSaver)]
for checkpoint_saver in checkpoint_savers:
if checkpoint_saver.save_interval(state, event) is True:
return True
# Otherwise, always run on events where ema params must be moved after ema has started
if event in self.move_param_events and self.ema_started:
return True
# Run on events where ema params must be moved to the correct device
if event in self.move_device_events and self.ema_started:
return True
# Conditionally run on the update event if ema has started
if event == self.update_event and self.ema_started:
return (state.timestamp.get(self.update_interval.unit).value % self.update_interval.value == 0)
return False
def apply(self, event: Event, state: State, logger: Logger) -> None:
assert isinstance(self.update_interval, Time)
assert isinstance(self.smoothing, float)
if event == Event.INIT:
# Create the models so that the checkpoints can be loaded
self.ema_model = EMAParameters(state.model)
assert self.ema_model is not None
if event == Event.FIT_START or event == Event.PREDICT_START:
# Ensure that params are on the right device if a checkpoint has been loaded
self.ema_model.move_params_to_device(destination_model=state.model)
if event == Event.BATCH_START and self.ema_weights_active:
# Ensure the model being trained has the correct weights
self._ensure_training_weights_active(state)
if event in [Event.BATCH_END, Event.EPOCH_END]:
# Update the ema model
compute_ema(state.model, self.ema_model, smoothing=self.smoothing)
if event == Event.EVAL_START:
# Verify that the ema params are on the correct device.
# Needed to ensure doing eval before training can resume correctly.
self.ema_model.move_params_to_device(destination_model=state.model)
# Swap out the training model for the ema model in state
self._ensure_ema_weights_active(state)
if event == Event.EVAL_END:
# Swap out the ema model for the training model in state
self._ensure_training_weights_active(state)
if event in self.checkpoint_events:
# Swap the training model out for the ema model for checkpointing
self._ensure_ema_weights_active(state)
def state_dict(self) -> Dict[str, Any]:
state_dict = super().state_dict()
for attribute_name in self.serialized_attributes:
if attribute_name == 'ema_model':
ema_model = getattr(self, attribute_name)
state_dict[attribute_name] = {}
state_dict[attribute_name]['named_parameters_dict'] = ema_model.named_parameters_dict
state_dict[attribute_name]['named_buffers_dict'] = ema_model.named_buffers_dict
else:
state_dict[attribute_name] = getattr(self, attribute_name)
return state_dict
def ensure_compatible_state_dict(self, state: Dict[str, Any]):
"""Ensure state dicts created prior to Composer 0.13.0 are compatible with later versions."""
# Version 0.13.0 and later state dicts will not include training_model.
if 'training_model' not in state:
return state
# Prior to version 0.13.0, the state dict contained a separate training_model and ema_model.
# Only one of these needs to be loaded as the ema_model.
if state['ema_weights_active'] is True:
# If EMA weights are active, load training weights into the ema_model storage
state_dict = state['training_model']
else:
# If EMA weights are not active, load the ema weights into the ema_model storage
state_dict = state['ema_model']
named_parameters_dict = {}
named_buffers_dict = {}
# Rewrite the state dict in the newer format.
if isinstance(self.ema_model, EMAParameters):
for key in self.ema_model.named_parameters_dict.keys():
if key in state_dict:
named_parameters_dict[key] = state_dict[key]
for key in self.ema_model.named_buffers_dict.keys():
if key in state_dict:
named_buffers_dict[key] = state_dict[key]
else:
raise ValueError('ema_model must be initialized before loading state dicts from versions earlier than 0.13.0')
# Update the state dict with the new format
del state['training_model']
state['ema_model'] = {}
state['ema_model']['named_parameters_dict'] = named_parameters_dict
state['ema_model']['named_buffers_dict'] = named_buffers_dict
return state
def load_state_dict(self, state: Dict[str, Any], strict: bool = False):
state_dict = self.ensure_compatible_state_dict(state)
for attribute_name, serialized_value in state_dict.items():
if attribute_name != 'repr': # skip attribute added by parent class
if attribute_name == 'ema_model':
self.ema_model = EMAParameters(None)
self.ema_model.named_parameters_dict = serialized_value['named_parameters_dict']
self.ema_model.named_buffers_dict = serialized_value['named_buffers_dict']
else:
setattr(self, attribute_name, serialized_value)
def get_ema_model(self, model: torch.nn.Module) -> torch.nn.Module:
"""Replaces the parameters of the supplied model with the ema parameters if they are not already active.
Args:
model (torch.nn.Module): The model to replace the parameters of.
Returns:
torch.nn.Module: The model with the ema parameters.
"""
assert self.ema_model is not None
# Ensure that self.ema_model contains the ema weights. If not raise an error.
if self.ema_weights_active:
raise ValueError('The ema weights are currently contained in the composer model.')
self.ema_model.transfer_ema_params(model=model)
return model
def get_training_model(self, model: torch.nn.Module) -> torch.nn.Module:
"""Replaces the parameters of the supplied model with the training parameters if they are not already active.
Args:
model (torch.nn.Module): The model to replace the parameters of.
Returns:
torch.nn.Module: The model with the training parameters.
"""
assert self.ema_model is not None
# Ensure that self.ema_model contains the training weights. If not raise an error.
if not self.ema_weights_active:
raise ValueError('The training weights are currently contained in the composer model.')
self.ema_model.transfer_ema_params(model=model)
return model
class EMAParameters:
"""A class that stores the parameters and buffers of a model needed for averaging."""
def __init__(self, model: Union[None, torch.nn.Module]):
if model is not None:
# Copy the trainable parameters and buffers.
self.named_parameters_dict = {
name: param.data.clone() for name, param in model.named_parameters() if param.requires_grad
}
self.named_buffers_dict = {name: buffer.data.clone() for name, buffer in model.named_buffers()}
else:
# Empty storage
self.named_parameters_dict = {}
self.named_buffers_dict = {}
def named_parameters(self):
return self.named_parameters_dict.items()
def named_buffers(self):
return self.named_buffers_dict.items()
def swap_params(self, model: torch.nn.Module):
"""Swaps the parameters and buffers of a model with the ema parameters."""
with torch.no_grad():
ema_params = self.named_parameters_dict
ema_buffers = self.named_buffers_dict
for name, param in model.named_parameters():
if name in ema_params:
# Use in-place copy_ rather than raw data access (e.g. .data), which doesn't work with FSDP
dummy_param = param.clone()
param.copy_(ema_params[name])
ema_params[name].copy_(dummy_param)
for name, buffer in model.named_buffers():
if name in ema_buffers:
# Use in-place copy_ rather than raw data access (e.g. .data), which doesn't work with FSDP
dummy_buffer = buffer.clone()
buffer.copy_(ema_buffers[name])
ema_buffers[name].copy_(dummy_buffer)
def transfer_ema_params(self, model: torch.nn.Module):
"""Transfers the parameters and buffers from the ema model to the supplied model."""
with torch.no_grad():
for name, param in model.named_parameters():
if name in self.named_parameters_dict:
param.copy_(self.named_parameters_dict[name])
for name, buffer in model.named_buffers():
if name in self.named_buffers_dict:
buffer.copy_(self.named_buffers_dict[name])
def move_params_to_device(self, destination_model: torch.nn.Module):
"""Moves the ema parameters and buffers to the device of a destination model."""
for name, param in destination_model.named_parameters():
if name in self.named_parameters_dict:
self.named_parameters_dict[name] = self.named_parameters_dict[name].to(param.device)
for name, buffer in destination_model.named_buffers():
if name in self.named_buffers_dict:
self.named_buffers_dict[name] = self.named_buffers_dict[name].to(buffer.device)
| composer-dev | composer/algorithms/ema/ema.py |
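# Hedged illustration (not library code): a minimal standalone sketch of the parameter
# averaging that the EMA algorithm above maintains, assuming the usual update rule
# ema = smoothing * ema + (1 - smoothing) * param applied to a dict of tensors, as in
# EMAParameters.named_parameters_dict. The helper name `ema_update` is hypothetical.
import torch


def ema_update(ema_params: dict, model: torch.nn.Module, smoothing: float = 0.99) -> None:
    """Fold the current model parameters into the stored EMA tensors in place."""
    with torch.no_grad():
        for name, param in model.named_parameters():
            if name in ema_params:
                ema_params[name].mul_(smoothing).add_(param, alpha=1 - smoothing)


model = torch.nn.Linear(4, 2)
ema_params = {name: p.detach().clone() for name, p in model.named_parameters() if p.requires_grad}
with torch.no_grad():
    model.weight.add_(0.1)  # stand-in for one optimizer step
ema_update(ema_params, model, smoothing=0.99)
print(torch.allclose(ema_params['weight'], model.weight))  # False: the average lags the raw weights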
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Exponential moving average maintains a moving average of model parameters and uses these at test time.
See the :doc:`Method Card </method_cards/ema>` for more details.
"""
from composer.algorithms.ema.ema import EMA as EMA
from composer.algorithms.ema.ema import compute_ema as compute_ema
__all__ = ['EMA', 'compute_ema']
| composer-dev | composer/algorithms/ema/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Low Precision GroupNorm."""
from __future__ import annotations
import logging
import warnings
from typing import Dict, Optional, Sequence, Type, Union
import torch
import torch.nn.functional as F
from torch.optim import Optimizer
from composer.algorithms.warnings import NoEffectWarning
from composer.core import Algorithm, Event, Precision, State
from composer.loggers import Logger
from composer.utils import module_surgery
log = logging.getLogger(__name__)
def apply_low_precision_groupnorm(model,
precision: Optional[Precision] = None,
optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None):
if (precision != Precision.AMP_FP16 and precision != Precision.AMP_BF16):
warnings.warn(NoEffectWarning('Low Precision GroupNorm only applies to AMP_FP16 and AMP_BF16 precisions.'))
return model
policy: Dict[Type[torch.nn.Module], module_surgery.ReplacementFunction] = {torch.nn.GroupNorm: _to_LPGroupNorm}
replaced_instances = module_surgery.replace_module_classes(module=model, optimizers=optimizers, policies=policy)
if len(replaced_instances) == 0:
warnings.warn(NoEffectWarning('No instances of torch.nn.GroupNorm found.'))
log.info(f'Successfully replaced {len(replaced_instances)} instances of GroupNorm with LowPrecisionGroupNorm')
class LowPrecisionGroupNorm(Algorithm):
"""
Replaces all instances of :class:`torch.nn.GroupNorm` with :class:`.LPGroupNorm`.
LPGroupNorm is a thin wrapper around :class:`torch.nn.GroupNorm` which forces the layer to run
in lower precision (torch.float16 or torch.bfloat16) if autocast is enabled. This algorithm has
no effect in FP32 or DeepSpeed FP16 mode, where autocast is disabled.
This algorithm is intended to be used instead of Fused GroupNorm. They have similar behavior and performance.
Args:
apply_at (Event): Event where algorithm is applied.
"""
def __init__(self, apply_at: Event = Event.INIT):
self.apply_at = apply_at
if self.apply_at not in {Event.INIT, Event.AFTER_LOAD}:
raise ValueError('LowPrecisionGroupNorm only supports application on Event.INIT and Event.AFTER_LOAD.')
def __repr__(self) -> str:
return f'{self.__class__.__name__}(apply_at={self.apply_at})'
@staticmethod
def required_on_load() -> bool:
return True
def match(self, event: Event, state: State) -> bool:
del state # unused
return event == self.apply_at
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
del event, logger # unused
apply_low_precision_groupnorm(model=state.model, optimizers=state.optimizers, precision=state._precision)
class LPGroupNorm(torch.nn.GroupNorm):
def __init__(self, num_groups, num_channels, eps=1e-05, affine=True, device=None, dtype=None):
super().__init__(
num_groups=num_groups,
num_channels=num_channels,
eps=eps,
affine=affine,
device=device,
dtype=dtype,
)
def forward(self, x):
module_device = x.device
downcast_x = _cast_if_autocast_enabled(x)
downcast_weight = _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight
downcast_bias = _cast_if_autocast_enabled(self.bias) if self.bias is not None else self.bias
with torch.autocast(enabled=False, device_type=module_device.type):
return F.group_norm(downcast_x, self.num_groups, downcast_weight, downcast_bias, self.eps)
def _cast_if_autocast_enabled(tensor):
if torch.is_autocast_enabled():
if tensor.device.type == 'cuda':
dtype = torch.get_autocast_gpu_dtype()
elif tensor.device.type == 'cpu':
dtype = torch.get_autocast_cpu_dtype()
else:
raise NotImplementedError()
return tensor.to(dtype=dtype)
return tensor
def _to_LPGroupNorm(layer: torch.nn.Module, module_index: int) -> LPGroupNorm:
"""Defines a replacement policy from a `torch.nn.GroupNorm` to a `LPGroupNorm`"""
if not isinstance(layer, torch.nn.GroupNorm):
raise TypeError(f'Expected torch.nn.GroupNorm, got {type(layer)}')
lp_groupnorm = LPGroupNorm(layer.num_groups, layer.num_channels, layer.eps, layer.affine)
with torch.no_grad():
if layer.weight is None:
lp_groupnorm.register_parameter('weight', None)
else:
lp_groupnorm.weight.copy_(layer.weight) # type: ignore
if layer.bias is None:
lp_groupnorm.register_parameter('bias', None)
else:
lp_groupnorm.bias.copy_(layer.bias) # type: ignore
return lp_groupnorm
| composer-dev | composer/algorithms/low_precision_groupnorm/low_precision_groupnorm.py |
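# Hedged illustration (not library code): mirrors the casting trick in LPGroupNorm.forward.
# Under torch.autocast, GroupNorm is normally kept in full precision, so the inputs and
# affine parameters are downcast to the autocast dtype and the kernel is run with autocast
# disabled. Uses CPU bfloat16 autocast, assuming a PyTorch build with bfloat16 CPU kernels.
import torch
import torch.nn.functional as F

gn = torch.nn.GroupNorm(num_groups=2, num_channels=4)
x = torch.randn(8, 4, 16, 16)

with torch.autocast(device_type='cpu', dtype=torch.bfloat16):
    out_default = gn(x)  # autocast typically keeps this in float32
    x_lp = x.to(torch.bfloat16)
    w_lp = gn.weight.to(torch.bfloat16)
    b_lp = gn.bias.to(torch.bfloat16)
    with torch.autocast(device_type='cpu', enabled=False):
        out_lp = F.group_norm(x_lp, gn.num_groups, w_lp, b_lp, gn.eps)

print(out_default.dtype, out_lp.dtype)  # expected: torch.float32 torch.bfloat16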
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Replaces all instances of :class:`torch.nn.GroupNorm` with a low precision :class:`torch.nn.GroupNorm` (either float16 or bfloat16).
By default, torch.autocast always runs torch.nn.GroupNorm in float32, so this surgery forces a lower precision.
"""
from composer.algorithms.low_precision_groupnorm.low_precision_groupnorm import (LowPrecisionGroupNorm,
apply_low_precision_groupnorm)
__all__ = ['LowPrecisionGroupNorm', 'apply_low_precision_groupnorm']
| composer-dev | composer/algorithms/low_precision_groupnorm/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core CutOut classes and functions."""
from __future__ import annotations
import logging
from typing import Any, Callable, Optional, TypeVar, Union
import numpy as np
import torch
from PIL.Image import Image as PillowImage
from torch import Tensor
from composer.algorithms.utils.augmentation_common import image_as_type
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
log = logging.getLogger(__name__)
__all__ = ['CutOut', 'cutout_batch']
ImgT = TypeVar('ImgT', torch.Tensor, PillowImage)
def cutout_batch(input: ImgT, num_holes: int = 1, length: float = 0.5, uniform_sampling: bool = False) -> ImgT:
"""See :class:`.CutOut`.
Args:
input (PIL.Image.Image | torch.Tensor): Image or batch of images. If
a :class:`torch.Tensor`, must be a single image of shape ``(C, H, W)``
or a batch of images of shape ``(N, C, H, W)``.
num_holes (int, optional): Integer number of holes to cut out. Default: ``1``.
length (float, optional): Relative side length of the masked region.
If specified, ``length`` is interpreted as a fraction of ``H`` and
``W``, and the resulting box is a square with side length
``length * min(H, W)``. Must be in the interval :math:`(0, 1)`.
Default: ``0.5``.
uniform_sampling (bool, optional): If ``True``, sample the bounding
box such that each pixel has an equal probability of being masked.
If ``False``, defaults to the sampling used in the original paper
implementation. Default: ``False``.
Returns:
X_cutout: Batch of images with ``num_holes`` square holes with
dimension determined by ``length`` replaced with zeros.
Example:
.. testcode::
from composer.algorithms.cutout import cutout_batch
new_input_batch = cutout_batch(X_example, num_holes=1, length=0.25)
"""
X_tensor = image_as_type(input, torch.Tensor)
h = X_tensor.shape[-2]
w = X_tensor.shape[-1]
length = int(min(h, w) * length)
mask = torch.ones_like(X_tensor)
for _ in range(num_holes):
if uniform_sampling is True:
y = np.random.randint(-length // 2, high=h + length // 2)
x = np.random.randint(-length // 2, high=w + length // 2)
else:
y = np.random.randint(h)
x = np.random.randint(w)
mask = _generate_mask(mask, w, h, x, y, length)
X_cutout = X_tensor * mask
X_out = image_as_type(X_cutout, input.__class__) # pyright struggling with unions
return X_out
class CutOut(Algorithm):
"""`CutOut <https://arxiv.org/abs/1708.04552>`_ is a data augmentation technique
that works by masking out one or more square regions of an input image.
This implementation cuts out the same square from all images in a batch.
Example:
.. testcode::
from composer.algorithms import CutOut
from composer.trainer import Trainer
cutout_algorithm = CutOut(num_holes=1, length=0.25)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[cutout_algorithm],
optimizers=[optimizer]
)
Args:
num_holes (int, optional): Integer number of holes to cut out.
Default: ``1``.
length (float, optional): Relative side length of the masked region.
If specified, ``length`` is interpreted as a fraction of ``H`` and
``W``, and the resulting box is a square with side length
``length * min(H, W)``. Must be in the interval :math:`(0, 1)`.
Default: ``0.5``.
uniform_sampling (bool, optional): If ``True``, sample the bounding
box such that each pixel has an equal probability of being masked.
If ``False``, defaults to the sampling used in the original paper
implementation. Default: ``False``.
input_key (str | int | Tuple[Callable, Callable] | Any, optional): A key that indexes to the input
from the batch. Can also be a pair of get and set functions, where the getter
is assumed to be first in the pair. The default is 0, which corresponds to any sequence, where the first element
is the input. Default: ``0``.
"""
def __init__(self,
num_holes: int = 1,
length: float = 0.5,
uniform_sampling: bool = False,
input_key: Union[str, int, Callable, Any] = 0):
self.num_holes = num_holes
self.length = length
self.uniform_sampling = uniform_sampling
self.input_key = input_key
def match(self, event: Event, state: State) -> bool:
return event == Event.AFTER_DATALOADER
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
x = state.batch_get_item(self.input_key)
assert isinstance(x, Tensor), 'Multiple tensors not supported for Cutout.'
new_x = cutout_batch(x, num_holes=self.num_holes, length=self.length, uniform_sampling=self.uniform_sampling)
state.batch_set_item(self.input_key, new_x)
def _generate_mask(mask: Tensor, width: int, height: int, x: int, y: int, cutout_length: int) -> Tensor:
y1 = np.clip(y - cutout_length // 2, 0, height)
y2 = np.clip(y + cutout_length // 2, 0, height)
x1 = np.clip(x - cutout_length // 2, 0, width)
x2 = np.clip(x + cutout_length // 2, 0, width)
mask[..., y1:y2, x1:x2] = 0.
return mask
| composer-dev | composer/algorithms/cutout/cutout.py |
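# Hedged illustration (not library code): a standalone sketch of the hole-masking
# arithmetic behind cutout_batch/_generate_mask. The hole side is a fraction of
# min(H, W), the box is clipped to the image bounds, and the region is zeroed. The
# hole center below is fixed for illustration instead of being randomly sampled.
import torch

x = torch.ones(1, 3, 32, 32)   # (N, C, H, W)
h, w = x.shape[-2], x.shape[-1]
side = int(0.25 * min(h, w))   # length=0.25 -> an 8-pixel square
cy, cx = 5, 30                 # hole center, partially off the right edge

y1, y2 = max(cy - side // 2, 0), min(cy + side // 2, h)
x1, x2 = max(cx - side // 2, 0), min(cx + side // 2, w)
x[..., y1:y2, x1:x2] = 0.0

print(int(x.sum().item()))  # fewer ones than 3 * 32 * 32 because the clipped box was zeroed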
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""`Cutout <https://arxiv.org/abs/1708.04552>`_ is a data augmentation technique that works by masking out one or more
square regions of an input image.
See the :doc:`Method Card </method_cards/cutout>` for more details.
"""
from composer.algorithms.cutout.cutout import CutOut as CutOut
from composer.algorithms.cutout.cutout import cutout_batch as cutout_batch
__all__ = ['CutOut', 'cutout_batch']
| composer-dev | composer/algorithms/cutout/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Applies Fastai's `progressive resizing <https://github.com/fastai/fastbook/blob/780b76bef3127ce5b64f8230fce60e915a
7e0735/07_sizing_and_tta.ipynb>`__ data augmentation to speed up training.
Progressive resizing initially reduces input resolution to speed up early training. Throughout training, the
downsampling factor is gradually increased, yielding larger inputs up to the original input size. A final finetuning
period is then run to finetune the model using the full-sized inputs.
See the :doc:`Method Card </method_cards/progressive_resizing>` for more details.
"""
from composer.algorithms.progressive_resizing.progressive_resizing import ProgressiveResizing as ProgressiveResizing
from composer.algorithms.progressive_resizing.progressive_resizing import resize_batch as resize_batch
__all__ = ['ProgressiveResizing', 'resize_batch']
| composer-dev | composer/algorithms/progressive_resizing/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core Progressive Resizing classes and functions."""
from __future__ import annotations
import logging
import textwrap
from functools import partial
from typing import Any, Callable, Optional, Tuple, Union
import torch
import torch.nn.functional as F
import torchvision.transforms.functional
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.loss.utils import check_for_index_targets
log = logging.getLogger(__name__)
_VALID_MODES = ('crop', 'resize')
T_ResizeTransform = Callable[[torch.Tensor], torch.Tensor]
__all__ = ['resize_batch', 'ProgressiveResizing']
def resize_batch(input: torch.Tensor,
target: torch.Tensor,
scale_factor: float,
mode: str = 'resize',
resize_targets: bool = False) -> Tuple[torch.Tensor, torch.Tensor]:
"""Resize inputs and optionally outputs by cropping or interpolating.
Args:
input (torch.Tensor): input tensor of shape ``(N, C, H, W)``.
Resizing will be done along dimensions H and W using the constant
factor ``scale_factor``.
target (torch.Tensor): output tensor of shape ``(N, H, W)`` or
``(N, C, H, W)`` that will also be resized if ``resize_targets``
is ``True``.
scale_factor (float): scaling coefficient for the height and width of the
input/output tensor. 1.0 keeps the original size.
mode (str, optional): type of scaling to perform. Value must be one of ``'crop'`` or
``'resize'``. ``'crop'`` performs a random crop, whereas ``'resize'``
performs a nearest neighbor interpolation. Default: ``"resize"``.
resize_targets (bool, optional): whether to resize the targets, ``y``. Default: ``False``.
Returns:
X_sized: resized input tensor of shape ``(N, C, H * scale_factor, W * scale_factor)``.
y_sized: if ``resize_targets`` is ``True``, resized output tensor
of shape ``(N, H * scale_factor, W * scale_factor)`` or ``(N, C, H * scale_factor, W * scale_factor)``,
depending on the input ``y``. Otherwise returns the original ``y``.
Example:
.. testcode::
from composer.algorithms.progressive_resizing import resize_batch
X_resized, y_resized = resize_batch(X_example,
y_example,
scale_factor=0.5,
mode='resize',
resize_targets=False)
"""
# Verify dimensionalities are enough to support resizing
assert input.dim() > 2, 'Input dimensionality not large enough for resizing'
if resize_targets is True:
assert target.dim() > 2, 'Target dimensionality not large enough for resizing'
# Short-circuit if nothing should be done
if scale_factor >= 1:
return input, target
# Prep targets for resizing if necessary
if check_for_index_targets(target) and resize_targets is True:
# Add a dimension to match shape of the input and change type for resizing
y_sized = target.float().unsqueeze(1)
else:
y_sized = target
if mode.lower() == 'crop' and resize_targets is False:
# Make a crop transform for X
resize_transform = _make_crop(tensor=input, scale_factor=scale_factor)
X_sized, y_sized = resize_transform(input), target
elif mode.lower() == 'crop' and resize_targets is True:
# Make a crop transform for X and y
resize_transform, resize_y = _make_crop_pair(X=input, y=y_sized, scale_factor=scale_factor)
X_sized, y_sized = resize_transform(input), resize_y(y_sized)
elif mode.lower() == 'resize':
# Make a resize transform (can be used for X or y)
resize_transform = _make_resize(scale_factor=scale_factor)
X_sized = resize_transform(input)
if resize_targets:
y_sized = resize_transform(y_sized)
else:
raise ValueError(f"Progressive mode '{mode}' not supported.")
# Revert targets to their original format if they were modified
if check_for_index_targets(target) and resize_targets is True:
# Convert back to original format for training
y_sized = y_sized.squeeze(dim=1).to(target.dtype)
# Log results
log.debug(
textwrap.dedent(f"""\
Applied Progressive Resizing with scale_factor={scale_factor} and mode={mode}.
Old input dimensions: (H,W)={input.shape[2], input.shape[3]}.
New input dimensions: (H,W)={X_sized.shape[2], X_sized.shape[3]}"""))
return X_sized, y_sized
class ProgressiveResizing(Algorithm):
r"""Resize inputs and optionally outputs by cropping or interpolating.
Apply Fastai's `progressive resizing <https://\
github.com/fastai/fastbook/blob/780b76bef3127ce5b64f8230fce60e915a7e0735/07_sizing_and_tta.ipynb>`__ data
augmentation to speed up training.
Progressive resizing initially reduces input resolution to speed up early training.
Throughout training, the downsampling factor is gradually increased, yielding larger inputs
up to the original input size. A final finetuning period is then run to finetune the
model using the full-sized inputs.
Example:
.. testcode::
from composer.algorithms import ProgressiveResizing
from composer.trainer import Trainer
progressive_resizing_algorithm = ProgressiveResizing(
mode='resize',
initial_scale=1.0,
finetune_fraction=0.2,
delay_fraction=0.2,
size_increment=32,
resize_targets=False
)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[progressive_resizing_algorithm],
optimizers=[optimizer]
)
Args:
mode (str, optional): Type of scaling to perform. Value must be one of ``'crop'`` or ``'resize'``.
``'crop'`` performs a random crop, whereas ``'resize'`` performs a nearest neighbor
interpolation. Default: ``'resize'``.
initial_scale (float, optional): Initial scale factor used to shrink the inputs. Must be a
value in between 0 and 1. Default: ``0.5``.
finetune_fraction (float, optional): Fraction of training to reserve for finetuning on the
full-sized inputs. Must be a value in between 0 and 1. Default: ``0.2``.
delay_fraction (float, optional): Fraction of training before resizing ramp begins.
Must be a value in between 0 and 1. Default: ``0.5``.
size_increment (int, optional): Align sizes to a multiple of this number. Default: ``4``.
resize_targets (bool, optional): If True, resize targets also. Default: ``False``.
input_key (str | int | Tuple[Callable, Callable] | Any, optional): A key that indexes to the input
from the batch. Can also be a pair of get and set functions, where the getter
is assumed to be first in the pair. The default is 0, which corresponds to any sequence, where the first element
is the input. Default: ``0``.
target_key (str | int | Tuple[Callable, Callable] | Any, optional): A key that indexes to the target
from the batch. Can also be a pair of get and set functions, where the getter
is assumed to be first in the pair. The default is 1, which corresponds to any sequence, where the second element
is the target. Default: ``1``.
"""
def __init__(
self,
mode: str = 'resize',
initial_scale: float = .5,
finetune_fraction: float = .2,
delay_fraction: float = .5,
size_increment: int = 4,
resize_targets: bool = False,
input_key: Union[str, int, Tuple[Callable, Callable], Any] = 0,
target_key: Union[str, int, Tuple[Callable, Callable], Any] = 1,
):
if mode not in _VALID_MODES:
raise ValueError(f"mode '{mode}' is not supported. Must be one of {_VALID_MODES}")
if not (0 <= initial_scale <= 1):
raise ValueError(f'initial_scale must be between 0 and 1: {initial_scale}')
if not (0 <= finetune_fraction <= 1):
raise ValueError(f'finetune_fraction must be between 0 and 1: {finetune_fraction}')
if not (delay_fraction + finetune_fraction <= 1):
raise ValueError(
f'delay_fraction + finetune_fraction must be less than 1: {delay_fraction + finetune_fraction}')
self.mode = mode
self.initial_scale = initial_scale
self.finetune_fraction = finetune_fraction
self.delay_fraction = delay_fraction
self.size_increment = size_increment
self.resize_targets = resize_targets
self.input_key, self.target_key = input_key, target_key
def match(self, event: Event, state: State) -> bool:
return event == Event.AFTER_DATALOADER
def apply(self, event: Event, state: State, logger: Optional[Logger] = None) -> None:
input, target = state.batch_get_item(key=self.input_key), state.batch_get_item(key=self.target_key)
assert isinstance(input, torch.Tensor) and isinstance(target, torch.Tensor), \
'Multiple tensors not supported for this method yet.'
# Calculate the current size of the inputs to use
elapsed_duration = state.get_elapsed_duration()
assert elapsed_duration is not None, 'elapsed duration should be set on Event.AFTER_DATALOADER'
if elapsed_duration.value >= self.delay_fraction:
scale_frac_elapsed = min([
(elapsed_duration.value - self.delay_fraction) / (1 - self.finetune_fraction - self.delay_fraction), 1
])
else:
scale_frac_elapsed = 0.0
# Linearly increase to full size at the start of the fine tuning period
scale_factor = self.initial_scale + (1 - self.initial_scale) * scale_frac_elapsed
# adjust scale factor so that we make width a multiple of size_increment
width = input.shape[3]
scaled_width_pinned = round(width * scale_factor / self.size_increment) * self.size_increment
scale_factor_pinned = scaled_width_pinned / width
new_input, new_target = resize_batch(input=input,
target=target,
scale_factor=scale_factor_pinned,
mode=self.mode,
resize_targets=self.resize_targets)
state.batch_set_item(self.input_key, new_input)
state.batch_set_item(self.target_key, new_target)
if logger is not None:
logger.log_metrics({
'progressive_resizing/height': new_input.shape[2],
'progressive_resizing/width': new_input.shape[3],
'progressive_resizing/scale_factor': scale_factor
})
def _make_crop(tensor: torch.Tensor, scale_factor: float) -> T_ResizeTransform:
"""Makes a random crop transform for an input image."""
Hc = int(scale_factor * tensor.shape[2])
Wc = int(scale_factor * tensor.shape[3])
top = torch.randint(tensor.shape[2] - Hc, size=(1,))
left = torch.randint(tensor.shape[3] - Wc, size=(1,))
resize_transform = partial(torchvision.transforms.functional.crop,
top=int(top),
left=int(left),
height=Hc,
width=Wc)
return resize_transform
def _make_crop_pair(
X: torch.Tensor,
y: torch.Tensor,
scale_factor: float,
) -> Tuple[T_ResizeTransform, T_ResizeTransform]:
"""Makes a pair of random crops for an input image ``X`` and target tensor ``y``.
The same region is selected from both.
"""
# New height and width for X
HcX = int(scale_factor * X.shape[2])
WcX = int(scale_factor * X.shape[3])
# New height and width for y
Hcy = int(scale_factor * y.shape[2])
Wcy = int(scale_factor * y.shape[3])
# Select a corner for the crop from X
topX = torch.randint(X.shape[2] - HcX, size=(1,))
leftX = torch.randint(X.shape[3] - WcX, size=(1,))
# Find the corresponding corner for the crop from y
height_ratio = y.shape[2] / X.shape[2]
width_ratio = y.shape[3] / X.shape[3]
topy = int(height_ratio * topX)
lefty = int(width_ratio * leftX)
# Make the two transforms
resize_X = partial(torchvision.transforms.functional.crop, top=int(topX), left=int(leftX), height=HcX, width=WcX)
resize_y = partial(torchvision.transforms.functional.crop, top=topy, left=lefty, height=Hcy, width=Wcy)
return resize_X, resize_y
def _make_resize(scale_factor: float) -> T_ResizeTransform:
"""Makes a nearest-neighbor interpolation transform at the specified scale factor."""
resize_transform = partial(F.interpolate, scale_factor=scale_factor, mode='nearest', recompute_scale_factor=False)
return resize_transform
| composer-dev | composer/algorithms/progressive_resizing/progressive_resizing.py |
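# Hedged illustration (not library code): the resizing schedule used in
# ProgressiveResizing.apply. The scale factor stays at initial_scale through the delay
# period, ramps linearly to 1.0 by the start of the finetuning period, and is pinned so
# the scaled width is a multiple of size_increment. Defaults mirror the constructor above.
def pinned_scale_factor(elapsed: float,
                        width: int,
                        initial_scale: float = 0.5,
                        delay_fraction: float = 0.5,
                        finetune_fraction: float = 0.2,
                        size_increment: int = 4) -> float:
    if elapsed >= delay_fraction:
        ramp = min((elapsed - delay_fraction) / (1 - finetune_fraction - delay_fraction), 1.0)
    else:
        ramp = 0.0
    scale = initial_scale + (1 - initial_scale) * ramp
    scaled_width = round(width * scale / size_increment) * size_increment
    return scaled_width / width


for elapsed in (0.0, 0.5, 0.65, 0.8, 1.0):
    print(f'{elapsed:.2f} -> scale {pinned_scale_factor(elapsed, width=224):.3f}')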
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""ALiBi (Attention with Linear Biases; `Press et al, 2021 <https://arxiv.org/abs/2108.12409>`_) dispenses with position
embeddings for tokens in transformer-based NLP models, instead encoding position information by biasing the query-key
attention scores proportionally to each token pair's distance.
See the :doc:`Method Card </method_cards/alibi>` for more details.
"""
from composer.algorithms.alibi.alibi import Alibi, apply_alibi
__all__ = ['Alibi', 'apply_alibi']
| composer-dev | composer/algorithms/alibi/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core ALiBi classes and functions."""
from __future__ import annotations
import logging
from typing import Optional, Sequence, Union
import torch
from torch.optim import Optimizer
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.utils import MissingConditionalImportError, module_surgery
log = logging.getLogger(__name__)
__all__ = ['Alibi', 'apply_alibi']
def apply_alibi(
model: torch.nn.Module,
max_sequence_length: int,
optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None,
) -> None:
"""Removes position embeddings and replaces the attention function and attention mask
as per :class:`.Alibi`. Note that the majority of the training speed-up from using ALiBi
comes from being able to train on shorter sequence lengths; this function does not scale
the training sequence length as :class:`.Alibi` does, so little speedup will be
observed from using it alone. See the :doc:`Method Card </method_cards/alibi>` for
more details. This function should be called after the model is instantiated and
before training begins.
Example:
.. code-block:: python
import composer.functional as cf
cf.apply_alibi(
model=model,
max_sequence_length=512
)
Args:
model (torch.nn.Module): Model to transform.
max_sequence_length (int): Maximum sequence length that the
model will be able to accept. Internally, the transformations applied by alibi
change sequence-shaped tensors to handle sequences up to ``max_sequence_length``.
Depending on ``max_sequence_length`` and ``model`` these changes could increase
or decrease the model's maximum sequence length.
At minimum, ``max_sequence_length`` should be set to the sequence length used
during training. However, if evaluating on sequence lengths longer than those
used in training, ``max_sequence_length`` should be set accordingly.
Note that larger ``max_sequence_length`` means a larger memory footprint of
the model. So, it is best to set this parameter equal to the longest
sequence length that will be seen during training and/or evaluation.
optimizers (torch.optim.Optimizer | Sequence[torch.optim.Optimizer], optional):
Existing optimizers bound to ``model.parameters()``. All optimizers that have already been
constructed with ``model.parameters()`` must be specified here so
they will optimize the correct parameters.
If the optimizer(s) are constructed *after* calling this function,
then it is safe to omit this parameter. These optimizers will see the correct
model parameters.
"""
try:
from composer.algorithms.alibi.attention_surgery_functions import policy_registry
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='transformers') from e
# To use model surgery utilities, we need to define a policy of type
# Mapping[Type[torch.nn.Module], ReplacementFunction], where ReplacementFunction is
# Callable[[torch.nn.Module, Optional[int]], Optional[torch.nn.Module]].
#
# This mapping is built by the source code in `./attention_surgery_functions/` but
# needs to be completed here by "freezing" alibi-specific arguments.
#
# For additional details, see `./attention_surgery_functions/utils.py`.
def as_replacement_function(surgery_function):
def replacement_function(module: torch.nn.Module, module_index: int):
return surgery_function(module, module_index, max_sequence_length=max_sequence_length)
return replacement_function
# Wrap each alibi_surgery_function as a ReplacementFunction by "freezing" `max_sequence_length`
policies = {
module_class: as_replacement_function(alibi_surgery_function)
for module_class, alibi_surgery_function in policy_registry.items()
}
# Note: `policies` defines replacements for _all_ the modules registered in `policy_registry`,
# meaning that some replacements may be irrelevant for `model`.
# Conversely, attention modules within `model` may be ignored if they are not registered by the
# implementations within `./attention_surgery_functions/`.
replaced_pairs = module_surgery.replace_module_classes(model, optimizers=optimizers, policies=policies)
count = len(replaced_pairs)
if count == 0:
supported_modules = ''.join(sorted(['\n\t' + c.__module__ + '.' + c.__name__ for c in policy_registry.keys()]))
log.warning(f'ALiBi had no effect on the model! Support for ALiBi surgery '
f'is currently limited to the following classes: {supported_modules}')
else:
log.info(f' {count} instances of ALiBi added')
class Alibi(Algorithm):
"""ALiBi (Attention with Linear Biases; `Press et al, 2021 <https://arxiv.org/abs/2108.12409>`_) dispenses with
position embeddings and instead directly biases attention matrices such that nearby tokens attend to one another
more strongly.
ALiBi yields excellent extrapolation to unseen sequence lengths
compared to other position embedding schemes. We leverage this
extrapolation capability by training with shorter sequence lengths,
which reduces the memory and computation load.
This algorithm runs on :attr:`.Event.INIT` to modify the model
before the model has been moved to accelerators. It also runs on
:attr:`.Event.AFTER_DATALOADER` to modify the shape of a batch of
data after the model and data have been moved to accelerators.
See the :doc:`Method Card </method_cards/alibi>` for more details.
Example:
.. code-block::
from composer.algorithms import Alibi
from composer.trainer import Trainer
alibi = Alibi(
max_sequence_length=512,
train_sequence_length_scaling=0.25,
)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
max_duration="1ep",
algorithms=[alibi]
)
Args:
max_sequence_length (int): Maximum sequence length that the
model will be able to accept. This is sometimes necessary for evaluating
on sequence lengths longer than the model was initialized to
accommodate.
train_sequence_length_scaling (float, optional): Amount by which to scale
training sequence length. One batch of training data will be
reshaped from shape :math:`(batch, sequence\\_length)` to
:math:`(\\frac{batch}{train\\_sequence\\_length\\_scaling},
sequence\\_length \\times train\\_sequence\\_length\\_scaling)`. Default: ``0.25``.
"""
def __init__(self, max_sequence_length: int, train_sequence_length_scaling: float = 0.25) -> None:
self.max_sequence_length = max_sequence_length
self.train_sequence_length_scaling = train_sequence_length_scaling
self._applied = False
def __repr__(self) -> str:
return f'{self.__class__.__name__}(max_sequence_length={self.max_sequence_length},train_sequence_length_scaling={self.train_sequence_length_scaling})'
@staticmethod
def required_on_load() -> bool:
return True
def match(self, event: Event, state: State) -> bool:
return (event == Event.INIT and not self._applied) or event == Event.AFTER_DATALOADER
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
if event == Event.INIT:
apply_alibi(
state.model,
optimizers=state.optimizers,
max_sequence_length=self.max_sequence_length,
)
self._applied = True
elif event == Event.AFTER_DATALOADER:
# Change sequence length by reshaping data
if self.train_sequence_length_scaling != 1 and \
hasattr(state, 'batch') and isinstance(state.batch, dict):
sequence_scaling = self.train_sequence_length_scaling
for k, v in state.batch.items():
batch_len, sequence_len = v.shape[0], v.shape[1]
state.batch[k] = v.reshape(int(batch_len / sequence_scaling), int(sequence_len * sequence_scaling))
| composer-dev | composer/algorithms/alibi/alibi.py |
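# Hedged illustration (not library code): the AFTER_DATALOADER reshaping performed in
# Alibi.apply. With train_sequence_length_scaling=0.25, each (batch, sequence) tensor in
# the batch dict becomes four times as many samples, each a quarter as long. The keys and
# shapes below are illustrative assumptions.
import torch

scaling = 0.25
batch = {
    'input_ids': torch.zeros(8, 1024, dtype=torch.long),
    'attention_mask': torch.ones(8, 1024, dtype=torch.long),
}

for k, v in batch.items():
    batch_len, sequence_len = v.shape[0], v.shape[1]
    batch[k] = v.reshape(int(batch_len / scaling), int(sequence_len * scaling))

print({k: tuple(v.shape) for k, v in batch.items()})  # {'input_ids': (32, 256), 'attention_mask': (32, 256)}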
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import math
from types import MethodType
from typing import Optional, Tuple
import torch
from torch import nn
from transformers.models.bert.modeling_bert import BertEmbeddings, BertSelfAttention
from transformers.models.roberta.modeling_roberta import RobertaEmbeddings, RobertaSelfAttention
from composer.algorithms.alibi.attention_surgery_functions.utils import (policy_registry, register_alibi,
zero_and_freeze_expand_position_embeddings)
@policy_registry.register(BertEmbeddings, RobertaEmbeddings)
def bert_embedding_converter(module: torch.nn.Module, module_index: int, max_sequence_length: int) -> torch.nn.Module:
"""Removes positional embeddings and expands `position_ids` buffer to support `max_sequence_length` tokens.
"""
assert isinstance(module, (BertEmbeddings, RobertaEmbeddings))
del module_index # unused
zero_and_freeze_expand_position_embeddings(module,
max_sequence_length,
position_embedding_attribute='position_embeddings')
module_device = next(module.parameters()).device
module.register_buffer('position_ids', torch.arange(max_sequence_length).expand((1, -1)).to(module_device))
return module
@policy_registry.register(BertSelfAttention, RobertaSelfAttention)
def bert_attention_converter(module: torch.nn.Module, module_index: int, max_sequence_length: int) -> torch.nn.Module:
"""Adds ALiBi to Bert-style SelfAttention."""
assert isinstance(module, (BertSelfAttention, RobertaSelfAttention))
del module_index # unused
module = register_alibi(module=module,
n_heads=int(module.num_attention_heads),
max_token_length=max_sequence_length,
causal=False)
setattr(module, 'forward', MethodType(forward, module))
return module
# This code is adapted from the HuggingFace Transformers library, so we ignore any type checking issues it triggers
# pyright: reportGeneralTypeIssues = none
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
"""Replication of identically-named attention function function ("forward") in Composer/HuggingFace BERT model's
BERTSelfAttention (:func:`transformers.models.bert.modeling_bert.BERTSelfAttention.forward`), but this function
implements ALiBi and will be used to replace the default attention function."""
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
raise NotImplementedError('ALiBi is not supported for BERT with position_embedding_type: {}'.format(
self.position_embedding_type))
#### REMOVES THE FOLLOWING CODE ########
# seq_length = hidden_states.size()[1]
# position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
# position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
# distance = position_ids_l - position_ids_r
# positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
# positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
#
# if self.position_embedding_type == "relative_key":
# relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
# attention_scores = attention_scores + relative_position_scores
# elif self.position_embedding_type == "relative_key_query":
# relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
# relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
# attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
########################################
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
##### Modification for adding ALiBi #####
seq_len = attention_scores.shape[-1]
# Crop self.alibi to [1, n_heads, seq_len, seq_len]
attention_scores = attention_scores + self.alibi[:, :, :seq_len, :seq_len]
#########################################
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
| composer-dev | composer/algorithms/alibi/attention_surgery_functions/_bert.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
# Import files that add functions to the `policy_registry` registry in order to actually
# register those functions.
from composer.utils import MissingConditionalImportError
try:
from composer.algorithms.alibi.attention_surgery_functions import _bert, _gpt2 # pyright: reportUnusedImport=none
from composer.algorithms.alibi.attention_surgery_functions.utils import policy_registry
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group='nlp', conda_package='transformers') from e
__all__ = ['policy_registry']
| composer-dev | composer/algorithms/alibi/attention_surgery_functions/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import inspect
import logging
import math
from operator import attrgetter
from typing import Callable, Dict, Optional, Type
import torch
log = logging.getLogger(__name__)
# Alibi applies module surgery to registered modules using their associated alibi replacement function.
# Such functions must have the following signature:
AlibiReplacementFunction = Callable[[torch.nn.Module, int, int], Optional[torch.nn.Module]]
class PolicyRegistry(Dict[Type[torch.nn.Module], AlibiReplacementFunction]):
"""A registry mapping for ALiBi surgery."""
def register(self,
*modules: Type[torch.nn.Module]) -> Callable[[AlibiReplacementFunction], AlibiReplacementFunction]:
"""This decorator registers mappings from torch module types to their ALiBi surgery functions.
To accommodate the specifics of composer's module surgery, our ALiBi implementation uses a
registry to create a ``Mapping[torch.nn.Module, AlibiReplacementFunction]``, where
`AlibiReplacementFunction` is any function that has a :data:`~.module_surgery.ReplacementFunction`
signature but with an additional ``max_sequence_length`` argument.
Implementation files (e.g., :file:`_gpt2.py`) populate :data:`policy_registry` (an instance of
this class) by defining instances of `AlibiReplacementFunction` functions and decorating them
with :meth:`policy_registry.register` (this method). One or more ``Type[torch.nn.Module]`` source
classes must be supplied as inputs to the decorator, which tells :data:`policy_registry`
to map those classes to the decorated function.
Example:
.. code-block::
from composer.algorithms.alibi.attention_surgery_functions.utils import policy_registry
from transformers.models.gpt2.modeling_gpt2 import GPT2Attention
@policy_registry.register(GPT2Attention)
def convert_gpt2_attention(module: torch.nn.Module, index: int, max_sequence_length: int):
# Do surgery (change ``module`` or generate a new ``module`` instance to return)
# Note that this function should depend on ``max_sequence_length``
# YOUR CODE HERE
return module
In the above example, ``convert_gpt2_attention`` (an instance of a `AlibiReplacementFunction`
function) is decorated with ``@policy_registry.register(GPT2Attention)``. Using the decorator
this way instructs the ALiBi algorithms to apply surgery to any instance of `GPT2Attention`
within the model using ``convert_gpt2_attention`` (the decorated function).
Note that ``convert_gpt2_attention`` follows the specific signature of an `AlibiReplacementFunction`.
:meth:`policy_registry.register` will raise an exception if it is used to decorate a function that
does not follow this signature. The requirements are:
* The function takes 3 input arguments
* Argument 1 has type ``torch.nn.Module``
* Argument 2 has type ``int``
* Argument 3 is named ``max_sequence_length`` and has type ``int``
To better understand these requirements, it may be helpful to review composer's module
surgery (:file:`composer/utils/module_surgery.py`) and the way ALiBi's implementation uses
`policy_registry` in :func:`composer.algorithms.alibi.apply_alibi`.
"""
if len(modules) == 0:
raise ValueError('Registry decoration without any module class inputs has no effect.')
def _validate_signature(func: Callable):
# Necessary to enforce that `func` has a valid signature (i.e. is a AlibiReplacementFunction)
signature = inspect.signature(func)
parameters = signature.parameters
if len(parameters) != 3:
raise ValueError(
f'Each alibi surgery function must accept 3 arguments, {func} accepts {len(parameters)}')
((_, module_param), (_, index_param), (max_seq_name, max_seq_param)) = parameters.items()
if module_param.annotation != torch.nn.Module:
raise TypeError(
f'The first argument of alibi surgery function {func} must be of type "torch.nn.Module"')
if index_param.annotation != int:
raise TypeError(f'The second argument of alibi surgery function {func} must be of type "int"')
if max_seq_param.annotation != int:
raise TypeError(f'The third argument of alibi surgery function {func} must be of type "int"')
if max_seq_name != 'max_sequence_length':
raise NameError(f'The third argument of function {func} must be named "max_sequence_length"')
def _register_module(module: Type[torch.nn.Module], func: Callable) -> None:
if not issubclass(module, torch.nn.Module):
raise TypeError(f'Module {module.__name__} is not a subclass of `torch.nn.Module`.')
if module in self:
raise ValueError(
f'An AlibiReplacementFunction has already been registered for module {module.__name__}.')
self[module] = func
return
def wrapper(func: AlibiReplacementFunction) -> AlibiReplacementFunction:
_validate_signature(func)
for module in modules:
_register_module(module, func)
return func
return wrapper
# Initialize the policy registry that Alibi will reference
policy_registry = PolicyRegistry()
def zero_and_freeze_expand_position_embeddings(
module: torch.nn.Module,
max_sequence_length: int,
position_embedding_attribute: str,
) -> None:
"""Replaces weights with zero tensor and prevents them from being learned further.
This is intended to be used specifically for "removing" positional embeddings.
"""
try:
pos_embedding_module = attrgetter(position_embedding_attribute)(module)
old_weight = getattr(pos_embedding_module, 'weight')
if not isinstance(old_weight, torch.nn.Parameter):
raise TypeError(f'Module {module._get_name()}, position embedding {position_embedding_attribute}, '
f"'weight' attribute must be of type torch.nn.Module")
new_weight = torch.nn.Parameter(
torch.zeros((max_sequence_length, old_weight.shape[1]),
dtype=old_weight.dtype,
layout=old_weight.layout,
device=old_weight.device))
new_weight.requires_grad = False
setattr(pos_embedding_module, 'weight', new_weight)
log.info(f' Position embedding expanded to sequence length {max_sequence_length}, zeroed, and frozen')
except AttributeError:
log.error(f'Unable to zero and freeze position embeddings. Module '
f'{module} may lack attribute {position_embedding_attribute}, or position '
f"embeddings may lack attribute 'weight'.")
raise
def register_alibi(module: torch.nn.Module, n_heads: int, max_token_length: int, causal: bool) -> torch.nn.Module:
"""Adds ALiBi's linear attention biases as a buffer to the module."""
if causal: # e.g., for GPT
# Modified from https://github.com/ofirpress/attention_with_linear_biases/blob/5b327adc6d131e28b40ba58906b30bb469483519/fairseq/models/transformer.py#L742
slopes = torch.Tensor(_get_alibi_head_slopes(n_heads))
# In the next line, the part after the * is what constructs the diagonal matrix
# (right matrix in Figure 3 in the paper).
# If you run it you'll see that it doesn't exactly print out the same matrix as we
# have in Figure 3, but one where all rows are identical.
# This works because the softmax operation is invariant to translation, and our bias
# functions are always linear.
alibi = slopes.unsqueeze(1).unsqueeze(1) * \
torch.arange(max_token_length). \
unsqueeze(0).unsqueeze(0).expand(n_heads, -1, -1)
assert alibi.shape == torch.Size([n_heads, 1, max_token_length])
else: # e.g., for BERT
# Following https://github.com/ofirpress/attention_with_linear_biases/issues/5 (Implementation 1)
# In the causal case, you can exploit the fact that softmax is invariant to a uniform translation
# of the logits, which makes the math work out *after* applying causal masking. If no causal masking
# will be applied, it is necessary to construct the diagonal mask.
context_position = torch.arange(max_token_length)[:, None]
memory_position = torch.arange(max_token_length)[None, :]
relative_position = torch.abs(memory_position - context_position)
# [n_heads, max_token_length, max_token_length]
relative_position = relative_position.unsqueeze(0).expand(n_heads, -1, -1)
slopes = torch.Tensor(_get_alibi_head_slopes(n_heads))
alibi = slopes.unsqueeze(1).unsqueeze(1) * -relative_position
# [1, n_heads, max_token_length, max_token_length]
alibi = alibi.unsqueeze(0)
assert alibi.shape == torch.Size([1, n_heads, max_token_length, max_token_length])
module_device = next(module.parameters()).device
module.register_buffer('alibi', alibi.to(module_device))
return module
def _get_alibi_head_slopes(n_heads: int):
def get_slopes_power_of_2(n_heads):
start = (2**(-2**-(math.log2(n_heads) - 3)))
ratio = start
return [start * ratio**i for i in range(n_heads)]
# In the paper, they only train models that have 2^a heads for some a. This function
# has some good properties that only occur when the input is a power of 2. To
# maintain that even when the number of heads is not a power of 2, we use a
# workaround.
if math.log2(n_heads).is_integer():
return get_slopes_power_of_2(n_heads)
else:
closest_power_of_2 = 2**math.floor(math.log2(n_heads))
return get_slopes_power_of_2(closest_power_of_2) + _get_alibi_head_slopes(
2 * closest_power_of_2)[0::2][:n_heads - closest_power_of_2]
| composer-dev | composer/algorithms/alibi/attention_surgery_functions/utils.py |
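# Hedged illustration (not library code): the symmetric (non-causal) ALiBi bias that
# register_alibi builds for BERT-style attention. Per-head slopes from a geometric
# sequence multiply the negative absolute distance between positions. n_heads and
# max_token_length are small illustrative values; the slope formula follows the
# power-of-two branch of _get_alibi_head_slopes.
import math

import torch

n_heads, max_token_length = 4, 6
start = 2 ** (-2 ** -(math.log2(n_heads) - 3))
slopes = torch.tensor([start * start ** i for i in range(n_heads)])

context_position = torch.arange(max_token_length)[:, None]
memory_position = torch.arange(max_token_length)[None, :]
relative_position = torch.abs(memory_position - context_position)   # (L, L) token distances
alibi = slopes[:, None, None] * -relative_position.unsqueeze(0)     # (n_heads, L, L)
alibi = alibi.unsqueeze(0)                                          # (1, n_heads, L, L)

print(alibi.shape)        # torch.Size([1, 4, 6, 6])
print(alibi[0, 0, 0, :])  # bias decays linearly with distance from the first position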
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from types import MethodType
from typing import Tuple
import torch
from transformers.models.gpt2.modeling_gpt2 import GPT2Attention, GPT2Model
from composer.algorithms.alibi.attention_surgery_functions.utils import (policy_registry, register_alibi,
zero_and_freeze_expand_position_embeddings)
@policy_registry.register(GPT2Model)
def gpt2_embedding_converter(module: torch.nn.Module, module_index: int, max_sequence_length: int) -> torch.nn.Module:
"""Removes positional embeddings."""
assert isinstance(module, GPT2Model)
del module_index # unused
zero_and_freeze_expand_position_embeddings(module, max_sequence_length, position_embedding_attribute='wpe')
return module
@policy_registry.register(GPT2Attention)
def gpt2_attention_converter(module: torch.nn.Module, module_index: int, max_sequence_length: int) -> torch.nn.Module:
"""Adds ALiBi to GPT2Attention and replaces the attention mask to support `max_sequence_length` tokens."""
assert isinstance(module, GPT2Attention)
del module_index # unused
module = register_alibi(
module=module,
n_heads=int(module.num_heads), #type: ignore num_heads member of GPT2Attention
max_token_length=max_sequence_length,
causal=True)
setattr(module, '_attn', MethodType(_attn, module))
module = enlarge_mask(module, max_sequence_length)
return module
def _attn(self, query, key, value, attention_mask=None, head_mask=None) -> Tuple[torch.Tensor, torch.Tensor]:
"""Replication of identically-named attention function function ("_attn") in Composer/HuggingFace GPT2 model's
GPT2Attention (:func:`transformers.models.gpt2.modeling_gpt2.GPT2Attention._attn`; `GitHub link <https://\\
github.com/huggingface/transformers/blob/2e11a043374a6229ec129a4765ee4ba7517832b9/src/transformers/models/\\
gpt2/modeling_gpt2.py#L192>`_), but this function implements ALiBi and will be used to replace the default attention
function."""
attn_weights = torch.matmul(query, key.transpose(-1, -2))
if self.scale_attn_weights:
attn_weights = attn_weights / (float(value.size(-1))**0.5)
# This is the modification from the original attention
n_tokens = attn_weights.shape[-1]
# Truncate alibi distance weights to size of current batch
alibi = self.alibi[:, :, 0:n_tokens]
# alibi = self.alibi[:, :, :, 0:n_tokens].repeat(batch_size, 1, 1, 1)
attn_weights = attn_weights + alibi
# End modification
if not self.is_cross_attention:
# if only "normal" attention layer implements causal mask
query_length, key_length = query.size(-2), key.size(-2)
causal_mask = self.bias[:, :, key_length - query_length:key_length, :key_length].bool()
attn_weights = torch.where(causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype))
if attention_mask is not None:
# Apply the attention mask
attn_weights = attn_weights + attention_mask
attn_weights = torch.nn.Softmax(dim=-1)(attn_weights)
attn_weights = self.attn_dropout(attn_weights)
# Mask heads if we want to
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = torch.matmul(attn_weights, value)
return attn_output, attn_weights
def enlarge_mask(module: torch.nn.Module, max_sequence_length: int) -> torch.nn.Module:
"""Increases the size of the attention mask in Composer/HuggingFace GPT2 model's GPT2Attention
(:func:`transformers.models.gpt2.modeling_gpt2.GPT2Attention._attn`; `GitHub link <https://\\
github.com/huggingface/transformers/blob/2e11a043374a6229ec129a4765ee4ba7517832b9/src/transformers/\\
models/gpt2/modeling_gpt2.py#L140>`_).
This is necessary for evaluating on sequence lengths longer than the model was initialized to accommodate.
"""
old_mask = module.bias
new_mask = torch.tril(
torch.ones(
(max_sequence_length, max_sequence_length), # type: ignore
dtype=torch.uint8,
device=old_mask.device)).view(1, 1, max_sequence_length, max_sequence_length) # type: ignore
setattr(module, 'bias', new_mask)
return module
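# Illustrative sketch (not part of the surgery API): the causal mask that ``enlarge_mask``
# installs is just a lower-triangular matrix reshaped to
# (1, 1, max_sequence_length, max_sequence_length) so it broadcasts over batch and heads.
# Row ``i`` of the mask lets position ``i`` attend only to positions ``0..i``. The names
# below (``demo_max_len``, ``demo_mask``) are demo-only stand-ins.
if __name__ == '__main__':
    demo_max_len = 4  # hypothetical sequence length for the demo
    demo_mask = torch.tril(torch.ones(
        (demo_max_len, demo_max_len), dtype=torch.uint8)).view(1, 1, demo_max_len, demo_max_len)
    print(demo_mask[0, 0])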
| composer-dev | composer/algorithms/alibi/attention_surgery_functions/_gpt2.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""NoOpModel algorithm and class."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Dict, Optional
import torch
import torch.nn.functional as F
from torchmetrics import Metric
from torchmetrics.classification import BinaryAccuracy
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.models.base import ComposerModel
from composer.utils import module_surgery
if TYPE_CHECKING:
from composer.core.types import Batch
log = logging.getLogger(__name__)
__all__ = ['NoOpModelClass', 'NoOpModel']
class NoOpModelClass(ComposerModel):
"""Dummy model used for performance measurements.
The :class:`.NoOpModel` algorithm uses this class to replace a :class:`torch.nn.Module`.
Args:
original_model (torch.nn.Module): Model to replace.
"""
def __init__(self, original_model: torch.nn.Module):
super().__init__()
original_device = next(original_model.parameters()).device
self.weights = torch.nn.Parameter(torch.Tensor([1.5]).to(original_device))
try:
# For classification
self.num_classes = original_model.num_classes
except AttributeError:
pass
def loss(self, outputs: torch.Tensor, batch: Batch):
x, y = batch
assert isinstance(y, torch.Tensor)
del x # unused
return F.mse_loss(outputs, y.to(torch.float32))
def forward(self, batch: Batch):
x, y = batch
del x # unused
assert isinstance(y, torch.Tensor)
return y * self.weights
def get_metrics(self, is_train: bool) -> Dict[str, Metric]:
return {'BinaryAccuracy': BinaryAccuracy()}
def eval_forward(self, batch: Batch, outputs: Optional[Any] = None):
x, y = batch
del x # unused
return y
def update_metric(self, batch: Any, outputs: Any, metric: Metric) -> None:
_, targets = batch
metric.update(outputs, targets)
class NoOpModel(Algorithm):
"""Runs on :attr:`Event.INIT` and replaces the model with a dummy :class:`.NoOpModelClass` instance."""
def __init__(self):
# No arguments
pass
def __repr__(self) -> str:
return f'{self.__class__.__name__}()'
@staticmethod
def required_on_load() -> bool:
return True
def match(self, event: Event, state: State) -> bool:
return event == Event.INIT
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
new_model = NoOpModelClass(state.model)
module_surgery.update_params_in_optimizer(old_params=state.model.parameters(),
new_params=new_model.parameters(),
optimizers=state.optimizers)
state.model = new_model
log.info('Replaced model with a NoOpModel')
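# Minimal sketch (illustrative only; assumes a simple (x, y) tuple batch, as the methods
# above do): the replacement model ignores the inputs and scales the targets by a single
# learnable weight, so forward/backward cost is negligible and a run's speed is dominated
# by the dataloader. ``original`` here is just a toy module standing in for a real model.
if __name__ == '__main__':
    original = torch.nn.Linear(8, 1)
    no_op = NoOpModelClass(original)
    x, y = torch.randn(4, 8), torch.randn(4, 1)
    outputs = no_op((x, y))             # == y * no_op.weights
    loss = no_op.loss(outputs, (x, y))  # MSE between outputs and y
    print(float(loss))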
| composer-dev | composer/algorithms/no_op_model/no_op_model.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Replaces model with a dummy model of type :class:`NoOpModelClass`.
The algorithm runs on :attr:`Event.INIT`. It replaces the model in the state with
a :class:`.NoOpModelClass` and then updates the parameters in the optimizer
through module surgery.
A dummy model can be helpful for profiling the dataloader by eliminating the work
necessary to compute model outputs.
"""
from composer.algorithms.no_op_model.no_op_model import NoOpModel as NoOpModel
from composer.algorithms.no_op_model.no_op_model import NoOpModelClass as NoOpModelClass
__all__ = ['NoOpModel', 'NoOpModelClass']
| composer-dev | composer/algorithms/no_op_model/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from composer.algorithms.weight_standardization.weight_standardization import \
WeightStandardization as WeightStandardization
from composer.algorithms.weight_standardization.weight_standardization import \
apply_weight_standardization as apply_weight_standardization
__all__ = ['WeightStandardization', 'apply_weight_standardization']
| composer-dev | composer/algorithms/weight_standardization/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import logging
import textwrap
import torch
import torch.nn.utils.parametrize as parametrize
from torch import nn
from torch.fx import symbolic_trace
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.utils import module_surgery
log = logging.getLogger(__name__)
__all__ = ['apply_weight_standardization', 'WeightStandardization']
def _standardize_weights(W: torch.Tensor):
"""Function to standardize the input weight ``W``"""
reduce_dims = list(range(1, W.dim()))
W_var, W_mean = torch.var_mean(W, dim=reduce_dims, keepdim=True, unbiased=False)
return (W - W_mean) / (torch.sqrt(W_var + 1e-10))
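# Worked example (illustrative): for a 2x2 weight [[1., 3.], [2., 2.]], each output unit (row)
# is shifted to zero mean and scaled by its biased standard deviation, giving roughly
# [[-1., 1.], [0., 0.]]; the constant second row maps to ~0 thanks to the 1e-10 epsilon.
if __name__ == '__main__':
    print(_standardize_weights(torch.tensor([[1., 3.], [2., 2.]])))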
class WeightStandardizer(nn.Module):
"""Class used to apply weight standardization with torch's parametrization package."""
def forward(self, W):
return _standardize_weights(W)
def apply_weight_standardization(module: torch.nn.Module, n_last_layers_ignore: int = 0):
"""`Weight Standardization <https://arxiv.org/abs/1903.10520>`_ standardizes convolutional weights in a model.
Args:
module (torch.nn.Module): the torch module whose convolutional weights will be parametrized.
n_last_layers_ignore (int, optional): the number of layers at the end of the module to not apply weight standardization.
Default: ``0``.
"""
modules_to_parametrize = (nn.Conv1d, nn.Conv2d, nn.Conv3d)
# Attempt to symbolically trace a module, so the results of .modules() will be in the order of execution
try:
module_trace = symbolic_trace(module)
except:
if n_last_layers_ignore > 0:
log.warning(
textwrap.dedent(f"""\
Module could not be symbolically traced, likely due to logic in forward() which is not traceable. Modules
ignored due to n_last_layers_ignore={n_last_layers_ignore} may not actually be the last layers of the network.
To determine the error, try torch.fx.symbolic_trace(module)."""))
module_trace = module
# Count the number of convolution modules in the model
conv_count = module_surgery.count_module_instances(module_trace, modules_to_parametrize)
# Calculate how many convs to parametrize based on conv_count and n_last_layers_ignore
target_ws_count = max(conv_count - n_last_layers_ignore, 0)
# Parametrize conv modules to use weight standardization
current_ws_count = 0
for m in module_trace.modules():
# If the target number of weight standardized layers is reached, end for loop
if current_ws_count == target_ws_count:
break
if isinstance(m, modules_to_parametrize):
parametrize.register_parametrization(m, 'weight', WeightStandardizer())
current_ws_count += 1
return current_ws_count
class WeightStandardization(Algorithm):
"""`Weight Standardization <https://arxiv.org/abs/1903.10520>`_ standardizes convolutional weights in a model.
Args:
n_last_layers_ignore (int, optional): the number of layers at the end of the model to not apply weight standardization.
Default: ``0``.
"""
def __init__(self, n_last_layers_ignore: int = 0):
self.n_last_layers_ignore = n_last_layers_ignore
def __repr__(self) -> str:
return f'{self.__class__.__name__}(n_last_layers_ignore={self.n_last_layers_ignore})'
def match(self, event: Event, state: State):
return (event == Event.INIT)
def apply(self, event: Event, state: State, logger: Logger):
count = apply_weight_standardization(state.model, n_last_layers_ignore=self.n_last_layers_ignore)
logger.log_hyperparameters({'WeightStandardization/num_weights_standardized': count})
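# Minimal usage sketch (illustrative; ``demo_model`` is a tiny conv stack standing in for a
# real model): once parametrized, reading ``conv.weight`` returns the standardized weight,
# so each output filter has approximately zero mean and unit variance.
if __name__ == '__main__':
    demo_model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 8, 3))
    num_parametrized = apply_weight_standardization(demo_model, n_last_layers_ignore=0)
    print(num_parametrized)                          # 2 conv layers parametrized
    print(demo_model[0].weight.mean(dim=(1, 2, 3)))  # per-filter means are ~0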
| composer-dev | composer/algorithms/weight_standardization/weight_standardization.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""SAM (`Foret et al, 2020 <https://arxiv.org/abs/2010.01412>`_) wraps an existing optimizer with a
:class:`SAMOptimizer` which makes the optimizer minimize both loss value and sharpness. This can improve model
generalization and provide robustness to label noise.
See the :doc:`Method Card </method_cards/sam>` for more details.
"""
from composer.algorithms.sam.sam import SAM as SAM
from composer.algorithms.sam.sam import SAMOptimizer as SAMOptimizer
__all__ = ['SAM', 'SAMOptimizer']
| composer-dev | composer/algorithms/sam/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""SAM algorithm and optimizer class."""
from __future__ import annotations
import logging
import warnings
from typing import Optional
import torch
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.utils import ensure_tuple
log = logging.getLogger(__name__)
__all__ = ['SAM', 'SAMOptimizer']
class SAMOptimizer(torch.optim.Optimizer):
"""Wraps an optimizer with sharpness-aware minimization (`Foret et al, 2020 <https://arxiv.org/abs/2010.01412>`_).
See :class:`.SAM` for details.
Implementation based on https://github.com/davda54/sam
Args:
base_optimizer (torch.optim.Optimizer): The optimizer to apply SAM to.
rho (float, optional): The SAM neighborhood size. Must be greater than 0. Default: ``0.05``.
epsilon (float, optional): A small value added to the gradient norm for numerical stability. Default: ``1.0e-12``.
interval (int, optional): SAM will run once per ``interval`` steps. A value of 1 will
cause SAM to run every step. Steps on which SAM runs take
roughly twice as much time to complete. Default: ``1``.
"""
def __init__(self,
base_optimizer: torch.optim.Optimizer,
rho: float = 0.05,
epsilon: float = 1.0e-12,
interval: int = 1,
**kwargs):
if rho < 0:
raise ValueError(f'Invalid rho, should be non-negative: {rho}')
self.base_optimizer = base_optimizer
self.global_step = 0
self.interval = interval
self._step_supports_amp_closure = True # Flag for Composer trainer
defaults = {'rho': rho, 'epsilon': epsilon, **kwargs}
super(SAMOptimizer, self).__init__(self.base_optimizer.param_groups, defaults)
@torch.no_grad()
def sub_e_w(self):
for group in self.param_groups:
for p in group['params']:
if 'e_w' not in self.state[p]:
continue
e_w = self.state[p]['e_w'] # retrieve stale e(w)
p.sub_(e_w) # get back to "w" from "w + e(w)"
@torch.no_grad()
def first_step(self):
grad_norm = self._grad_norm()
for group in self.param_groups:
scale = group['rho'] / (grad_norm + group['epsilon'])
for p in group['params']:
if p.grad is None:
continue
e_w = p.grad * scale.to(p)
p.add_(e_w) # climb to the local maximum "w + e(w)"
self.state[p]['e_w'] = e_w
@torch.no_grad()
def second_step(self):
for group in self.param_groups:
for p in group['params']:
if p.grad is None or 'e_w' not in self.state[p]:
continue
p.sub_(self.state[p]['e_w']) # get back to "w" from "w + e(w)"
self.base_optimizer.step() # do the actual "sharpness-aware" update
@torch.no_grad()
def step(self, closure=None):
assert closure is not None, 'Sharpness Aware Minimization requires closure, but it was not provided'
closure = torch.enable_grad()(closure) # the closure should do a full forward-backward pass
loss = None
if (self.global_step + 1) % self.interval == 0:
# Compute gradient at (w) per-GPU, and do not sync
loss = closure(ddp_sync=False) # type: ignore
if loss:
self.first_step() # Compute e(w) and set weights to (w + e(w)) separately per-GPU
loss_dict = {} # Dummy loss dict to ignore loss logging from w + e(w)
if closure(loss_dict=loss_dict): # type: ignore Compute gradient at (w + e(w))
self.second_step() # Reset weights to (w) and step base optimizer
else:
self.sub_e_w() # If second forward-backward closure fails, reset weights to (w)
else:
loss = closure()
if loss:
self.base_optimizer.step()
self.global_step += 1
return loss
def _grad_norm(self):
norm = torch.norm(torch.stack(
[p.grad.norm(p=2) for group in self.param_groups for p in group['params'] if p.grad is not None]),
p='fro')
return norm
class SAM(Algorithm):
"""Adds sharpness-aware minimization (`Foret et al, 2020 <https://arxiv.org/abs/2010.01412>`_)
by wrapping an existing optimizer with a :class:`.SAMOptimizer`. SAM can improve model generalization
and provide robustness to label noise.
Runs on :attr:`.Event.INIT`.
Args:
rho (float, optional): The neighborhood size parameter of SAM. Must be greater than 0.
Default: ``0.05``.
epsilon (float, optional): A small value added to the gradient norm for numerical stability.
Default: ``1e-12``.
interval (int, optional): SAM will run once per ``interval`` steps. A value of 1 will
cause SAM to run every step. Steps on which SAM runs take
roughly twice as much time to complete. Default: ``1``.
Example:
.. testcode::
from composer.algorithms import SAM
algorithm = SAM(rho=0.05, epsilon=1.0e-12, interval=1)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[algorithm],
optimizers=[optimizer],
)
"""
def __init__(
self,
rho: float = 0.05,
epsilon: float = 1.0e-12,
interval: int = 1,
):
warnings.warn(
'SAM has known issues of weight mismatch when loading from a checkpoint, which will cause an error when resuming without `load_weights_only=True`.'
)
self.rho = rho
self.epsilon = epsilon
self.interval = interval
def match(self, event: Event, state: State) -> bool:
return event == Event.INIT
def apply(self, event: Event, state: State, logger: Optional[Logger]) -> Optional[int]:
assert state.optimizers is not None
state.optimizers = tuple(
SAMOptimizer(
base_optimizer=optimizer,
rho=self.rho,
epsilon=self.epsilon,
interval=self.interval,
) for optimizer in ensure_tuple(state.optimizers))
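# Minimal sketch of driving SAMOptimizer by hand (illustrative; Composer's trainer normally
# builds and supplies the closure). The closure must re-run a full forward/backward pass each
# time it is called, because ``step`` invokes it twice: once at w to compute e(w), and once at
# w + e(w) for the sharpness-aware update. The toy model, data, and closure below are
# demo-only assumptions.
if __name__ == '__main__':
    model = torch.nn.Linear(4, 1)
    base = torch.optim.SGD(model.parameters(), lr=0.1)
    opt = SAMOptimizer(base_optimizer=base, rho=0.05)
    x, y = torch.randn(8, 4), torch.randn(8, 1)

    def closure(**kwargs):
        # The trainer's closure accepts extra kwargs (e.g. ddp_sync, loss_dict); this toy
        # closure ignores them and just runs a fresh forward/backward pass.
        opt.zero_grad()
        loss = torch.nn.functional.mse_loss(model(x), y)
        loss.backward()
        return loss

    print(float(opt.step(closure)))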
| composer-dev | composer/algorithms/sam/sam.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Clips all gradients in a model based on their values, their norms,
and their parameters' norms.
See the :doc:`Method Card </method_cards/gradient_clipping>` for more details.
"""
from composer.algorithms.gradient_clipping.gradient_clipping import GradientClipping, apply_gradient_clipping
__all__ = ['GradientClipping', 'apply_gradient_clipping']
| composer-dev | composer/algorithms/gradient_clipping/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core gradient clipping classes and functions."""
from __future__ import annotations
import logging
from typing import Iterable, Optional, Union
import torch
from packaging import version
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.models import ComposerModel
from composer.utils import using_torch_2_0
log = logging.getLogger(__name__)
__all__ = ['GradientClipping', 'apply_gradient_clipping']
def apply_gradient_clipping(model: Union[ComposerModel, torch.nn.Module], clipping_type: str, clipping_threshold: float,
fsdp_enabled: bool):
"""Clips all gradients in model based on specified clipping_type.
Args:
model (ComposerModel or torch.nn.Module): The model to which gradient clipping will be applied.
clipping_type ('adaptive', 'norm', 'value'): String denoting which type of
gradient clipping to do. The options are: 'norm', which clips the gradient norm
and uses `torch.nn.utils.clip_grad_norm_`, 'value', which clips gradient at
a specified value and uses `torch.nn.utils.clip_grad_value_`, and 'adaptive',
which clips all gradients based on gradient norm:parameter norm ratio using
composer.algorithms.gradient_clipping.gradient_clipping._apply_agc.
clipping_threshold (float, optional): Specifies what value to clip the gradients
to (for 'value'), what values to clip the gradient norms to (for 'norm'), and
threshold by which if grad_norm / weight_norm is greater than this threshold then
scale gradients by this threshold * (weight_norm / grad_norm) (for 'adaptive').
fsdp_enabled (bool): Whether the model is an FSDP model.
"""
if fsdp_enabled:
if version.parse(torch.__version__) < version.parse('1.13.0'):
raise RuntimeError('To use FSDP with Composer, you must use torch>=1.13.0.')
from torch.distributed.fsdp import FullyShardedDataParallel
is_torch_2_0 = using_torch_2_0()
for module in model.modules():
if isinstance(module, FullyShardedDataParallel):
# We can only call grad clip on the parent instance, so we iterate through all
# modules and try grad clipping and FSDP will throw an exception if we
# clip any gradients that aren't a parent module
try:
if clipping_type == 'norm':
module.clip_grad_norm_(max_norm=clipping_threshold)
elif clipping_type == 'value':
module.clip_grad_norm_(max_norm=clipping_threshold, norm_type=float('inf'))
else:
raise ValueError(f"clipping type must be 'norm' or 'value' with FSDP not {clipping_type}")
except (AssertionError, RuntimeError) as e:
if (('clip_grad_norm should only be called on the root (parent) instance' == str(e) and
not is_torch_2_0) or
('`clip_grad_norm_()` should only be called on the root FSDP instance' == str(e) and
is_torch_2_0)):
continue
else:
raise
return
parameters = model.parameters()
if clipping_type == 'adaptive':
_apply_agc(parameters, clipping_threshold=clipping_threshold)
elif clipping_type == 'norm':
torch.nn.utils.clip_grad_norm_(parameters, max_norm=clipping_threshold)
elif clipping_type == 'value':
torch.nn.utils.clip_grad_value_(parameters, clip_value=clipping_threshold)
else:
raise ValueError(f"clipping_type must be 'adaptive', 'norm', or 'value' not {clipping_type} ")
def _apply_agc(
parameters: Union[torch.Tensor, Iterable[torch.Tensor]],
clipping_threshold: float,
) -> None:
"""Clips all gradients in model based on ratio of gradient norms to parameter norms.
Args:
parameters (torch.Tensor or Iterable[torch.Tensor]): The parameters of the
model whose gradients will be clipped.
clipping_threshold (float, optional): The largest acceptable ratio between grad
norms and parameter norms before clipping is done.
"""
for param in parameters:
if param.grad is None:
continue
# Detach weights and gradients, so the clipping operation is not added to
# computational graph.
weights = param.detach()
grad = param.grad.detach()
# Get clipped version of gradients.
clipped_grad_coeff = _get_clipped_gradient_coeff(weights, grad, clipping_threshold=clipping_threshold)
# Copy clipped gradients into param.grad attribute, so they can be accessed by
# optimizer.
grad.mul_(clipped_grad_coeff)
class GradientClipping(Algorithm):
"""Clips all gradients in model based on specified clipping_type.
Runs on ``Event.AFTER_TRAIN_BATCH``.
Example:
.. testcode::
from composer.algorithms import GradientClipping
from composer.trainer import Trainer
gc = GradientClipping(clipping_type='norm', clipping_threshold=0.1)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[gc],
optimizers=[optimizer]
)
Args:
clipping_type ('adaptive', 'norm', 'value'): String denoting which type of
gradient clipping to do. The options are: 'norm', which clips the gradient norm
and uses `torch.nn.utils.clip_grad_norm_`, 'value', which clips gradient at
a specified value and uses `torch.nn.utils.clip_grad_value_`, and 'adaptive',
which clips all gradients based on gradient norm:parameter norm ratio using
composer.algorithms.gradient_clipping.gradient_clipping._apply_agc.
clipping_threshold (float, optional): Specifies what value to clip the gradients
to (for 'value'), what values to clip the gradient norms to (for 'norm'), and
threshold by which if grad_norm / weight_norm is greater than this threshold then
scale gradients by this threshold * (weight_norm / grad_norm) (for 'adaptive').
Raises:
NotImplementedError: if deepspeed is enabled and clipping_type is not 'norm'.
ValueError: if deepspeed is enabled and the clipping threshold is not greater than zero.
"""
def __init__(self, clipping_type: str, clipping_threshold: float):
self.clipping_type = clipping_type
self.clipping_threshold = clipping_threshold
def match(self, event: Event, state: State) -> bool:
return event in [Event.INIT, Event.AFTER_TRAIN_BATCH]
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
if event == Event.INIT and state.deepspeed_config is not None:
if self.clipping_type == 'norm':
if self.clipping_threshold > 0:
state.deepspeed_config['gradient_clipping'] = self.clipping_threshold
else:
raise ValueError(
f'Deepspeed only supports gradient clipping thresholds that are greater than zero, but the provided one is {self.clipping_threshold}'
)
else:
raise NotImplementedError(
f"Deepspeed only supports gradient clipping of type 'norm' not of type '{self.clipping_type}'")
if event == Event.AFTER_TRAIN_BATCH and not state.deepspeed_enabled:
apply_gradient_clipping(model=state.model,
clipping_type=self.clipping_type,
clipping_threshold=self.clipping_threshold,
fsdp_enabled=state.fsdp_enabled)
def _get_clipped_gradient_coeff(weights: torch.Tensor, grad: torch.Tensor, clipping_threshold: float = 0.01):
"""Clips all gradients in model based on ratio of gradient norms to parameter norms.
Gradients whose norms exceed
.. math:: weight_norm * clipping_threshold
are scaled down by
.. math:: (weight_norm / grad_norm) * clipping_threshold.
Args:
weights (torch.Tensor): Tensor of weights (parameters) from the model.
grad (torch.Tensor): Tensor of gradients of the loss with respect to the weights.
clipping_threshold (float, optional): The largest acceptable ratio between grad
norms and parameter norms before clipping is done.
Returns:
clipped_grad_coeff (torch.Tensor): Coefficient of the same shape as grad_norm, equal to
(weight_norm / grad_norm) * clipping_threshold for gradients whose norms
exceed weight_norm * clipping_threshold, and one otherwise.
"""
# Compute and clamp grad and weight norms.
w_norm = _unitwise_norm(weights)
grad_norm = _unitwise_norm(grad)
# Gradients whose norms are greater than weight_norm * clipping_threshold are
# scaled down by (weight_norm * clipping_threshold) / grad_norm.
max_norm = w_norm.mul_(clipping_threshold)
clipped_grad_coeff = max_norm.div_(grad_norm).nan_to_num_(nan=1.0).clamp_(max=1.0)
return clipped_grad_coeff
def _unitwise_norm(tensor: torch.Tensor):
"""Implements unitwise norm as described in Brock et al, 2021.
For 0D scalars of shape [], we trivially normalize with dim=0 which essentially returns the absolute value of the scalar.
For 1D *.bias weights of shape [out_features], we normalize across entire vector -> dim=0.
For 2D torch.nn.Linear weights of shape [out_features, in_features]: we normalize across in_features -> dim = 1
For 4D torch.nn.Conv2d weights [out_channels, in_channels, kernel_height, kernel_width]:
we normalize across [in_channels, kernel_height, kernel_width] -> dim = (1, 2, 3).
If a 3D parameter were somehow in your model, we would normalize by the last two dimensions -> dim = (1,2).
Args:
tensor (torch.Tensor): A parameter or gradient of the model.
Returns:
The appropriate L2 norm of the parameter or gradient as described above.
"""
# 0D for scalars, 1D for bias vectors.
if tensor.ndim <= 1:
dim = 0
keepdim = False
# 2D corresponds to MLPs and 4D corresponds to ConvNets.
else:
dim = tuple(range(1, tensor.ndim))
keepdim = True
# L2 Norm.
return torch.linalg.vector_norm(tensor, ord=2, dim=dim, keepdim=keepdim)
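# Minimal sketch (illustrative; ``demo`` is a toy module): adaptive clipping rescales each
# unit's gradient so that its norm never exceeds ``clipping_threshold`` times the
# corresponding parameter norm, while 'norm' and 'value' simply defer to the
# torch.nn.utils helpers.
if __name__ == '__main__':
    demo = torch.nn.Linear(16, 4)
    demo(torch.randn(8, 16)).sum().backward()
    apply_gradient_clipping(demo, clipping_type='adaptive', clipping_threshold=0.01,
                            fsdp_enabled=False)
    ratio = _unitwise_norm(demo.weight.grad) / _unitwise_norm(demo.weight.detach())
    print(ratio.max())  # <= 0.01 up to numerical error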
| composer-dev | composer/algorithms/gradient_clipping/gradient_clipping.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core Label Smoothing classes and functions."""
from __future__ import annotations
from typing import Any, Callable, Optional, Tuple, Union
import torch
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.loss.utils import ensure_targets_one_hot
__all__ = ['LabelSmoothing', 'smooth_labels']
def smooth_labels(logits: torch.Tensor, target: torch.Tensor, smoothing: float = 0.1):
"""Shrink targets towards a uniform distribution as in `Szegedy et al <https://arxiv.org/abs/1512.00567>`_.
The smoothed labels are computed as ``(1 - smoothing) * targets + smoothing * unif``
where ``unif`` is a vector with elements all equal to ``1 / num_classes``.
Args:
logits (torch.Tensor): predicted value for ``target``, or any other tensor
with the same shape. Shape must be ``(N, num_classes, ...)`` for
``N`` examples and ``num_classes`` classes with any number of
optional extra dimensions.
target (torch.Tensor): target tensor of either shape ``N`` or
``(N, num_classes, ...)``. In the former case, elements of
``targets`` must be integer class ids in the range
``0..num_classes``. In the latter case, ``targets`` must have the
same shape as ``logits``.
smoothing (float, optional): strength of the label smoothing, in
:math:`[0, 1]`. ``smoothing=0`` means no label smoothing, and
``smoothing=1`` means maximal smoothing (targets are ignored).
Default: ``0.1``.
Returns:
torch.Tensor: The smoothed targets.
Example:
.. testcode::
import torch
num_classes = 10
targets = torch.randint(num_classes, size=(100,))
from composer.algorithms.label_smoothing import smooth_labels
new_targets = smooth_labels(logits=logits,
target=targets,
smoothing=0.1)
"""
target = ensure_targets_one_hot(logits, target)
n_classes = logits.shape[1]
return (target * (1. - smoothing)) + (smoothing / n_classes)
class LabelSmoothing(Algorithm):
"""Shrink targets towards a uniform distribution as in `Szegedy et al <https://arxiv.org/abs/1512.00567>`_.
The smoothed labels are computed as ``(1 - smoothing) * targets + smoothing * unif``
where ``unif`` is a vector with elements all equal to ``1 / num_classes``.
Args:
smoothing: Strength of the label smoothing, in :math:`[0, 1]`.
``smoothing=0`` means no label smoothing, and
``smoothing=1`` means maximal smoothing (targets are ignored).
Default: ``0.1``.
target_key (str | int | Tuple[Callable, Callable] | Any, optional): A key that indexes to the target
from the batch. Can also be a pair of get and set functions, where the getter
is assumed to be first in the pair. The default is 1, which corresponds to any sequence, where the second element
is the target. Default: ``1``.
Example:
.. testcode::
from composer.algorithms import LabelSmoothing
algorithm = LabelSmoothing(smoothing=0.1)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[algorithm],
optimizers=[optimizer]
)
"""
def __init__(
self,
smoothing: float = 0.1,
target_key: Union[str, int, Tuple[Callable, Callable], Any] = 1,
):
self.smoothing = smoothing
self.original_labels = torch.Tensor()
self.target_key = target_key
def match(self, event: Event, state: State) -> bool:
return event in [Event.BEFORE_LOSS, Event.AFTER_LOSS]
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
labels = state.batch_get_item(self.target_key)
if event == Event.BEFORE_LOSS:
assert isinstance(state.outputs, torch.Tensor), 'Multiple tensors not supported yet'
assert isinstance(labels, torch.Tensor), 'Multiple tensors not supported yet'
self.original_labels = labels.clone()
smoothed_labels = smooth_labels(
state.outputs,
labels,
smoothing=self.smoothing,
)
state.batch_set_item(self.target_key, smoothed_labels)
elif event == Event.AFTER_LOSS:
# restore the target to the non-smoothed version
state.batch_set_item(self.target_key, self.original_labels)
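# Worked example (illustrative; ``demo_logits`` and ``demo_target`` are toy inputs): with
# 4 classes and smoothing=0.1, the one-hot target for class 1, [0, 1, 0, 0], becomes
# 0.9 * [0, 1, 0, 0] + 0.1 / 4 = [0.025, 0.925, 0.025, 0.025].
if __name__ == '__main__':
    demo_logits = torch.randn(1, 4)
    demo_target = torch.tensor([1])
    print(smooth_labels(demo_logits, demo_target, smoothing=0.1))
    # tensor([[0.0250, 0.9250, 0.0250, 0.0250]])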
| composer-dev | composer/algorithms/label_smoothing/label_smoothing.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Shrinks targets towards a uniform distribution to counteract label noise. Introduced in `Rethinking the Inception
Architecture for Computer Vision <https://arxiv.org/abs/1512.00567>`_.
See the :doc:`Method Card </method_cards/label_smoothing>` for more details.
"""
from composer.algorithms.label_smoothing.label_smoothing import LabelSmoothing as LabelSmoothing
from composer.algorithms.label_smoothing.label_smoothing import smooth_labels as smooth_labels
__all__ = ['LabelSmoothing', 'smooth_labels']
| composer-dev | composer/algorithms/label_smoothing/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Randomly applies a sequence of image data augmentations
(`Cubuk et al, 2019 <https://arxiv.org/abs/1909.13719>`_) to an image. See
:class:`.RandAugment` or the :doc:`Method Card
</method_cards/randaugment>` for details.
"""
from composer.algorithms.randaugment.randaugment import RandAugment, RandAugmentTransform, randaugment_image
__all__ = ['RandAugment', 'RandAugmentTransform', 'randaugment_image']
| composer-dev | composer/algorithms/randaugment/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core RandAugment code."""
import functools
import textwrap
import weakref
from typing import List, TypeVar
import numpy as np
import torch
import torch.utils.data
from PIL.Image import Image as PillowImage
from torchvision.datasets import VisionDataset
from composer.algorithms.utils import augmentation_sets
from composer.algorithms.utils.augmentation_common import map_pillow_function
from composer.core import Algorithm, Event, State
from composer.datasets.utils import add_vision_dataset_transform
from composer.loggers import Logger
__all__ = ['RandAugment', 'RandAugmentTransform', 'randaugment_image']
ImgT = TypeVar('ImgT', torch.Tensor, PillowImage)
def randaugment_image(img: ImgT,
severity: int = 9,
depth: int = 2,
augmentation_set: List = augmentation_sets['all']) -> ImgT:
"""Randomly applies a sequence of image data augmentations to an image or batch of images.
This technique is adapted from `Cubuk et al, 2019 <https://arxiv.org/abs/1909.13719>`_.
See :class:`.RandAugment` or the :doc:`Method Card </method_cards/randaugment>`
for details. This function only acts on a single image (or batch of images) per call and
is unlikely to be used in a training loop. Use :class:`.RandAugmentTransform` to use
:class:`.RandAugment` as part of a :class:`torchvision.datasets.VisionDataset` ``transform``.
Example:
.. testcode::
import composer.functional as cf
from composer.algorithms.utils import augmentation_sets
randaugmented_image = cf.randaugment_image(
img=image,
severity=9,
depth=2,
augmentation_set=augmentation_sets["all"]
)
Args:
img (PIL.Image.Image | torch.Tensor): Image or batch of images to be RandAugmented.
severity (int, optional): See :class:`.RandAugment`.
depth (int, optional): See :class:`.RandAugment`.
augmentation_set (str, optional): See :class:`.RandAugment`.
Returns:
PIL.Image: RandAugmented image.
"""
def _randaugment_pil_image(img: PillowImage, severity: int, depth: int, augmentation_set: List) -> PillowImage:
# Iterate over augmentations
for _ in range(depth):
aug = np.random.choice(augmentation_set)
img = aug(img, severity)
return img
f_pil = functools.partial(_randaugment_pil_image, severity=severity, depth=depth, augmentation_set=augmentation_set)
return map_pillow_function(f_pil, img)
class RandAugmentTransform(torch.nn.Module):
"""Wraps :func:`.randaugment_image` in a ``torchvision``-compatible transform.
See :class:`.RandAugment` or the :doc:`Method Card </method_cards/randaugment>` for more details.
Example:
.. testcode::
import torchvision.transforms as transforms
from composer.algorithms.randaugment import RandAugmentTransform
randaugment_transform = RandAugmentTransform(
severity=9,
depth=2,
augmentation_set="all"
)
composed = transforms.Compose([
randaugment_transform,
transforms.RandomHorizontalFlip()
])
transformed_image = composed(image)
Args:
severity (int, optional): See :class:`.RandAugment`.
depth (int, optional): See :class:`.RandAugment`.
augmentation_set (str, optional): See
:class:`.RandAugment`.
"""
def __init__(self, severity: int = 9, depth: int = 2, augmentation_set: str = 'all'):
super().__init__()
if severity < 0 or severity > 10:
raise ValueError('RandAugment severity value must satisfy 0 ≤ severity ≤ 10')
if depth < 0:
raise ValueError('RandAugment depth value must be ≥ 0')
if augmentation_set not in augmentation_sets.keys():
raise KeyError(f'RandAugment augmentation_set is not one of {augmentation_sets.keys()}')
self.severity = severity
self.depth = depth
self.augmentation_set = augmentation_sets[augmentation_set]
def forward(self, img: ImgT) -> ImgT:
return randaugment_image(img=img,
severity=self.severity,
depth=self.depth,
augmentation_set=self.augmentation_set)
class RandAugment(Algorithm):
"""Randomly applies a sequence of image data augmentations to an image.
This algorithm (`Cubuk et al, 2019 <https://arxiv.org/abs/1909.13719>`_) runs on
:attr:`.Event.INIT` to insert a dataset
transformation. It is a no-op if this algorithm already applied itself on the
:attr:`.State.train_dataloader.dataset`.
See the :doc:`Method Card </method_cards/randaugment>` for more details.
Example:
.. testcode::
from composer.algorithms import RandAugment
from composer.trainer import Trainer
randaugment_algorithm = RandAugment(
severity=9,
depth=2,
augmentation_set="all"
)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[randaugment_algorithm],
optimizers=[optimizer]
)
Args:
severity (int, optional): Severity of augmentation operators (between 1 and 10). M
in the original paper. Default: ``9``.
depth (int, optional): Depth of augmentation chain. N in the original paper.
Default: ``2``.
augmentation_set (str, optional): Must be one of the following options
as also described in :attr:`.augmentation_primitives.augmentation_sets`:
* ``"all"``
Uses all augmentations from the paper.
* ``"safe"``
Like ``"all"``, but excludes transforms that are part of
the ImageNet-C/CIFAR10-C test sets
* ``"original"``
Like ``"all"``, but some of the implementations
are identical to the original Github repository, which contains
implementation specificities for the augmentations
``"color"``, ``"contrast"``, ``"sharpness"``, and ``"brightness"``. The
original implementations have an intensity sampling scheme that samples a
value bounded by 0.118 at a minimum, and a maximum value of
:math:`intensity \\times 0.18 + .1`, which ranges from 0.28 (intensity =
1) to 1.9 (intensity 10). These augmentations have different effects
depending on whether they are < 0 or > 0 (or < 1 or > 1).
``"all"`` uses implementations of ``"color"``, ``"contrast"``,
``"sharpness"``, and ``"brightness"`` that account for diverging effects
around 0 (or 1).
Default: ``"all"``.
"""
def __init__(self, severity: int = 9, depth: int = 2, augmentation_set: str = 'all'):
if severity < 0 or severity > 10:
raise ValueError('RandAugment severity value must be 0 ≤ severity ≤ 10')
if augmentation_set not in augmentation_sets.keys():
raise KeyError(f'randaugment_augmentation_set is not one of {augmentation_sets.keys()}')
self.severity = severity
self.depth = depth
self.augmentation_set = augmentation_set
self._transformed_datasets = weakref.WeakSet()
def match(self, event: Event, state: State) -> bool:
if event != Event.FIT_START:
return False
assert state.dataloader is not None, 'dataloader should be defined on fit start'
if not isinstance(state.dataloader, torch.utils.data.DataLoader):
raise TypeError(f'{type(self).__name__} requires a PyTorch dataloader.')
return state.dataloader.dataset not in self._transformed_datasets
def apply(self, event: Event, state: State, logger: Logger) -> None:
ra = RandAugmentTransform(severity=self.severity, depth=self.depth, augmentation_set=self.augmentation_set)
assert isinstance(state.dataloader, torch.utils.data.DataLoader), 'The dataloader type is checked on match()'
dataset = state.dataloader.dataset
if not isinstance(dataset, VisionDataset):
raise TypeError(
textwrap.dedent(f"""\
To use {type(self).__name__}, the dataset must be a
{VisionDataset.__qualname__}, not {type(dataset).__name__}"""))
add_vision_dataset_transform(dataset, ra, is_tensor_transform=False)
self._transformed_datasets.add(dataset)
| composer-dev | composer/algorithms/randaugment/randaugment.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Replaces batch normalization modules with `Ghost Batch Normalization <https://arxiv.org/abs/1705.08741>`_ modules
that simulate the effect of using a smaller batch size.
See :class:`~composer.algorithms.GhostBatchNorm` or the :doc:`Method Card </method_cards/ghost_batchnorm>` for details.
"""
from composer.algorithms.ghost_batchnorm.ghost_batchnorm import GhostBatchNorm as GhostBatchNorm
from composer.algorithms.ghost_batchnorm.ghost_batchnorm import apply_ghost_batchnorm as apply_ghost_batchnorm
__all__ = ['GhostBatchNorm', 'apply_ghost_batchnorm']
| composer-dev | composer/algorithms/ghost_batchnorm/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import logging
import math
from typing import Optional, Sequence, Union
import torch
from torch.optim import Optimizer
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.utils import module_surgery
log = logging.getLogger(__name__)
_TORCH_BATCHNORM_BASE_CLASS = torch.nn.modules.batchnorm._BatchNorm
def apply_ghost_batchnorm(model: torch.nn.Module,
ghost_batch_size: int = 32,
optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None) -> None:
"""Replace batch normalization modules with ghost batch normalization modules.
Ghost batch normalization modules split their input into chunks of
``ghost_batch_size`` samples and run batch normalization on each chunk
separately. ``dim=0`` is assumed to be the sample axis.
Args:
model (torch.nn.Module): The model to modify in-place.
ghost_batch_size (int, optional): Size of sub-batches to normalize over. Default: ``32``.
optimizers (torch.optim.Optimizer | Sequence[torch.optim.Optimizer], optional):
Existing optimizers bound to ``model.parameters()``. All optimizers that have already been
constructed with ``model.parameters()`` must be specified here so that
they will optimize the correct parameters.
If the optimizer(s) are constructed *after* calling this function,
then it is safe to omit this parameter. These optimizers will see the correct
model parameters.
Returns:
The number of modules modified.
Example:
.. testcode::
import composer.functional as cf
from torchvision import models
model = models.resnet50()
cf.apply_ghost_batchnorm(model)
"""
def maybe_replace(module: torch.nn.Module, module_index: int) -> Optional[torch.nn.Module]:
already_ghost_batchnormed = hasattr(module, '_already_ghost_batchnormed') and module._already_ghost_batchnormed
if isinstance(module, _TORCH_BATCHNORM_BASE_CLASS) and not already_ghost_batchnormed:
return _GhostBatchNorm.from_batchnorm(module, ghost_batch_size=ghost_batch_size)
# we have to specify class names explicitly because replace_module_classes
# now checks if `module.__class__ == cls`, rather than `isinstance(module, cls)`
transforms = {cls: maybe_replace for cls in [torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d]}
module_surgery.replace_module_classes(model, optimizers=optimizers, policies=transforms)
class GhostBatchNorm(Algorithm):
"""Replaces batch normalization modules with
`Ghost Batch Normalization <https://arxiv.org/abs/1705.08741>`_ modules
that simulate the effect of using a smaller batch size.
Works by splitting input into chunks of ``ghost_batch_size`` samples and
running batch normalization on each chunk separately. ``dim=0`` is assumed to
be the sample axis.
Runs on :attr:`.Event.INIT`.
Args:
ghost_batch_size (int, optional): size of sub-batches to normalize over. Default: ``32``.
"""
def __init__(self, ghost_batch_size: int = 32):
self.ghost_batch_size = ghost_batch_size
def __repr__(self) -> str:
return f'{self.__class__.__name__}(ghost_batch_size={self.ghost_batch_size})'
@staticmethod
def required_on_load() -> bool:
return True
def match(self, event: Event, state: State) -> bool:
return event == Event.INIT
def apply(self, event: Event, state: State, logger: Optional[Logger] = None) -> None:
assert state.model is not None, 'Model must be in state'
apply_ghost_batchnorm(model=state.model, optimizers=state.optimizers, ghost_batch_size=self.ghost_batch_size)
self._log_results(event, state, logger)
def _log_results(self, event: Event, state: State, logger: Optional[Logger] = None) -> None:
"""Logs the result of GhostBatchNorm applications, including the number of modules that have been replaced."""
assert state.model is not None
num_new_modules = module_surgery.count_module_instances(state.model, _GhostBatchNorm)
classname = 'GhostBatchNorm'
module_name = 'GhostBatchNorm'
# python logger
log.info(f'Applied {classname} to model {state.model.__class__.__name__} '
f'with ghost_batch_size={self.ghost_batch_size}, '
f'Model now has {num_new_modules} {module_name} modules')
if logger is not None:
logger.log_hyperparameters({
f'{classname}/num_new_modules': num_new_modules,
})
def _corresponding_ghost_batchnorm_type(batchnorm: torch.nn.Module):
if isinstance(batchnorm, torch.nn.BatchNorm1d):
return GhostBatchNorm1d
if isinstance(batchnorm, torch.nn.BatchNorm2d):
return GhostBatchNorm2d
if isinstance(batchnorm, torch.nn.BatchNorm3d):
return GhostBatchNorm3d
raise ValueError(f'Input was of type {type(batchnorm)}, not one of '
'torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d')
class _GhostBatchNorm(torch.nn.Module):
"""`Ghost batch normalization <https://arxiv.org/abs/1705.08741>`_ layer.
Works by splitting input into chunks of ``ghost_batch_size`` samples and
running batch normalization on each chunk separately. ``dim=0`` is assumed to
be the sample axis.
See also `torch.nn.BatchNorm1d <https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm1d.html>`_,
`torch.nn.BatchNorm2d <https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm2d.html>`_, and
`torch.nn.BatchNorm3d <https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm3d.html>`_.
Args:
base_batchnorm (torch.nn.modules.batchnorm._BatchNorm): A batch normalization module to be applied to each chunk
ghost_batch_size (int, optional): the size of the chunks passed into the underlying
batch normalization. Default: ``32``.
Raises:
ValueError: If ``ghost_batch_size`` exceeds the number of samples in
the batch provided to `forward`. This might happen when doing
data-parallel training, because the per-worker batch size is usually
much smaller than the overall batch size.
"""
def __init__(self, base_batchnorm: _TORCH_BATCHNORM_BASE_CLASS, ghost_batch_size: int = 32):
super().__init__()
self.ghost_batch_size = ghost_batch_size
self.batchnorm = base_batchnorm
self.batchnorm._already_ghost_batchnormed = True # Mark to avoid rewrapping on duplicate calls
def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
batch_size = input.shape[0]
if batch_size < self.ghost_batch_size:
raise ValueError(f'Worker batch size {batch_size} < ghost_batch_size {self.ghost_batch_size}')
nchunks: int = int(math.ceil(batch_size / self.ghost_batch_size))
has_momentum = self.batchnorm.momentum is not None
original_momentum: float = self.batchnorm.momentum
if self.training and has_momentum:
# applying the same batchnorm multiple times greatly increases
# the variance of the moving average statistics; reduce the
# exponential moving average constant proportionally
# to compensate.
self._scale_momentum(nchunks)
normalized_chunks = [self.batchnorm(chunk) for chunk in input.chunk(nchunks, 0)]
if self.training and has_momentum:
self._unscale_momentum(original_momentum)
return torch.cat(normalized_chunks, dim=0)
@staticmethod
def from_batchnorm(module: torch.nn.Module, ghost_batch_size: int) -> '_GhostBatchNorm':
assert isinstance(module, _TORCH_BATCHNORM_BASE_CLASS), 'Module is not a BatchNorm subclass!'
bn_type = _corresponding_ghost_batchnorm_type(module)
return bn_type(ghost_batch_size=ghost_batch_size, base_batchnorm=module)
@torch.jit.unused
def _scale_momentum(self, nchunks: int):
self.batchnorm.momentum = float(self.batchnorm.momentum) / nchunks
@torch.jit.unused
def _unscale_momentum(self, original_momentum: float):
self.batchnorm.momentum = original_momentum
class GhostBatchNorm1d(_GhostBatchNorm):
pass
class GhostBatchNorm2d(_GhostBatchNorm):
pass
class GhostBatchNorm3d(_GhostBatchNorm):
pass
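# Minimal sketch (illustrative; ``demo`` is a toy conv/BN stack): after surgery the
# BatchNorm2d is wrapped so a batch of 8 samples with ghost_batch_size=4 is normalized as
# two independent chunks of 4, while the wrapped module keeps one set of running statistics
# (with its momentum rescaled per chunk during training).
if __name__ == '__main__':
    demo = torch.nn.Sequential(torch.nn.Conv2d(3, 4, 3), torch.nn.BatchNorm2d(4))
    apply_ghost_batchnorm(demo, ghost_batch_size=4)
    print(module_surgery.count_module_instances(demo, _GhostBatchNorm))  # 1
    print(demo(torch.randn(8, 3, 8, 8)).shape)  # torch.Size([8, 4, 6, 6])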
| composer-dev | composer/algorithms/ghost_batchnorm/ghost_batchnorm.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""`CutMix <https://arxiv.org/abs/1905.04899>`_ trains the network on non-overlapping combinations of pairs of examples
and interpolated targets rather than individual examples and targets.
This is done by taking a non-overlapping combination of a given batch X with a
randomly permuted copy of X.
See the :doc:`Method Card </method_cards/cutmix>` for more details.
"""
from composer.algorithms.cutmix.cutmix import CutMix as CutMix
from composer.algorithms.cutmix.cutmix import cutmix_batch as cutmix_batch
__all__ = ['CutMix', 'cutmix_batch']
| composer-dev | composer/algorithms/cutmix/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core CutMix classes and functions."""
from __future__ import annotations
import logging
from typing import Any, Callable, Optional, Tuple, Union
import numpy as np
import torch
from torch import Tensor
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.loss.utils import ensure_targets_one_hot
log = logging.getLogger(__name__)
__all__ = ['CutMix', 'cutmix_batch']
def cutmix_batch(input: Tensor,
target: Tensor,
length: Optional[float] = None,
alpha: float = 1.,
bbox: Optional[Tuple] = None,
indices: Optional[torch.Tensor] = None,
uniform_sampling: bool = False) -> Tuple[torch.Tensor, torch.Tensor, float, Tuple]:
"""Create new samples using combinations of pairs of samples.
This is done by masking a region of each image in ``input`` and filling
the masked region with the corresponding content from a random different
image in ``input``.
The position of the masked region is determined by drawing a center point
uniformly at random from all spatial positions.
The area of the masked region is computed using either ``length`` or
``alpha``. If ``length`` is provided, it directly determines the size
of the masked region. If it is not provided, the fraction of the input
area to mask is drawn from a ``Beta(alpha, alpha)`` distribution.
The original paper uses a fixed value of ``alpha = 1``.
Alternatively, one may provide a bounding box to mask directly, in
which case ``alpha`` is ignored and ``length`` must not be provided.
The same masked region is used for the whole batch.
.. note::
The masked region is clipped at the spatial boundaries of the inputs.
This means that there is no padding required, but the actual region
used may be smaller than the nominal size computed using ``length``
or ``alpha``.
Args:
input (torch.Tensor): input tensor of shape ``(N, C, H, W)``.
target (torch.Tensor): target tensor of either shape ``N`` or
``(N, num_classes)``. In the former case, elements of ``target``
must be integer class ids in the range ``0..num_classes``. In the
latter case, rows of ``target`` may be arbitrary vectors of targets,
including, e.g., one-hot encoded class labels, smoothed class
labels, or multi-output regression targets.
length (float, optional): Relative side length of the masked region.
If specified, ``length`` is interpreted as a fraction of ``H`` and
``W``, and the resulting box is of size ``(length * H, length * W)``.
Default: ``None``.
alpha (float, optional): parameter for the Beta distribution over
the fraction of the input to mask. Ignored if ``length`` is
provided. Default: ``1``.
bbox (tuple, optional): predetermined ``(x1, y1, x2, y2)``
coordinates of the bounding box. Default: ``None``.
indices (torch.Tensor, optional): Permutation of the samples to use.
Default: ``None``.
uniform_sampling (bool, optional): If ``True``, sample the bounding box
such that each pixel has an equal probability of being mixed.
If ``False``, defaults to the sampling used in the original paper
implementation. Default: ``False``.
Returns:
input_mixed (torch.Tensor): batch of inputs after cutmix has been
applied.
target_perm (torch.Tensor): The labels of the mixed-in examples
area (float): The fractional area of the unmixed region.
bounding_box (tuple): the ``(left, top, right, bottom)`` coordinates of
the bounding box that defines the mixed region.
Raises:
ValueError: If both ``length`` and ``bbox`` are provided.
Example:
.. testcode::
import torch
from composer.functional import cutmix_batch
N, C, H, W = 2, 3, 4, 5
num_classes = 10
X = torch.randn(N, C, H, W)
y = torch.randint(num_classes, size=(N,))
X_mixed, target_perm, area, _ = cutmix_batch(X, y, alpha=0.2)
"""
if bbox is not None and length is not None:
raise ValueError(f'Cannot provide both length and bbox; got {length} and {bbox}')
# Create shuffled indices across the batch in preparation for cutting and mixing.
# Use given indices if there are any.
if indices is None:
shuffled_idx = _gen_indices(input)
else:
shuffled_idx = indices
H, W = input.shape[-2], input.shape[-1]
# figure out fraction of area to cut
if length is None:
cutmix_lambda = _gen_cutmix_coef(alpha)
else:
cut_w = int(length * W)
cut_h = int(length * H)
cutmix_lambda = (cut_w * cut_h) / (H * W)
# Create the new inputs.
X_cutmix = torch.clone(input)
# Sample a rectangular box using lambda. Use variable names from the paper.
if bbox:
rx, ry, rw, rh = bbox[0], bbox[1], bbox[2], bbox[3]
box_area = (rw - rx) * (rh - ry)
cutmix_lambda = box_area / (H * W)
else:
rx, ry, rw, rh = _rand_bbox(input.shape[2], input.shape[3], cutmix_lambda, uniform_sampling=uniform_sampling)
bbox = (rx, ry, rw, rh)
# Fill in the box with a part of a random image.
X_cutmix[:, :, rx:rw, ry:rh] = X_cutmix[shuffled_idx, :, rx:rw, ry:rh]
# adjust lambda to exactly match pixel ratio. This is an implementation detail taken from
# the original implementation, and implies lambda is not actually beta distributed.
adjusted_lambda = _adjust_lambda(input, bbox)
# Make a shuffled version of y for interpolation
y_shuffled = target[shuffled_idx]
return X_cutmix, y_shuffled, adjusted_lambda, bbox
class CutMix(Algorithm):
"""`CutMix <https://arxiv.org/abs/1905.04899>`_ trains the network on non-overlapping combinations
of pairs of examples and interpolated targets rather than individual examples and targets.
This is done by taking a non-overlapping combination of a given batch X with a
randomly permuted copy of X. The area is drawn from a ``Beta(alpha, alpha)``
distribution.
Training in this fashion sometimes reduces generalization error.
Args:
alpha (float, optional): the pseudocount for the Beta distribution
used to sample area parameters. As ``alpha`` grows, the two samples
in each pair tend to be weighted more equally. As ``alpha``
approaches 0 from above, the combination approaches only using
one element of the pair. Default: ``1``.
interpolate_loss (bool, optional): Interpolates the loss rather than the labels.
A useful trick when using a cross entropy loss. Will produce incorrect behavior
if the loss is not a linear function of the targets. Default: ``False``
uniform_sampling (bool, optional): If ``True``, sample the bounding
box such that each pixel has an equal probability of being mixed.
If ``False``, defaults to the sampling used in the original
paper implementation. Default: ``False``.
input_key (str | int | Tuple[Callable, Callable] | Any, optional): A key that indexes to the input
from the batch. Can also be a pair of get and set functions, where the getter
is assumed to be first in the pair. The default is 0, which corresponds to any sequence, where the first element
is the input. Default: ``0``.
target_key (str | int | Tuple[Callable, Callable] | Any, optional): A key that indexes to the target
from the batch. Can also be a pair of get and set functions, where the getter
is assumed to be first in the pair. The default is 1, which corresponds to any sequence, where the second element
is the target. Default: ``1``.
Example:
.. testcode::
from composer.algorithms import CutMix
algorithm = CutMix(alpha=0.2)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[algorithm],
optimizers=[optimizer]
)
"""
def __init__(
self,
alpha: float = 1.,
interpolate_loss: bool = False,
uniform_sampling: bool = False,
input_key: Union[str, int, Tuple[Callable, Callable], Any] = 0,
target_key: Union[str, int, Tuple[Callable, Callable], Any] = 1,
):
self.alpha = alpha
self.interpolate_loss = interpolate_loss
self._uniform_sampling = uniform_sampling
self._indices = torch.Tensor()
self._cutmix_lambda = 0.0
self._bbox: Tuple[int, int, int, int] = (0, 0, 0, 0)
self._permuted_target = torch.Tensor()
self._adjusted_lambda = 0.0
self.input_key, self.target_key = input_key, target_key
def match(self, event: Event, state: State) -> bool:
if self.interpolate_loss:
return event in [Event.BEFORE_FORWARD, Event.BEFORE_BACKWARD]
else:
return event in [Event.BEFORE_FORWARD, Event.BEFORE_LOSS]
def apply(self, event: Event, state: State, logger: Logger) -> None:
input = state.batch_get_item(key=self.input_key)
target = state.batch_get_item(key=self.target_key)
if not isinstance(input, torch.Tensor):
raise NotImplementedError('Multiple tensors for inputs not supported yet.')
if not isinstance(target, torch.Tensor):
raise NotImplementedError('Multiple tensors for targets not supported yet.')
alpha = self.alpha
if event == Event.BEFORE_FORWARD:
# these are saved only for testing
self._indices = _gen_indices(input)
_cutmix_lambda = _gen_cutmix_coef(alpha)
self._bbox = _rand_bbox(input.shape[2],
input.shape[3],
_cutmix_lambda,
uniform_sampling=self._uniform_sampling)
self._adjusted_lambda = _adjust_lambda(input, self._bbox)
new_input, self._permuted_target, _, _ = cutmix_batch(input=input,
target=target,
alpha=self.alpha,
bbox=self._bbox,
indices=self._indices,
uniform_sampling=self._uniform_sampling)
state.batch_set_item(key=self.input_key, value=new_input)
if not self.interpolate_loss and event == Event.BEFORE_LOSS:
# Interpolate the targets
if not isinstance(state.outputs, torch.Tensor):
raise NotImplementedError('Multiple output tensors not supported yet')
if not isinstance(target, torch.Tensor):
raise NotImplementedError('Multiple target tensors not supported yet')
if self._permuted_target.ndim > 2 and self._permuted_target.shape[-2:] == input.shape[-2:]:
# Target has the same height and width as the input, no need to interpolate.
x1, y1, x2, y2 = self._bbox
target[..., x1:x2, y1:y2] = self._permuted_target[..., x1:x2, y1:y2]
else:
# Need to interpolate on dense/one-hot targets.
target = ensure_targets_one_hot(state.outputs, target)
permuted_target = ensure_targets_one_hot(state.outputs, self._permuted_target)
# Interpolate to get the new target
target = self._adjusted_lambda * target + (1 - self._adjusted_lambda) * permuted_target
# Create the new batch
state.batch_set_item(key=self.target_key, value=target)
if self.interpolate_loss and event == Event.BEFORE_BACKWARD:
if self._permuted_target.ndim > 2 and self._permuted_target.shape[-2:] == input.shape[-2:]:
raise ValueError("Can't interpolate loss when target has the same height and width as the input")
# Grab the loss function
if hasattr(state.model, 'loss'):
loss_fn = state.model.loss
elif hasattr(state.model, 'module') and hasattr(state.model.module, 'loss'):
if isinstance(state.model.module, torch.nn.Module):
loss_fn = state.model.module.loss
else:
raise TypeError('state.model.module must be a torch module')
else:
raise AttributeError('Loss must be accessible via model.loss or model.module.loss')
# Verify that the loss is callable
if not callable(loss_fn):
raise TypeError('Loss must be callable')
# Interpolate the loss
new_loss = loss_fn(state.outputs, (input, self._permuted_target))
if not isinstance(state.loss, torch.Tensor):
raise NotImplementedError('Multiple losses not supported yet')
if not isinstance(new_loss, torch.Tensor):
raise NotImplementedError('Multiple losses not supported yet')
state.loss = self._adjusted_lambda * state.loss + (1 - self._adjusted_lambda) * new_loss
def _gen_indices(x: Tensor) -> Tensor:
"""Generates indices of a random permutation of elements of a batch.
Args:
x (torch.Tensor): input tensor of shape ``(B, d1, d2, ..., dn)``,
B is batch size, d1-dn are feature dimensions.
Returns:
indices: A random permutation of the batch indices.
"""
return torch.randperm(x.shape[0])
def _gen_cutmix_coef(alpha: float) -> float:
"""Generates lambda from ``Beta(alpha, alpha)``.
Args:
alpha (float): Parameter for the ``Beta(alpha, alpha)`` distribution.
Returns:
cutmix_lambda: Lambda parameter for performing CutMix.
"""
# First check if alpha is positive.
assert alpha >= 0
# Draw the area parameter from a beta distribution.
# Check here is needed because beta distribution requires alpha > 0
# but alpha = 0 is fine for cutmix.
if alpha == 0:
cutmix_lambda = 0
else:
cutmix_lambda = np.random.beta(alpha, alpha)
return cutmix_lambda
def _rand_bbox(W: int,
H: int,
cutmix_lambda: float,
cx: Optional[int] = None,
cy: Optional[int] = None,
uniform_sampling: bool = False) -> Tuple[int, int, int, int]:
"""Randomly samples a bounding box with area determined by ``cutmix_lambda``.
Adapted from original implementation https://github.com/clovaai/CutMix-PyTorch
Args:
W (int): Width of the image
H (int): Height of the image
cutmix_lambda (float): Lambda param from cutmix, used to set the area of the box.
cx (int, optional): Optional x coordinate of the center of the box.
cy (int, optional): Optional y coordinate of the center of the box.
uniform_sampling (bool, optional): If true, sample the bounding box such that each pixel
has an equal probability of being mixed. If false, defaults to the
sampling used in the original paper implementation. Default: ``False``.
Returns:
bbx1: Leftmost edge of the bounding box
bby1: Top edge of the bounding box
bbx2: Rightmost edge of the bounding box
bby2: Bottom edge of the bounding box
"""
cut_ratio = np.sqrt(1.0 - cutmix_lambda)
cut_w = int(W * cut_ratio)
cut_h = int(H * cut_ratio)
# uniform
if cx is None:
if uniform_sampling is True:
cx = np.random.randint(-cut_w // 2, high=W + cut_w // 2)
else:
cx = np.random.randint(W)
if cy is None:
if uniform_sampling is True:
cy = np.random.randint(-cut_h // 2, high=H + cut_h // 2)
else:
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
def _adjust_lambda(x: Tensor, bbox: Tuple) -> float:
"""Rescale the cutmix lambda according to the size of the clipped bounding box.
Args:
x (torch.Tensor): input tensor of shape ``(B, d1, d2, ..., dn)``, B is batch size, d1-dn
are feature dimensions.
bbox (tuple): (x1, y1, x2, y2) coordinates of the bounding box, obeying x2 > x1, y2 > y1.
Returns:
adjusted_lambda: Rescaled cutmix_lambda to account for part of the bounding box
being potentially out of bounds of the input.
"""
rx, ry, rw, rh = bbox[0], bbox[1], bbox[2], bbox[3]
adjusted_lambda = 1 - ((rw - rx) * (rh - ry) / (x.size()[-1] * x.size()[-2]))
return adjusted_lambda
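# An illustrative sketch (not part of the algorithm's control flow) tying the two
# helpers above together: sample a box for a given lambda, then rescale lambda by
# the area of the clipped box. The tensor shape below is an arbitrary example.
def _example_bbox_and_adjusted_lambda() -> None:
    x = torch.randn(4, 3, 32, 32)
    cutmix_lambda = _gen_cutmix_coef(alpha=1.0)
    bbox = _rand_bbox(x.shape[2], x.shape[3], cutmix_lambda)
    print(bbox, _adjust_lambda(x, bbox))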
| composer-dev | composer/algorithms/cutmix/cutmix.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core SelectiveBackprop class and functions."""
from __future__ import annotations
import inspect
from typing import Any, Callable, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from torch.nn import functional as F
from composer.core import Algorithm, Event, State, get_precision_context
from composer.loggers import Logger
from composer.models import ComposerModel
__all__ = ['SelectiveBackprop', 'select_using_loss', 'should_selective_backprop']
def should_selective_backprop(
current_duration: float,
batch_idx: int,
start: float = 0.5,
end: float = 0.9,
interrupt: int = 2,
) -> bool:
"""Decides if selective backprop should be run based on time in training.
Returns true if the ``current_duration`` is between ``start`` and
``end``. It is recommended that SB be applied during the later stages of
a training run, once the model has already "learned" easy examples.
To preserve convergence, SB can be interrupted with vanilla minibatch
gradient steps every ``interrupt`` steps. When ``interrupt=0``, SB will be
used at every step during the SB interval. When ``interrupt=2``, SB will
alternate with vanilla minibatch steps.
Args:
current_duration (float): The elapsed training duration. Must be
within ``[0.0, 1.0)``.
batch_idx (int): The current batch within the epoch.
start (float, optional): The duration at which selective backprop
should be enabled, as a percentage. Default: ``0.5``.
end (float, optional): The duration at which selective backprop
should be disabled. Default: ``0.9``.
interrupt (int, optional): The number of batches between vanilla
minibatch gradient updates. Default: ``2``.
Returns:
bool: If selective backprop should be performed on this batch.
"""
is_interval = ((current_duration >= start) and (current_duration < end))
is_step = ((interrupt == 0) or ((batch_idx + 1) % interrupt != 0))
return is_interval and is_step
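# A small illustrative sketch of the schedule above: with the defaults, selective
# backprop is active for elapsed durations in [0.5, 0.9) and, because
# ``interrupt=2``, every second batch inside that window still takes a vanilla
# step. The sample durations below are arbitrary.
def _example_should_selective_backprop() -> None:
    for duration in (0.25, 0.6, 0.95):
        decisions = [should_selective_backprop(duration, batch_idx=i) for i in range(4)]
        print(duration, decisions)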
def select_using_loss(input: torch.Tensor,
target: torch.Tensor,
model: Callable[[Union[torch.Tensor, Sequence[torch.Tensor]]], torch.Tensor],
loss_fun: Callable,
keep: float = 0.5,
scale_factor: float = 1) -> Tuple[torch.Tensor, torch.Tensor]:
"""Prunes minibatches as a subroutine of :class:`.SelectiveBackprop`. Computes the loss function on the provided training
examples and runs minibatches according to the difficulty. The fraction of the minibatch that is kept for gradient
computation is specified by the argument ``0 <= keep <= 1``.
To speed up SB's selection forward pass, the argument ``scale_factor`` can
be used to spatially downsample input tensors. The full-sized inputs
will still be used for the weight gradient computation.
Args:
input (torch.Tensor): Input tensor to prune.
target (torch.Tensor): Target tensor to prune.
model (Callable): Model with which to predict outputs.
loss_fun (Callable): Loss function of the form ``loss(outputs, targets, reduction='none')``.
The function must take the keyword argument ``reduction='none'``
to ensure that per-sample losses are returned.
keep (float, optional): Fraction of examples in the batch to keep. Default: ``0.5``.
scale_factor (float, optional): Multiplier between 0 and 1 for spatial size. Downsampling
requires the input tensor to be at least 3D. Default: ``1``.
Returns:
(torch.Tensor, torch.Tensor): The pruned batch of inputs and targets
Raises:
ValueError: If ``scale_factor > 1``.
TypeError: If ``loss_fun`` has the wrong signature or is not callable.
.. note::
This function runs an extra forward pass through the model on the batch of data.
If you are using a non-default precision, ensure that this forward pass
runs in your desired precision. For example:
.. testsetup::
N_sb, D_sb = 16, 8
X_sb, y_sb = torch.randn(N_sb, D_sb), torch.randint(2, (N_sb,))
lin_model = torch.nn.Linear(X_sb.shape[1], 1)
# A simple per-sample binary loss, defined here only so the example below runs.
def loss_fun(output, target, reduction='none'):
    return torch.nn.functional.binary_cross_entropy_with_logits(
        output.squeeze(-1), target.float(), reduction=reduction)
.. doctest::
>>> import torch
>>> from composer.algorithms.selective_backprop import select_using_loss
>>> with torch.cuda.amp.autocast(True):
... X_new, y_new = select_using_loss(
... X_sb,
... y_sb,
... lin_model,
... loss_fun,
... keep=0.5,
... scale_factor=1
... )
"""
INTERPOLATE_MODES = {3: 'linear', 4: 'bilinear', 5: 'trilinear'}
interp_mode = 'bilinear'
if scale_factor != 1:
if input.dim() not in INTERPOLATE_MODES:
raise ValueError(f'Input must be 3D, 4D, or 5D if scale_factor != 1, got {input.dim()}')
interp_mode = INTERPOLATE_MODES[input.dim()]
if scale_factor > 1:
raise ValueError('scale_factor must be <= 1')
if callable(loss_fun):
sig = inspect.signature(loss_fun)
if 'reduction' not in sig.parameters:
raise TypeError('Loss function `loss_fun` must take a keyword argument `reduction`.')
else:
raise TypeError('Loss function must be callable')
with torch.no_grad():
N = input.shape[0]
# Maybe interpolate
if scale_factor < 1:
X_scaled = F.interpolate(input,
scale_factor=scale_factor,
mode=interp_mode,
align_corners=False,
recompute_scale_factor=False)
else:
X_scaled = input
# Get per-example losses
out = model(X_scaled)
losses = loss_fun(out, target, reduction='none')
# Sort losses
sorted_idx = torch.argsort(losses)
n_select = int(keep * N)
# Sample by loss
percs = np.arange(0.5, N, 1) / N
probs = percs**((1.0 / keep) - 1.0)
probs = probs / np.sum(probs)
select_percs_idx = np.random.choice(N, n_select, replace=False, p=probs)
select_idx = sorted_idx[select_percs_idx]
return input[select_idx], target[select_idx]
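# A minimal, self-contained sketch of calling ``select_using_loss`` outside the
# trainer, using ``F.cross_entropy`` as the per-sample loss; the toy model and
# sizes are illustrative assumptions. With ``keep=0.5``, half of the 16 examples
# survive pruning.
def _example_select_using_loss() -> None:
    X = torch.randn(16, 8)
    y = torch.randint(4, (16,))
    toy_model = torch.nn.Linear(8, 4)
    X_new, y_new = select_using_loss(X, y, toy_model, F.cross_entropy, keep=0.5)
    print(X_new.shape, y_new.shape)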
class SelectiveBackprop(Algorithm):
"""Selectively backpropagate gradients from a subset of each batch.
Based on (`Jiang et al, 2019`_), Selective Backprop (SB) prunes minibatches
according to the difficulty of the individual training examples, and only
computes weight gradients over the pruned subset, reducing iteration time, and
speeding up training.
The fraction of the minibatch that is kept for gradient computation is
specified by the argument ``0 <= keep <= 1``.
To speed up SB's selection forward pass, the argument ``scale_factor`` can
be used to spatially downsample input image tensors. The full-sized inputs
will still be used for the weight gradient computation.
To preserve convergence, SB can be interrupted with vanilla minibatch
gradient steps every ``interrupt`` steps. When ``interrupt=0``, SB will be
used at every step during the SB interval. When ``interrupt=2``, SB will
alternate with vanilla minibatch steps.
.. _Jiang et al, 2019: https://arxiv.org/abs/1910.00762
Args:
start (float, optional): SB interval start as fraction of training duration.
Default: ``0.5``.
end (float, optional): SB interval end as fraction of training duration.
Default: ``0.9``.
keep (float, optional): fraction of minibatch to select and keep for gradient computation.
Default: ``0.5``.
scale_factor (float, optional): scale for downsampling input for selection forward pass.
Default: ``1.``.
interrupt (int, optional): interrupt SB with a vanilla minibatch step every
``interrupt`` batches. Default: ``2``.
input_key (str | int | Tuple[Callable, Callable] | Any, optional): A key that indexes to the input
from the batch. Can also be a pair of get and set functions, where the getter
is assumed to be first in the pair. The default is 0, which corresponds to any sequence, where the first element
is the input. Default: ``0``.
target_key (str | int | Tuple[Callable, Callable] | Any, optional): A key that indexes to the target
from the batch. Can also be a pair of get and set functions, where the getter
is assumed to be first in the pair. The default is 1, which corresponds to any sequence, where the second element
is the target. Default: ``1``.
Example:
.. testcode::
from composer.algorithms import SelectiveBackprop
algorithm = SelectiveBackprop(start=0.5, end=0.9, keep=0.5)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[algorithm],
optimizers=[optimizer]
)
"""
def __init__(
self,
start: float = 0.5,
end: float = 0.9,
keep: float = 0.5,
scale_factor: float = 1.,
interrupt: int = 2,
input_key: Union[str, int, Tuple[Callable, Callable], Any] = 0,
target_key: Union[str, int, Tuple[Callable, Callable], Any] = 1,
):
self.start = start
self.end = end
self.keep = keep
self.scale_factor = scale_factor
self.interrupt = interrupt
self._loss_fn = None # set on Event.INIT
self.input_key, self.target_key = input_key, target_key
def match(self, event: Event, state: State) -> bool:
if event == Event.INIT:
return True
if event != Event.AFTER_DATALOADER:
return False
is_keep = (self.keep < 1)
if not is_keep:
return False
elapsed_duration = state.get_elapsed_duration()
assert elapsed_duration is not None, 'elapsed duration should be set on Event.AFTER_DATALOADER'
is_chosen = should_selective_backprop(
current_duration=float(elapsed_duration),
batch_idx=int(state.timestamp.batch_in_epoch),
start=self.start,
end=self.end,
interrupt=self.interrupt,
)
return is_chosen
def apply(self, event: Event, state: State, logger: Optional[Logger] = None) -> None:
if event == Event.INIT:
if self._loss_fn is None:
if not isinstance(state.model, ComposerModel):
raise RuntimeError('Model must be of type ComposerModel')
self._loss_fn = state.model.loss
return
input, target = state.batch_get_item(key=self.input_key), state.batch_get_item(key=self.target_key)
assert isinstance(input, torch.Tensor) and isinstance(target, torch.Tensor), \
'Multiple tensors not supported for this method yet.'
# Model expected to only take in input, not the full batch
model = lambda X: state.model((X, None))
def loss(p, y, reduction='none'):
assert self._loss_fn is not None, 'loss_fn should be set on Event.INIT'
return self._loss_fn(p, (torch.Tensor(), y), reduction=reduction)
with get_precision_context(state.precision):
new_input, new_target = select_using_loss(input, target, model, loss, self.keep, self.scale_factor)
state.batch_set_item(self.input_key, new_input)
state.batch_set_item(self.target_key, new_target)
| composer-dev | composer/algorithms/selective_backprop/selective_backprop.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""`Selective Backprop <https://arxiv.org/abs/1910.00762>`_ prunes minibatches according to the difficulty of the
individual training examples, and only computes weight gradients over the pruned subset, reducing iteration time and
speeding up training.
The algorithm runs on :attr:`.Event.INIT` and :attr:`.Event.AFTER_DATALOADER`.
On Event.INIT, it gets the loss function before the model is wrapped. On Event.AFTER_DATALOADER, it applies selective
backprop if the time is between ``self.start`` and ``self.end``.
See the :doc:`Method Card </method_cards/selective_backprop>` for more details.
"""
from composer.algorithms.selective_backprop.selective_backprop import SelectiveBackprop as SelectiveBackprop
from composer.algorithms.selective_backprop.selective_backprop import select_using_loss as select_using_loss
from composer.algorithms.selective_backprop.selective_backprop import \
should_selective_backprop as should_selective_backprop
__all__ = ['SelectiveBackprop', 'select_using_loss', 'should_selective_backprop']
| composer-dev | composer/algorithms/selective_backprop/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Implements stochastic depth (`Huang et al, 2016 <https://arxiv.org/abs/1603.09382>`_) for ResNet blocks.
See :class:`.StochasticDepth`, the sample-wise stochastic depth :doc:`method card
</method_cards/stochastic_depth_samplewise>`, or the block-wise stochastic depth :doc:`method card
</method_cards/stochastic_depth>` for details.
"""
from composer.algorithms.stochastic_depth.stochastic_depth import StochasticDepth as StochasticDepth
from composer.algorithms.stochastic_depth.stochastic_depth import apply_stochastic_depth as apply_stochastic_depth
__all__ = ['StochasticDepth', 'apply_stochastic_depth']
| composer-dev | composer/algorithms/stochastic_depth/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Modules and layers for applying the Stochastic Depth algorithm."""
from __future__ import annotations
import functools
import logging
from typing import Optional, Type, Union
import torch
from torchvision.models.resnet import Bottleneck
from composer.algorithms.stochastic_depth.stochastic_layers import make_resnet_bottleneck_stochastic
from composer.core import Algorithm, Event, State
from composer.core.time import Time, TimeUnit
from composer.loggers import Logger
from composer.utils import module_surgery
log = logging.getLogger(__name__)
_VALID_LAYER_DISTRIBUTIONS = ('uniform', 'linear')
_VALID_STOCHASTIC_METHODS = ('block', 'sample')
_STOCHASTIC_LAYER_MAPPING = {'ResNetBottleneck': (Bottleneck, make_resnet_bottleneck_stochastic)}
__all__ = ['apply_stochastic_depth', 'StochasticDepth']
def apply_stochastic_depth(model: torch.nn.Module,
target_layer_name: str,
stochastic_method: str = 'block',
drop_rate: float = 0.2,
drop_distribution: str = 'linear') -> None:
"""Applies Stochastic Depth (`Huang et al, 2016 <https://arxiv.org/abs/1603.09382>`_) to the specified model.
The algorithm replaces the specified target layer with a stochastic version
of the layer. The stochastic layer will randomly drop either samples or the
layer itself depending on the stochastic method specified. The block-wise
version follows the original paper. The sample-wise version follows the
implementation used for EfficientNet in the
`Tensorflow/TPU repo <https://github.com/tensorflow/tpu>`_.
.. note::
Stochastic Depth only works on instances of :class:`torchvision.models.resnet.ResNet`
for now.
Args:
model (torch.nn.Module): model containing modules to be replaced with
stochastic versions.
target_layer_name (str): Block to replace with a stochastic block
equivalent. The name must be registered in the ``_STOCHASTIC_LAYER_MAPPING``
dictionary with the target layer class and the stochastic layer class.
Currently, only :class:`torchvision.models.resnet.Bottleneck` is supported.
stochastic_method (str, optional): The version of stochastic depth to use.
``"block"`` randomly drops blocks during training. ``"sample"`` randomly
drops samples within a block during training. Default: ``"block"``.
drop_rate (float, optional): The base probability of dropping a layer or sample.
Must be between 0.0 and 1.0. Default: ``0.2``.
drop_distribution (str, optional): How ``drop_rate`` is distributed across
layers. Value must be one of ``"uniform"`` or ``"linear"``.
``"uniform"`` assigns the same ``drop_rate`` across all layers.
``"linear"`` linearly increases the drop rate across layer depth,
starting with 0 drop rate and ending with ``drop_rate``.
Default: ``"linear"``.
Example:
.. testcode::
import composer.functional as cf
from torchvision import models
model = models.resnet50()
cf.apply_stochastic_depth(
model,
target_layer_name='ResNetBottleneck'
)
"""
_validate_stochastic_hparams(target_layer_name=target_layer_name,
stochastic_method=stochastic_method,
drop_rate=drop_rate,
drop_distribution=drop_distribution)
transforms = {}
target_layer, stochastic_converter = _STOCHASTIC_LAYER_MAPPING[target_layer_name]
module_count = module_surgery.count_module_instances(model, target_layer)
stochastic_from_target_layer = functools.partial(stochastic_converter,
drop_rate=drop_rate,
drop_distribution=drop_distribution,
module_count=module_count,
stochastic_method=stochastic_method)
transforms[target_layer] = stochastic_from_target_layer
module_surgery.replace_module_classes(model, policies=transforms)
class StochasticDepth(Algorithm):
"""Applies Stochastic Depth (`Huang et al, 2016 <https://arxiv.org/abs/1603.09382>`_) to the specified model.
The algorithm replaces the specified target layer with a stochastic version
of the layer. The stochastic layer will randomly drop either samples or the
layer itself depending on the stochastic method specified. The block-wise
version follows the original paper. The sample-wise version follows the
implementation used for EfficientNet in the
`Tensorflow/TPU repo <https://github.com/tensorflow/tpu>`_.
Runs on :attr:`.Event.INIT`, as well as
:attr:`.Event.BATCH_START` if ``drop_warmup > 0``.
.. note::
Stochastic Depth only works on instances of :class:`torchvision.models.resnet.ResNet` for now.
Args:
target_layer_name (str): Block to replace with a stochastic block
equivalent. The name must be registered in the ``_STOCHASTIC_LAYER_MAPPING``
dictionary with the target layer class and the stochastic layer class.
Currently, only :class:`torchvision.models.resnet.Bottleneck` is supported.
stochastic_method (str, optional): The version of stochastic depth to use.
``"block"`` randomly drops blocks during training. ``"sample"`` randomly drops
samples within a block during training. Default: ``"block"``.
drop_rate (float, optional): The base probability of dropping a layer or sample.
Must be between 0.0 and 1.0. Default: ``0.2``.
drop_distribution (str, optional): How ``drop_rate`` is distributed across
layers. Value must be one of ``"uniform"`` or ``"linear"``.
``"uniform"`` assigns the same ``drop_rate`` across all layers.
``"linear"`` linearly increases the drop rate across layer depth,
starting with 0 drop rate and ending with ``drop_rate``. Default: ``"linear"``.
drop_warmup (str | Time | float, optional): A :class:`Time` object,
time-string, or float on ``[0.0, 1.0]`` representing the fraction of the
training duration to linearly increase the drop probability to
``drop_rate``. Default: ``0.0``.
"""
def __init__(self,
target_layer_name: str,
stochastic_method: str = 'block',
drop_rate: float = 0.2,
drop_distribution: str = 'linear',
drop_warmup: Union[float, Time, str] = 0.0):
log.warning(
'Stochastic depth has known issues of weight mismatch when loading from a checkpoint, which will cause an error when resuming without `load_weights_only=True`.'
)
if drop_rate == 0.0:
log.warning('Stochastic Depth will have no effect when drop_rate set to 0')
self.target_layer_name = target_layer_name
self.stochastic_method = stochastic_method
self.drop_rate = drop_rate
self.drop_distribution = drop_distribution
if isinstance(drop_warmup, str):
drop_warmup = Time.from_timestring(drop_warmup)
if isinstance(drop_warmup, float):
drop_warmup = Time(drop_warmup, TimeUnit.DURATION)
self.drop_warmup = drop_warmup
self.num_stochastic_layers = 0 # Initial count of stochastic layers
_validate_stochastic_hparams(stochastic_method=self.stochastic_method,
target_layer_name=self.target_layer_name,
drop_rate=self.drop_rate,
drop_distribution=self.drop_distribution,
drop_warmup=str(self.drop_warmup))
def __repr__(self) -> str:
return f"{self.__class__.__name__}(target_layer_name='{self.target_layer_name}',stochastic_method='{self.stochastic_method}',drop_rate={self.drop_rate},drop_distribution='{self.drop_distribution}',drop_warmup={repr(self.drop_warmup)})"
@property
def find_unused_parameters(self) -> bool:
return self.stochastic_method == 'block'
@staticmethod
def required_on_load() -> bool:
return True
def match(self, event: Event, state: State) -> bool:
return (event == Event.INIT) or (event == Event.BATCH_START and self.drop_warmup > 0.0)
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
assert state.model is not None
target_block, _ = _STOCHASTIC_LAYER_MAPPING[self.target_layer_name]
if event == Event.INIT:
if module_surgery.count_module_instances(state.model, target_block) == 0:
log.warning(f'No {self.target_layer_name} found in model! Algorithm will function as a no-op.')
apply_stochastic_depth(state.model,
target_layer_name=self.target_layer_name,
stochastic_method=self.stochastic_method,
drop_rate=self.drop_rate,
drop_distribution=self.drop_distribution)
self.num_stochastic_layers = module_surgery.count_module_instances(state.model, target_block)
logger.log_metrics({'stochastic_depth/num_stochastic_layers': self.num_stochastic_layers})
elif event == Event.BATCH_START and self.num_stochastic_layers:
elapsed_duration = state.get_elapsed_duration()
assert elapsed_duration is not None, 'elapsed duration is set on BATCH_START'
if elapsed_duration < self.drop_warmup:
current_drop_rate = float(elapsed_duration / self.drop_warmup) * self.drop_rate
_update_drop_rate(module=state.model,
target_block=target_block,
drop_rate=current_drop_rate,
drop_distribution=self.drop_distribution,
module_count=self.num_stochastic_layers)
else:
current_drop_rate = self.drop_rate
logger.log_metrics({'stochastic_depth/drop_rate': current_drop_rate})
def _validate_stochastic_hparams(target_layer_name: str,
stochastic_method: str,
drop_rate: float,
drop_distribution: str,
drop_warmup: str = '0dur'):
"""Helper function to validate the Stochastic Depth hyperparameter values.
"""
if stochastic_method and (stochastic_method not in _VALID_STOCHASTIC_METHODS):
raise ValueError(f'stochastic_method {stochastic_method} is not supported.'
f' Must be one of {_VALID_STOCHASTIC_METHODS}')
if target_layer_name and (target_layer_name not in _STOCHASTIC_LAYER_MAPPING):
raise ValueError(f'target_layer_name {target_layer_name} is not supported with {stochastic_method}.'
f' Must be one of {list(_STOCHASTIC_LAYER_MAPPING.keys())}')
if drop_rate and (drop_rate < 0 or drop_rate > 1):
raise ValueError(f'drop_rate must be between 0 and 1: {drop_rate}')
if drop_distribution and (drop_distribution not in _VALID_LAYER_DISTRIBUTIONS):
raise ValueError(f'drop_distribution "{drop_distribution}" is'
f' not supported. Must be one of {list(_VALID_LAYER_DISTRIBUTIONS)}')
if stochastic_method == 'sample' and Time.from_timestring(drop_warmup).value != 0:
raise ValueError('drop_warmup cannot be used with the "sample" stochastic_method')
def _update_drop_rate(module: torch.nn.Module,
target_block: Type[torch.nn.Module],
drop_rate: float,
drop_distribution: str,
module_count: int,
module_id: int = 0):
"""Recursively updates a module's drop_rate attributes with a new value.
"""
for child in module.children():
if isinstance(child, target_block) and hasattr(child, 'drop_rate'):
module_id += 1
if drop_distribution == 'linear':
current_drop_rate = (module_id / module_count) * drop_rate # type: ignore
else:
current_drop_rate = drop_rate
child.drop_rate = torch.tensor(current_drop_rate)
module_id = _update_drop_rate(child, target_block, drop_rate, drop_distribution, module_count, module_id)
return module_id
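# An illustrative end-to-end sketch of the warmup update that ``apply`` performs on
# BATCH_START: after surgery, scale the target drop rate by the fraction of the
# warmup elapsed and push it into every stochastic Bottleneck. The model and the
# halfway-through-warmup factor below are example values only.
def _example_update_drop_rate() -> None:
    from torchvision.models import resnet50
    model = resnet50()
    apply_stochastic_depth(model, target_layer_name='ResNetBottleneck', drop_rate=0.2)
    module_count = module_surgery.count_module_instances(model, Bottleneck)
    current_drop_rate = 0.5 * 0.2  # halfway through drop_warmup toward drop_rate=0.2
    _update_drop_rate(model, Bottleneck, current_drop_rate, 'linear', module_count)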
| composer-dev | composer/algorithms/stochastic_depth/stochastic_depth.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Stochastic forward functions for ResNet Bottleneck modules."""
from typing import Optional
import torch
import torch.nn as nn
from torch.fx import GraphModule
from torchvision.models.resnet import Bottleneck
__all__ = ['make_resnet_bottleneck_stochastic', 'BlockStochasticModule']
def block_stochastic_forward(self, x):
"""ResNet Bottleneck forward function where the layers are randomly
skipped with probability ``drop_rate`` during training.
"""
identity = x
sample = (not self.training) or bool(torch.bernoulli(1 - self.drop_rate))
if sample:
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if not self.training:
out = out * (1 - self.drop_rate)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
else:
if self.downsample is not None:
out = self.relu(self.downsample(identity))
else:
out = identity
return out
def _sample_drop(x: torch.Tensor, sample_drop_rate: float, is_training: bool):
"""Randomly drops samples from the input batch according to the `sample_drop_rate`.
This is implemented by setting the samples to be dropped to zeros.
"""
keep_probability = (1 - sample_drop_rate)
if not is_training:
return x * keep_probability
rand_dim = [x.shape[0]] + [1] * len(x.shape[1:])
sample_mask = keep_probability + torch.rand(rand_dim, dtype=x.dtype, device=x.device)
sample_mask.floor_() # binarize
x *= sample_mask
return x
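# A quick illustrative check of the helper above (names local to this sketch):
# in training mode roughly ``sample_drop_rate`` of the samples are zeroed, while
# in eval mode the whole batch is scaled by the keep probability instead.
def _example_sample_drop() -> None:
    x = torch.ones(8, 4, 4, 4)
    dropped = _sample_drop(x.clone(), sample_drop_rate=0.5, is_training=True)
    kept = int((dropped.flatten(1).abs().sum(dim=1) > 0).sum())
    print(kept, float(_sample_drop(x.clone(), 0.5, is_training=False).mean()))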
def sample_stochastic_forward(self, x):
"""ResNet Bottleneck forward function where samples are randomly
dropped with probability ``drop_rate`` during training.
"""
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
if self.drop_rate:
out = _sample_drop(out, self.drop_rate, self.training)
out += identity
return self.relu(out)
def make_resnet_bottleneck_stochastic(module: Bottleneck, module_index: int, module_count: int, drop_rate: float,
drop_distribution: str, stochastic_method: str):
"""Model surgery policy that dictates how to convert a ResNet Bottleneck layer into a stochastic version.
"""
if drop_distribution == 'linear':
drop_rate = ((module_index + 1) / module_count) * drop_rate
module.drop_rate = torch.tensor(drop_rate)
stochastic_func = block_stochastic_forward if stochastic_method == 'block' else sample_stochastic_forward
module.forward = stochastic_func.__get__(module) # Bind new forward function to ResNet Bottleneck Module
return module
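# A minimal sketch of converting a single torchvision Bottleneck with the policy
# above; the block, indices, and rates are arbitrary example values. After the
# call, the block carries a ``drop_rate`` tensor and the bound stochastic forward.
def _example_make_bottleneck_stochastic() -> None:
    from torchvision.models import resnet50
    block = resnet50().layer1[0]
    block = make_resnet_bottleneck_stochastic(block,
                                              module_index=0,
                                              module_count=16,
                                              drop_rate=0.2,
                                              drop_distribution='linear',
                                              stochastic_method='sample')
    print(block.drop_rate)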
class BlockStochasticModule(nn.Module):
"""A convenience class that stochastically executes the provided main path of a residual block.
Args:
main (GraphModule): Operators in the main (non-residual) path of a residual block.
residual (GraphModule | None): Operators, if any, in the residual path of a residual block.
drop_rate: The base probability of dropping this layer. Must be between 0.0 (inclusive) and 1.0 (inclusive).
Returns:
BlockStochasticModule: An instance of :class:`.BlockStochasticModule`.
"""
def __init__(self, main: GraphModule, residual: Optional[GraphModule] = None, drop_rate: float = 0.2):
super().__init__()
self.drop_rate = torch.tensor(drop_rate)
self.main = main
self.residual = residual
def forward(self, x):
sample = (not self.training) or bool(torch.bernoulli(1 - self.drop_rate))
# main side is the non-residual connection
residual_result = x
# residual side may or may not have any operations
if self.residual:
residual_result = self.residual(x)
if sample:
main_result = self.main(x)
if not self.training:
main_result = main_result * (1 - self.drop_rate)
residual_result = torch.add(main_result, residual_result)
return residual_result
| composer-dev | composer/algorithms/stochastic_depth/stochastic_layers.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import Callable, Iterable, Type, TypeVar, cast
import torch
import torchvision.transforms.functional
from PIL.Image import Image as PillowImage
_InputImgT = TypeVar('_InputImgT', torch.Tensor, PillowImage)
_OutputImgT = TypeVar('_OutputImgT', torch.Tensor, PillowImage)
def image_as_type(image: _InputImgT, typ: Type[_OutputImgT]) -> _OutputImgT:
"""Converts between :class:`torch.Tensor` and :class:`PIL.Image.Image` image representations.
Args:
image (torch.Tensor | PIL.Image.Image): A single image
represented as a :class:`PIL.Image.Image` or
a rank 2 or rank 3 :class:`torch.Tensor` in ``HW`` or ``CHW`` format.
A rank 4 or higher tensor can also be provided as long as no type
conversion is needed; in this case, the input tensor will be
returned. This case is allowed so that functions that natively
operate on batch tensors can safely call
``image_as_type(image, torch.Tensor)`` without additional error
and type checking.
typ (torch.Tensor | PIL.Image.Image): Type of the
copied image. Must be :class:`PIL.Image.Image` or :class:`torch.Tensor`.
Returns:
A copy of ``image`` with type ``typ``.
Raises:
TypeError: if ``typ`` is not one of :class:`torch.Tensor` or
:class:`PIL.Image.Image`.
ValueError: if ``image`` cannot be converted to the ``typ``,
such as when requesting conversion of a rank 4 tensor to
:class:`PIL.Image.Image`.
"""
if isinstance(image, typ):
return image
if not typ in (torch.Tensor, PillowImage):
raise TypeError(f'Only typ={{torch.Tensor, Image}} is supported; got {typ}')
if typ is torch.Tensor:
return cast(_OutputImgT, torchvision.transforms.functional.to_tensor(image)) # PIL -> Tensor
return cast(_OutputImgT, torchvision.transforms.functional.to_pil_image(image)) # Tensor -> PIL
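# A short usage sketch for the helper above; the image size is an arbitrary
# example. Converting a CHW float tensor to PIL and back round-trips the type,
# though pixel values are quantized to 8 bits along the way.
def _example_image_as_type() -> None:
    tensor_img = torch.rand(3, 8, 8)
    pil_img = image_as_type(tensor_img, PillowImage)
    back = image_as_type(pil_img, torch.Tensor)
    print(type(pil_img).__name__, tuple(back.shape))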
def map_pillow_function(f_pil: Callable[[PillowImage], PillowImage], imgs: _OutputImgT) -> _OutputImgT:
"""Lifts a function that requires pillow images to also work on tensors.
Args:
f_pil ((PIL.Image.Image) -> PIL.Image.Image): A callable that maps :class:`PIL.Image.Image` objects
to other :class:`PIL.Image.Image` objects.
imgs (torch.Tensor | PIL.Image.Image): a :class:`PIL.Image.Image` or a :class:`torch.Tensor` in ``HW``,
``CHW`` or ``NCHW`` format.
Returns:
The result of applying ``f_pil`` to each image in ``imgs``, converted
back to the same type and (if applicable) tensor layout as ``imgs``.
"""
single_image_input = not isinstance(imgs, Iterable)
single_image_input |= isinstance(imgs, torch.Tensor) and imgs.ndim == 3
imgs_as_iterable = [imgs] if single_image_input else imgs
imgs_as_iterable = cast(type(imgs_as_iterable), imgs_as_iterable)
imgs_pil = [image_as_type(img, PillowImage) for img in imgs_as_iterable]
imgs_out_pil = [f_pil(img_pil) for img_pil in imgs_pil]
imgs_out = [image_as_type(img_pil, type(imgs_as_iterable[0])) for img_pil in imgs_out_pil]
if isinstance(imgs, torch.Tensor) and imgs.ndim == 4: # batch of imgs
imgs_out = [torch.unsqueeze(cast(torch.Tensor, img), 0) for img in imgs_out]
imgs_out = torch.cat(imgs_out, dim=0)
if single_image_input:
imgs_out = imgs_out[0]
imgs_out = cast(_OutputImgT, imgs_out)
return imgs_out
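# A minimal sketch of lifting a PIL-only augmentation onto a batch tensor with the
# helper above; ``ImageOps.autocontrast`` is just one example of a suitable
# callable, and the batch shape is arbitrary.
def _example_map_pillow_function() -> None:
    from PIL import ImageOps
    batch = torch.rand(2, 3, 8, 8)  # NCHW batch of images
    out = map_pillow_function(ImageOps.autocontrast, batch)
    print(tuple(out.shape))  # same layout as the input batch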
| composer-dev | composer/algorithms/utils/augmentation_common.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helper utilities for algorithms."""
from composer.algorithms.utils.augmentation_primitives import augmentation_sets
__all__ = ['augmentation_sets']
| composer-dev | composer/algorithms/utils/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helper functions to perform augmentations on a :class:`PIL.Image.Image`.
Augmentations that take an intensity value are normalized on a scale of 1-10,
where 10 is the strongest and maximum value an augmentation function will accept.
Adapted from
`AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty
<https://github.com/google-research/augmix/blob/master/augmentations.py>`_.
Attributes:
AugmentationFn ((PIL.Image.Image, float) -> PIL.Image.Image):
The type annotation for describing an augmentation function.
Each augmentation takes a :class:`PIL.Image.Image` and an intensity level in the range ``[0, 10]``,
and returns an augmented image.
augmentation_sets (Dict[str, List[AugmentationFn]]): The collection of all augmentations.
This dictionary has the following entries:
* ``augmentation_sets["safe"]`` contains augmentations that do not overlap with
ImageNet-C/CIFAR10-C test sets.
* ``augmentation_sets["original"]`` contains augmentations that use the original
implementations of enhancing color, contrast, brightness, and sharpness.
* ``augmentation_sets["all"]`` contains all augmentations.
"""
from typing import Callable
import numpy as np
from PIL import Image, ImageEnhance, ImageOps
AugmentationFn = Callable[[Image.Image, float], Image.Image]
__all__ = [
'AugmentationFn',
'autocontrast',
'equalize',
'posterize',
'rotate',
'solarize',
'shear_x',
'shear_y',
'translate_x',
'translate_y',
'color',
'color_original',
'contrast',
'contrast_original',
'brightness',
'brightness_original',
'sharpness',
'sharpness_original',
'augmentation_sets',
]
def _int_parameter(level: float, maxval: float):
"""Helper function to scale a value between ``0`` and ``maxval`` and return as an int.
Args:
level (float): Level of the operation that will be between ``[0, 10]``.
maxval (float): Maximum value that the operation can have. This will be scaled to
``level/10``.
Returns:
int: The result from scaling ``maxval`` according to ``level``.
"""
return int(level * maxval / 10)
def _float_parameter(level: float, maxval: float):
"""Helper function to scale a value between ``0`` and ``maxval`` and return as a float.
Args:
level (float): Level of the operation that will be between [0, 10].
maxval (float): Maximum value that the operation can have. This will be scaled to
``level/10``.
Returns:
float: The result from scaling ``maxval`` according to ``level``.
"""
return float(level) * maxval / 10.
def _sample_level(n: float):
"""Helper function to sample from a uniform distribution between ``0.1`` and some value ``n``."""
return np.random.uniform(low=0.1, high=n)
def _symmetric_sample(level: float):
"""Helper function to sample from a symmetric distribution.
The distribution over the domain [0.1, 10] with ``median == 1`` and uniform probability of ``x | 0.1 ≤ x ≤ 1``,
and ``x | 1 ≤ x ≤ 10``.
Used for sampling transforms that can range in intensity from 0 to infinity and for which an intensity
of 1 means no change.
"""
if np.random.uniform() > 0.5:
return np.random.uniform(1, level)
else:
return np.random.uniform(1 - (0.09 * level), 1)
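# An illustrative sketch of the sampler above: with equal probability the sample
# lands in [1 - 0.09 * level, 1] (weaken) or in [1, level] (strengthen), so a
# level of 10 spans roughly [0.1, 10] around the no-op intensity of 1. The values
# below are examples only.
def _example_symmetric_sample(level: float = 10.0, n: int = 5) -> None:
    print([round(float(_symmetric_sample(level)), 2) for _ in range(n)])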
def autocontrast(pil_img: Image.Image, level: float = 0.0):
"""Autocontrast an image.
.. seealso:: :func:`PIL.ImageOps.autocontrast`.
Args:
pil_img (PIL.Image.Image): The image.
level (float): The intensity.
"""
del level # unused
return ImageOps.autocontrast(pil_img)
def equalize(pil_img: Image.Image, level: float):
"""Equalize an image.
.. seealso:: :func:`PIL.ImageOps.equalize`.
Args:
pil_img (PIL.Image.Image): The image.
level (float): The intensity.
"""
del level # unused
return ImageOps.equalize(pil_img)
def posterize(pil_img: Image.Image, level: float):
"""Posterize an image.
.. seealso:: :func:`PIL.ImageOps.posterize`.
Args:
pil_img (PIL.Image.Image): The image.
level (float): The intensity, which should
be in ``[0, 10]``.
"""
level = _int_parameter(_sample_level(level), 4)
return ImageOps.posterize(pil_img, 4 - level)
def rotate(pil_img: Image.Image, level: float):
"""Rotate an image.
Args:
pil_img (PIL.Image.Image): The image.
level (float): The intensity, which should
be in ``[0, 10]``.
"""
degrees = _int_parameter(_sample_level(level), 30)
if np.random.uniform() > 0.5:
degrees = -degrees
return pil_img.rotate(degrees, resample=Image.BILINEAR)
def solarize(pil_img: Image.Image, level: float):
"""Solarize an image.
.. seealso:: :func:`PIL.ImageOps.solarize`.
Args:
pil_img (PIL.Image.Image): The image.
level (float): The intensity, which should
be in ``[0, 10]``.
"""
level = _int_parameter(_sample_level(level), 256)
return ImageOps.solarize(pil_img, 256 - level)
def shear_x(pil_img: Image.Image, level: float):
"""Shear an image horizontally.
Args:
pil_img (PIL.Image.Image): The image.
level (float): The intensity, which should
be in ``[0, 10]``.
"""
level = _float_parameter(_sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform(pil_img.size, Image.AFFINE, (1, level, 0, 0, 1, 0), resample=Image.BILINEAR)
def shear_y(pil_img: Image.Image, level: float):
"""Shear an image vertically.
Args:
pil_img (PIL.Image.Image): The image.
level (float): The intensity, which should
be in ``[0, 10]``.
"""
level = _float_parameter(_sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform(pil_img.size, Image.AFFINE, (1, 0, 0, level, 1, 0), resample=Image.BILINEAR)
def translate_x(pil_img: Image.Image, level: float):
"""Shear an image horizontally.
Args:
pil_img (PIL.Image.Image): The image.
level (float): The intensity, which should
be in ``[0, 10]``.
"""
level = _int_parameter(_sample_level(level), pil_img.size[0] / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform(pil_img.size, Image.AFFINE, (1, 0, level, 0, 1, 0), resample=Image.BILINEAR)
def translate_y(pil_img: Image.Image, level: float):
"""Shear an image vertically.
Args:
pil_img (PIL.Image.Image): The image.
level (float): The intensity, which should
be in ``[0, 10]``.
"""
level = _int_parameter(_sample_level(level), pil_img.size[1] / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform(pil_img.size, Image.AFFINE, (1, 0, 0, 0, 1, level), resample=Image.BILINEAR)
# The following augmentations overlap with corruptions in the ImageNet-C/CIFAR10-C test
# sets. Their original implementations also have an intensity sampling scheme that
# samples a value bounded by 0.118 at a minimum, and a maximum value of intensity*0.18+
# 0.1, which ranged from 0.28 (intensity = 1) to 1.9 (intensity 10). These augmentations
# have different effects depending on whether they are < 0 or > 0, so the original
# sampling scheme does not make sense to me. Accordingly, I replaced it with the
# _symmetric_sample() above.
def color(pil_img: Image.Image, level: float):
"""Enhance color on an image.
.. seealso:: :class:`PIL.ImageEnhance.Color`.
Args:
pil_img (PIL.Image.Image): The image.
level (float): The intensity, which should
be in ``[0, 10]``.
"""
level = _symmetric_sample(level)
return ImageEnhance.Color(pil_img).enhance(level)
def color_original(pil_img: Image.Image, level: float):
"""Enhance color on an image, following the
corruptions in the ImageNet-C/CIFAR10-C test sets.
.. seealso:: :class:`PIL.ImageEnhance.Color`.
Args:
pil_img (PIL.Image.Image): The image.
level (float): The intensity, which should
be in ``[0, 10]``.
"""
level = _float_parameter(_sample_level(level), 1.8) + 0.1
return ImageEnhance.Color(pil_img).enhance(level)
def contrast(pil_img: Image.Image, level: float):
"""Enhance contrast on an image.
.. seealso:: :class:`PIL.ImageEnhance.Contrast`.
Args:
pil_img (PIL.Image.Image): The image.
level (float): The intensity, which should
be in ``[0, 10]``.
"""
level = _symmetric_sample(level)
return ImageEnhance.Contrast(pil_img).enhance(level)
def contrast_original(pil_img: Image.Image, level: float):
"""Enhance contrast on an image, following the
corruptions in the ImageNet-C/CIFAR10-C test sets.
.. seealso:: :class:`PIL.ImageEnhance.Contrast`.
Args:
pil_img (PIL.Image.Image): The image.
level (float): The intensity, which should
be in ``[0, 10]``.
"""
level = _float_parameter(_sample_level(level), 1.8) + 0.1
return ImageEnhance.Contrast(pil_img).enhance(level)
def brightness(pil_img: Image.Image, level: float):
"""Enhance brightness on an image.
.. seealso:: :class:`PIL.ImageEnhance.Brightness`.
Args:
pil_img (PIL.Image.Image): The image.
level (float): The intensity, which should be
in ``[0, 10]``.
"""
level = _symmetric_sample(level)
# Reduce intensity of brightness increases
if level > 1:
level = level * .75
return ImageEnhance.Brightness(pil_img).enhance(level)
def brightness_original(pil_img: Image.Image, level: float):
"""Enhance brightness on an image, following the
corruptions in the ImageNet-C/CIFAR10-C test sets.
.. seealso:: :class:`PIL.ImageEnhance.Brightness`.
Args:
pil_img (PIL.Image.Image): The image.
level (float): The intensity, which should
be in ``[0, 10]``.
"""
level = _float_parameter(_sample_level(level), 1.8) + 0.1
return ImageEnhance.Brightness(pil_img).enhance(level)
def sharpness(pil_img: Image.Image, level: float):
"""Enhance sharpness on an image.
.. seealso:: :class:`PIL.ImageEnhance.Sharpness`.
Args:
pil_img (PIL.Image.Image): The image.
level (float): The intensity, which should
be in ``[0, 10]``.
"""
level = _symmetric_sample(level)
return ImageEnhance.Sharpness(pil_img).enhance(level)
def sharpness_original(pil_img: Image.Image, level: float):
"""Enhance sharpness on an image, following the
corruptions in the ImageNet-C/CIFAR10-C test sets.
.. seealso:: :class:`PIL.ImageEnhance.Sharpness`.
Args:
pil_img (PIL.Image.Image): The image.
level (float): The intensity, which should
be in ``[0, 10]``.
"""
level = _float_parameter(_sample_level(level), 1.8) + 0.1
return ImageEnhance.Sharpness(pil_img).enhance(level)
augmentation_sets = {
'all': [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y, translate_x, translate_y, color,
contrast, brightness, sharpness
],
# Augmentations that don't overlap with ImageNet-C/CIFAR10-C test sets
'safe': [autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y, translate_x, translate_y],
# Augmentations that use original implementations of color, contrast, brightness, and sharpness
'original': [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y, translate_x, translate_y, color_original,
contrast_original, brightness_original, sharpness_original
],
}
| composer-dev | composer/algorithms/utils/augmentation_primitives.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core Layer Freezing classes and functions."""
from __future__ import annotations
import logging
import textwrap
import warnings
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import torch
from torch.optim import Optimizer
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.utils import ensure_tuple
log = logging.getLogger(__name__)
__all__ = ['LayerFreezing', 'freeze_layers']
def freeze_layers(
model: torch.nn.Module,
optimizers: Union[Optimizer, Sequence[Optimizer]],
current_duration: float,
freeze_start: float = 0.5,
freeze_level: float = 1.0,
) -> Tuple[int, float]:
"""Progressively freeze the layers of the network in-place
during training, starting with the earlier layers.
Example:
.. testcode::
from composer.algorithms.layer_freezing import freeze_layers
freeze_depth, freeze_percentage = freeze_layers(
model=model,
optimizers=optimizer,
current_duration=0.5,
freeze_start=0.0,
freeze_level=1.0
)
Args:
model (torch.nn.Module): The model being trained.
optimizers (torch.optim.Optimizer | Sequence[torch.optim.Optimizer]):
The optimizers used during training.
current_duration (float): The fraction, in ``[0, 1)`` of the training process complete.
freeze_start (float, optional): The fraction of the training process in ``[0, 1)`` to run
before freezing begins. Default: ``0.5``.
freeze_level (float, optional): The maximum fraction of layers on ``[0, 1)`` to freeze.
Default: ``1.0``.
Returns:
(int, float): The number of layers frozen, and the percentage of the total model frozen.
"""
# Flatten out the layers
flat_children = []
_get_layers(model, flat_children)
# Determine how many layers to freeze
freeze_percentage = _freeze_schedule(current_duration=current_duration,
freeze_start=freeze_start,
freeze_level=freeze_level)
freeze_depth = int(freeze_percentage * len(flat_children[0:-1]))
# Freeze the parameters in the chosen layers
for i, child in enumerate(flat_children[0:-1]):
if i < freeze_depth:
for p in child.parameters():
_remove_param_from_optimizers(p, optimizers)
# Do not compute gradients for this param.
p.requires_grad = False
# Log results
log.info(
textwrap.dedent(f"""\
Applied Layer Freezing with freeze_start={freeze_start},
freeze_level={freeze_level}. Froze {freeze_depth} layers in the model which
equates to {freeze_percentage * 100}% of all layers."""))
return freeze_depth, freeze_percentage
class LayerFreezing(Algorithm):
"""Progressively freeze the layers of the network during training, starting with the earlier layers.
Freezing starts after the fraction of training specified by ``freeze_start``
has elapsed. The fraction of layers frozen increases linearly until it
reaches ``freeze_level`` at the end of training.
This freezing schedule is most similar to
`FreezeOut <https://arxiv.org/abs/1706.04983>`_ and
`Freeze Training <https://arxiv.org/abs/1706.05806>`_.
Runs on :attr:`.Event.EPOCH_END`.
Example:
.. testcode::
from composer.algorithms import LayerFreezing
from composer.trainer import Trainer
layer_freezing_algorithm = LayerFreezing(
freeze_start=0.0,
freeze_level=1.0
)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[layer_freezing_algorithm],
optimizers=[optimizer]
)
Args:
freeze_start (float): The fraction of training to run before freezing begins. Default: ``0.5``.
freeze_level (float): The maximum fraction of layers to freeze. Default: ``1.0``.
"""
def __init__(self, freeze_start: float = 0.5, freeze_level: float = 1.0):
self.freeze_start = freeze_start
self.freeze_level = freeze_level
@property
def find_unused_parameters(self) -> bool:
"""Override in order to tell DDP that some parameters will not have gradients computed for them after layer
freezing is applied."""
return True
def match(self, event: Event, state: State) -> bool:
del state # unused
return event == Event.EPOCH_END
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
del event # unused
optimizers = state.optimizers
assert optimizers is not None
elapsed_duration = state.get_elapsed_duration()
assert elapsed_duration is not None, 'elapsed duration should be set on Event.EPOCH_END'
freeze_depth, freeze_percentage = freeze_layers(
model=state.model,
optimizers=optimizers,
current_duration=float(elapsed_duration),
freeze_start=self.freeze_start,
freeze_level=self.freeze_level,
)
logger.log_metrics({
'layer_freezing/layers_frozen': freeze_depth,
'layer_freezing/percentage_frozen': freeze_percentage
})
def state_dict(self) -> Dict[str, Any]:
warnings.warn(('Checkpoints with layer freezing cannot reliably be used to resume training. '
'See: https://github.com/mosaicml/composer/issues/1002'))
return {}
def load_state_dict(self, state: Dict[str, Any]) -> None:
warnings.warn(('Checkpoints with layer freezing cannot reliably be used to resume training. '
'See: https://github.com/mosaicml/composer/issues/1002'))
def _freeze_schedule(current_duration: float, freeze_start: float, freeze_level: float) -> float:
"""Implements a linear schedule for freezing.
The schedule is linear and begins with no freezing and linearly
increases the fraction of layers frozen, reaching the fraction specified by ``freeze_level`` at the end of training.
The start of freezing is given as a fraction of the total training duration and is set with ``freeze_start``.
Args:
current_duration (float): The elapsed training duration.
freeze_start (float): The fraction of training to run before freezing begins.
freeze_level (float): The maximum fraction of layers to freeze.
"""
# No freezing if the current epoch is less than this
if current_duration <= freeze_start:
return 0.0
# Calculate the total time for freezing to occur
total_freezing_time = 1.0 - freeze_start
# Calculate the amount of freezing time that has elapsed
freezing_time_elapsed = current_duration - freeze_start
# Calculate the fraction of the freezing time elapsed.
freezing_time_elapsed_frac = freezing_time_elapsed / total_freezing_time
# Scale this fraction by the amount of freezing to do.
return freeze_level * freezing_time_elapsed_frac
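# A tiny sketch of the schedule above at a few arbitrary sample points: with
# ``freeze_start=0.5`` and ``freeze_level=1.0``, nothing is frozen before the
# halfway point and the frozen fraction ramps linearly to 1.0 at the end.
def _example_freeze_schedule() -> None:
    for duration in (0.25, 0.5, 0.75, 1.0):
        print(duration, _freeze_schedule(duration, freeze_start=0.5, freeze_level=1.0))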
def _get_layers(module: torch.nn.Module, flat_children: List[torch.nn.Module]):
"""Helper function to get all submodules.
Does a depth first search to flatten out modules which
contain parameters.
Args:
module (torch.nn.Module): Current module to search.
flat_children (List[torch.nn.Module]): List containing modules.
"""
# Check if given module has no children and parameters.
if (len(list(module.children())) == 0 and len(list(module.parameters())) > 0):
flat_children.append(module)
else:
# Otherwise, continue the search over its children.
for child in module.children():
_get_layers(child, flat_children)
def _remove_param_from_optimizers(p: torch.nn.Parameter, optimizers: Union[Optimizer, Sequence[Optimizer]]):
"""Helper function to freeze the training of a parameter.
To freeze a parameter, it must be removed from the optimizer,
otherwise momentum and weight decay may still be applied.
Args:
p (torch.nn.Parameter): The parameter being frozen.
optimizers (torch.optim.Optimizer | Sequence[torch.optim.Optimizer]): The optimizers used during training.
"""
# Search over params in the optimizers to find and remove the
# given param. Necessary due to the way params are stored.
for optimizer in ensure_tuple(optimizers):
for group in optimizer.param_groups:
group['params'] = list(filter(lambda x: id(x) != id(p), group['params']))
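# A compact sketch of freezing one parameter the way ``freeze_layers`` does: remove
# it from the optimizer's param groups, then disable its gradient. The toy model
# and optimizer below are illustrative assumptions.
def _example_remove_param_from_optimizer() -> None:
    toy_model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(toy_model.parameters(), lr=0.1)
    _remove_param_from_optimizers(toy_model.weight, optimizer)
    toy_model.weight.requires_grad = False
    print(sum(len(g['params']) for g in optimizer.param_groups))  # only the bias remains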
| composer-dev | composer/algorithms/layer_freezing/layer_freezing.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Progressively freeze the layers of the network during training, starting with the earlier layers.
See the :doc:`Method Card </method_cards/layer_freezing>` for more details.
"""
from composer.algorithms.layer_freezing.layer_freezing import LayerFreezing as LayerFreezing
from composer.algorithms.layer_freezing.layer_freezing import freeze_layers as freeze_layers
__all__ = ['LayerFreezing', 'freeze_layers']
| composer-dev | composer/algorithms/layer_freezing/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Replaces all instances of `torch.nn.LayerNorm` with a `apex.normalization.fused_layer_norm.FusedLayerNorm
<https://nvidia.github.io/apex/layernorm.html>`_.
By fusing multiple kernel launches into one, this usually improves GPU utilization.
See the :doc:`Method Card </method_cards/fused_layernorm>` for more details.
"""
from composer.algorithms.fused_layernorm.fused_layernorm import FusedLayerNorm, apply_fused_layernorm
__all__ = ['FusedLayerNorm', 'apply_fused_layernorm']
| composer-dev | composer/algorithms/fused_layernorm/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
# Copyright 2022 MosaicML. All Rights Reserved.
from __future__ import annotations
import logging
import warnings
from typing import Dict, Optional, Sequence, Type, Union
import torch
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as APEXFusedLayerNorm
APEX_INSTALLED = True
except ImportError as e:
APEX_INSTALLED = False
from composer.algorithms.warnings import NoEffectWarning
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.utils import module_surgery
log = logging.getLogger(__name__)
def check_if_apex_installed():
if not APEX_INSTALLED:
raise ImportError(
'https://github.com/NVIDIA/apex is not installed. The Fused LayerNorm algorithm cannot be applied. The MosaicML Docker Images (https://hub.docker.com/r/mosaicml/pytorch) contain a copy of APEX for easy use.'
)
def from_LayerNorm(layer: torch.nn.Module, module_index: int) -> APEXFusedLayerNorm:
"""Defines a replacement policy from a `torch.nn.LayerNorm` to a `apex.normalization.fused_layer_norm`"""
assert isinstance(layer,
torch.nn.LayerNorm), 'The replacement policy will look for all instances of torch.nn.LayerNorm'
return APEXFusedLayerNorm(normalized_shape=layer.normalized_shape, eps=layer.eps)
def apply_fused_layernorm(model: torch.nn.Module, optimizers: Union[torch.optim.Optimizer,
Sequence[torch.optim.Optimizer]]) -> None:
"""Replaces all instances of `torch.nn.LayerNorm` with a `apex.normalization.fused_layer_norm.FusedLayerNorm
<https://nvidia.github.io/apex/layernorm.html>`_.
By fusing multiple kernel launches into one, this usually improves GPU utilization.
"""
check_if_apex_installed()
# prepare the replacement policy and perform replacement
policy: Dict[Type[torch.nn.Module], module_surgery.ReplacementFunction] = {torch.nn.LayerNorm: from_LayerNorm}
replaced_instances = module_surgery.replace_module_classes(module=model, optimizers=optimizers, policies=policy)
if len(replaced_instances) == 0:
warnings.warn(
NoEffectWarning(
'No instances of `torch.nn.LayerNorm` were found, and therefore, there were no modules to replace.'))
log.info(f'Successfully replaced {len(replaced_instances)} of LayerNorm with a Fused LayerNorm.')
class FusedLayerNorm(Algorithm):
"""Replaces all instances of `torch.nn.LayerNorm` with a `apex.normalization.fused_layer_norm.FusedLayerNorm
<https://nvidia.github.io/apex/layernorm.html>`_.
By fusing multiple kernel launches into one, this usually improves GPU utilization.
Runs on ``Event.INIT``, so it can replace all instances of `torch.nn.LayerNorm` before the model is DDP wrapped. Has no hyperparameters.
Example:
.. testsetup::
from tests.common.models import configure_tiny_bert_hf_model
from tests.common.datasets import dummy_bert_lm_dataloader
def no_op(self, *args): pass
from composer.algorithms import FusedLayerNorm
FusedLayerNorm.__init__ = no_op
FusedLayerNorm.apply = no_op
model, train_dataloader = configure_tiny_bert_hf_model(), dummy_bert_lm_dataloader()
.. testcode::
from composer.algorithms import FusedLayerNorm
algorithm = FusedLayerNorm()
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
max_duration="1ep",
algorithms=[algorithm],
optimizers=[optimizer]
)
"""
def __init__(self):
# FusedLayerNorm takes no arguments
check_if_apex_installed()
def __repr__(self) -> str:
return f'{self.__class__.__name__}()'
@staticmethod
def required_on_load() -> bool:
return True
def match(self, event: Event, state: State) -> bool:
del state # unused
return event == Event.INIT
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
del event, logger # unused
apply_fused_layernorm(model=state.model, optimizers=state.optimizers)
| composer-dev | composer/algorithms/fused_layernorm/fused_layernorm.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import abc
import math
from typing import Optional, Tuple, Union, cast
import numpy as np
import torch
from torch import nn
from torch.nn.common_types import _size_2_t
from composer.algorithms.factorize.factorize_core import LowRankSolution, factorize_conv2d, factorize_matrix
def _clean_latent_size(latent_size: Union[int, float], in_size: int, out_size: int) -> int:
if latent_size < 1: # fraction of input or output channels
latent_channels = int(latent_size * min(in_size, out_size))
return max(1, latent_channels)
return int(latent_size)
def _max_rank_with_possible_speedup(in_channels: int,
out_channels: int,
kernel_size: Optional[_size_2_t] = None) -> int:
# TODO less naive cost model than counting multiply-adds
fan_in = in_channels
if kernel_size is not None:
fan_in *= np.prod(kernel_size)
breakeven = (fan_in * out_channels) / (fan_in + out_channels)
return int(math.ceil(breakeven - 1)) # round down, or 1 lower if divides evenly
def factorizing_could_speedup(module: torch.nn.Module, latent_size: Union[int, float]):
"""Whether factorizing a module a given amount could possibly yield a benefit.
This computation is based on the number of multiply-add operations involved
in the module's current forward pass versus the number that would be involved
if it were factorized into two modules using the specified latent size. The
operations are assumed to be dense and of the same data type in all cases.
Note that this function returning true does not guarantee a wall-clock
speedup, since splitting one operation into two involves more data movement
and more per-op overhead.
Args:
module (torch.nn.Module): A :class:`torch.nn.Conv2d`, :class:`torch.nn.Linear`,
:class:`.FactorizedConv2d`, or :class:`.FactorizedLinear`.
latent_size (int | float): number of channels (for convolution) or
features (for linear) in the latent representation. Can be
specified as either an integer > 1 or as a float within ``[0, 1)``.
In the latter case, the value is interpreted as a fraction of
``min(in_features, out_features)`` for a linear module or
``min(in_channels, out_channels)`` for a convolution.
Returns:
bool: A ``bool`` indicating whether the provided amount of factorization
could accelerate the provided module. If ``module`` is not one of
the allowed types, always returns ``False``, since there is no
supported way to factorize that module.
"""
if isinstance(module, _FactorizedModule):
return module.should_factorize(latent_size)
elif isinstance(module, torch.nn.Conv2d):
if module.groups > 1:
return False # can't factorize grouped convolutions yet
latent_size = _clean_latent_size(latent_size, module.in_channels, module.out_channels)
max_rank = _max_rank_with_possible_speedup(module.in_channels,
module.out_channels,
kernel_size=cast(_size_2_t, module.kernel_size))
return latent_size <= max_rank
elif isinstance(module, torch.nn.Linear):
latent_size = _clean_latent_size(latent_size, module.in_features, module.out_features)
max_rank = _max_rank_with_possible_speedup(module.in_features, module.out_features)
return latent_size <= max_rank
else:
return False
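# Illustrative sketch (not part of the original module) of how
# ``factorizing_could_speedup`` can screen candidate modules before replacement.
# The layer sizes and latent fractions below are arbitrary.
def _example_could_speedup() -> None:
    wide = torch.nn.Linear(1024, 1024)
    narrow = torch.nn.Linear(8, 8)
    # A latent size of 0.25 * 1024 = 256 is well under the break-even rank of a
    # 1024x1024 matmul, so factorization could help here...
    assert factorizing_could_speedup(wide, 0.25)
    # ...but an 8x8 layer is too small for a fractional latent size to pay off.
    assert not factorizing_could_speedup(narrow, 0.9)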
def _apply_solution_to_module_parameters(solution: LowRankSolution, module0: torch.nn.Module, module1: torch.nn.Module,
transpose: bool) -> None:
error_msg = "Can't apply unititalized solution!"
assert solution.bias is not None, error_msg
assert solution.Wa is not None, error_msg
assert solution.Wb is not None, error_msg
with torch.no_grad():
# first op always has no bias since it adds no expressivity
if module0.bias is not None:
assert isinstance(module0.bias, torch.Tensor)
module0.bias = torch.nn.parameter.Parameter(
torch.zeros(solution.rank, dtype=module0.bias.dtype).to(device=module0.bias.device)) # type: ignore
assert isinstance(module1.bias, torch.Tensor)
module1.bias.copy_(solution.bias)
Wa = solution.Wa
Wb = solution.Wb
if transpose:
Wa = torch.transpose(Wa, 0, 1)
Wb = torch.transpose(Wb, 0, 1)
module0.weight = torch.nn.parameter.Parameter(Wa.to(device=module0.weight.device)) # type: ignore
module1.weight = torch.nn.parameter.Parameter(Wb.to(device=module1.weight.device)) # type: ignore
class _FactorizedModule(nn.Module, abc.ABC):
def __init__(self, in_size: int, out_size: int, latent_size: Union[int, float], kernel_size: _size_2_t = 1):
super().__init__()
self.in_size = in_size
self.out_size = out_size
self.latent_size = _clean_latent_size(latent_size, in_size, out_size)
self.kernel_size = kernel_size
def _check_child_modules_present(self):
assert hasattr(self, 'module0'), 'module0 must be set during child class __init__!'
assert hasattr(self, 'module1'), 'module1 must be set during child class __init__!'
assert isinstance(self.module0, torch.nn.Module)
assert isinstance(self.module1, torch.nn.Module)
def forward(self, input: torch.Tensor): # type: ignore reportIncompatibleMethodOverride
self._check_child_modules_present()
ret = self.module0(input) # type: ignore reportGeneralTypeIssues
if self.module1 is not None:
ret = self.module1(ret) # type: ignore reportGeneralTypeIssues
return ret
def reset_parameters(self):
self._check_child_modules_present()
cast(torch.nn.Module, self.module0).reset_parameters() # type: ignore reportGeneralTypeIssues
cast(torch.nn.Module, self.module1).reset_parameters() # type: ignore reportGeneralTypeIssues
def set_rank(self, input: torch.Tensor, rank: int) -> None:
"""Makes the module factorize using a ``rank``-dimensional latent representation.
``rank`` can be large enough that the factorization increases the
number of multiply-add operations, but not larger than the current
latent rank.
Args:
input (torch.Tensor): Tensor that can be passed to the model's `forward()` method.
rank (int): Dimensionality of the latent representation; this is the
size of the vector space when factorizing linear modules and
the number of channels for convolutional modules.
Raises:
ValueError:
If ``rank`` is larger than the current latent rank.
"""
if rank > self.latent_size:
raise ValueError(f'Requested rank {rank} exceeds current rank {self.latent_size}')
if rank == self.latent_size:
return
soln = self.solution_for_rank(input, rank)
self.apply_solution(soln)
def _clean_latent_size(self, latent_size: Union[int, float]):
return _clean_latent_size(latent_size, self.in_size, self.out_size)
def _max_rank_with_speedup(self):
if hasattr(self, 'module1') and self.module1 is not None:
# already factorized, so reducing rank at all helps
return self.latent_size - 1
else:
# not factorized yet; has to factorize enough to be worthwhile
return _max_rank_with_possible_speedup(self.in_size, self.out_size, kernel_size=self.kernel_size)
def should_factorize(self, proposed_rank: Union[int, float]) -> bool:
"""Whether factorizing with a given rank would reduce the number of multiply-add operations."""
proposed_rank = self._clean_latent_size(proposed_rank)
return proposed_rank <= self._max_rank_with_speedup()
@abc.abstractmethod
def _create_child_modules(self) -> Tuple[torch.nn.Module, torch.nn.Module]:
"""This is used to populate the self.module0 and self.module1 attributes; it's not part of __init__ because the
logic to initialize them is subclass-specific and might depend on the shared logic in __init__"""
...
@abc.abstractmethod
def solution_for_rank(self, input: torch.Tensor, rank: int) -> LowRankSolution:
"""Returns a solution that :meth:`.apply_solution` can use to update the module's level of factorization.
This is separate from :meth:`set_rank` so that one can generate and assess
many possible solutions for a given module before choosing one.
Args:
input (torch.Tensor): An input to the module used to optimize the solution's
weights. The optimization seeks to preserve the module's
input-output mapping as much as possible, subject to the
specified rank constraint.
rank (int): The number of dimensions in the latent space into which
the input is mapped.
Returns:
LowRankSolution: An object encapsulating the new parameters to be used
and their associated mean squared error on the input.
"""
...
@abc.abstractmethod
def apply_solution(self, solution: LowRankSolution) -> None:
"""Updates module's child modules to reflect the factorization solution.
This *always* applies the solution and doesn't check whether
using the solution is worthwhile.
Args:
solution (LowRankSolution): An object encapsulating the new
parameters to be used and their associated mean squared error on
the input for which they were optimized. Can be obtained using
:meth:`.solution_for_rank`.
"""
...
class FactorizedConv2d(_FactorizedModule):
"""Factorized replacement for :class:`torch.nn.Conv2d`.
Splits the conv2d operation into two smaller conv2d operations, which
are executed sequentially with no nonlinearity in between. This first
conv2d can be thought of as projecting the feature maps into a
lower-dimensional space, similar to PCA. The second produces outputs
of the same shape as the unfactorized version based on the embeddings
within this lower-dimensional space. Note that "dimensionality" here
refers to the number of channels, not the spatial extent or tensor rank.
The first conv2d has a kernel size of ``kernel_size``, while the second
one always has a kernel size of :math:`1 \\times 1`. For large kernel sizes, the
lower-dimensional space can be nearly as large as
``min(in_channels, out_channels)`` and still yield a reduction in
multiply-add operations. For kernel sizes of :math:`1 \\times 1`,
the break-even point is a 2x reduction in channel count, similar to
:class:`.FactorizedLinear`.
See :func:`.factorize_conv2d` for more details.
Args:
in_channels (int): number of channels in the input image.
out_channels (int): number of channels produced by the convolution.
kernel_size (int | tuple): size of the convolving kernel.
latent_channels (int | float, optional): number of channels in the
latent representation produced by the first small convolution.
Can be specified as either an integer > 1 or as a float within
``[0, 1)``. In the latter case, the value is interpreted as a fraction
of ``min(in_channels, out_channels)`` and is converted to the
equivalent integer value, with a minimum of 1.
Default: ``.25``.
**kwargs: other arguments to :class:`torch.nn.Conv2d` are supported
and will be used with the first of the two smaller ``Conv2d``
operations. However, ``groups > 1`` and ``dilation > 1`` are
not currently supported.
Raises:
ValueError:
If ``latent_channels`` is not small enough for factorization
to reduce the number of multiply-add operations. In this regime,
factorization is both slower and less expressive than a
non-factorized operation. Setting
``latent_channels`` to :meth:`.max_allowed_latent_features`
or a smaller value is sufficient to avoid this.
"""
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
latent_channels: Union[int, float] = .25,
**kwargs):
super().__init__(in_size=in_channels,
out_size=out_channels,
latent_size=latent_channels,
kernel_size=kernel_size)
if kwargs.get('groups', 1) > 1:
raise NotImplementedError('Factorizing grouped convolutions is not supported.')
self.kwargs = kwargs
# conv2d factorization code requires most Conv2d arguments, but
# not boolean 'bias'
self.convolution_kwargs = {k: v for k, v in kwargs.items() if k != 'bias'}
self.module0, self.module1 = self._create_child_modules()
def _create_child_modules(self) -> Tuple[torch.nn.Module, torch.nn.Module]:
if not self.should_factorize(self.latent_channels):
raise ValueError(
f'latent_channels {self.latent_size} is not small enough to merit factorization! Must be <= {self._max_rank_with_speedup()}'
)
# this one produces identical output as a regular Conv2d would,
# except with fewer output channels
conv0 = nn.Conv2d(self.in_channels,
self.latent_channels,
self.kernel_size,
bias=False,
**self.convolution_kwargs)
# this one increases the number of output channels
conv1 = nn.Conv2d(self.latent_channels, self.out_channels, kernel_size=1, bias=True)
return conv0, conv1
# wrap shared fields in read-only properties matching the torch conv module API
@property
def in_channels(self) -> int:
"""See :class:`torch.nn.Conv2d`."""
return self.in_size
@property
def out_channels(self) -> int:
"""See :class:`torch.nn.Conv2d`."""
return self.out_size
@property
def latent_channels(self) -> int:
"""The number of of output channels for the first convolution,
which is also the number of input channels for the second convolution."""
return self.latent_size
def solution_for_rank(self, input: torch.Tensor, rank: int) -> LowRankSolution:
weight0 = self.module0.weight
bias0 = self.module0.bias
weight1, bias1 = self.module1.weight, self.module1.bias
assert (bias0 is None) or isinstance(bias0, torch.Tensor)
assert isinstance(bias1, torch.Tensor)
assert isinstance(weight0, torch.Tensor)
assert isinstance(weight1, torch.Tensor)
return factorize_conv2d(input, weight0, weight1, rank=rank, biasA=bias0, biasB=bias1, **self.convolution_kwargs)
def apply_solution(self, solution: LowRankSolution):
self.latent_size = solution.rank
self.module0.out_channels = solution.rank
self.module1.in_channels = solution.rank
_apply_solution_to_module_parameters(solution, self.module0, self.module1, transpose=False)
@staticmethod
def max_allowed_latent_features(in_features: int, out_features: int, kernel_size: _size_2_t) -> int:
"""Returns the largest latent channel count that reduces the number of multiply-adds.
Args:
in_features (int): number of input channels to the convolution.
out_features (int): number of output channels produced by the convolution.
kernel_size (int | tuple): size of the convolving kernel.
Returns:
int: The largest allowable number of latent channels.
"""
return _max_rank_with_possible_speedup(in_features, out_features, kernel_size=kernel_size)
@staticmethod
def from_conv2d(module: torch.nn.Conv2d, module_ix: int = -1, **kwargs) -> FactorizedConv2d:
conv = FactorizedConv2d(
in_channels=module.in_channels,
out_channels=module.out_channels,
kernel_size=cast(_size_2_t, module.kernel_size),
stride=module.stride,
padding=module.padding,
dilation=module.dilation,
groups=module.groups,
bias=((module.bias is not None) and (module.bias is not False)),
**kwargs # custom params
)
conv.reset_parameters()
return conv
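# Illustrative sketch (not part of the original module): converting an existing
# convolution with ``from_conv2d`` and then shrinking the latent space with
# ``set_rank``. The channel counts, rank, and input sizes below are arbitrary.
def _example_factorized_conv2d() -> None:
    conv = torch.nn.Conv2d(64, 64, kernel_size=3, padding=1)
    factorized = FactorizedConv2d.from_conv2d(conv, latent_channels=32)
    x = torch.randn(8, 64, 16, 16)
    # The factorized module preserves the output shape of the original conv.
    assert factorized(x).shape == conv(x).shape
    # The rank can only be decreased; the new solution is fit to ``x``.
    factorized.set_rank(x, 16)
    assert factorized.latent_channels == 16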
class FactorizedLinear(_FactorizedModule):
"""Factorized replacement for :class:`torch.nn.Linear`.
Splits the linear operation into two smaller linear operations which
are executed sequentially with no nonlinearity in between. This first
linear operation can be thought of as projecting the inputs into a
lower-dimensional space, similar to PCA. The second produces outputs
of the same shape as the unfactorized version based on the embeddings
within this lower-dimensional space.
If the lower-dimensional space is less than half the size of the
smaller of the input and output dimensionality, this factorization
can reduce the number of multiply-adds necessary to compute the output.
However, because larger matrix products tend to utilize the hardware
better, it may take a reduction of more than 2x to get a speedup
in practice.
See :func:`.factorize_matrix` for more details.
Args:
in_features (int): Size of each input sample
out_features (int): size of each output sample
bias (bool, optional): If set to False, the layer will not learn an additive bias.
Default: ``True``.
latent_features (int | float, optional): Size of the latent space.
Can be specified as either an integer > 1 or as a float within
``[0, 0.5)``. In the latter case, the value is interpreted as a fraction
of ``min(in_features, out_features)``, and is converted to the
equivalent integer value, with a minimum of 1. Default: ``.25``.
Raises:
ValueError:
If ``latent_features`` is not small enough for factorization
to reduce the number of multiply-add operations. In this regime,
factorization is both slower and less expressive than a
non-factorized operation. Setting
``latent_features < min(in_features, out_features) / 2`` or
using :meth:`.max_allowed_latent_channels` is sufficient to avoid
this.
"""
def __init__(self,
in_features: int,
out_features: int,
bias: bool = True,
latent_features: Union[int, float] = .25):
super().__init__(in_size=in_features, out_size=out_features, latent_size=latent_features)
self.bias = bias
self.module0, self.module1 = self._create_child_modules()
def _create_child_modules(self) -> Tuple[torch.nn.Module, torch.nn.Module]:
if not self.should_factorize(self.latent_size):
raise ValueError(
f'latent_features {self.latent_size} is not small enough to merit factorization! Must be <= {self._max_rank_with_speedup()}'
)
module0 = nn.Linear(in_features=self.in_features, out_features=self.latent_size, bias=False)
module1 = nn.Linear(in_features=self.latent_size, out_features=self.out_features, bias=self.bias)
return module0, module1
# wrap shared fields in read-only properties matching the torch conv module API
@property
def in_features(self) -> int:
"""See :class:`torch.nn.Linear`."""
return self.in_size
@property
def out_features(self) -> int:
"""See :class:`torch.nn.Linear`."""
return self.out_size
@property
def latent_features(self) -> int:
"""The dimensionality of the space into which the input is
projected by the first matrix in the factorization."""
return self.latent_size
def solution_for_rank(self, input: torch.Tensor, rank: int) -> LowRankSolution:
assert isinstance(self.module0.weight, torch.Tensor)
assert isinstance(self.module1.weight, torch.Tensor)
assert isinstance(self.module1.bias, torch.Tensor)
weight0 = torch.transpose(self.module0.weight, 0, 1)
weight1 = torch.transpose(self.module1.weight, 0, 1)
bias1 = self.module1.bias
target = self(input)
return factorize_matrix(input, target, weight0, weight1, bias=bias1, rank=rank)
def apply_solution(self, solution: LowRankSolution) -> None:
self.latent_size = solution.rank
self.module0.out_features = solution.rank
self.module1.in_features = solution.rank
_apply_solution_to_module_parameters(solution, self.module0, self.module1, transpose=True)
@staticmethod
def max_allowed_latent_channels(in_features: int, out_features: int) -> int:
"""Returns the largest latent feature count that reduces the number of multiply-adds.
Args:
in_features (int): Size of each input sample.
out_features (int): Size of each output sample.
Returns:
int: The largest allowable number of latent features.
"""
return _max_rank_with_possible_speedup(in_features, out_features)
@staticmethod
def from_linear(module: torch.nn.Linear, module_ix: int = -1, **kwargs) -> FactorizedLinear:
ret = FactorizedLinear(in_features=module.in_features,
out_features=module.out_features,
bias=((module.bias is not None) and (module.bias is not False)),
**kwargs)
ret.reset_parameters()
return ret
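# Illustrative sketch (not part of the original module) of the FactorizedLinear
# API: converting an existing layer with ``from_linear`` and later reducing the
# rank. The sizes and rank below are arbitrary.
def _example_factorized_linear() -> None:
    linear = torch.nn.Linear(512, 512)
    factorized = FactorizedLinear.from_linear(linear, latent_features=128)
    x = torch.randn(1024, 512)
    # The factorized module preserves the output shape of the original layer.
    assert factorized(x).shape == linear(x).shape
    # The rank can only be decreased; the new solution is fit to ``x``.
    factorized.set_rank(x, 64)
    assert factorized.latent_features == 64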
| composer-dev | composer/algorithms/factorize/factorize_modules.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Decomposes linear operators into pairs of smaller linear operators.
See :class:`.Factorize` or the :doc:`Method Card </method_cards/factorize>` for details.
"""
from composer.algorithms.factorize.factorize import Factorize as Factorize
from composer.algorithms.factorize.factorize import apply_factorization as apply_factorization
from composer.algorithms.factorize.factorize_core import LowRankSolution as LowRankSolution
from composer.algorithms.factorize.factorize_core import factorize_conv2d as factorize_conv2d
from composer.algorithms.factorize.factorize_core import factorize_matrix as factorize_matrix
from composer.algorithms.factorize.factorize_modules import FactorizedConv2d as FactorizedConv2d
from composer.algorithms.factorize.factorize_modules import FactorizedLinear as FactorizedLinear
from composer.algorithms.factorize.factorize_modules import factorizing_could_speedup as factorizing_could_speedup
__all__ = [
'Factorize',
'apply_factorization',
'LowRankSolution',
'factorize_conv2d',
'factorize_matrix',
'FactorizedConv2d',
'FactorizedLinear',
'factorizing_could_speedup',
]
| composer-dev | composer/algorithms/factorize/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
import dataclasses
from typing import Optional, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
@dataclasses.dataclass
class LowRankSolution:
"""Bundles tensors used by a factorized linear operator.
The factorization always splits the operator into two smaller linear
operators. The first takes in input of the original shape and embeds it
in a lower-dimensional space. The second maps this lower-dimensional space
to the original output space.
Args:
Wa (torch.Tensor, optional): First linear operation in the
factorized approximation. For a
factorized linear operation, ``Wa`` is a matrix. For a factorized
convolution, ``Wa`` matches the shape of the convolution's
original weight parameter, except along the channel axis.
Wb (torch.Tensor, optional): Second linear operation in the
factorized approximation. Shape
is such that composing ``Wa`` with ``Wb`` yields an output of
the same size as the original operation.
bias (torch.Tensor, optional): Vector added to the output of
the second linear operation.
rank (int, optional): Output dimensionality (channels or features) of
the first linear operation, and input dimensionality of the second
linear operation. Default: ``-1``.
nmse (float, optional): Normalized mean squared error obtained during
the optimization procedure used to derive ``Wa``, ``Wb``, and
``bias``. This is equal to the raw mean squared error between
the factorized approximation's output and the original output,
divided by the variance of the original output. A value of 0
means no error was introduced, and a value of 1 corresponds to
capturing the output no better than chance. Default: ``0.0``.
"""
Wa: Optional[torch.Tensor] = None
Wb: Optional[torch.Tensor] = None
bias: Optional[torch.Tensor] = None
rank: int = -1
nmse: float = 0
def _lstsq(A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
if A.shape[0] != B.shape[0]:
raise RuntimeError(f'A has different number of rows than B! A.shape = {A.shape}, B.shape = {B.shape}')
if A.ndim != 2:
raise RuntimeError('A is not a rank 2 tensor: has shape', A.shape)
if B.ndim != 2:
raise RuntimeError('B is not a rank 2 tensor: has shape', B.shape)
# TODO more intelligence regarding choice of lstsq `driver` arg
return torch.linalg.lstsq(A, B).solution
def _nmse(Y: torch.Tensor, Y_hat: torch.Tensor) -> float:
diffs = Y.detach() - Y_hat.detach()
return float((diffs * diffs).mean() / Y.var())
def _svd_initialize(Wa: torch.Tensor, Wb: Optional[torch.Tensor], k: int) -> Tuple[torch.Tensor, torch.Tensor]:
if Wb is None:
W = Wa
else:
W = Wa @ Wb
# TODO rank k randomized svd if k small enough
U, s, Vt = torch.linalg.svd(W, full_matrices=False)
Wa = U[:, :k]
Wb = Vt[:k]
# scale matrices equally for numerical "load-balancing"
s_sqrt = torch.sqrt(s[:k]) # s is already a vector, not mat
Wa *= s_sqrt
Wb *= s_sqrt.reshape(-1, 1)
return Wa, Wb
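# Illustrative sketch (not part of the original module): with ``k`` equal to the
# full rank, the "load-balanced" factors produced above multiply back to the
# original matrix. The matrix size below is arbitrary.
def _example_svd_initialize() -> None:
    torch.manual_seed(0)
    W = torch.randn(16, 12)
    Wa, Wb = _svd_initialize(W, None, k=12)
    assert Wa.shape == (16, 12) and Wb.shape == (12, 12)
    # Each factor carries sqrt(singular value), so their product recovers W.
    assert torch.allclose(Wa @ Wb, W, atol=1e-5)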
def factorize_matrix(X: torch.Tensor,
Y: torch.Tensor,
Wa: torch.Tensor,
Wb: Optional[torch.Tensor] = None,
bias: Optional[torch.Tensor] = None,
rank: Union[int, float] = .25,
n_iters: int = 3) -> LowRankSolution:
"""Approximates a matrix by factorizing it into a product of two smaller matrices.
Given a matrix ``W`` of shape ``[D, M]``, a bias vector of length ``M``,
and a target rank ``rank < D``, returns a solution ``(Wa, Wb, new_bias)`` of
tensors of shapes ``[D, rank]``, ``[rank, M]``, and ``[M]``, respectively.
These tensors are chosen so as to minimize:
:math:`||` ``Y - (X @ Wa @ Wb + new_bias)`` :math:`||_F`,
where ``Y = X @ W + bias``, ``@`` denotes matrix multiplication,
``new_bias`` broadcasts along the row dimension,
and :math:`||\\cdot||_F` denotes the sum of squared elements.
In the case that rows of ``X`` correspond to samples
from some distribution, this amounts to minimizing the expected mean
squared error in the output.
The input matrix can either be a single matrix ``W`` or a pair of matrices
``(Wa, Wb)``. The latter case corresponds to using a matrix ``W = Wa @ Wb``
that has already been factorized and is supported in order to facilitate
progressively decreasing the rank of the matrix.
Args:
X (torch.Tensor): Input used to evaluate the quality of the approximation.
Shape is ``[N, D]``, where ``N`` is often the number of input samples and
``D`` is the dimensionality of each sample.
Y (torch.Tensor): Output of applying the original matrix to ``X``.
Must have shape ``[N, M]`` for some ``M``.
Wa (torch.Tensor): Either the matrix to be factorized, or the first of the two smaller
matrices in the already-factorized representation of this matrix.
Must be of shape ``[D, M]`` in the former case and shape ``[D, d]``
in the latter, for some ``d < D``.
Wb (torch.Tensor, optional): If present, ``Wa`` is interpreted
as the first of two smaller matrices, and ``Wb`` is taken to be the second.
Must be of shape ``[d, M]``.
bias (torch.Tensor, optional): A vector added to the output after
performing the matrix product with X.
rank (int | float, optional): the number of columns in the latent representation of X.
Default: ``.25``.
n_iters (int, optional): number of iterations used in the optimization process. Higher
numbers yield lower mean squared error, though there are usually
diminishing returns after a handful of iterations. Default: ``3``.
Returns:
LowRankSolution: A solution of rank ``rank`` that approximates the original convolution operation.
"""
X = X.detach()
Y = Y.detach()
Wa = Wa.detach()
Wb = Wb.detach() if Wb is not None else None
if rank < 1:
# fraction of input dimensionality (or current rank, if smaller)
rank = min(int(rank * X.shape[1]), Wa.shape[1])
k = int(rank)
solution = LowRankSolution()
original_bias = None
if bias is not None:
original_bias = bias.detach()
Y = Y - original_bias
solution.bias = original_bias
# if requested latent rank is greater than or equal to either
# input rank or output rank, factorization is counterproductive, so
# return a single matrix
if k >= X.shape[1] or k >= Y.shape[1]:
Wa = _lstsq(X, Y)
solution.Wa = Wa
solution.rank = -1
return solution
# if requested latent rank is greater than current latent rank,
# skip the factorization
if k >= Wa.shape[1]:
solution.Wa = Wa
solution.Wb = Wb
solution.rank = -1
return solution
Wa, Wb = _svd_initialize(Wa, Wb, k)
Ya = _lstsq(X, Y)
for _ in range(n_iters):
# update Wb
Xb = X @ Wa
Yb = Y
Wb = _lstsq(Xb, Yb)
# update Wa
# We need to solve (AXB = Y) <=> (AX = B.I @ Y) not (AX = BY).
# Since X and Y are constants, we can precompute pinv(A) @ Y.
# We then have:
# pinv(A) @ A @ X @ B = pinv(A) @ Y
# (A.T@A).I @ A.T @ A @ X @ B = pinv(A) @ Y
# X @ B = pinv(A) @ Y
# Y.T @ pinv(A).T = B.T @ X.T
# then we just solve for X.T:
# B.T @ X.T = Y.T @ pinv(A).T
# also, note that pinv(A) @ Y = lstsq(A, Y); this makes sense;
# means that targets for XB are the optimal coeffs mapping A to Y
# also, observe that AXB = Y is using X and Y as variable to solve
# for and targets, not the X and Y vars we have in this function
Xa = Wb
Wa_T = _lstsq(Xa.T, Ya.T)
Wa = Wa_T.T
solution.Wa = Wa
solution.Wb = Wb
solution.rank = k
Y_hat = (X @ Wa) @ Wb
bias = (Y - Y_hat).mean(dim=0)
if original_bias is not None:
bias += original_bias
solution.bias = bias
Y_hat += bias
solution.nmse = _nmse(Y, Y_hat)
return solution
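# Illustrative sketch (not part of the original module) showing factorize_matrix
# on a deliberately low-rank weight matrix; the shapes and rank are arbitrary.
# Because W truly has rank 32, a rank-32 solution should reproduce the outputs
# almost exactly (nmse close to 0).
def _example_factorize_matrix() -> None:
    torch.manual_seed(0)
    X = torch.randn(1024, 256)
    W = torch.randn(256, 32) @ torch.randn(32, 128)  # rank-32 matrix of shape [256, 128]
    bias = torch.randn(128)
    Y = X @ W + bias
    solution = factorize_matrix(X, Y, W, bias=bias, rank=32)
    assert solution.Wa is not None and solution.Wa.shape == (256, 32)
    assert solution.Wb is not None and solution.Wb.shape == (32, 128)
    assert solution.nmse < 1e-3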
def _activations_conv2d_to_mat(activations,
kernel_size,
padding=0,
padding_mode='zeros',
stride=1,
dilation=1,
groups=1):
if np.max(stride) > 1:
raise NotImplementedError(f'Stride != 1 not implemented; got {stride}')
if np.max(dilation) > 1:
raise NotImplementedError(f'Dilation != 1 not implemented; got {dilation}')
if groups != 1:
raise NotImplementedError(f'Groups != 1 not implemented; got {groups}')
if np.max(padding) > 0 and padding_mode.lower() != 'zeros':
if not isinstance(padding, list):
padding = [padding]
activations = F.pad(activations, pad=padding, mode=padding_mode)
padding = 0
# always default to stride=1 to maximize amount of data we get here
# TODO downsample in batch size dim or use stride > 1 if it looks like
# materializing full matrix will OOM
ret = F.unfold(activations, kernel_size=kernel_size, padding=padding)
ret = ret.transpose(1, 2) # batch_sz, n_positions, fan_in
return ret.reshape(-1, ret.shape[2]) # batch_sz * n_positions, fan_in
def _weights_conv2d_to_mat(weights: torch.Tensor):
return weights.reshape(weights.shape[0], -1).T # fan_in, out_channels
def _mat_to_weights_conv2d(mat: Optional[torch.Tensor], kernel_size) -> Optional[torch.Tensor]:
if mat is None:
return None
w = mat.T # fan_in, out_channels -> out_channels, fan_in
# XXX(nchw) This might silently do the wrong thing with nhwc layout
return w.reshape(w.shape[0], -1, *kernel_size)
def factorize_conv2d(X: torch.Tensor,
Wa: torch.Tensor,
Wb: Optional[torch.Tensor] = None,
rank: Union[int, float] = .25,
biasA: Optional[torch.Tensor] = None,
biasB: Optional[torch.Tensor] = None,
n_iters=3,
**conv2d_kwargs) -> LowRankSolution:
"""Approximates a :math:`K \\times K` convolution by factorizing it into a
:math:`K \\times K` convolution with fewer channels followed by a
:math:`1 \\times 1` convolution.
Given a convolutional weight tensor ``W`` for a 2d convolution of shape
``[out_channels, in_channels, k_h, k_w]`` and a vector ``bias`` of length
``out_channels``, returns a triple ``(Wa, Wb, new_bias)`` of
tensors with shapes ``[rank, in_channels, k_h, k_w]``,
``[out_channels, rank, 1, 1]``, and ``[out_channels]``, respectively.
``Wa``, ``Wb``, and ``new_bias`` are chosen so as to minimize:
:math:`||` ``(W * X + bias) - (Wb * (Wa * X) + new_bias)`` :math:`||_F`,
where :math:`*` denotes convolution, ``bias`` broadcasts along all
non-channel dimensions, and :math:`||\\cdot||_F` denotes the sum of
squared elements.
Similar to :func:`.factorize_matrix`, this function allows passing in an
already-factorized weight tensor in order to enable progressive
factorization. In this case, the single tensor ``W`` is replaced with
a similar ``(Wa, Wb)`` pair as the output, though not necessarily with
the same rank.
Args:
X (torch.Tensor): A tensor of shape ``[N, in_channels, H, W]``, for some
``N``, ``H``, and ``W``.
Wa (torch.Tensor): The first weight tensor to convolve with ``X``. If
``Wb`` is not provided, must be of shape
``[out_channels, in_channels, k_h, k_w]``. Otherwise, must be of
shape ``[original_rank, in_channels, k_h, k_w]`` for some
``original_rank < min(in_channels, out_channels)``.
Wb (torch.Tensor, optional): The second weight tensor to convolve.
with the input. If provided, must be of shape ``[out_channels, original_rank, 1, 1]``.
rank (int | float, optional): number of channels in the latent representation of ``X``.
Default: ``.25``.
biasA (torch.Tensor, optional): Optional vector of biases. If ``Wb`` is
``None``, must have length ``out_channels``. Otherwise must have length
``original_rank``.
biasB (torch.Tensor, optional): If provided, must have length ``out_channels``.
n_iters (int, optional): number of iterations used in the optimization process.
Higher numbers yield lower mean squared error, though there are usually
diminishing returns after a handful of iterations. Default: ``3``.
**conv2d_kwargs: Arguments such as ``padding``, ``stride``,
``dilation``, ``groups``, etc used in the original convolution. If
these are not provided, the factorized tensors might not preserve
the function computed by the original weight tensor as well.
Note that not all combinations of arguments are supported.
Returns:
LowRankSolution: A solution of rank ``rank`` that approximates the original convolution operation.
Raises:
RuntimeError:
If ``biasB`` is provided but ``Wb`` is not.
NotImplementedError:
If ``conv2d_kwargs['stride'] != 1``, ``conv2d_kwargs['dilation'] != 1``, or ``conv2d_kwargs['groups'] != 1``.
"""
X = X.detach()
Wa = Wa.detach()
kernel_size = Wa.shape[2:]
X_mat = _activations_conv2d_to_mat(X, kernel_size=kernel_size, **conv2d_kwargs)
Wa = _weights_conv2d_to_mat(Wa)
# NOTE: we compute outputs ourselves, instead of having an arg for them,
# since 1) we ignore input stride, and 2) any other intermediate ops
# or other discrepancies between user's actual settings and args they pass
# would either cause errors or silently mess up the regression
Y_mat = (X_mat @ Wa)
if biasA is not None:
biasA = biasA.detach()
Y_mat += biasA
if Wb is not None:
Wb = Wb.detach()
Wb = _weights_conv2d_to_mat(Wb)
Y_mat = Y_mat @ Wb
if biasB is not None:
biasB = biasB.detach()
Y_mat += biasB
elif biasB is not None:
# fail fast if user passes in inconsistent combination of args
raise RuntimeError('Got biasB, but Wb=None; cannot apply bias')
ret = factorize_matrix(X_mat, Y_mat, Wa, Wb, rank=rank, n_iters=n_iters)
# now we need to convert from two matrices to one kxk conv kernel and one
# 1x1 conv kernel. Here's why the 2nd has to be a 1x1: if it were instead
# k'xk' for some k' > 1, we would either be doing k'^2 as much work
for fixed embedding size at each pixel, or we'd need to have the
# intermediate embeddings be 1/k'^2 as large. In the latter case, we'd
# lose a lot of representational capacity. Also, the first op has to match
# the kernel size of the original conv or the shapes don't work out.
assert ret.Wa is not None
ret.Wa = _mat_to_weights_conv2d(ret.Wa, kernel_size=kernel_size)
ret.Wb = _mat_to_weights_conv2d(ret.Wb, kernel_size=(1, 1))
return ret
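# Illustrative sketch (not part of the original module) showing factorize_conv2d
# on a small convolution; sizes and rank are arbitrary. The returned Wa/Wb can be
# used as the weights of a k x k convolution followed by a 1 x 1 convolution, and
# ``solution.nmse`` reports how well the pair approximates the original op.
def _example_factorize_conv2d() -> None:
    torch.manual_seed(0)
    X = torch.randn(8, 32, 16, 16)
    W = torch.randn(64, 32, 3, 3)  # out_channels, in_channels, k_h, k_w
    bias = torch.randn(64)
    solution = factorize_conv2d(X, W, rank=16, biasA=bias, padding=1)
    assert solution.Wa is not None and solution.Wa.shape == (16, 32, 3, 3)
    assert solution.Wb is not None and solution.Wb.shape == (64, 16, 1, 1)
    Y_full = F.conv2d(X, W, bias=bias, padding=1)
    Y_approx = F.conv2d(F.conv2d(X, solution.Wa, padding=1), solution.Wb, bias=solution.bias)
    assert Y_approx.shape == Y_full.shape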
| composer-dev | composer/algorithms/factorize/factorize_core.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import logging
from typing import Optional, Sequence, Type, Union, cast
import torch
from torch.optim import Optimizer
from composer.algorithms.factorize.factorize_modules import (FactorizedConv2d, FactorizedLinear,
factorizing_could_speedup)
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.utils import module_surgery
log = logging.getLogger(__name__)
LOG_NUM_CONV2D_REPLACEMENTS_KEY = 'factorize/num_conv2d_replacements'
LOG_NUM_LINEAR_REPLACEMENTS_KEY = 'factorize/num_linear_replacements'
def apply_factorization(model: torch.nn.Module,
factorize_convs: bool = True,
factorize_linears: bool = True,
min_channels: int = 512,
latent_channels: Union[int, float] = 0.25,
min_features: int = 512,
latent_features: Union[int, float] = 0.25,
optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None) -> None:
"""Replaces :class:`torch.nn.Linear` and :class:`torch.nn.Conv2d` modules with
:class:`.FactorizedLinear` and :class:`.FactorizedConv2d` modules.
Factorized modules replace one full-rank operation with a sequence of two
lower-rank operations. When the rank is low enough, this can save
computation, at the cost of expressive power. See :class:`.Factorize` for details.
Args:
model (torch.nn.Module): the model to modify in-place.
factorize_convs (bool, optional): whether to try factorizing :class:`torch.nn.Conv2d` modules.
Default: ``True``.
factorize_linears (bool, optional): whether to try factorizing :class:`torch.nn.Linear` modules.
Default: ``True``.
min_channels (int, optional): if a :class:`torch.nn.Conv2d` module does not have at least
this many input and output channels, it will be ignored. Modules with
few channels are unlikely to be accelerated by factorization due
to poor hardware utilization. Default: ``512``.
latent_channels (int | float, optional): number of latent channels to use in factorized
convolutions. Can be specified as either an integer > 1 or as a
float within ``[0, 1)``. In the latter case, the value is
interpreted as a fraction of ``min(in_channels, out_channels)``
for each :class:`torch.nn.Conv2d` module, and is converted to
the equivalent integer value, with a minimum of 1. Default: ``0.25``.
min_features (int, optional): if a :class:`torch.nn.Linear` module does not have at least
this many input and output features, it will be ignored. Modules with
few features are unlikely to be accelerated by factorization due
to poor hardware utilization. Default: ``512``.
latent_features (int | float, optional): size of the latent space for factorized linear modules.
Can be specified as either an integer > 1 or as a float within ``[0, 0.5)``.
In the latter case, the value is interpreted as a fraction of
``min(in_features, out_features)`` for each :class:`torch.nn.Linear`
module, and is converted to the equivalent integer value, with a
minimum of 1. Default: ``0.25``.
optimizers (torch.optim.Optimizer | Sequence[torch.optim.Optimizer], optional):
Existing optimizers bound to ``model.parameters()``. All optimizers that have already been
constructed with ``model.parameters()`` must be specified here so
that they will optimize the correct parameters.
If the optimizer(s) are constructed *after* calling this function,
then it is safe to omit this parameter. These optimizers will see
the correct model parameters.
Example:
.. testcode::
import composer.functional as cf
from torchvision import models
model = models.resnet50()
cf.apply_factorization(model)
"""
if factorize_convs:
_factorize_conv2d_modules(model,
min_channels=min_channels,
latent_channels=latent_channels,
optimizers=optimizers)
if factorize_linears:
_factorize_linear_modules(model,
min_features=min_features,
latent_features=latent_features,
optimizers=optimizers)
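# Illustrative sketch (not part of the original module) of the functional surgery
# entry point on a plain torch model. The layer sizes are arbitrary, and
# ``min_features`` is lowered so the toy linears are wide enough to be replaced.
def _example_apply_factorization() -> None:
    model = torch.nn.Sequential(torch.nn.Linear(512, 512), torch.nn.ReLU(), torch.nn.Linear(512, 10))
    apply_factorization(model, factorize_convs=False, min_features=256, latent_features=0.125)
    num_replaced = module_surgery.count_module_instances(model, FactorizedLinear)
    # Only the first linear layer is wide enough on both sides to be factorized.
    assert num_replaced == 1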
class Factorize(Algorithm):
"""Decomposes linear operators into pairs of smaller linear operators.
Specifically, this algorithm replaces :class:`torch.nn.Conv2d` and
:class:`torch.nn.Linear` modules with :class:`.FactorizedConv2d` and
:class:`.FactorizedLinear` modules.
The replacement is only performed if doing so would reduce the number of
multiply-adds used to compute each module's output. For linear
layers and pointwise convolutions, this means that the factorization must
use an intermediate rank of less than half the input and output ranks, since
it must perform two operations instead of one.
For convolutions with kernel sizes greater than 1, the threshold for
factorization being worthwhile varies with kernel size. Larger kernels
allow larger intermediate ranks.
See :func:`.factorize_matrix` and :func:`.factorize_conv2d` for more
information about the factorization process. See :class:`.FactorizedConv2d`
and :class:`.FactorizedLinear` for more information about the factorized modules
used to replace the original modules.
Runs on :attr:`.Event.INIT`.
Args:
factorize_convs (bool): whether to try factorizing :class:`torch.nn.Conv2d` modules.
Default: ``True``.
factorize_linears (bool): whether to try factorizing :class:`torch.nn.Linear` modules.
Default: ``True``.
min_channels (int): if a :class:`torch.nn.Conv2d` module does not have at least
this many input and output channels, it will be ignored. Modules with
few channels are unlikely to be accelerated by factorization due
to poor hardware utilization. Default: ``256``.
latent_channels (int, float): number of latent channels to use in factorized
convolutions. Can be specified as either an integer > 1 or as
a float within ``[0, 1)``. In the latter case, the value is
interpreted as a fraction of ``min(in_channels, out_channels)``
for each :class:`torch.nn.Conv2d` module, and is converted to
the equivalent integer value, with a minimum of 1. Default: ``0.25``.
min_features (int): if a :class:`torch.nn.Linear` module does not have at least
this many input and output features, it will be ignored. Modules with
few features are unlikely to be accelerated by factorization due
to poor hardware utilization. Default: ``256``.
latent_features (int, float): size of the latent space for factorized linear modules.
Can be specified as either an integer > 1 or as a float within ``[0, 0.5)``.
In the latter case, the value is interpreted as a fraction of
``min(in_features, out_features)`` for each :class:`torch.nn.Linear`
module and is converted to the equivalent integer value, with a
minimum of 1. Default: ``128``.
"""
def __init__(self,
factorize_convs: bool = True,
factorize_linears: bool = True,
min_channels: int = 256,
latent_channels: Union[int, float] = 0.25,
min_features: int = 256,
latent_features: Union[int, float] = 128):
self.factorize_convs = factorize_convs
self.factorize_linears = factorize_linears
self.min_channels = min_channels
self.latent_channels = latent_channels
self.min_features = min_features
self.latent_features = latent_features
def __repr__(self) -> str:
return f'{self.__class__.__name__}(factorize_convs={self.factorize_convs},factorize_linears={self.factorize_linears},min_channels={self.min_channels},latent_channels={self.latent_channels},min_features={self.min_features},latent_features={self.latent_features})'
@staticmethod
def required_on_load() -> bool:
return True
def match(self, event: Event, state: State) -> bool:
return event == Event.INIT
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
assert state.model is not None, 'Model must be part of state!'
apply_factorization(model=state.model,
factorize_convs=self.factorize_convs,
factorize_linears=self.factorize_linears,
min_channels=self.min_channels,
latent_channels=self.latent_channels,
min_features=self.min_features,
latent_features=self.latent_features,
optimizers=state.optimizers)
if self.factorize_convs:
num_factorized = module_surgery.count_module_instances(state.model, FactorizedConv2d)
logger.log_hyperparameters({
LOG_NUM_CONV2D_REPLACEMENTS_KEY: num_factorized,
})
if self.factorize_linears:
num_factorized = module_surgery.count_module_instances(state.model, FactorizedLinear)
logger.log_hyperparameters({
LOG_NUM_LINEAR_REPLACEMENTS_KEY: num_factorized,
})
def _python_log_surgery_result(model: torch.nn.Module, new_class: Type[torch.nn.Module]):
num_replaced_modules = module_surgery.count_module_instances(model, new_class)
log.info(f'Applied factorization to model {model.__class__.__name__}. ' +
f'Model now has {num_replaced_modules} {new_class.__name__} modules')
def _factorize_conv2d_modules(model: torch.nn.Module,
min_channels: int = 512,
latent_channels: Union[int, float] = 0.25,
optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None):
"""Replaces :class:`torch.nn.Conv2d` modules in ``model`` with
:class:`.FactorizedConv2d` modules.
See :class:`.Factorize` for details.
"""
def _maybe_replace_conv2d(module: torch.nn.Module, module_index: int) -> Optional[torch.nn.Module]:
module = cast(torch.nn.Conv2d, module)
wide_enough = min(module.out_channels, module.in_channels) >= min_channels
if factorizing_could_speedup(module, latent_channels) and wide_enough:
return FactorizedConv2d.from_conv2d(module, module_index, latent_channels=latent_channels)
return None # not enough rank reduction to be worth it
ret = module_surgery.replace_module_classes(model,
optimizers=optimizers,
policies={torch.nn.Conv2d: _maybe_replace_conv2d})
_python_log_surgery_result(model, FactorizedConv2d)
return ret
def _factorize_linear_modules(model: torch.nn.Module,
min_features: int = 512,
latent_features: Union[int, float] = 0.25,
optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None):
"""Replaces :class:`torch.nn.Linear` modules in ``model`` with
:class:`.FactorizedLinear` modules.
See :class:`.Factorize` for details.
"""
def _maybe_replace_linear(module: torch.nn.Module, module_index: int) -> Optional[torch.nn.Module]:
module = cast(torch.nn.Linear, module)
wide_enough = min(module.in_features, module.out_features) >= min_features
if factorizing_could_speedup(module, latent_features) and wide_enough:
return FactorizedLinear.from_linear(module, module_index, latent_features=latent_features)
return None # not enough rank reduction to be worth it
ret = module_surgery.replace_module_classes(model,
optimizers=optimizers,
policies={torch.nn.Linear: _maybe_replace_linear})
_python_log_surgery_result(model, FactorizedLinear)
return ret
| composer-dev | composer/algorithms/factorize/factorize.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Core MixUp classes and functions."""
from __future__ import annotations
import logging
from typing import Any, Callable, Optional, Tuple, Union
import numpy as np
import torch
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
from composer.loss.utils import ensure_targets_one_hot
log = logging.getLogger(__name__)
__all__ = ['MixUp', 'mixup_batch']
def mixup_batch(input: torch.Tensor,
target: torch.Tensor,
mixing: Optional[float] = None,
alpha: float = 0.2,
indices: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor, float]:
"""Create new samples using convex combinations of pairs of samples.
This is done by taking a convex combination of ``input`` with a randomly
permuted copy of ``input``. The permutation takes place along the sample
axis (``dim=0``).
The relative weight of the original ``input`` versus the permuted copy is
defined by the ``mixing`` parameter. This parameter should be chosen
from a ``Beta(alpha, alpha)`` distribution for some parameter ``alpha > 0``.
Note that the same ``mixing`` is used for the whole batch.
Args:
input (torch.Tensor): input tensor of shape ``(minibatch, ...)``, where
``...`` indicates zero or more dimensions.
target (torch.Tensor): target tensor of shape ``(minibatch, ...)``, where
``...`` indicates zero or more dimensions.
mixing (float, optional): coefficient used to interpolate
between the two examples. If provided, must be in :math:`[0, 1]`.
If ``None``, value is drawn from a ``Beta(alpha, alpha)``
distribution. Default: ``None``.
alpha (float, optional): parameter for the Beta distribution over
``mixing``. Ignored if ``mixing`` is provided. Default: ``0.2``.
indices (torch.Tensor, optional): Permutation of the samples to use.
Default: ``None``.
Returns:
input_mixed (torch.Tensor): batch of inputs after mixup has been applied
target_perm (torch.Tensor): The labels of the mixed-in examples
mixing (torch.Tensor): the amount of mixing used
Example:
.. testcode::
import torch
from composer.functional import mixup_batch
N, C, H, W = 2, 3, 4, 5
num_classes = 10
X = torch.randn(N, C, H, W)
y = torch.randint(num_classes, size=(N,))
X_mixed, y_perm, mixing = mixup_batch(
X,
y,
alpha=0.2,
)
"""
if mixing is None:
mixing = _gen_mixing_coef(alpha)
# Create permuted versions of x and y in preparation for interpolation
# Use given indices if there are any.
if indices is None:
permuted_idx = _gen_indices(input.shape[0])
else:
permuted_idx = indices
x_permuted = input[permuted_idx]
permuted_target = target[permuted_idx]
# Interpolate between the inputs
x_mixup = (1 - mixing) * input + mixing * x_permuted
return x_mixup, permuted_target, mixing
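# Illustrative sketch (not part of the original module): with a fixed mixing
# coefficient and permutation, the output is an exact convex combination of the
# batch and its permuted copy. The toy tensors below are arbitrary.
def _example_mixup_batch() -> None:
    x = torch.arange(6, dtype=torch.float32).reshape(3, 2)
    y = torch.tensor([0, 1, 2])
    indices = torch.tensor([2, 0, 1])
    x_mixed, y_perm, mixing = mixup_batch(x, y, mixing=0.25, indices=indices)
    assert torch.equal(y_perm, y[indices])
    assert torch.allclose(x_mixed, 0.75 * x + 0.25 * x[indices])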
class MixUp(Algorithm):
"""`MixUp <https://arxiv.org/abs/1710.09412>`_ trains the network on convex batch combinations.
The algorithm uses individual examples and targets to make a convex combination of a given batch X with a
randomly permuted copy of X. The mixing coefficient is drawn from a
``Beta(alpha, alpha)`` distribution.
Training in this fashion sometimes reduces generalization error.
Args:
alpha (float, optional): the pseudocount for the Beta distribution used to sample
mixing parameters. As ``alpha`` grows, the two samples
in each pair tend to be weighted more equally. As ``alpha``
approaches 0 from above, the combination approaches only using
one element of the pair. Default: ``0.2``.
interpolate_loss (bool, optional): Interpolates the loss rather than the labels.
A useful trick when using a cross entropy loss. Will produce incorrect behavior
if the loss is not a linear function of the targets. Default: ``False``
input_key (str | int | Tuple[Callable, Callable] | Any, optional): A key that indexes to the input
from the batch. Can also be a pair of get and set functions, where the getter
is assumed to be first in the pair. The default is 0, which corresponds to any sequence, where the first element
is the input. Default: ``0``.
target_key (str | int | Tuple[Callable, Callable] | Any, optional): A key that indexes to the target
from the batch. Can also be a pair of get and set functions, where the getter
is assumed to be first in the pair. The default is 1, which corresponds to any sequence, where the second element
is the target. Default: ``1``.
Example:
.. testcode::
from composer.algorithms import MixUp
algorithm = MixUp(alpha=0.2)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[algorithm],
optimizers=[optimizer]
)
"""
def __init__(
self,
alpha: float = 0.2,
interpolate_loss: bool = False,
input_key: Union[str, int, Tuple[Callable, Callable], Any] = 0,
target_key: Union[str, int, Tuple[Callable, Callable], Any] = 1,
):
self.alpha = alpha
self.interpolate_loss = interpolate_loss
self.mixing = 0.0
self.indices = torch.Tensor()
self.permuted_target = torch.Tensor()
self.input_key, self.target_key = input_key, target_key
def match(self, event: Event, state: State) -> bool:
if self.interpolate_loss:
return event in [Event.BEFORE_FORWARD, Event.BEFORE_BACKWARD]
else:
return event in [Event.BEFORE_FORWARD, Event.BEFORE_LOSS]
def apply(self, event: Event, state: State, logger: Logger) -> None:
input, target = state.batch_get_item(key=self.input_key), state.batch_get_item(key=self.target_key)
if event == Event.BEFORE_FORWARD:
if not isinstance(input, torch.Tensor):
raise NotImplementedError('Multiple tensors for inputs not supported yet.')
if not isinstance(target, torch.Tensor):
raise NotImplementedError('Multiple tensors for targets not supported yet.')
self.mixing = _gen_mixing_coef(self.alpha)
self.indices = _gen_indices(input.shape[0])
new_input, self.permuted_target, _ = mixup_batch(
input,
target,
mixing=self.mixing,
indices=self.indices,
)
state.batch_set_item(self.input_key, new_input)
if not self.interpolate_loss and event == Event.BEFORE_LOSS:
# Interpolate the targets
if not isinstance(state.outputs, torch.Tensor):
raise NotImplementedError('Multiple output tensors not supported yet')
if not isinstance(target, torch.Tensor):
raise NotImplementedError('Multiple target tensors not supported yet')
# Make sure that the targets are dense/one-hot
target = ensure_targets_one_hot(state.outputs, target)
permuted_target = ensure_targets_one_hot(state.outputs, self.permuted_target)
# Interpolate to get the new target
mixed_up_target = (1 - self.mixing) * target + self.mixing * permuted_target
# Create the new batch
state.batch_set_item(self.target_key, mixed_up_target)
if self.interpolate_loss and event == Event.BEFORE_BACKWARD:
# Grab the loss function
if hasattr(state.model, 'loss'):
loss_fn = state.model.loss
elif hasattr(state.model, 'module') and hasattr(state.model.module, 'loss'):
if isinstance(state.model.module, torch.nn.Module):
loss_fn = state.model.module.loss
else:
raise TypeError('state.model.module must be a torch module')
else:
raise AttributeError('Loss must be accessible via model.loss or model.module.loss')
# Verify that the loss is callable
if not callable(loss_fn):
raise TypeError('Loss must be callable')
# Interpolate the loss
new_loss = loss_fn(state.outputs, (input, self.permuted_target))
if not isinstance(state.loss, torch.Tensor):
raise NotImplementedError('Multiple losses not supported yet')
if not isinstance(new_loss, torch.Tensor):
raise NotImplementedError('Multiple losses not supported yet')
state.loss = (1 - self.mixing) * state.loss + self.mixing * new_loss
def _gen_mixing_coef(alpha: float) -> float:
"""Samples ``max(z, 1-z), z ~ Beta(alpha, alpha)``."""
# First check if alpha is positive.
assert alpha >= 0
# Draw the mixing parameter from a beta distribution.
# Check here is needed because beta distribution requires alpha > 0
# but alpha = 0 is fine for mixup.
if alpha == 0:
mixing_lambda = 0
else:
mixing_lambda = np.random.beta(alpha, alpha)
# for symmetric beta distribution, can always use 0 <= lambda <= .5;
# this way the "main" label is always the original one, which keeps
# the training accuracy meaningful
return min(mixing_lambda, 1. - mixing_lambda)
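# Illustrative sketch (not part of the original module) of the sampling behaviour
# above: the returned coefficient is always in [0, 0.5], so the un-permuted
# example always keeps the majority of the weight.
def _example_mixing_coef() -> None:
    coefs = [_gen_mixing_coef(0.2) for _ in range(100)]
    assert all(0.0 <= c <= 0.5 for c in coefs)
    assert _gen_mixing_coef(0.0) == 0  # alpha == 0 disables mixing entirely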
def _gen_indices(num_samples: int) -> torch.Tensor:
"""Generates a random permutation of the batch indices."""
return torch.randperm(num_samples)
| composer-dev | composer/algorithms/mixup/mixup.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Create new samples using convex combinations of pairs of samples.
This is done by taking a convex combination of x with a randomly permuted copy of x.
See the :doc:`Method Card </method_cards/mixup>` for more details.
"""
from composer.algorithms.mixup.mixup import MixUp as MixUp
from composer.algorithms.mixup.mixup import mixup_batch as mixup_batch
__all__ = ['MixUp', 'mixup_batch']
| composer-dev | composer/algorithms/mixup/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Changes the memory format of the model to ``torch.channels_last``.
This usually improves GPU utilization. See the :doc:`Method Card </method_cards/channels_last>` for more details.
"""
from composer.algorithms.channels_last.channels_last import ChannelsLast as ChannelsLast
from composer.algorithms.channels_last.channels_last import apply_channels_last as apply_channels_last
__all__ = ['ChannelsLast', 'apply_channels_last']
| composer-dev | composer/algorithms/channels_last/__init__.py |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""ChannelsLast algorithm."""
from __future__ import annotations
import logging
from typing import Optional
import torch
from composer.core import Algorithm, Event, State
from composer.loggers import Logger
log = logging.getLogger(__name__)
__all__ = ['ChannelsLast', 'apply_channels_last']
def apply_channels_last(model: torch.nn.Module) -> None:
"""Changes the memory format of the model to `torch.channels_last <https://\\
pytorch.org/tutorials/intermediate/memory_format_tutorial.html>`_.
This usually yields improved GPU utilization.
Args:
model (torch.nn.Module): The model or module to modify.
"""
model.to(memory_format=torch.channels_last) # type: ignore
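# Illustrative sketch (not part of the original module) showing the memory-format
# change on a small convolutional model; the architecture is arbitrary.
def _example_apply_channels_last() -> None:
    model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, kernel_size=3), torch.nn.ReLU())
    apply_channels_last(model)
    # 4D parameters such as the conv weight now use channels_last strides.
    weight = model[0].weight
    assert weight.is_contiguous(memory_format=torch.channels_last)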
class ChannelsLast(Algorithm):
"""Changes the memory format of the model to `torch.channels_last <https://\\
pytorch.org/tutorials/intermediate/memory_format_tutorial.html>`_. This usually improves GPU utilization.
Runs on :attr:`.Event.INIT`, so it can set the memory format before the model is DDP wrapped.
Has no hyperparameters.
Example:
.. testcode::
from composer.algorithms import ChannelsLast
algorithm = ChannelsLast()
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[algorithm],
optimizers=[optimizer]
)
"""
def __init__(self):
# ChannelsLast takes no arguments
pass
def match(self, event: Event, state: State) -> bool:
del state # unused
return event == Event.INIT
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
del event, logger # unused
# TODO: Double check model is moved to cuda with device type
apply_channels_last(state.model)
log.info(f'Model {state.model.__class__.__name__} changed to channels_last format.')
| composer-dev | composer/algorithms/channels_last/channels_last.py |